repo_name (string, lengths 6–130)
hexsha (list)
file_path (list)
code (list)
apis (list)
jinxu06/pixel-cnn
[ "9cad98f3f801bd772815dbb403fb6649ff704dfa" ]
[ "data/svhn_data.py" ]
[ "\"\"\"\nUtilities for downloading and unpacking the CIFAR-10 dataset, originally published\nby Krizhevsky et al. and hosted here: https://www.cs.toronto.edu/~kriz/cifar.html\n\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nfrom PIL import Image\n\ndef read_imgs(dir):\n loadmat(dir)\n dirpath, dirnames, filenames = next(os.walk(dir))\n filenames = sorted(filenames)\n imgs = np.array([np.array(Image.open(os.path.join(dir, filename))) for filename in filenames]).astype(np.uint8)\n return imgs\n\ndef load(data_dir, subset='train'):\n if subset in ['train', 'valid', 'test']:\n data = np.load(os.path.join(data_dir, \"SVHN32-{0}.npz\".format(subset)))\n trainx, trainy = data['X'], data['y'][:, 0]\n return trainx, trainy\n\n else:\n raise NotImplementedError('subset should be either train, valid or test')\n\nclass DataLoader(object):\n \"\"\" an object that generates batches of CelebA data for training \"\"\"\n\n def __init__(self, data_dir, subset, batch_size, rng=None, shuffle=False, return_labels=False):\n \"\"\"\n - data_dir is location where to store files\n - subset is train|test\n - batch_size is int, of #examples to load at once\n - rng is np.random.RandomState object for reproducibility\n \"\"\"\n\n self.data_dir = data_dir\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.return_labels = return_labels\n\n # create temporary storage for the data, if not yet created\n if not os.path.exists(data_dir):\n print('creating folder', data_dir)\n os.makedirs(data_dir)\n\n # load CIFAR-10 training data to RAM\n self.data, self.labels = load(self.data_dir, subset=subset)\n # self.data = np.transpose(self.data, (0,2,3,1)) # (N,3,32,32) -> (N,32,32,3)\n\n self.p = 0 # pointer to where we are in iteration\n self.rng = np.random.RandomState(1) if rng is None else rng\n\n def get_observation_size(self):\n return self.data.shape[1:]\n\n def get_num_labels(self):\n return np.amax(self.labels) + 1\n\n def reset(self):\n self.p = 0\n\n def __iter__(self):\n return self\n\n def __next__(self, n=None):\n \"\"\" n is the number of examples to fetch \"\"\"\n if n is None: n = self.batch_size\n\n # on first iteration lazily permute all data\n if self.p == 0 and self.shuffle:\n inds = self.rng.permutation(self.data.shape[0])\n self.data = self.data[inds]\n self.labels = self.labels[inds]\n\n # on last iteration reset the counter and raise StopIteration\n if self.p + n > self.data.shape[0]:\n self.reset() # reset for next time we get called\n raise StopIteration\n\n # on intermediate iterations fetch the next batch\n x = self.data[self.p : self.p + n]\n y = self.labels[self.p : self.p + n]\n self.p += self.batch_size\n\n if self.return_labels:\n return x,y\n else:\n return x\n\n next = __next__ # Python 2 compatibility (https://stackoverflow.com/questions/29578469/how-to-make-an-object-both-a-python2-and-python3-iterator)\n" ]
[ [ "numpy.amax", "numpy.random.RandomState" ] ]
philip-shen/note_python
[ "db0ad84af25464a22ac52e348960107c81e74a56" ]
[ "Zip_UnZip/test/test_json.py" ]
[ "# 3/28/2020 Convert Nested JSON to Pandas DataFrame and Flatten List in a Column \n# https://gist.github.com/rafaan/4ddc91ae47ea46a46c0b\n########################################################\n\nimport json\nfrom pandas.io.json import json_normalize\nimport pandas as pd\n\nimport os,sys,time\n\nstrabspath=os.path.abspath(__file__)\nstrdirname=os.path.dirname(strabspath)\nstr_split=os.path.split(strdirname)\nprevdirname=str_split[0]\ndirnamelib=os.path.join(prevdirname,\"lib\")\ndirnamelog=os.path.join(prevdirname,\"logs\")\nsys.path.append(dirnamelib)\n\nfrom logger import logger\n\nwith open('config.json') as f:\n data = json.load(f)\n\ndf = pd.DataFrame(data) \n\n#normalized_df = pd.json_normalize(df[\"WiFi_ThroughputTest\"])\n#normalized_df.columns = normalized_df.columns.map(lambda x: x.split(\".\")[-1])\n\n'''column is a string of the column's name.\nfor each value of the column's element (which might be a list),\nduplicate the rest of columns at the corresponding row with the (each) value.\n'''\n\ndef flattenColumn(input, column):\n column_flat = pd.DataFrame([[i, c_flattened] for i, y in input[column].apply(list).iteritems() for c_flattened in y], columns=['I', column])\n column_flat = column_flat.set_index('I')\n return input.drop(column, 1).merge(column_flat, left_index=True, right_index=True)\n \n#new_df = flattenColumn(normalized_df, 'column_name')\n\nmsg = 'df: {}'\nlogger.info(msg.format( df))\n\n'''\ndf[\"WiFi_ThroughputTest\"]: \n0 {'description': 'Wireless_PerformanceTest', 'd...\n1 {'AC_88': {'model': 'ac88', 'description': 'AS...\n2 {'DWA_192': {'model': 'dwa192', 'description':...\n3 {'AC_8260': {'model': 'ac8260', 'description':...\n4 {'MACBOOK': {'model': 'macbook', 'description'...\n5 {'AX_200': {'model': 'ax200', 'description': '...\nName: WiFi_ThroughputTest, dtype: object\n'''\nmsg = 'df[\"WiFi_ThroughputTest\"]: {}'\nlogger.info(msg.format( df[\"WiFi_ThroughputTest\"]))\n\n'''\ndf[\"WiFi_ThroughputTest\"][0]: \n{'description': 'Wireless_PerformanceTest', \n'driver': 'd', \n'folder': 'DHCPNATThroughputTest', \n'folder_zip': \n'TestResultTemp', \n'folder_zip_backup': 'The latest Test Result'}\n'''\nmsg = 'df[\"WiFi_ThroughputTest\"][0]: {}'\nlogger.info(msg.format( df[\"WiFi_ThroughputTest\"][0]))\n\n'''\ndf[\"WiFi_ThroughputTest\"][0][\"description\"]: Wireless_PerformanceTest\n'''\nmsg = 'df[\"WiFi_ThroughputTest\"][0][\"description\"]: {}'\nlogger.info(msg.format( df[\"WiFi_ThroughputTest\"][0][\"description\"]))\n\n'''\ndf[\"WiFi_ThroughputTest\"][1][\"AC_88\"][\"wlan_ip_address\"]: 192.168.0.101\n'''\nmsg = 'df[\"WiFi_ThroughputTest\"][1][\"AC_88\"][\"wlan_ip_address\"]: {}'\nlogger.info(msg.format( df[\"WiFi_ThroughputTest\"][1][\"AC_88\"][\"wlan_ip_address\"]))\n\n\n" ]
[ [ "pandas.DataFrame" ] ]
simchanu29/simple_teleop
[ "aa70d7edb05c7eddc88b00cf3e49cc718cc51838" ]
[ "src/message_handler/Interpreter_position_speed.py" ]
[ "import rospy\nfrom std_msgs.msg import Float32\nfrom Interpreter import Interpreter\nimport numpy as np\n\n\nclass Interpreter_position_speed(Interpreter):\n def __init__(self, interpreter_info):\n super(Interpreter_position_speed, self).__init__(interpreter_info)\n self.cmd.val = 0.0 # position\n self.speed = 0.0\n self.pub = rospy.Publisher(self._config['topic'], Float32, queue_size=1)\n print('created publisher on', self._config['topic'])\n\n # Override\n def process_input(self, val, cmd_type):\n if cmd_type == self.SLIDER:\n self.speed = val\n\n if cmd_type == self.BUTTON:\n # BACK keyword\n if val == self.BACK and self.speed != 0.0:\n self.speed = max(min(-self.speed / abs(self.speed), 1.0), -1.0)\n # STOP keyword\n elif val == self.STOP:\n self.speed = 0.0\n # Cas classique\n else:\n self.speed += val * self._config['key_precision']\n\n # Saturation\n self.speed = np.clip(self.speed, -1.0, 1.0)\n\n def send_msg(self):\n\n cmd_val_tmp = self.cmd.val + self.speed * self._config['gain_speed']\n\n if cmd_val_tmp != self.cmd.val:\n self.cmd.val = cmd_val_tmp\n msg = Float32()\n min_cmd = float(self._config['min'])\n max_cmd = float(self._config['max'])\n range_cmd = (max_cmd - min_cmd)/2.0 # car de -1 a 1 ca fait range = 2\n offset = range_cmd + min_cmd\n\n msg.data = np.clip(self.cmd.val * range_cmd + offset, min_cmd, max_cmd)\n\n self.pub.publish(msg)\n" ]
[ [ "numpy.clip" ] ]
Delegation-numerique-en-sante/mesconseilsprevention
[ "7e4598ac05d69b0aa8b7c8ad05329b3deb2a933f" ]
[ "api/test_preprocess.py" ]
[ "from pandas._testing import assert_frame_equal\nimport pandas\nimport pytest\n\nfrom preprocess import MAX_AGE\n\n\ndef test_format_list():\n from preprocess import format_list\n\n assert format_list(\n [\n \"La santé des adolescents (11 à 17 ans)\",\n \"La santé des jeunes adultes (18 à 35 ans)\",\n \"La santé des adultes (35 à 55 ans)\",\n ]\n ) == (\n \"[\"\n '\"La santé des adolescents (11 à 17 ans)\",'\n '\"La santé des jeunes adultes (18 à 35 ans)\",'\n '\"La santé des adultes (35 à 55 ans)\"'\n \"]\"\n )\n\n\ndef test_extract_age_facets():\n from preprocess import extract_age_facets\n\n assert (\n extract_age_facets(\n [\n \"La santé des adolescents (11 à 17 ans)\",\n \"La santé des jeunes adultes (18 à 35 ans)\",\n \"La santé des adultes (35 à 55 ans)\",\n ]\n )\n == (11, 55)\n )\n\n\ndef test_extract_categories():\n from preprocess import extract_categories\n\n assert extract_categories(\n {\n \"Nouvelle cat 1\": \"Bien manger et bouger\",\n \"Nouvelle cat 2 \": \"Grossesse et 1000 premiers jours\",\n \"Nouvelle cat 3\": \"Vaccination et dépistage\",\n \"Nouvelle cat 4\": \"Santé et environnement\",\n }\n ) == [\n \"Bien manger et bouger\",\n \"Grossesse et 1000 premiers jours\",\n \"Vaccination et dépistage\",\n \"Santé et environnement\",\n ]\n\n\[email protected](\n \"text,min_,max_\",\n [\n (\"\", pandas.NA, pandas.NA),\n (\"Sur la santé entre 11 et 12 ans\", 11, 12),\n (\"Informations pour préserver sa santé (11 - 12 ans / Femme)\", 11, 12),\n (\"Informations pour préserver sa santé (59 - 64 ans)\", 59, 64),\n (\"Informations dédiées à la santé des nourrissons (0 - 1 an)\", 0, 1),\n (\"Informations pour préserver sa santé à partir de 65 ans\", 65, MAX_AGE),\n (\"La santé des personnes âgées (85 ans et plus)\", 85, MAX_AGE),\n ],\n)\ndef test_extract_ages_range(text, min_, max_):\n from preprocess import extract_age_range\n\n assert extract_age_range(text) == (min_, max_)\n\n\[email protected](\n \"text,sex\",\n [\n (\"Sur la santé entre 11 et 12 ans\", {\"femmes\", \"hommes\"}),\n (\"Informations pour préserver sa santé (11 - 12 ans / Femme)\", {\"femmes\"}),\n (\"Informations pour préserver sa santé (13 - 14 ans / Femme)\", {\"femmes\"}),\n (\"Informations pour préserver sa santé (15 - 16 ans / Femme)\", {\"femmes\"}),\n (\"Informations pour préserver sa santé (17 - 18 ans / Femme)\", {\"femmes\"}),\n (\"Informations pour préserver sa santé (19 - 24 ans / Femme)\", {\"femmes\"}),\n (\"Informations pour préserver sa santé (25 - 35 ans / Femme)\", {\"femmes\"}),\n (\"Informations pour préserver sa santé (36 - 49 ans / Femme)\", {\"femmes\"}),\n (\"Informations pour préserver sa santé (50 - 54 ans / Femme)\", {\"femmes\"}),\n (\"Informations pour préserver sa santé (55 - 58 ans / Femme)\", {\"femmes\"}),\n (\"Informations pour préserver sa santé (59 - 64 ans / Femme)\", {\"femmes\"}),\n (\n \"Informations pour préserver sa santé à partir de 65 ans (Femmes)\",\n {\"femmes\"},\n ),\n (\"Sur la santé des adolescentes entre 11 et 12 ans\", {\"femmes\"}),\n (\"Sur la santé des femmes entre 17 et 18 ans\", {\"femmes\"}),\n (\"Sur la santé des femmes entre 19 et 24 ans\", {\"femmes\"}),\n (\"Sur la santé des femmes entre 25 et 35 ans\", {\"femmes\"}),\n (\"Sur la santé des femmes entre 36 et 49 ans\", {\"femmes\"}),\n (\"Sur la santé des femmes entre 50 et 54 ans\", {\"femmes\"}),\n (\"Sur la santé des femmes entre 55 et 58 ans\", {\"femmes\"}),\n (\"Sur la santé des femmes entre 59 et 64 ans\", {\"femmes\"}),\n (\"Sur la santé des jeunes femmes entre 13 et 14 ans\", 
{\"femmes\"}),\n (\"Sur la santé des jeunes femmes entre 15 et 16 ans\", {\"femmes\"}),\n (\n \"Informations pour préserver sa santé en cas de grossesse (11 - 12 ans)\",\n {\"femmes\"},\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (13 - 14 ans)\",\n {\"femmes\"},\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (15 - 16 ans)\",\n {\"femmes\"},\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (17 - 18 ans)\",\n {\"femmes\"},\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (19 - 24 ans)\",\n {\"femmes\"},\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (25 - 35 ans)\",\n {\"femmes\"},\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (36 - 49 ans)\",\n {\"femmes\"},\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (50 - 54 ans)\",\n {\"femmes\"},\n ),\n (\"Sur la santé entre 11 et 12 ans - en cas de grossesse\", {\"femmes\"}),\n (\"Sur la santé entre 13 et 14 ans - en cas de grossesse\", {\"femmes\"}),\n (\"Sur la santé entre 15 et 16 ans - en cas de grossesse\", {\"femmes\"}),\n (\"Sur la santé entre 17 et 18 ans - en cas de grossesse\", {\"femmes\"}),\n (\"Sur la santé entre 19 et 24 ans - en cas de grossesse\", {\"femmes\"}),\n (\"Sur la santé entre 25 et 35 ans - en cas de grossesse\", {\"femmes\"}),\n (\"Sur la santé entre 36 et 49 ans - en cas de grossesse\", {\"femmes\"}),\n (\"Sur la santé entre 50 et 54 ans - en cas de grossesse\", {\"femmes\"}),\n (\"Informations pour préserver sa santé (50 - 54 ans / Homme)\", {\"hommes\"}),\n (\"Informations pour préserver sa santé (59 - 64 ans / Homme)\", {\"hommes\"}),\n (\"Informations pour préserver sa santé (11 - 12 ans / Homme)\", {\"hommes\"}),\n (\"Informations pour préserver sa santé (13 - 14 ans / Homme)\", {\"hommes\"}),\n (\"Informations pour préserver sa santé (15 - 16 ans / Homme)\", {\"hommes\"}),\n (\"Informations pour préserver sa santé (17 - 18 ans / Homme)\", {\"hommes\"}),\n (\"Informations pour préserver sa santé (19 - 24 ans / Homme)\", {\"hommes\"}),\n (\"Informations pour préserver sa santé (25 - 35 ans / Homme)\", {\"hommes\"}),\n (\"Informations pour préserver sa santé (36 - 49 ans / Homme)\", {\"hommes\"}),\n (\"Informations pour préserver sa santé (55 - 58 ans / Homme)\", {\"hommes\"}),\n (\n \"Informations pour préserver sa santé à partir de 65 ans (Hommes)\",\n {\"hommes\"},\n ),\n (\"Sur la santé des adolescents entre 11 et 12 ans\", {\"hommes\"}),\n (\"Sur la santé des jeunes hommes entre 13 et 14 ans\", {\"hommes\"}),\n (\"Sur la santé des jeunes hommes entre 15 et 16 ans\", {\"hommes\"}),\n ],\n)\ndef test_extract_sex(text, sex):\n from preprocess import extract_sex\n\n assert extract_sex(text) == sex\n\n\[email protected](\n \"text,grossesse\",\n [\n (\"Sur la santé entre 11 et 12 ans\", False),\n (\"Informations pour préserver sa santé (11 - 12 ans / Femme)\", False),\n (\"Informations pour préserver sa santé (13 - 14 ans / Femme)\", False),\n (\"Informations pour préserver sa santé (15 - 16 ans / Femme)\", False),\n (\"Informations pour préserver sa santé (17 - 18 ans / Femme)\", False),\n (\"Informations pour préserver sa santé (19 - 24 ans / Femme)\", False),\n (\"Informations pour préserver sa santé (25 - 35 ans / Femme)\", False),\n (\"Informations pour préserver sa santé (36 - 49 ans / Femme)\", False),\n (\"Informations pour préserver sa santé (50 - 54 ans / Femme)\", False),\n (\"Informations pour préserver sa santé (55 - 58 ans / Femme)\", False),\n 
(\"Informations pour préserver sa santé (59 - 64 ans / Femme)\", False),\n (\n \"Informations pour préserver sa santé à partir de 65 ans (Femmes)\",\n False,\n ),\n (\"Sur la santé des adolescentes entre 11 et 12 ans\", False),\n (\"Sur la santé des femmes entre 17 et 18 ans\", False),\n (\"Sur la santé des femmes entre 19 et 24 ans\", False),\n (\"Sur la santé des femmes entre 25 et 35 ans\", False),\n (\"Sur la santé des femmes entre 36 et 49 ans\", False),\n (\"Sur la santé des femmes entre 50 et 54 ans\", False),\n (\"Sur la santé des femmes entre 55 et 58 ans\", False),\n (\"Sur la santé des femmes entre 59 et 64 ans\", False),\n (\"Sur la santé des jeunes femmes entre 13 et 14 ans\", False),\n (\"Sur la santé des jeunes femmes entre 15 et 16 ans\", False),\n (\n \"Informations pour préserver sa santé en cas de grossesse (11 - 12 ans)\",\n True,\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (13 - 14 ans)\",\n True,\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (15 - 16 ans)\",\n True,\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (17 - 18 ans)\",\n True,\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (19 - 24 ans)\",\n True,\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (25 - 35 ans)\",\n True,\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (36 - 49 ans)\",\n True,\n ),\n (\n \"Informations pour préserver sa santé en cas de grossesse (50 - 54 ans)\",\n True,\n ),\n (\"Sur la santé entre 11 et 12 ans - en cas de grossesse\", True),\n (\"Sur la santé entre 13 et 14 ans - en cas de grossesse\", True),\n (\"Sur la santé entre 15 et 16 ans - en cas de grossesse\", True),\n (\"Sur la santé entre 17 et 18 ans - en cas de grossesse\", True),\n (\"Sur la santé entre 19 et 24 ans - en cas de grossesse\", True),\n (\"Sur la santé entre 25 et 35 ans - en cas de grossesse\", True),\n (\"Sur la santé entre 36 et 49 ans - en cas de grossesse\", True),\n (\"Sur la santé entre 50 et 54 ans - en cas de grossesse\", True),\n (\"Informations pour préserver sa santé (50 - 54 ans / Homme)\", False),\n (\"Informations pour préserver sa santé (59 - 64 ans / Homme)\", False),\n (\"Informations pour préserver sa santé (11 - 12 ans / Homme)\", False),\n (\"Informations pour préserver sa santé (13 - 14 ans / Homme)\", False),\n (\"Informations pour préserver sa santé (15 - 16 ans / Homme)\", False),\n (\"Informations pour préserver sa santé (17 - 18 ans / Homme)\", False),\n (\"Informations pour préserver sa santé (19 - 24 ans / Homme)\", False),\n (\"Informations pour préserver sa santé (25 - 35 ans / Homme)\", False),\n (\"Informations pour préserver sa santé (36 - 49 ans / Homme)\", False),\n (\"Informations pour préserver sa santé (55 - 58 ans / Homme)\", False),\n (\n \"Informations pour préserver sa santé à partir de 65 ans (Hommes)\",\n False,\n ),\n (\"Sur la santé des adolescents entre 11 et 12 ans\", False),\n (\"Sur la santé des jeunes hommes entre 13 et 14 ans\", False),\n (\"Sur la santé des jeunes hommes entre 15 et 16 ans\", False),\n ],\n)\ndef test_related_to_grossesse(text, grossesse):\n from preprocess import related_to_grossesse\n\n assert related_to_grossesse(text) == grossesse\n\n\ndef test_transform_dataframe():\n from preprocess import transform_dataframe\n\n result = transform_dataframe(\n pandas.DataFrame(\n [\n {\n \"Séquence de vie\": (\n \"La santé des adolescents (11 à 17 ans), \"\n \"La santé des jeunes adultes (18 à 35 ans), \"\n \"La santé 
des adultes (35 à 55 ans)\"\n ),\n \"Canonical URL\": \"https://www.sante.fr/endometriose-1\",\n \"Nouvelle cat 1\": \"Maladies transmissibles\",\n \"Nouvelle cat 2 \": \"Santé sexuelle\",\n \"Nouvelle cat 3\": \"\",\n \"Nouvelle cat 4\": \"\",\n }\n ]\n )\n )\n expected = pandas.DataFrame(\n [\n {\n \"Séquence de vie\": (\n \"[\"\n '\"La santé des adolescents (11 à 17 ans)\",'\n '\"La santé des jeunes adultes (18 à 35 ans)\",'\n '\"La santé des adultes (35 à 55 ans)\"'\n \"]\"\n ),\n \"Canonical URL\": \"https://www.sante.fr/endometriose-1\",\n \"Age_min\": 11,\n \"Age_max\": 55,\n \"Sexe\": '[\"femmes\",\"hommes\"]',\n \"Grossesse\": False,\n \"Catégories\": '[\"Maladies transmissibles\",\"Santé sexuelle\"]',\n }\n ]\n )\n expected[\"Age_min\"] = expected[\"Age_min\"].astype(\"Int64\")\n expected[\"Age_max\"] = expected[\"Age_max\"].astype(\"Int64\")\n assert_frame_equal(result, expected)\n\n\ndef test_transform_dataframe_without_keys_is_noop():\n from preprocess import transform_dataframe\n\n result = transform_dataframe(pandas.DataFrame([{\"foo\": \"bar\"}]))\n expected = pandas.DataFrame([{\"foo\": \"bar\"}])\n assert_frame_equal(result, expected)\n" ]
[ [ "pandas.DataFrame", "pandas._testing.assert_frame_equal" ] ]
CR320/DeeCamp-10Group-Unsupervised-Defect-Segmentation
[ "f5f75093f21cee03b0b3c4c085fd9e22d75fc47e" ]
[ "model/trainer.py" ]
[ "\"\"\"training container\n\nauthor: Haixin wang\ne-mail: [email protected]\n\"\"\"\n\n\nimport torch\nimport torch.nn as nn\n\n\nclass Network(nn.Module):\n def __init__(self, model, loss):\n super(Network, self).__init__()\n self.model = model\n self.loss = loss\n\n def forward(self, images):\n preds = self.model(images)\n loss = self.loss(preds, images)\n\n return loss\n\n\nclass Trainer():\n def __init__(self, net, loss, loss_name, optimizer, ngpu):\n self.net = net\n self.loss = loss\n self.loss_name = loss_name\n self.loss_value = None\n self.optimizer = optimizer\n self.network = torch.nn.DataParallel(Network(self.net, self.loss), device_ids=list(range(ngpu)))\n self.network.train()\n self.network.cuda()\n torch.backends.cudnn.benchmark = True\n\n def save_params(self, save_path):\n print(\"saving model to {}\".format(save_path))\n with open(save_path, \"wb\") as f:\n params = self.net.state_dict()\n torch.save(params, f)\n\n def load_params(self, path):\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n w_dict = torch.load(path)\n for k, v in w_dict.items():\n head = k[:7]\n if head == 'module.':\n name = k[7:] # remove `module.`\n else:\n name = k\n new_state_dict[name] = v\n self.net.load_state_dict(new_state_dict)\n\n def set_lr(self, lr):\n # print(\"setting learning rate to: {}\".format(lr))\n for param_group in self.optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n def train(self, input_tensor):\n if self.loss_name == 'SSIM_loss' or self.loss_name == 'VAE_loss':\n self.optimizer.zero_grad()\n loss = self.network(input_tensor)\n loss = loss.mean()\n loss.backward()\n self.optimizer.step()\n self.loss_value = loss.item()\n\n elif self.loss_name == 'Multi_SSIM_loss':\n self.loss_value = list()\n total_loss = list()\n self.optimizer.zero_grad()\n loss_multi = self.network(input_tensor)\n for loss in loss_multi:\n loss = loss.mean()\n total_loss.append(loss)\n self.loss_value.append(loss.item())\n total_loss = torch.stack(total_loss, 0).sum()\n total_loss.backward()\n self.optimizer.step()\n\n else:\n raise Exception('Wrong loss name')\n\n def get_loss_message(self):\n if self.loss_name == 'SSIM_loss' or self.loss_name == 'VAE_loss':\n mes = 'ssim loss:{:.4f};'.format(self.loss_value)\n\n elif self.loss_name == 'Multi_SSIM_loss':\n mes = ''\n for k, loss in enumerate(self.loss_value):\n mes += 'size{:d} ssim loss:{:.4f}; '.format(k, loss)\n else:\n raise Exception('Wrong loss name')\n\n return mes" ]
[ [ "torch.save", "torch.stack", "torch.load" ] ]
sethmerkel/qiskit-aer
[ "b95e95fd4aa073a13b9599053dbdf29a06b35a2e" ]
[ "test/terra/backends/aer_simulator/instructions/test_save_state.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2018, 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nIntegration Tests for SaveState instruction\n\"\"\"\n\nimport numpy as np\nfrom ddt import ddt\nfrom test.terra.backends.aer_simulator.aer_simulator_test_case import (\n AerSimulatorTestCase, supported_methods)\nfrom qiskit import QuantumCircuit, transpile\nfrom qiskit.providers.aer.library import (\n SaveStatevector, SaveDensityMatrix, SaveStabilizer,\n SaveMatrixProductState, SaveUnitary, SaveSuperOp)\n\n\n@ddt\nclass TestSaveState(AerSimulatorTestCase):\n \"\"\"Test instructions for saving simulator state.\"\"\"\n\n @supported_methods(['automatic', 'statevector', 'density_matrix',\n 'stabilizer', 'matrix_product_state',\n 'unitary', 'superop'])\n def test_save_state(self, method, device):\n \"\"\"Test save_amplitudes instruction\"\"\"\n\n REFERENCE_SAVE = {\n 'automatic': SaveStabilizer,\n 'stabilizer': SaveStabilizer,\n 'statevector': SaveStatevector,\n 'density_matrix': SaveDensityMatrix,\n 'matrix_product_state': SaveMatrixProductState,\n 'unitary': SaveUnitary,\n 'superop': SaveSuperOp\n }\n\n backend = self.backend(method=method, device=device)\n if method == 'automatic':\n label = 'stabilizer'\n else:\n label = method\n\n # Stabilizer test circuit\n num_qubits = 2\n target_instr = REFERENCE_SAVE[method](num_qubits, label='target')\n circ = QuantumCircuit(num_qubits)\n circ.h(0)\n for i in range(1, num_qubits):\n circ.cx(i - 1, i)\n circ.save_state()\n circ.append(target_instr, range(num_qubits))\n\n # Run\n result = backend.run(transpile(\n circ, backend, optimization_level=0), shots=1).result()\n self.assertTrue(result.success)\n simdata = result.data(0)\n self.assertIn(label, simdata)\n self.assertIn('target', simdata)\n value = simdata[label]\n target = simdata['target']\n if method == 'matrix_product_state':\n for val, targ in zip(value[0], target[0]):\n self.assertTrue(np.allclose(val, targ))\n for val, targ in zip(value[1], target[1]):\n self.assertTrue(np.allclose(val, targ))\n else:\n self.assertTrue(np.all(value == target))\n\n @supported_methods(['statevector', 'density_matrix'])\n def test_save_state_cache_blocking(self, method, device):\n \"\"\"Test save_amplitudes instruction\"\"\"\n\n REFERENCE_SAVE = {\n 'statevector': SaveStatevector,\n 'density_matrix': SaveDensityMatrix,\n }\n\n backend = self.backend(method=method, device=device,\n blocking_qubits=2, max_parallel_threads=1)\n\n # Stabilizer test circuit\n num_qubits = 4\n target_instr = REFERENCE_SAVE[method](num_qubits, label='target')\n circ = QuantumCircuit(num_qubits)\n circ.h(0)\n for i in range(1, num_qubits):\n circ.cx(i - 1, i)\n circ.save_state()\n circ.append(target_instr, range(num_qubits))\n\n # Run\n result = backend.run(transpile(\n circ, backend, optimization_level=0), shots=1).result()\n self.assertTrue(result.success)\n simdata = result.data(0)\n self.assertIn(method, simdata)\n self.assertIn('target', simdata)\n value = simdata[method]\n target = simdata['target']\n self.assertTrue(np.all(value == target))\n" ]
[ [ "numpy.all", "numpy.allclose" ] ]
delgadom/geoclaw
[ "8893cf487e2972bf67e455bbb58b236ec24150bb" ]
[ "examples/tsunami/chile2010/setplot.py" ]
[ "\n\"\"\" \nSet up the plot figures, axes, and items to be done for each frame.\n\nThis module is imported by the plotting routines and then the\nfunction setplot is called to set the plot parameters.\n \n\"\"\" \n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom clawpack.geoclaw import topotools\nfrom six.moves import range\n\ntry:\n TG32412 = np.loadtxt('32412_notide.txt')\nexcept:\n print(\"*** Could not load DART data file\")\n\n#--------------------------\ndef setplot(plotdata=None):\n#--------------------------\n \n \"\"\" \n Specify what is to be plotted at each frame.\n Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.\n Output: a modified version of plotdata.\n \n \"\"\" \n\n\n from clawpack.visclaw import colormaps, geoplot\n from numpy import linspace\n\n if plotdata is None:\n from clawpack.visclaw.data import ClawPlotData\n plotdata = ClawPlotData()\n\n\n plotdata.clearfigures() # clear any old figures,axes,items data\n plotdata.format = 'ascii' # 'ascii' or 'binary' to match setrun.py\n\n\n # To plot gauge locations on pcolor or contour plot, use this as\n # an afteraxis function:\n\n def addgauges(current_data):\n from clawpack.visclaw import gaugetools\n gaugetools.plot_gauge_locations(current_data.plotdata, \\\n gaugenos='all', format_string='ko', add_labels=True)\n \n\n #-----------------------------------------\n # Figure for surface\n #-----------------------------------------\n plotfigure = plotdata.new_plotfigure(name='Surface', figno=0)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes('pcolor')\n plotaxes.title = 'Surface'\n plotaxes.scaled = True\n\n def fixup(current_data):\n import pylab\n addgauges(current_data)\n t = current_data.t\n t = t / 3600. 
# hours\n pylab.title('Surface at %4.2f hours' % t, fontsize=20)\n pylab.xticks(fontsize=15)\n pylab.yticks(fontsize=15)\n plotaxes.afteraxes = fixup\n\n # Water\n plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n #plotitem.plot_var = geoplot.surface\n plotitem.plot_var = geoplot.surface_or_depth\n plotitem.pcolor_cmap = geoplot.tsunami_colormap\n plotitem.pcolor_cmin = -0.2\n plotitem.pcolor_cmax = 0.2\n plotitem.add_colorbar = True\n plotitem.amr_celledges_show = [0,0,0]\n plotitem.patchedges_show = 1\n\n # Land\n plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')\n plotitem.plot_var = geoplot.land\n plotitem.pcolor_cmap = geoplot.land_colors\n plotitem.pcolor_cmin = 0.0\n plotitem.pcolor_cmax = 100.0\n plotitem.add_colorbar = False\n plotitem.amr_celledges_show = [1,1,0]\n plotitem.patchedges_show = 1\n plotaxes.xlimits = [-120,-60]\n plotaxes.ylimits = [-60,0]\n\n # add contour lines of bathy if desired:\n plotitem = plotaxes.new_plotitem(plot_type='2d_contour')\n plotitem.show = False\n plotitem.plot_var = geoplot.topo\n plotitem.contour_levels = linspace(-3000,-3000,1)\n plotitem.amr_contour_colors = ['y'] # color on each level\n plotitem.kwargs = {'linestyles':'solid','linewidths':2}\n plotitem.amr_contour_show = [1,0,0] \n plotitem.celledges_show = 0\n plotitem.patchedges_show = 0\n\n\n #-----------------------------------------\n # Figures for gauges\n #-----------------------------------------\n plotfigure = plotdata.new_plotfigure(name='Surface at gauges', figno=300, \\\n type='each_gauge')\n plotfigure.clf_each_gauge = True\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = 'auto'\n plotaxes.title = 'Surface'\n\n # Plot surface as blue curve:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.plot_var = 3\n plotitem.plotstyle = 'b-'\n\n # Plot topo as green curve:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.show = False\n\n def gaugetopo(current_data):\n q = current_data.q\n h = q[0,:]\n eta = q[3,:]\n topo = eta - h\n return topo\n \n plotitem.plot_var = gaugetopo\n plotitem.plotstyle = 'g-'\n\n def add_zeroline(current_data):\n from pylab import plot, legend, xticks, floor, axis, xlabel\n t = current_data.t \n gaugeno = current_data.gaugeno\n\n if gaugeno == 32412:\n try:\n plot(TG32412[:,0], TG32412[:,1], 'r')\n legend(['GeoClaw','Obs'],loc='lower right')\n except: pass\n axis((0,t.max(),-0.3,0.3))\n\n plot(t, 0*t, 'k')\n n = int(floor(t.max()/3600.) + 2)\n xticks([3600*i for i in range(n)], ['%i' % i for i in range(n)])\n xlabel('time (hours)')\n\n plotaxes.afteraxes = add_zeroline\n\n\n\n #-----------------------------------------\n \n # Parameters used only when creating html and/or latex hardcopy\n # e.g., via pyclaw.plotters.frametools.printframes:\n\n plotdata.printfigs = True # print figures\n plotdata.print_format = 'png' # file format\n plotdata.print_framenos = 'all' # list of frames to print\n plotdata.print_gaugenos = 'all' # list of gauges to print\n plotdata.print_fignos = 'all' # list of figures to print\n plotdata.html = True # create html files of plots?\n plotdata.html_homelink = '../README.html' # pointer for top of index\n plotdata.latex = True # create latex file of plots?\n plotdata.latex_figsperline = 2 # layout of plots\n plotdata.latex_framesperline = 1 # layout of plots\n plotdata.latex_makepdf = False # also run pdflatex?\n plotdata.parallel = True # make multiple frame png's at once\n\n return plotdata\n\n" ]
[ [ "numpy.linspace", "numpy.loadtxt" ] ]
pmacosta/putil
[ "416cea52df8221981727e25d133e9b4e3f464798" ]
[ "docs/support/plot_example_7.py" ]
[ "# plot_example_7.py\n# Copyright (c) 2013-2016 Pablo Acosta-Serafini\n# See LICENSE for details\n# pylint: disable=C0111,C0410\n\nfrom __future__ import print_function\nimport numpy, putil.plot\n\ndef figure_iterator_example(no_print):\n source1 = putil.plot.BasicSource(\n indep_var=numpy.array([1, 2, 3, 4]),\n dep_var=numpy.array([1, -10, 10, 5])\n )\n source2 = putil.plot.BasicSource(\n indep_var=numpy.array([100, 200, 300, 400]),\n dep_var=numpy.array([50, 75, 100, 125])\n )\n series1 = putil.plot.Series(\n data_source=source1,\n label='Goals'\n )\n series2 = putil.plot.Series(\n data_source=source2,\n label='Saves',\n color='b',\n marker=None,\n interp='STRAIGHT',\n line_style='--'\n )\n panel1 = putil.plot.Panel(\n series=series1,\n primary_axis_label='Average',\n primary_axis_units='A',\n display_indep_axis=False\n )\n panel2 = putil.plot.Panel(\n series=series2,\n primary_axis_label='Standard deviation',\n primary_axis_units=r'$\\sqrt{{A}}$',\n display_indep_axis=True\n )\n figure = putil.plot.Figure(\n panels=[panel1, panel2],\n indep_var_label='Time',\n indep_var_units='sec',\n title='Sample Figure'\n )\n if not no_print:\n for num, panel in enumerate(figure):\n print('Panel {0}:'.format(num+1))\n print(panel)\n print('')\n else:\n return figure\n" ]
[ [ "numpy.array" ] ]
duolinwang/DM3Loc
[ "efb859841a40e92ae7fd78c01140144fc2405233" ]
[ "Multihead_train.py" ]
[ "import datetime\nimport itertools\nfrom collections import OrderedDict\nimport argparse\nimport os\nimport sys\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nbasedir='./'\nsys.path.append(basedir)\n#sys.path.append(os.path.dirname(os.path.abspath(__file__)))\n\nimport tensorflow as tf\n\ngpu_options = tf.GPUOptions()\ngpu_options.allow_growth = True\nconfig = tf.ConfigProto(gpu_options=gpu_options)\nsess = tf.Session(config=config)\nfrom keras.backend.tensorflow_backend import set_session\nimport keras.backend as K\nset_session(session=sess)\n\nfrom multihead_attention_model import *\nfrom Genedata import Gene_data\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import MaxPooling1D\nfrom sklearn.model_selection import KFold, StratifiedKFold\n\nencoding_seq = OrderedDict([\n ('UNK', [0, 0, 0, 0]),\n ('A', [1, 0, 0, 0]),\n ('C', [0, 1, 0, 0]),\n ('G', [0, 0, 1, 0]),\n ('T', [0, 0, 0, 1]),\n ('N', [0.25, 0.25, 0.25, 0.25]), # A or C or G or T\n])\n\nseq_encoding_keys = list(encoding_seq.keys())\nseq_encoding_vectors = np.array(list(encoding_seq.values()))\n\ngene_ids = None\n\ndef calculating_class_weights(y_true):\n from sklearn.utils.class_weight import compute_class_weight\n number_dim = np.shape(y_true)[1]\n weights = np.empty([number_dim, 2])\n for i in range(number_dim):\n weights[i] = compute_class_weight('balanced', [0., 1.], y_true[:, i])\n return weights\n\n\ndef get_id_label_seq_Dict(gene_data):\n id_label_seq_Dict = OrderedDict()\n for gene in gene_data:\n label = gene.label\n gene_id = gene.id.strip()\n id_label_seq_Dict[gene_id] = {}\n id_label_seq_Dict[gene_id][label]= (gene.seqleft,gene.seqright)\n \n return id_label_seq_Dict\n\n\ndef get_label_id_Dict(id_label_seq_Dict):\n label_id_Dict = OrderedDict()\n for eachkey in id_label_seq_Dict.keys():\n label = list(id_label_seq_Dict[eachkey].keys())[0]\n label_id_Dict.setdefault(label,set()).add(eachkey)\n \n return label_id_Dict\n\ndef typeicalSampling(ids, k):\n kf = KFold(n_splits=k, shuffle=True, random_state=1234)\n folds = kf.split(ids)\n train_fold_ids = OrderedDict()\n val_fold_ids = OrderedDict()\n test_fold_ids=OrderedDict()\n for i, (train_indices, test_indices) in enumerate(folds):\n size_all = len(train_indices)\n train_fold_ids[i] = []\n val_fold_ids[i] = []\n test_fold_ids[i] =[]\n train_indices2 = train_indices[:int(size_all * 0.8)]\n val_indices = train_indices[int(size_all * 0.8):]\n for s in train_indices2:\n train_fold_ids[i].append(ids[s])\n \n for s in val_indices:\n val_fold_ids[i].append(ids[s])\n \n for s in test_indices:\n test_fold_ids[i].append(ids[s])\n \n \n return train_fold_ids,val_fold_ids,test_fold_ids\n\ndef group_sample(label_id_Dict,datasetfolder,foldnum=8):\n Train = OrderedDict()\n Test = OrderedDict()\n Val = OrderedDict()\n for i in range(foldnum):\n Train.setdefault(i,list())\n Test.setdefault(i,list())\n Val.setdefault(i,list())\n \n for eachkey in label_id_Dict:\n label_ids = list(label_id_Dict[eachkey])\n if len(label_ids)<foldnum:\n for i in range(foldnum):\n Train[i].extend(label_ids)\n \n continue\n \n [train_fold_ids, val_fold_ids,test_fold_ids] = typeicalSampling(label_ids, foldnum)\n for i in range(foldnum):\n Train[i].extend(train_fold_ids[i])\n Val[i].extend(val_fold_ids[i])\n Test[i].extend(test_fold_ids[i])\n print('label:%s finished sampling! 
Train length: %s, Test length: %s, Val length:%s'%(eachkey, len(train_fold_ids[i]), len(test_fold_ids[i]),len(val_fold_ids[i])))\n \n for i in range(foldnum):\n print('Train length: %s, Test length: %s, Val length: %s'%(len(Train[i]),len(Test[i]),len(Val[i])))\n #print(type(Train[i]))\n #print(Train[0][:foldnum])\n np.savetxt(datasetfolder+'/Train8'+str(i)+'.txt', np.asarray(Train[i]),fmt=\"%s\")\n np.savetxt(datasetfolder+'/Test8'+str(i)+'.txt', np.asarray(Test[i]),fmt=\"%s\")\n np.savetxt(datasetfolder+'/Val8'+str(i)+'.txt', np.asarray(Val[i]),fmt=\"%s\")\n \n return Train, Test, Val\n\ndef label_dist(dist):\n #assert (len(dist) == 4)\n return [int(x) for x in dist]\n\ndef maxpooling_mask(input_mask,pool_length=3):\n #input_mask is [N,length]\n max_index = int(input_mask.shape[1]/pool_length)-1\n max_all=np.zeros([input_mask.shape[0],int(input_mask.shape[1]/pool_length)])\n for i in range(len(input_mask)):\n index=0\n for j in range(0,len(input_mask[i]),pool_length):\n if index<=max_index:\n max_all[i,index] = np.max(input_mask[i,j:(j+pool_length)])\n index+=1\n \n return max_all\n\n\ndef preprocess_data(left, right,dataset,padmod='center',pooling_size=3):\n gene_data = Gene_data.load_sequence(dataset, left, right)\n id_label_seq_Dict = get_id_label_seq_Dict(gene_data)\n label_id_Dict = get_label_id_Dict(id_label_seq_Dict)\n Train=OrderedDict()\n Test=OrderedDict()\n Val=OrderedDict()\n datasetfolder=os.path.dirname(dataset)\n if os.path.exists(datasetfolder+'/Train8'+str(0)+'.txt'):\n for i in range(8):\n Train[i] = np.loadtxt(datasetfolder+'/Train8'+str(i)+'.txt',dtype='str')#HDF5Matrix(os.path.join('../mRNA_multi_data_keepnum_code/', 'datafold'+str(i)+'.h5'), 'Train')[:]\n Test[i] = np.loadtxt(datasetfolder+'/Test8'+str(i)+'.txt',dtype='str')#HDF5Matrix(os.path.join('../mRNA_multi_data_keepnum_code/', 'datafold'+str(i)+'.h5'), 'Test')[:]\n Val[i] = np.loadtxt(datasetfolder+'/Val8'+str(i)+'.txt',dtype='str')#HDF5Matrix(os.path.join('../mRNA_multi_data_keepnum_code/', 'datafold'+str(i)+'.h5'), 'Val')[:]\n else:\n [Train, Test,Val] = group_sample(label_id_Dict,datasetfolder)\n \n Xtrain={}\n Xtest={}\n Xval={}\n Ytrain={}\n Ytest={}\n Yval={}\n Train_mask_label={}\n Test_mask_label={}\n Val_mask_label={}\n maxpoolingmax = int((left+right)/pooling_size)\n \n for i in range(8):\n #if i <2:\n # continue\n \n print('padding and indexing data')\n encoding_keys = seq_encoding_keys\n encoding_vectors = seq_encoding_vectors\n #train\n #padd center\n X_left = [[encoding_keys.index(c) for c in list(id_label_seq_Dict[id].values())[0][0]] for id in Train[i]]\n X_right = [[encoding_keys.index(c) for c in list(id_label_seq_Dict[id].values())[0][1]] for id in Train[i]]\n if padmod =='center':\n mask_label_left = np.array([np.concatenate([np.ones(len(gene)),np.zeros(left-len(gene))]) for gene in X_left],dtype='float32')\n mask_label_right = np.array([np.concatenate([np.zeros(right-len(gene)),np.ones(len(gene))]) for gene in X_right],dtype='float32')\n mask_label = np.concatenate([mask_label_left,mask_label_right],axis=-1)\n Train_mask_label[i]=maxpooling_mask(mask_label,pool_length=pooling_size)\n X_left = pad_sequences(X_left,maxlen=left,\n dtype=np.int8, value=encoding_keys.index('UNK'),padding='post') #padding after sequence\n \n X_right = pad_sequences(X_right,maxlen=right,\n dtype=np.int8, value=encoding_keys.index('UNK'),padding='pre')# padding before sequence\n \n Xtrain[i] = np.concatenate([X_left,X_right],axis = -1)\n else:\n #merge left and right and padding after sequence\n Xall = 
[np.concatenate([x,y],axis=-1) for x,y in zip(X_left,X_right)]\n Xtrain[i] = pad_sequences(Xall,maxlen=left+right,dtype=np.int8, value=encoding_keys.index('UNK'),padding='post')\n #mask_label = np.array([np.concatenate([np.ones(len(gene)),np.zeros(left+right-len(gene))]) for gene in Xall],dtype='float32')\n #Train_mask_label[i]=maxpooling_mask(mask_label,pool_length=pooling_size)\n Train_mask_label[i]=np.array([np.concatenate([np.ones(int(len(gene)/pooling_size)),np.zeros(maxpoolingmax-int(len(gene)/pooling_size))]) for gene in Xall],dtype='float32')\n \n Ytrain[i] = np.array([label_dist(list(id_label_seq_Dict[id].keys())[0]) for id in Train[i]])\n print(\"training shapes\"+str(Xtrain[i].shape)+\" \"+str(Ytrain[i].shape))\n \n #test\n X_left = [[encoding_keys.index(c) for c in list(id_label_seq_Dict[id].values())[0][0]] for id in Test[i]]\n X_right = [[encoding_keys.index(c) for c in list(id_label_seq_Dict[id].values())[0][1]] for id in Test[i]]\n if padmod =='center':\n mask_label_left = np.array([np.concatenate([np.ones(len(gene)),np.zeros(left-len(gene))]) for gene in X_left],dtype='float32')\n mask_label_right = np.array([np.concatenate([np.zeros(right-len(gene)),np.ones(len(gene))]) for gene in X_right],dtype='float32')\n mask_label = np.concatenate([mask_label_left,mask_label_right],axis=-1)\n Test_mask_label[i]=maxpooling_mask(mask_label,pool_length=pooling_size)\n X_left = pad_sequences(X_left,maxlen=left,\n dtype=np.int8, value=encoding_keys.index('UNK'),padding='post') #padding after sequence\n \n X_right = pad_sequences(X_right,maxlen=right,\n dtype=np.int8, value=encoding_keys.index('UNK'),padding='pre')# padding before sequence\n \n Xtest[i] = np.concatenate([X_left,X_right],axis = -1)\n else:\n #merge left and right and padding after sequence\n Xall = [np.concatenate([x,y],axis=-1) for x,y in zip(X_left,X_right)]\n Xtest[i] = pad_sequences(Xall,maxlen=left+right,dtype=np.int8, value=encoding_keys.index('UNK'),padding='post')\n #mask_label = np.array([np.concatenate([np.ones(len(gene)),np.zeros(left+right-len(gene))]) for gene in Xall],dtype='float32')\n #Test_mask_label[i]=maxpooling_mask(mask_label,pool_length=pooling_size)\n Test_mask_label[i]=np.array([np.concatenate([np.ones(int(len(gene)/pooling_size)),np.zeros(maxpoolingmax-int(len(gene)/pooling_size))]) for gene in Xall],dtype='float32')\n \n Ytest[i] = np.array([label_dist(list(id_label_seq_Dict[id].keys())[0]) for id in Test[i]])\n #validation\n X_left = [[encoding_keys.index(c) for c in list(id_label_seq_Dict[id].values())[0][0]] for id in Val[i]]\n X_right = [[encoding_keys.index(c) for c in list(id_label_seq_Dict[id].values())[0][1]] for id in Val[i]]\n if padmod=='center':\n mask_label_left = np.array([np.concatenate([np.ones(len(gene)),np.zeros(left-len(gene))]) for gene in X_left],dtype='float32')\n mask_label_right = np.array([np.concatenate([np.zeros(right-len(gene)),np.ones(len(gene))]) for gene in X_right],dtype='float32')\n mask_label = np.concatenate([mask_label_left,mask_label_right],axis=-1)\n Val_mask_label[i]=maxpooling_mask(mask_label,pool_length=pooling_size)\n X_left = pad_sequences(X_left,maxlen=left,\n dtype=np.int8, value=encoding_keys.index('UNK'),padding='post') #padding after sequence\n \n X_right = pad_sequences(X_right,maxlen=right,\n dtype=np.int8, value=encoding_keys.index('UNK'),padding='pre')# padding before sequence\n \n Xval[i] = np.concatenate([X_left,X_right],axis = -1)\n else:\n #merge left and right and padding after sequence\n Xall = [np.concatenate([x,y],axis=-1) for x,y in 
zip(X_left,X_right)]\n Xval[i] = pad_sequences(Xall,maxlen=left+right,dtype=np.int8, value=encoding_keys.index('UNK'),padding='post')\n #mask_label = np.array([np.concatenate([np.ones(len(gene)),np.zeros(left+right-len(gene))]) for gene in Xall],dtype='float32')\n #Val_mask_label[i]=maxpooling_mask(mask_label,pool_length=pooling_size)\n Val_mask_label[i]=np.array([np.concatenate([np.ones(int(len(gene)/pooling_size)),np.zeros(maxpoolingmax-int(len(gene)/pooling_size))]) for gene in Xall],dtype='float32')\n \n Yval[i] = np.array([label_dist(list(id_label_seq_Dict[id].keys())[0]) for id in Val[i]])\n \n return Xtrain,Ytrain,Train_mask_label,Xtest, Ytest,Test_mask_label,Xval,Yval,Val_mask_label, encoding_keys, encoding_vectors\n\n\n# starts training in CNN model\ndef run_model(lower_bound, upper_bound, max_len, dataset, **kwargs):\n \n pooling_size = kwargs['pooling_size'] #\n \n #pooling_size = int(kwargs['pooling_size']*kwargs['num_encoder']*2)\n print(\"pooling_size\")\n print(pooling_size)\n Xtrain,Ytrain,Train_mask_label,Xtest, Ytest,Test_mask_label,Xval,Yval,Val_mask_label, encoding_keys, encoding_vectors = preprocess_data(kwargs['left'], kwargs['right'], dataset,padmod = kwargs['padmod'],pooling_size=pooling_size)\n max_len = kwargs['left']+kwargs['right']\n \n # model mode maybe overridden by other parameter settings\n for i in range(1):#(kwargs['foldnum']):\n print(Xtrain[i].shape)\n print(Train_mask_label[i].shape)\n print('Evaluating KFolds {}/10'.format(i + 1))\n model = multihead_attention(max_len, kwargs['nb_classes'], OUTPATH, kfold_index=i) # initialize\n model.build_model_multihead_attention_multiscaleCNN4_covermore(\n load_weights = kwargs['load_pretrain'],\n weight_dir = kwargs['weights_dir'],\n dim_attention=kwargs['dim_attention'],\n headnum=kwargs['headnum'],\n embedding_vec=encoding_vectors,\n nb_filters=kwargs['nb_filters'],\n filters_length1=kwargs['filters_length1'],\n filters_length2=kwargs['filters_length2'],\n filters_length3=kwargs['filters_length3'],\n pooling_size=kwargs['pooling_size'],\n drop_input=kwargs['drop_input'],\n drop_cnn=kwargs['drop_cnn'],\n drop_flat=kwargs['drop_flat'],\n W1_regularizer=kwargs['W1_regularizer'],\n W2_regularizer=kwargs['W2_regularizer'],\n Att_regularizer_weight=kwargs['Att_regularizer_weight'],\n BatchNorm=kwargs['BatchNorm'],\n fc_dim = kwargs['fc_dim'],\n fcnum = kwargs['fcnum'],\n posembed=kwargs['posembed'],\n pos_dmodel=kwargs['pos_dmodel'],\n pos_nwaves = kwargs['pos_nwaves'],\n posmod = kwargs['posmod'],\n regularfun = kwargs['regularfun'],\n huber_delta=kwargs['huber_delta'],\n activation = kwargs['activation'],\n activationlast = kwargs['activationlast'],\n add_avgpooling = kwargs['add_avgpooling'],\n poolingmod = kwargs['poolingmod'], #1 maxpooling 2 avgpooling\n normalizeatt=kwargs['normalizeatt'],\n attmod=kwargs['attmod'],\n sharp_beta=kwargs['sharp_beta'],\n lr = kwargs['lr']\n )\n \n if kwargs['nb_classes'] == 7:\n class_weights={0:1,1:1,2:7,3:1,4:3,5:5,6:8}\n \n model.train(Xtrain[i], Ytrain[i],Train_mask_label[i], kwargs['batch_size'], kwargs['epochs'],Xval[i],Yval[i],Val_mask_label[i],loadFinal=kwargs['loadFinal'],classweight = kwargs['classweight'],class_weights=class_weights)\n model.evaluate(Xtest[i], Ytest[i],Test_mask_label[i])\n \n K.clear_session()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n '''Model parameters'''\n parser.add_argument('--lower_bound', type=int, default=0, help='set lower bound on sample sequence length')\n parser.add_argument('--upper_bound', type=int, 
default=4000, help='set upper bound on sample sequence length')\n parser.add_argument('--max_len', type=int, default=4000,\n help=\"pad or slice sequences to a fixed length in preprocessing\")\n \n parser.add_argument('--left', type=int, default=4000, help='set left on sample sequence length')\n parser.add_argument('--right', type=int, default=4000, help='set left on sample sequence length')\n \n parser.add_argument('--dim_attention', type=int, default=80, help='dim_attention')\n parser.add_argument('--headnum', type=int, default=5, help='number of multiheads') #select one from 3\n parser.add_argument('--dim_capsule', type=int, default=4, help='capsule dimention')\n parser.add_argument('--drop_rate', type=float, default=0.1, help='dropout ratio')\n parser.add_argument('--drop_input', type=float, default=0.06, help='dropout ratio')\n parser.add_argument('--drop_cnn', type=float, default=0.25, help='dropout ratio')\n parser.add_argument('--drop_flat', type=float, default=0.26, help='dropout ratio')\n \n parser.add_argument('--W1_regularizer', type=float, default=0.001, help='W1_regularizer')\n parser.add_argument('--W2_regularizer', type=float, default=0.001, help='W2_regularizer')\n parser.add_argument('--Att_regularizer_weight', type=float, default=0.001, help='Att_regularizer_weight')\n \n parser.add_argument('--dataset', type=str, default='../../mRNAsubloci_train.fasta', help='input sequence data')\n parser.add_argument('--epochs', type=int, default=50, help='')\n parser.add_argument('--nb_filters', type=int, default=64, help='number of CNN filters') \n parser.add_argument('--filters_length1', type=int, default=9, help='kernel length for CNN filters1')\n parser.add_argument('--filters_length2', type=int, default=20, help='kernel length for CNN filters2') \n parser.add_argument('--filters_length3', type=int, default=49, help='kernel length for CNN filters3') \n parser.add_argument('--pooling_size', type=int, default=8, help='pooling_size') \n parser.add_argument('--att_weight', type=float, default=1, help='number of att_weight') #select one from 3\n parser.add_argument(\"--BatchNorm\", action=\"store_true\",help=\"use BatchNorm\")\n parser.add_argument(\"--loadFinal\", action=\"store_true\",help=\"whether loadFinal model\")\n parser.add_argument('--fc_dim', type=int, default=100, help='fc_dim')\n parser.add_argument('--fcnum', type=int, default=1, help='fcnum')\n parser.add_argument('--sigmoidatt', type=int, default=0, help='whether sigmoidatt 0 no 1 yes') #select one from 3\n parser.add_argument(\"--message\", type=str, default=\"\", help=\"append to the dir name\")\n parser.add_argument(\"--load_pretrain\", action=\"store_true\",\n help=\"load pretrained CNN weights to the first convolutional layers\")\n \n parser.add_argument(\"--weights_dir\", type=str, default=\"\",\n help=\"Must specificy pretrained weights dir, if load_pretrain is set to true. 
Only enter the relative path respective to the root of this project.\") \n \n parser.add_argument(\"--randomization\", type=int, default=None,\n help=\"Running randomization test with three settings - {1,2,3}.\") #use default none\n parser.add_argument(\"--posembed\", action=\"store_true\",help=\"use posembed\")\n parser.add_argument(\"--pos_dmodel\", type=int,default=40,help=\"pos_dmodel\")\n parser.add_argument(\"--pos_nwaves\", type=int,default=20,help=\"pos_nwaves\")\n parser.add_argument(\"--posmod\", type=str,default='concat',help=\"posmod\")\n parser.add_argument(\"--regularfun\",type=int,default=1,help = 'regularfun for l1 or l2 3 for huber_loss')\n parser.add_argument(\"--huber_delta\",type=float,default=1.0,help = 'huber_delta')\n \n parser.add_argument(\"--activation\",type=str,default='gelu',help = 'activation')\n parser.add_argument(\"--activationlast\",type=str,default='gelu',help = 'activationlast')\n \n parser.add_argument(\"--add_avgpooling\", action=\"store_true\",help=\"add_avgpooling\")\n parser.add_argument('--poolingmod',type=int,default=1,help = '1:maxpooling 2:avgpooling')\n parser.add_argument('--classweight', action=\"store_true\", help='classweight')\n parser.add_argument('--batch_size', type=int, default=256, help='batch_size')\n parser.add_argument(\"--padmod\", type=str,default='after',help=\"padmod: center, after\")\n parser.add_argument(\"--normalizeatt\", action=\"store_true\",help=\"normalizeatt\")\n parser.add_argument('--num_encoder', type=int, default=1, help='num_encoder')\n parser.add_argument('--lastCNN_length', type=int, default=1, help='lastCNN_length')\n parser.add_argument('--lastCNN_filter', type=int, default=128, help='lastCNN_filter')\n parser.add_argument(\"--attmod\", type=str, default=\"smooth\",help=\"attmod\")\n parser.add_argument(\"--sharp_beta\", type=int, default=1,help=\"sharp_beta\")\n parser.add_argument(\"--lr\",type=float,default=0.001,help = 'lr')\n parser.add_argument(\"--nb_classes\",type=int,default=7,help = 'nb_classes')\n parser.add_argument('--foldnum', type=int, default=8, help='number of cross-validation folds') \n \n args = parser.parse_args()\n OUTPATH = os.path.join(basedir,'Results/'+args.message + '/')\n if not os.path.exists(OUTPATH):\n os.makedirs(OUTPATH)\n print('OUTPATH:', OUTPATH)\n del args.message\n \n args.weights_dir = os.path.join(basedir, args.weights_dir)\n \n for k, v in vars(args).items():\n print(k, ':', v)\n \n run_model(**vars(args))\n\n\n\n#use the remove data direct from fold\n#python3 Multihead_train.py --normalizeatt --classweight --dataset ../direct_8_fold_data/modified_multi_complete_to_cdhit.fasta --epochs 500 --message direct_8fold_model --weights_dir 'model_after_cdhit'\n\n\n#python3 Multihead_train.py --normalizeatt --classweight --dataset ../modified_multi_complete_to_cdhit.fasta --epochs 500 --message cnn64_smooth_l1\n" ]
[ [ "sklearn.utils.class_weight.compute_class_weight", "tensorflow.Session", "tensorflow.ConfigProto", "sklearn.model_selection.KFold", "tensorflow.GPUOptions" ] ]
ahojukka5/scikit-fem
[ "f80b8c9ea8742c23586818e61a2d6ca148833e0c" ]
[ "skfem/assembly/form/form.py" ]
[ "from typing import Callable\n\nimport numpy as np\nfrom numpy import ndarray\nfrom scipy.sparse import coo_matrix\n\nfrom ...element import DiscreteField\n\n\nclass FormDict(dict):\n \"\"\"Passed to forms as 'w'.\"\"\"\n\n def __getattr__(self, attr):\n return self[attr].value\n\n\nclass Form:\n\n def __init__(self, form: Callable):\n self.form = form\n\n def __call__(self, *args):\n return self.assemble(self.kernel(*args))\n\n def _kernel(self):\n raise NotImplementedError\n\n def assemble(self):\n raise NotImplementedError\n\n @staticmethod\n def dictify(w):\n \"\"\"Support some legacy input formats for 'w'.\"\"\"\n for k in w:\n if isinstance(w[k], DiscreteField):\n continue\n elif isinstance(w[k], ndarray):\n w[k] = DiscreteField(w[k])\n elif isinstance(w[k], list):\n w[k] = DiscreteField(np.array([z.f for z in w[k]]),\n np.array([z.df for z in w[k]]))\n elif isinstance(w[k], tuple):\n w[k] = DiscreteField(*w[k])\n else:\n raise ValueError(\"The given type '{}' for the list of extra \"\n \"form parameters w cannot be converted to \"\n \"DiscreteField.\".format(type(w)))\n return w\n\n @staticmethod\n def _assemble_scipy_matrix(data, rows, cols, shape=None):\n K = coo_matrix((data, (rows, cols)), shape=shape)\n K.eliminate_zeros()\n return K.tocsr()\n\n @staticmethod\n def _assemble_numpy_vector(data, rows, cols, shape=None):\n return coo_matrix((data, (rows, cols)),\n shape=shape).toarray().T[0]\n" ]
[ [ "scipy.sparse.coo_matrix", "numpy.array" ] ]
dwhan89/comsmimikyu
[ "927c56f76a9c9f7c517a86a52586b8958c308328" ]
[ "bin/sehgal_training_pixgan_wgp.py" ]
[ "import os\n\nimport mlflow\nimport numpy as np\nimport torch\nfrom orphics import maps\n\nfrom cosmikyu import gan, config, datasets, transforms\nfrom cosmikyu import nn as cnn\n\ndata_dir = config.default_data_dir\nsehgal_dir = os.path.join(data_dir, 'sehgal')\ncuda = True\ncompts = [\"kappa\", \"ksz\", \"tsz\", \"ir_pts\", \"rad_pts\"]\ncompt_idxes = [0, 1, 2, 3, 4]\nshape = (len(compt_idxes), 128, 128)\nsample_interval = 500\nsave_interval = 1\nbatch_size = 32\nnepochs = 100\nnorm_info_file = \"/home/dwhan89/workspace/cosmikyu/data/sehgal/281220_logz_normalization_info_validation.npz\"\n\n_, wcs = maps.rect_geometry(width_arcmin=64., px_res_arcmin=0.5)\n\n# Configure data loader\nos.makedirs(data_dir, exist_ok=True)\nos.makedirs(sehgal_dir, exist_ok=True)\nSDN = transforms.SehgalDataNormalizerScaledLogZShrink(norm_info_file)\nSC = transforms.SehgalSubcomponets(compt_idxes)\nRF = transforms.RandomFlips(p_v=0.5, p_h=0.5)\nSDS_train = datasets.SehgalDataSet(sehgal_dir, \"train_secondary281220_fromcat\", transforms=[SDN, RF, SC],\n dummy_label=True, dtype=np.float32)\n\ndataloader = torch.utils.data.DataLoader(\n SDS_train,\n batch_size=batch_size,\n shuffle=True,\n)\n\nSTanh = cnn.ScaledTanh(15, 2 / 15)\nLF = cnn.LinearFeature(4, 4, bias=True)\nexperiment_id = \"06b9a352b8bb4051b50f91660ebc4cfe\"\nmodel_dir = \"/home/dwhan89/workspace/cosmikyu/output/sehgal_pixganwgp_281220/{}/model\".format(experiment_id)\nPIXGAN = gan.PIXGAN_WGP(\"sehgal_pixganwgp_281220\", shape, nconv_fcgen=64,\n nconv_fcdis=64, cuda=cuda, ngpu=4, nconv_layer_gen=4, nconv_layer_disc=5, kernal_size=4,\n stride=2,\n padding=1, output_padding=0, gen_act=[LF, STanh], nin_channel=1, nout_channel=4,\n nthresh_layer_gen=3, nthresh_layer_disc=0, dropout_rate=0.)\n\nmlflow.set_experiment(PIXGAN.identifier)\nwith mlflow.start_run(experiment_id=PIXGAN.experiment.experiment_id) as mlflow_run:\n torch.cuda.empty_cache()\n PIXGAN.train(\n dataloader,\n nepochs=nepochs,\n ncritics=5,\n sample_interval=sample_interval,\n save_interval=save_interval,\n load_states=True,\n save_states=True,\n verbose=True,\n mlflow_run=mlflow_run,\n lr=1e-4,\n betas=(0.5, 0.9),\n lambda_gp=10.,\n lambda_l1=100.\n )\n" ]
[ [ "torch.cuda.empty_cache", "torch.utils.data.DataLoader" ] ]
bikash/pyhsmm
[ "94fab0ea66072a639b20163c40db04c18069496c" ]
[ "util/profiling.py" ]
[ "from __future__ import division\nimport numpy as np\nimport sys, StringIO, inspect, os, functools, time, collections\n\n### use @timed for really basic timing\n\n_timings = collections.defaultdict(list)\n\ndef timed(func):\n @functools.wraps(func)\n def wrapped(*args,**kwargs):\n tic = time.time()\n out = func(*args,**kwargs)\n _timings[func].append(time.time() - tic)\n return out\n return wrapped\n\ndef show_timings(stream=None):\n if stream is None:\n stream = sys.stdout\n if len(_timings) > 0:\n results = [(inspect.getsourcefile(f),f.__name__,\n len(vals),np.sum(vals),np.mean(vals),np.std(vals))\n for f, vals in _timings.iteritems()]\n filename_lens = max(len(filename) for filename, _, _, _, _, _ in results)\n name_lens = max(len(name) for _, name, _, _, _, _ in results)\n\n fmt = '{:>%d} {:>%d} {:>10} {:>10} {:>10} {:>10}' % (filename_lens, name_lens)\n print >>stream, fmt.format('file','name','ncalls','tottime','avg time','std dev')\n\n fmt = '{:>%d} {:>%d} {:>10} {:>10.3} {:>10.3} {:>10.3}' % (filename_lens, name_lens)\n print >>stream, '\\n'.join(fmt.format(*tup) for tup in sorted(results))\n\n### use @line_profiled for a thin wrapper around line_profiler\n\ntry:\n import line_profiler\n _prof = line_profiler.LineProfiler()\n\n def line_profiled(func):\n mod = inspect.getmodule(func)\n if 'PROFILING' in os.environ or (hasattr(mod,'PROFILING') and mod.PROFILING):\n return _prof(func)\n return func\n\n def show_line_stats(stream=None):\n _prof.print_stats(stream=stream)\nexcept ImportError:\n line_profiled = lambda x: x\n\n" ]
[ [ "numpy.std", "numpy.sum", "numpy.mean" ] ]
rupinderjdn/OpenCV
[ "76055d393a36ad747fdf840d3ae542d3a94c0e68" ]
[ "15 Gaussian Blurring.py" ]
[ "import cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimg = cv.imread('dia mirza.jpg')\nblur = cv.GaussianBlur(img,(5,5),1)\nplt.subplot(121),plt.imshow(img),plt.title('Original')\nplt.xticks([]), plt.yticks([])\nplt.subplot(122),plt.imshow(blur),plt.title('Blurred')\nplt.xticks([]), plt.yticks([])\nplt.show()" ]
[ [ "matplotlib.pyplot.subplot", "matplotlib.pyplot.title", "matplotlib.pyplot.yticks", "matplotlib.pyplot.show", "matplotlib.pyplot.xticks", "matplotlib.pyplot.imshow" ] ]
fullcyxuc/HPCnet_examples
[ "999ef3f33ba73ba1d86f4d5df3f0160b46016be8" ]
[ "tools/HausdorffTest/getGtFeature.py" ]
[ "import torch\nfrom typing import Tuple\nfrom torch.autograd import Variable\nfrom torch.autograd import Function\nimport torch.nn as nn\nimport math\nfrom HausdorffTest.ReadShapes import read_keyfile\nfrom HausdorffTest.ReadShapes import LoadGivenShapes\nimport time\nimport os\nimport sys\nimport torch.multiprocessing as mp\n\nimport numpy as np\nimport HPCnet_cuda as HPCnet\n\nclass getGtFeature(Function):\n @staticmethod\n def forward(ctx, whole_points: torch.Tensor, keypoints: torch.Tensor, \\\n neighbor_points: torch.Tensor, radius: float, neighbor_point_num: float)\\\n -> torch.Tensor:\n \"\"\"\n whole_points: B C N\n keypoints: B N C\n neighbor_points: B N nsample C\n output: feature: B M gt_num\n \"\"\"\n # print(whole_points[:,:,:].size())\n root = \"/home/xue/Documents/Pirors/\" + str(radius)\n prior_points, dis_dicts = LoadGivenShapes(root)\n\n dis_dicts = torch.cuda.FloatTensor(dis_dicts)\n prior_points = torch.cuda.FloatTensor(prior_points)\n # gt_feature_len = len(dis_dicts)\n\n voxel_dim = 30\n voxel_len = 2*radius / voxel_dim\n voxel_dim = int(2*radius/voxel_len + 1)\n\n batch_size, keypoint_num, point_dim= keypoints.size()\n whole_point_num = whole_points.size()[0]\n\n feature = torch.cuda.FloatTensor(batch_size, keypoint_num, len(prior_points)).zero_()\n\n HPCnet.get_hausdorff_dis_wrapper(neighbor_points, feature, radius,\\\n batch_size, \\\n whole_point_num, keypoint_num, neighbor_point_num, \\\n prior_points, dis_dicts,\\\n voxel_len)\n\n return feature\n\n # @staticmethod\n # def backward(feature, a = None):\n # return None, None, None, None, None\n\nget_gt_feature = getGtFeature.apply\n" ]
[ [ "torch.cuda.FloatTensor" ] ]
chudur-budur/visualization
[ "8013fbdef55fac770d439454207dc07be88fe7c3" ]
[ "viz/plotting/pcp.py" ]
[ "\"\"\"pcp.py -- A customized and more flexible Parallel-coordinate plotting module. \n\n This module provides a customized and more flexible function for Parallel-coordinate \n Plot (PCP) [1]_ visualization. This module also provides different relevant fucntions, \n parameters and tools.\n\n Copyright (C) 2016\n Computational Optimization and Innovation (COIN) Laboratory\n Department of Computer Science and Engineering\n Michigan State University\n 428 S. Shaw Lane, Engineering Building\n East Lansing, MI 48824-1226, USA\n \n References\n ----------\n .. [1] A. Inselberg and T. Avidan, \"Classification and visualization for high-dimensional \n data\", Proc. 6th ACM SIGKDD Int. Conf. Knowledge Discovery and Data Mining (KDD ‘00), \n pp. 370-374, 2000.\n\n.. moduleauthor:: AKM Khaled Talukder <[email protected]>\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport matplotlib.colors as mc\nfrom matplotlib.colors import ListedColormap\nfrom viz.plotting.utils import pop \nfrom viz.utils import transform as tr\nfrom viz.utils import dm\n\n__all__ = [\"plot\"]\n\nxmargins = {10: [0.7, 0.3], 9:[0.6, 0.3], 8:[0.5, 0.3], 7:[0.45, 0.3], 6:[0.3, 0.2]}\nymargins = {10: [-0.1, 0.08], 9:[-0.09, 0.075], 8:[-0.09, 0.075], 7:[-0.08, 0.075], 6:[-0.08, 0.075], 5:[-0.8, 0.08], 4:[-0.08, 0.08], 3:[-0.08, 0.08]}\n\ndef is_xticklabels_off(ax):\n r\"\"\"Checks if axes has already xtick labels\n\n Checks if an `matplotlib.axes.Axes` object has already\n xtick labels. \n\n Parameters\n ----------\n ax : matplotlib axes object\n An `matplotlib.axes.Axes` object.\n \n Returns\n -------\n True/False : bool\n \"\"\"\n\n xtl = ax.get_xticklabels()\n for s in xtl:\n if str(s) != \"Text(0, 0, \\'\\')\":\n return False\n return True\n\ndef get_yaxis_bounds(A):\n r\"\"\"\n \"\"\"\n ub = dm.nadir(A)\n lb = dm.ideal(A)\n ubs = [\"{:1.1e}\".format(v) for v in ub]\n lbs = [\"{:1.1e}\".format(v) for v in lb]\n return [lbs, ubs]\n\ndef plot(A, ax=None, show_bounds=True, c=mc.TABLEAU_COLORS['tab:blue'], lw=1.0, labels=None, \\\n xtick_labels=None, draw_vertical_lines=True, draw_grid=False, **kwargs):\n r\"\"\"A customized and more enhanced Parallel-coordinate plot.\n\n This Parallel-coordinate plot (PCP) [1]_ is customized for the experiments. \n A lot of settings are customizable and configurable. Also it gives more \n flexibility to the user compared to similar functions implemented in other \n libraries like Pandas and seaborn. \n \n Parameters\n ----------\n A : ndarray \n `n` number of `m` dim. points to be plotted.\n ax : An `mpl_toolkits.mplot3d.axes.Axes3D` object, optional\n Default `None` when optional.\n show_bounds : bool, optional\n If `True` then the plot will show the lower and upper bounds of each data\n point (i.e. lines). Default `False` when optional.\n c : A `matplotlib.colors` object, str or an array RGBA color values.\n Colors to be used. Default `mc.TABLEAU_COLORS['tab:blue']` when \n optional.\n lw : float, optional\n The line-width of each line in PCP. Default 1.0 when optional.\n labels : str, array_like or list of str, optional\n A string or an array/list of strings for labeling each line. Which basically\n means the class label of each row. Default `None` when optional. This will be\n used to set the legend in the figure. If `None` there will be no legend.\n xtick_labels : str, array_like or list of str, optional\n A string or an array/list of strings for xtick labels, for each column.\n Default `None` when optional. 
In that case, the labels will be `f_0`, `f_1` etc.\n draw_vertical_lines : bool, optional\n Decide whether we are going to put vertical y-axis lines in the plot for each\n column/feature. Default `True` when optional.\n draw_grid : bool, optional\n Decide whether we are going to put x-axis grid-lines in the plot. Default\n `False` when optional.\n\n Other Parameters\n ----------------\n title : str, optional\n The title of the figure. Default `None` when optional.\n column_indices : array_like or list of int, optional\n The indices of the columns of `A` to be plotted. Default `None` when optional.\n colorbar : (Cbc, Cbg, Cbl) a tuple of two ndarray and a str, optional\n If a user wants to put a colorbar, a tuple `(Cbc, Cbg, Cbl)` can be provided. \n `Cbc` is an array of RGBA color values or an `matplotlib.colors` object. The \n gradient of the colorbar is specified in `Cbg` which is an 1-D array of float. \n Cbl is the label of the colorbar, a string. Default `None` when optional.\n axvline_width : float, optional\n The width of the vertical lines. Default 1.0 when optional.\n axvline_color : A `matplotlib.colors` object, str or an array RGBA color values.\n The color of the vertical lines. Default `black` when optional.\n **kwargs : dict\n All other keyword args for matplotlib `plot()` function.\n\n Returns\n -------\n ax : `mpl_toolkits.mplot3d.axes.Axes3D` object\n An `mpl_toolkits.mplot3d.axes.Axes3D` object.\n\n References\n ----------\n .. [1] A. Inselberg and T. Avidan, \"Classification and visualization for high-dimensional \n data\", Proc. 6th ACM SIGKDD Int. Conf. Knowledge Discovery and Data Mining (KDD ‘00), \n pp. 370-374, 2000.\n \"\"\"\n \n # collect extra kwargs\n title = kwargs['title'] if kwargs and 'title' in kwargs else None \n column_indices = kwargs['column_indices'] if kwargs and 'column_indices' in kwargs else None \n colorbar = kwargs['colorbar'] if kwargs and 'colorbar' in kwargs else None\n axvline_width = kwargs['axvline_width'] if kwargs and 'axvline_width' in kwargs else 1.0\n axvline_color = kwargs['axvline_color'] if kwargs and 'axvline_color' in kwargs else 'black'\n \n # remove once they are read\n kwargs = pop(kwargs, 'title')\n kwargs = pop(kwargs, 'column_indices')\n kwargs = pop(kwargs, 'colorbar')\n kwargs = pop(kwargs, 'axvline_width')\n kwargs = pop(kwargs, 'axvline_color')\n \n if not ax:\n ax = plt.figure().gca() \n\n lbs, ubs = get_yaxis_bounds(A)\n F = tr.normalize(A, lb=np.zeros(A.shape[1]), ub=np.ones(A.shape[1]))\n\n # build color list for each data point\n if (not isinstance(c, list)) and (not isinstance(c, np.ndarray)):\n c_ = c\n c = np.array([c_ for _ in range(F.shape[0])])\n elif (isinstance(c, list) and len(c) != F.shape[0]) \\\n or (isinstance(c, np.ndarray) and c.shape[0] != F.shape[0]):\n raise ValueError(\"The length of c needs to be same as F.shape[0].\")\n \n # build linewidth list for each data point\n if (not isinstance(lw, list)) and (not isinstance(lw, np.ndarray)):\n lw_ = lw\n lw = np.array([lw_ for _ in range(F.shape[0])])\n elif (isinstance(lw, list) and len(lw) != F.shape[0]) \\\n or (isinstance(lw, np.ndarray) and lw.shape[0] != F.shape[0]):\n raise ValueError(\"The length of lw needs to be same as F.shape[0].\")\n\n # get a list of column indices\n if column_indices:\n x = np.array(column_indices)\n else:\n x = np.arange(0,F.shape[1],1).astype(int)\n if len(x) < 2:\n raise ValueError(\"column_indices must be of length > 1.\")\n \n # get a list of xtick_labels\n if xtick_labels is None:\n xtick_labels = 
[\"$f_{:d}$\".format(i) for i in range(F.shape[1])]\n \n # get a list of line labels, i.e. class labels\n if labels is not None and isinstance(labels, str):\n label = labels\n labels = np.array([label for _ in range(F.shape[0])])\n \n # draw the actual plot\n used_legends = set()\n for i in range(F.shape[0]):\n y = F[i,x]\n if labels is not None:\n label = labels[i]\n if label not in used_legends:\n used_legends.add(label)\n ax.plot(x, y, color=c[i], label=label, linewidth=lw[i], **kwargs)\n else:\n ax.plot(x, y, color=c[i], linewidth=lw[i], **kwargs)\n else:\n ax.plot(x, y, color=c[i], linewidth=lw[i], **kwargs)\n \n # decide on vertical axes\n if draw_vertical_lines:\n for i in x:\n ax.axvline(i, linewidth=axvline_width, color=axvline_color)\n\n # draw grid?\n if draw_grid:\n ax.grid()\n else:\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_visible(False)\n \n # decide on xtick_labels\n if xtick_labels is not None:\n if is_xticklabels_off(ax):\n ax.set_xticks(x)\n ax.set_xticklabels(xtick_labels)\n # Now completely change the axis ticks and labels\n # if there are bounds to be shown\n if show_bounds:\n ax.set_yticks([])\n ax.set_yticklabels([])\n plt.setp(ax.get_xticklabels(), fontsize=11, \n rotation=-45, ha=\"left\", rotation_mode=\"anchor\")\n ax.set_ylim([-0.1, 1.1])\n bottom, top = -0.1 + ymargins[F.shape[1]][0], 1.1 + ymargins[F.shape[1]][1]\n for i in range(A.shape[1]):\n ax.text(i + ((0.68/10) * A.shape[1]), bottom, lbs[i], fontsize=11, \\\n ha='center', va='center', rotation=-45)\n ax.text(i + ((0.3/10) * A.shape[1]), top, ubs[i], fontsize=11, \\\n ha='center', va='center', rotation=45)\n else:\n ax.set_xlim(x[0], x[-1])\n else:\n if len(ax.get_xticklabels()) < len(x):\n ax.set_xticks(x)\n ax.set_xticklabels(xtick_labels)\n if not show_bounds:\n xl, xr = ax.get_xlim()\n xl = x[0] if x[0] <= xl else xl\n xr = x[-1] if x[-1] >= xr else xr\n ax.set_xlim(xl, xr)\n\n if not show_bounds or is_xticklabels_off(ax):\n ax.tick_params(axis='x', labelsize=12)\n ax.tick_params(axis='y', labelsize=12) \n \n # where to put the legend\n if labels is not None:\n ax.legend(loc=\"upper right\") \n \n # colorbar?\n if colorbar and isinstance(colorbar, tuple) and len(colorbar) >= 2 \\\n and isinstance(colorbar[0], np.ndarray) and isinstance(colorbar[1], np.ndarray):\n vmin,vmax = 0.0, 1.0\n cbc, cbg = colorbar[0], colorbar[1]\n cbl = colorbar[2] if len(colorbar) > 2 and colorbar[2] else None\n Id = np.column_stack((cbg,cbc)).astype(object)\n Id = Id[np.argsort(Id[:, 0])] \n c, g = Id[:,1:].astype(float), Id[:,0].astype(float)\n vmin, vmax = np.min(g), np.max(g)\n norm = mc.Normalize(vmin=vmin, vmax=vmax)\n cmap = ListedColormap(c)\n if cbl:\n ax.figure.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), \\\n orientation='vertical', label=cbl, pad=0.01, shrink=0.99)\n else:\n ax.figure.colorbar(cm.ScalarMappable(norm=norm, cmap=cmap), \\\n orientation='vertical', pad=0.01, shrink=0.99)\n\n # title?\n ax.set_title(title)\n\n return ax\n" ]
[ [ "numpy.max", "matplotlib.cm.ScalarMappable", "numpy.array", "numpy.column_stack", "numpy.zeros", "numpy.ones", "numpy.min", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.colors.Normalize", "numpy.argsort", "matplotlib.colors.ListedColormap" ] ]
MatthieuDartiailh/oculy
[ "0cdae49aa1b546f26b31d14e944c7d481d368361" ]
[ "oculy/plotting/backends/matplotlib/axes.py" ]
[ "# --------------------------------------------------------------------------------------\n# Copyright 2021 by Oculy Authors, see git history for more details.\n#\n# Distributed under the terms of the BSD license.\n#\n# The full license is in the file LICENCE, distributed with this software.\n# --------------------------------------------------------------------------------------\n\"\"\"Matplotlib proxy for axis, axes, colorbar and cursor.\n\n\"\"\"\nfrom atom.api import Dict, Typed\nfrom matplotlib.axes import Axes\nfrom matplotlib.axis import Axis\nfrom matplotlib.colorbar import make_axes\n\nfrom oculy.plotting.plots import AxesProxy, AxisProxy, ColorbarProxy, CursorProxy\n\n\nclass MatplotlibAxisProxy(AxisProxy):\n \"\"\"Matplotlib proxy for a single axis.\"\"\"\n\n def activate(self):\n \"\"\"Activate the proxy axis.\"\"\"\n el = self.element\n axes = self.element.axes\n\n if axes is None:\n raise RuntimeError(\"Cannot activate the proxy for an Axis with no axes\")\n\n # Identify direction\n ax_dir = \"\"\n for direction in (\"left\", \"bottom\", \"right\", \"top\"):\n if getattr(axes, f\"{direction}_axis\") is el:\n ax_dir = direction\n break\n\n if not ax_dir:\n raise RuntimeError(\"Axis does not exist on parent Axes object\")\n\n if ax_dir in (\"bottom\", \"top\"):\n for c in (\"left\", \"right\"):\n if (ax_dir, c) in axes.proxy._axes:\n self._axis = axes.proxy._axes[(ax_dir, c)].xaxis\n else:\n for c in (\"bottom\", \"top\"):\n if (c, ax_dir) in axes.proxy._axes:\n self._axis = axes.proxy._axes[(c, ax_dir)].yaxis\n\n if not self._axis:\n raise RuntimeError(\"Failed to find backend axis.\")\n\n def deactivate(self):\n \"\"\"Deactivate the proxy figure.\"\"\"\n del self._axis\n\n # @mark_backend_unsupported\n # def set_axis_scale(self, scale): # lin, log\n # raise NotImplementedError()\n\n # @mark_backend_unsupported\n # def set_autoscaling(self, setting: bool):\n # pass\n\n # @mark_backend_unsupported\n # def set_limits(self, limits): # Limited to axis with no breaks\n # pass\n\n # @mark_backend_unsupported\n # def set_limits_with_breaks(self, limits):\n # pass\n\n # @mark_backend_unsupported\n # def invert_axis(self, state: bool):\n # pass\n\n # @mark_backend_unsupported\n # def set_label(self, title: str, font: Mapping[str, Any]):\n # pass\n\n # @mark_backend_unsupported\n # def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]):\n # pass\n\n # --- Private API\n\n _axis = Typed(Axis)\n\n\nclass MatplotlibColorbarProxy(ColorbarProxy):\n \"\"\"Matplotlib proxy for a colorbar.\"\"\"\n\n def activate(self):\n \"\"\"Activate the proxy colorbar.\"\"\"\n # Create matplotlib axes which will hold the colorbar.\n axes = tuple(self.element.axes.proxy._axes.values())[0]\n caxes = make_axes(\n axes, location=self.element.location, aspect=self.element.aspect_ratio\n )[0]\n self._caxes = caxes\n\n def deactivate(self):\n \"\"\"Deactivate the proxy colorbar.\"\"\"\n self._caxes.clear()\n del self._caxes\n\n def connect_mappable(self, mappable):\n \"\"\"Create a new colorbar for a mappable.\"\"\"\n self._caxes.clear()\n self.element.axes.figure.proxy._figure.colorbar(mappable, self._caxes)\n\n # @mark_backend_unsupported\n # def set_axis_scale(self, scale): # lin, log\n # raise NotImplementedError()\n\n # @mark_backend_unsupported\n # def set_autoscaling(self, setting: bool):\n # pass\n\n # @mark_backend_unsupported\n # def set_limits(self, limits): # Limited to axis with no breaks\n # pass\n\n # @mark_backend_unsupported\n # def set_limits_with_breaks(self, limits):\n # 
pass\n\n # @mark_backend_unsupported\n # def set_label(self, title: str, font: Mapping[str, Any]):\n # pass\n\n # @mark_backend_unsupported\n # def set_tick_labels(self, labels: Sequence[str], font: Mapping[str, Any]):\n # pass\n\n # --- Private API\n\n _caxes = Typed(Axes)\n\n\n# FIXME implement later\nclass MatplotlibCursorProxy(CursorProxy):\n \"\"\"\"\"\"\n\n pass\n\n\nclass MatplotlibAxesProxy(AxesProxy):\n \"\"\"Matplotlib proxy for axes.\"\"\"\n\n def activate(self):\n \"\"\"Activate the proxy axes.\"\"\"\n super().activate()\n el = self.element\n fig = el.figure\n if len(fig.axes_set) > 1:\n raise RuntimeError() # Add support for more than one axis.\n else:\n first_axes = fig.proxy._figure.add_subplot(\n projection=el.projection if el.projection != \"cartesian\" else None,\n )\n\n active_axes = {\n direction: getattr(el, f\"{direction}_axis\")\n for direction in (\"left\", \"bottom\", \"right\", \"top\")\n if getattr(el, f\"{direction}_axis\")\n }\n\n if len(active_axes) == 2:\n if \"right\" in active_axes:\n first_axes.yaxis.set_tick_position(\"right\")\n if \"top\" in active_axes:\n first_axes.xaxis.set_tick_position(\"top\")\n self._axes = {\n (\n \"bottom\" if \"bottom\" in active_axes else \"top\",\n \"left\" if \"left\" in active_axes else \"right\",\n ): first_axes\n }\n else:\n raise RuntimeError(\"Support is currently limited to 2 axes\")\n self.element.figure.proxy.request_redraw()\n\n def deactivate(self):\n \"\"\"Deactivate the proxy axes.\"\"\"\n self._axes.clear()\n del self._axes\n super().deactivate()\n\n def get_default_axes_mapping(self):\n \"\"\"Get teh default axes mapping for plots.\"\"\"\n\n # @mark_backend_unsupported\n # def enable_zooming(self, bound: str, button: str):\n # pass\n\n # @mark_backend_unsupported\n # def disable_zooming(self):\n # pass\n\n # @mark_backend_unsupported\n # def enable_panning(self, button: str):\n # pass\n\n # @mark_backend_unsupported\n # def disable_panning(self):\n # pass\n\n # @mark_backend_unsupported\n # def add_axis(self, axes=None):\n # pass\n\n # @mark_backend_unsupported\n # def remove_axis(self):\n # pass\n\n # @mark_backend_unsupported\n # def set_projections(self):\n # pass\n\n # @mark_backend_unsupported\n # def add_cursor(\n # self, axes=None\n # ): # Need to specify to which axes the cursor is bound\n # pass\n\n # @mark_backend_unsupported\n # def remove_cursor(self):\n # pass\n\n # @mark_backend_unsupported\n # def enable_major_grid(self):\n # pass\n\n # @mark_backend_unsupported\n # def disable_major_grid(self):\n # pass\n\n # @mark_backend_unsupported\n # def enable_minor_grid(self):\n # pass\n\n # @mark_backend_unsupported\n # def disable_minor_grid(self):\n # pass\n\n # @mark_backend_unsupported\n # def set_legend(self, legend: Mapping[str, str]):\n # pass\n\n # @mark_backend_unsupported\n # def remove_plot(self, id):\n # pass\n\n # @mark_backend_unsupported\n # def add_line(\n # self,\n # id: str,\n # orientation: str,\n # position: float,\n # bounds: Optional[Tuple[float, float]] = None,\n # ):\n # pass\n\n # @mark_backend_unsupported\n # def remove_line(self, id: str) -> None:\n # pass\n\n #: --- Private API\n\n _axes = Dict(tuple, Axes)\n" ]
[ [ "matplotlib.colorbar.make_axes" ] ]
greenstar1151/pytorch-benchmark
[ "8b7808d3be6b7ca1d57f1812e35fd2df5e470f8b" ]
[ "torchbenchmark/models/fastNLP_Bert/__init__.py" ]
[ "\"\"\"\nfastNLP model (TorchBenchmark Version)\nThis model resembles the \"BertEmedding Q&A\" task in [fastNLP Tutorial](https://fastnlp.readthedocs.io/zh/latest/tutorials/extend_1_bert_embedding.html).\n\nInput data simulates [CMRC2018 dataset](https://ymcui.com/cmrc2018/).\nThe program runs only for benchmark purposes and doesn't provide correctness results.\n\"\"\"\nimport os\nimport torch\nimport random\nimport inspect\nimport numpy as np\nfrom fastNLP.embeddings import BertEmbedding\nfrom fastNLP.models import BertForQuestionAnswering\nfrom fastNLP.core.callback import CallbackManager\nfrom fastNLP.core.batch import DataSetIter\nfrom fastNLP.core.losses import CMRC2018Loss\nfrom fastNLP.core.metrics import CMRC2018Metric\nfrom fastNLP.io.pipe.qa import CMRC2018BertPipe\nfrom fastNLP import WarmupCallback, GradientClipCallback\nfrom fastNLP.core.optimizer import AdamW\nfrom fastNLP import BucketSampler\n\n# Import CMRC2018 data generator\nfrom .cmrc2018_simulator import generate_inputs\nfrom .cmrc2018_simulator import CMRC2018_DIR, CMRC2018_CONFIG_DIR\nfrom .cmrc2018_simulator import CMRC2018_TRAIN_SPEC, CMRC2018_DEV_SPEC\n\n# TorchBench imports\nfrom torchbenchmark.util.model import BenchmarkModel\nfrom torchbenchmark.tasks import NLP\n\ntorch.manual_seed(1337)\nrandom.seed(1337)\nnp.random.seed(1337)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nclass Model(BenchmarkModel):\n task = NLP.OTHER_NLP\n def __init__(self, device=None, jit=False):\n super().__init__()\n self.device = device\n self.jit = jit\n self.input_dir = CMRC2018_DIR\n # Generate input data files\n generate_inputs()\n data_bundle = CMRC2018BertPipe().process_from_file(paths=self.input_dir)\n data_bundle.rename_field('chars', 'words')\n self.embed = BertEmbedding(data_bundle.get_vocab('words'),\n model_dir_or_name=CMRC2018_CONFIG_DIR,\n requires_grad=True,\n include_cls_sep=False, auto_truncate=True,\n dropout=0.5, word_dropout=0.01)\n self.model = self._move_model_to_device(BertForQuestionAnswering(self.embed), device=device)\n if self._model_contains_inner_module(self.model):\n self._forward_func = self.model.module.forward\n else:\n self._forward_func = self.model.forward\n self.losser = CMRC2018Loss()\n self.metrics = CMRC2018Metric()\n # Use Train batch for batch size\n self.batch_size = CMRC2018_TRAIN_SPEC[\"data_size\"]\n self.update_every = 10\n # Do not spawn new processes on small scale of data\n self.num_workers = 0\n wm_callback = WarmupCallback(schedule='linear')\n gc_callback = GradientClipCallback(clip_value=1, clip_type='norm')\n callbacks = [wm_callback, gc_callback]\n self.optimizer = AdamW(self.model.parameters(), lr=5e-5)\n self.callback_manager = CallbackManager(env={\"trainer\":self}, callbacks=callbacks)\n self.train_data = data_bundle.get_dataset('train')\n self.eval_data = data_bundle.get_dataset('dev')\n self.train_data_iterator = DataSetIter(dataset=self.train_data,\n batch_size=CMRC2018_TRAIN_SPEC[\"data_size\"],\n sampler=None,\n num_workers=self.num_workers, drop_last=False)\n self.eval_data_iterator = DataSetIter(dataset=self.eval_data,\n batch_size=CMRC2018_DEV_SPEC[\"data_size\"],\n sampler=None,\n num_workers=self.num_workers, drop_last=False)\n\n def get_module(self):\n batch_x, batch_y = list(self.train_data_iterator)[0]\n self._move_dict_value_to_device(batch_x, batch_y, device=self.device)\n return self.model, (batch_x[\"words\"], )\n\n # Sliced version of fastNLP.Tester._test()\n def eval(self, niter=1):\n if self.jit:\n raise 
NotImplementedError(\"PyTorch JIT compiler is not able to compile this model.\")\n self._mode(self.model, is_test=True)\n self._predict_func = self.model.forward\n with torch.no_grad():\n for epoch in range(niter):\n for batch_x, batch_y in self.eval_data_iterator:\n self._move_dict_value_to_device(batch_x, batch_y, device=self.device)\n pred_dict = self._data_forward(self._predict_func, batch_x)\n\n # Sliced version of fastNLP.Trainer._train()\n def train(self, niter=1):\n if self.jit:\n raise NotImplementedError(\"PyTorch JIT compiler is not able to compile this model.\")\n self.step = 0\n self.n_epochs = niter\n self._mode(self.model, is_test=False)\n self.callback_manager.on_train_begin()\n # Move the data to GPU before the train loop\n for batch_x, batch_y in self.train_data_iterator:\n self._move_dict_value_to_device(batch_x, batch_y, device=self.device)\n for epoch in range(niter):\n self.callback_manager.on_epoch_begin()\n for batch_x, batch_y in self.train_data_iterator:\n self._move_dict_value_to_device(batch_x, batch_y, device=self.device)\n self.step += 1\n prediction = self._data_forward(self.model, batch_x)\n self.callback_manager.on_loss_begin(batch_y, prediction)\n loss = self._compute_loss(prediction, batch_y).mean()\n self.callback_manager.on_backward_begin(loss)\n self._grad_backward(loss)\n self.callback_manager.on_backward_end()\n self._update()\n self.callback_manager.on_step_end()\n self.callback_manager.on_batch_end()\n self.callback_manager.on_epoch_end()\n self.callback_manager.on_train_end()\n\n # Helper functions\n def _build_args(self, func, **kwargs):\n spect = inspect.getfullargspec(func)\n if spect.varkw is not None:\n return kwargs\n needed_args = set(spect.args)\n defaults = []\n if spect.defaults is not None:\n defaults = [arg for arg in spect.defaults]\n start_idx = len(spect.args) - len(defaults)\n output = {name: default for name, default in zip(spect.args[start_idx:], defaults)}\n output.update({name: val for name, val in kwargs.items() if name in needed_args})\n return output\n\n def _move_dict_value_to_device(self, *args, device, non_blocking=False):\n if not torch.cuda.is_available() or device is None:\n return\n for arg in args:\n if isinstance(arg, dict):\n for key, value in arg.items():\n if isinstance(value, torch.Tensor):\n arg[key] = value.to(device, non_blocking=non_blocking)\n else:\n raise TypeError(\"Only support `dict` type right now.\")\n\n def _model_contains_inner_module(self, model):\n if isinstance(model, torch.nn.Module):\n if isinstance(model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)):\n return True\n return False\n\n def _move_model_to_device(self, model, device):\n model = model.to(device)\n return model\n\n def _mode(self, model, is_test=False):\n r\"\"\"Train mode or Test mode. 
This is for PyTorch currently.\n\n :param model: a PyTorch model\n :param bool is_test: whether in test mode or not.\n\n \"\"\"\n if is_test:\n model.eval()\n else:\n model.train()\n\n def _update(self):\n r\"\"\"Perform weight update on a model.\n \"\"\"\n if self.step % self.update_every == 0:\n self.optimizer.step()\n\n def _data_forward(self, network, x):\n x = self._build_args(self._forward_func, **x)\n y = network(**x)\n if not isinstance(y, dict):\n raise TypeError(\n f\"The return value of {_get_func_signature(self._forward_func)} should be dict, got {type(y)}.\")\n return y\n\n def _grad_backward(self, loss):\n r\"\"\"Compute gradient with link rules.\n\n :param loss: a scalar where back-prop starts\n\n For PyTorch, just do \"loss.backward()\"\n \"\"\"\n if (self.step-1) % self.update_every == 0:\n self.model.zero_grad()\n loss.backward()\n\n def _compute_loss(self, predict, truth):\n r\"\"\"Compute loss given prediction and ground truth.\n\n :param predict: prediction dict, produced by model.forward\n :param truth: ground truth dict, produced by batch_y\n :return: a scalar\n \"\"\"\n return self.losser(predict, truth)\n\nif __name__ == \"__main__\":\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n m = Model(device=device, jit=False)\n model, example_inputs = m.get_module()\n model(*example_inputs)\n m.train()\n m.eval()\n" ]
[ [ "torch.manual_seed", "torch.no_grad", "torch.cuda.is_available", "numpy.random.seed" ] ]
peterukk/ecrad
[ "d2cd86b63b60cd2bc1e8d8e189a098e1ec287e07" ]
[ "practical/ecradplot/general.py" ]
[ "\"\"\"\nFilename: general.py\nAuthor: Shannon Mason, [email protected]\nDescription: Common plotting functions.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\n#For loading and handling netCDF data\nimport xarray as xr\n\ndef format_time(ax, format_string=\"%H:%M\", label='Time (UTC)'):\n \"\"\"\n Format axes for time coordinates.\n \"\"\"\n import matplotlib.dates as mdates\n ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=0, ha='center')\n ax.xaxis.set_major_formatter(mdates.DateFormatter(format_string))\n ax.set_xlabel(label)\n \n \ndef format_height(ax, scale=1.0e3, label='Height [km]'):\n \"\"\"\n Format axes for height coordinates.\n \"\"\"\n import matplotlib.ticker as ticker\n ticks_y = ticker.FuncFormatter(lambda x, pos: '${0:g}$'.format(x/scale))\n ax.yaxis.set_major_formatter(ticks_y)\n ax.set_ylabel(label)\n \n \ndef format_temperature(ax, scale='K'):\n \"\"\"\n Format axes for temperature coordinates.\n \"\"\"\n import matplotlib.ticker as ticker\n ticks_y = ticker.FuncFormatter(lambda x, pos: '${0:g}$'.format(x))\n ax.yaxis.set_major_formatter(ticks_y)\n ymin, ymax = ax.get_ylim()\n ax.set_ylim(ymax, ymin)\n if scale == 'K':\n ax.set_ylabel('Temperature [K]')\n elif scale == 'C':\n ax.set_ylabel('Temperature [K]')\n else:\n Error(\"Scale must be either K or C\")\n \n \ndef format_pressure(ax, scale=100, label='Pressure [hPa]'):\n \"\"\"\n Format axes for pressure coordinates.\n \"\"\"\n import matplotlib.ticker as ticker\n ticks_p = ticker.FuncFormatter(lambda x, pos: '${0:g}$'.format(x/scale))\n ax.yaxis.set_major_formatter(ticks_p)\n ax.set_ylabel(label)\n \n \ndef format_latitude(ax): \n \"\"\"\n Format axes for latitude coordinates.\n \"\"\"\n import matplotlib.ticker as ticker\n latFormatter = ticker.FuncFormatter(lambda x, pos: \"${:g}^\\circ$S\".format(-1*x) if x < 0 else \"${:g}^\\circ$N\".format(x))\n ax.xaxis.set_major_formatter(latFormatter)\n \nfancy_format_latitude = lambda x: r\"${:.0f}^{{\\circ}}$S\".format(-1*x) if x < 0 else \"${:.0f}^{{\\circ}}$N\".format(x)\nunfancy_format_latitude = lambda x: r\"{:.0f}S\".format(-1*x) if x < 0 else \"{:.0f}N\".format(x)\n\ndef snap_to_axis(ax, ax_ref):\n \"\"\"\n Align subplot ax with the bounds of subplot ax_ref\n \"\"\"\n pos_ref = ax_ref.get_position()\n pos = ax.get_position()\n ax.set_position([pos_ref.x0, pos.y0, pos_ref.width, pos.height])\n \ndef get_figure_center(ax):\n bbox = ax.figbox\n return (bbox.get_points()[0][0] + bbox.get_points()[1][0])/2\n\ndef get_figure_top(fig, ax, include_hspace=True):\n bbox = ax.figbox\n if include_hspace:\n return bbox.get_points()[0][1] + fig.subplotpars.hspace\n else:\n return bbox.get_points()[0][1]\n \ndef place_suptitle(fig, axes, suptitle, y=0.95, va='top'):\n center = get_figure_center(axes[0])\n fig.suptitle(suptitle, ha='center', x=center, va=va, y=y) \n \ndef add_subfigure_labels(axes, xloc=0.0, yloc=1.05, zorder=0, label_list=[], flatten_order='F'):\n if label_list == []:\n import string\n labels = string.ascii_lowercase\n else:\n labels = label_list\n \n for i, ax in enumerate(axes.flatten(order=flatten_order)):\n ax.text(xloc, yloc, \"%s)\" %(labels[i]), va='baseline', transform=ax.transAxes, fontweight='bold', zorder=zorder)\n \n" ]
[ [ "matplotlib.dates.DateFormatter" ] ]
bosques-urbanos/open_model_zoo
[ "c1deee7b0707621c248e6d74b43b1564e94cf001" ]
[ "demos/python_demos/speech_recognition_demo/ctcdecode-numpy/setup.py" ]
[ "#!/usr/bin/env python3\n#\n# Copyright (C) 2020 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n#\n# This file is based in part on setup.py and build.py from https://github.com/parlance/ctcdecode,\n# commit 431408f22d93ef5ebc4422995111bbb081b971a9 on Apr 4, 2020, 20:54:49 UTC+1.\n#\nimport glob\nimport numpy\nimport os.path\nimport setuptools\n\n\ncompile_args = ['-O3', '-std=c++11', '-fPIC']\nyoklm_includes = ['ctcdecode_numpy/yoklm']\nyoklm_sources = glob.glob('ctcdecode_numpy/yoklm/*.cpp')\nthird_party_libs = ['ThreadPool']\nthird_party_includes = [os.path.realpath(os.path.join(\"third_party\", lib)) for lib in third_party_libs]\nctc_sources = glob.glob('ctcdecode_numpy/*.cpp')\n\nextension = setuptools.Extension(\n name='ctcdecode_numpy._impl',\n sources=ctc_sources + yoklm_sources,\n include_dirs=third_party_includes + yoklm_includes + [numpy.get_include()],\n extra_compile_args=compile_args,\n language='c++',\n swig_opts=['-c++'],\n)\n\nsetuptools.setup(\n name='ctcdecode-numpy',\n version='0.1',\n description=\"CTC Decoder for NumPy based on implementation from PaddlePaddle-Deepspeech and Parlance ctcdecode\",\n packages=['ctcdecode_numpy'],\n ext_modules=[extension],\n py_modules=['ctcdecode_numpy.impl'],\n install_requires=['numpy'],\n)\n" ]
[ [ "numpy.get_include" ] ]
seanmchu/algo-research
[ "199964b7ce376a88e248349946538cb2159c4797", "199964b7ce376a88e248349946538cb2159c4797" ]
[ "matching/plot15.py", "matching/plot20.py" ]
[ "import matplotlib.pyplot as plt\nimport matplotlib.markers\nhfont = {'fontname':'serif'}\n\ndef fix(a):\n print(a)\n a[0] = a[0]/89.5\n a[1] = a[1]/79.5\n a[2] = a[2]/69.5\n a[3] = a[3]/59.5\n print(a)\n\nx = [20,40,60,80]\naz = [72.93449999999999, 55.38224999999999, 39.46649999999999, 50.304750000000006]\nehyy = [72.8825, 55.366749999999996, 39.46616666666665, 50.304750000000006]\nsy1 = [85.312, 74.39575, 63.65166666666667, 56.35325000000002]\nsy2 = [73.9365, 55.38224999999999, 40.26649999999999, 50.904750000000006]\npog =[89.5, 79.5, 69.5, 59.5]\npos = [89.5, 79.5, 69.5, 59.5]\nfix(az)\nfix(ehyy)\nfix(sy1)\nfix(sy2)\nfix(pog)\nfix(pos)\nplt.plot(x,az, label = \"A-S\", linestyle = '--', marker = '^')\nplt.plot(x,ehyy, label = \"EHYY\",linestyle = '-.', marker = 'o')\nplt.plot(x,sy1, label = \"SY1\",alpha = 1,lw = 1,linestyle = ':', marker = 's')\nplt.plot(x,sy2, label = \"SY2\",linestyle = 'dotted', marker = 'p')\nplt.plot(x,pog, label = \"POG\",linestyle = '--', marker = 'D')\nplt.plot(x,pos, label = \"POS\",linestyle = '--', marker = 'P')\nplt.title(\"\")\nplt.xlabel(\"Capacity (q_c)\",**hfont)\nplt.legend()\nplt.show()\n\n", "import matplotlib.pyplot as plt\nimport matplotlib.markers\nhfont = {'fontname':'serif'}\nx = [20,40,60,80]\naz = [1, 1, 1, 1]\nehyy = [1.0, 1.0, 1.0, 1.000000000000000]\nsy1 = [0.7, 0.75, 0.7643961270596229, 0.9595291841341503]\nsy2 = [1, 1, 1, 1]\npog = [0.3045, 0.38775, 0.4897231187361984, 0.7175104804901645]\npos = [0.3045, 0.38775, 0.4897231187361984, 0.7175104804901645]\nplt.plot(x,az, label = \"A-S\", linestyle = '--', marker = '^')\nplt.plot(x,ehyy, label = \"EHYY\",linestyle = '-.', marker = 'o')\nplt.plot(x,sy1, label = \"SY1\",alpha = 1,lw = 1,linestyle = ':', marker = 's')\nplt.plot(x,sy2, label = \"SY2\",linestyle = 'dotted', marker = 'p')\nplt.plot(x,pog, label = \"POG\",linestyle = '--', marker = 'D')\nplt.plot(x,pos, label = \"POS\",linestyle = '--', marker = 'P')\nplt.title(\"\")\nplt.xlabel(\"Capacity (q_c)\",**hfont)\nplt.legend()\nplt.show()\n\n\n " ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.show" ], [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.show" ] ]
denecity/n-Body
[ "9b58e4ab9bba0684d8457942dc68a68cc1598e8a" ]
[ "n-body problen.py" ]
[ "# N-Body Problem.py\r\n# Denis Titov 29.11.2018\r\n\r\nimport pygame\r\nfrom pygame import gfxdraw\r\nimport random\r\nfrom numba import jit\r\nimport numpy as np\r\n\r\n\r\nnumParticles = 2000\r\n\r\ndisplaySize = (1000, 1000)\r\n\r\nmassSpread = (2, 20)\r\n\r\nblack = (255, 255, 255)\r\nwhite = (0, 0, 0)\r\n\r\n\r\ndef generate(numParticles, velSpread=100, posSpread=100, massSpread=(2, 8)):\r\n particleList = []\r\n for i in range(numParticles):\r\n # empty list. will be: [yPos, yPos, xVel, yVel, xAcc, yAcc mass]\r\n particleInfo = []\r\n # pos according to display size\r\n xPos = (random.randrange(posSpread)) + 500 - (posSpread / 2)\r\n yPos = (random.randrange(posSpread)) + 500 - (posSpread / 2)\r\n # 1 velspread is 1/100 pixel\r\n xVel = (random.randrange(velSpread) / 100)\r\n yVel = (random.randrange(velSpread) / 100)\r\n # mass is random int\r\n mass = random.randrange(massSpread[0], (massSpread[1] + 1))\r\n # append to info\r\n particleInfo.append(xPos)\r\n particleInfo.append(yPos)\r\n particleInfo.append(xVel)\r\n particleInfo.append(yVel)\r\n particleInfo.append(mass)\r\n # put info into main list\r\n particleList.append(particleInfo)\r\n\r\n return particleList\r\n\r\n#numpy + numba approach [xPos, yPos, xVel, yVel, mass]\r\n@jit()\r\ndef nBodyNumpyNumba(particleList):\r\n for i in range(len(particleList)):\r\n xAccList = np.zeros(particleList.shape[0])\r\n yAccList = np.zeros(particleList.shape[0])\r\n # for every other particle calculations to get current acc\r\n for j in range(len(particleList)):\r\n if not i == j:\r\n # distance in respective dimension\r\n xDist = particleList[j, 0] - particleList[i, 0] # otherXPos - thisXPos\r\n yDist = particleList[j, 1] - particleList[i, 1] # otherYPos - thisYPos\r\n # pythagorean theorem to get real distance\r\n dist = ((xDist**2 + yDist**2)**0.5) + 20\r\n # calc acceleration\r\n acc = (particleList[i, 4] * particleList[j, 4]) / (dist**2 * particleList[i, 4]) * 0.05\r\n xAcc = (xDist / dist) * acc\r\n yAcc = (yDist / dist) * acc\r\n\r\n xAccList[j] = xAcc\r\n yAccList[j] = yAcc\r\n \r\n # sums all elements in AccLists to total acc\r\n xAccCurrent = np.sum(xAccList)\r\n yAccCurrent = np.sum(yAccList)\r\n\r\n # adds accs to vels\r\n particleList[i, 2] += xAccCurrent\r\n particleList[i, 3] += yAccCurrent\r\n\r\n # adds vels to poss\r\n particleList[i, 0] += particleList[i, 2]\r\n particleList[i, 1] += particleList[i, 3]\r\n\r\n # calculate center of mass (mass ignored)\r\n xMove = 500 - np.sum(particleList[:, 0]) / len(particleList)\r\n yMove = 500 - np.sum(particleList[:, 1]) / len(particleList)\r\n\r\n particleList[:, 0] += xMove\r\n particleList[:, 1] += yMove\r\n\r\n return particleList\r\n\r\ndef draw(XPos, YPos, mass):\r\n #pygame.gfxdraw.aacircle(gameDisplay, int(XPos), int(YPos), int(mass / 2), black)\r\n pygame.gfxdraw.filled_circle(gameDisplay, int(XPos), int(YPos), int(mass / 5), black)\r\n\r\n\r\ngameDisplay = pygame.display.set_mode((displaySize))\r\npygame.display.set_caption(\"N-Body Problem\")\r\nclock = pygame.time.Clock()\r\n\r\n# Particle Init\r\ngameDisplay.fill(white)\r\nparticleList = []\r\n# set properties to particle list (xpos, ypos, xvel, yvel, mass)\r\nparticleList = np.array(generate(numParticles))\r\n\r\n\r\n# main loop\r\nclosed = False\r\nwhile not closed:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n closed = True\r\n\r\n # remove old particles\r\n gameDisplay.fill(white)\r\n\r\n # Loop for particle handling\r\n nBodyNumpyNumba(particleList)\r\n\r\n\r\n for i in 
range(len(particleList)):\r\n draw(particleList[i][0], particleList[i][1], particleList[i][4])\r\n\r\n pygame.display.update()\r\n clock.tick(60)\r\n\r\npygame.quit()\r\nquit()\r\n" ]
[ [ "numpy.sum", "numpy.zeros" ] ]
jwcalder/MSTAR-Active-Learning
[ "6918543be495955cdcd6005dce645f029b964e51" ]
[ "Python/mstar_run_al_nga.py" ]
[ "'''\nPython script to run active learning test on MSTAR data, starting with 100 initially labeled points per class and label 100 more per class.\n * Run this script through command line (terminal).\n * View parameter descriptions with \"python mstar_run_al.py --help\"\n'''\n\nimport numpy as np\nimport graphlearning as gl\nfrom scipy import sparse\nimport scipy.sparse as sps\nfrom scipy.special import softmax\nimport os\nimport sys\nimport pandas as pd\nfrom argparse import ArgumentParser\nfrom tqdm import tqdm\n\nimport torch\nimport utils\nimport models\nimport matplotlib.pyplot as plt\nfrom active_learning import *\n\n\nMETHODS = ['random', 'uncertainty', 'mcvopt', 'mc']\n\n# Make sure results directory exists to put the active learning results\nRESULTSDIR = os.path.join(\"..\", \"results\", \"nga_results\")\nif not os.path.exists(RESULTSDIR):\n os.makedirs(RESULTSDIR)\n\n# Make sure a directory to store eigenvalue and eigenvector data so don't have to recompute with every test\nEIGDIR = os.path.join(\"..\", \"eigData\")\nif not os.path.exists(EIGDIR):\n os.makedirs(EIGDIR)\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(description='Run active learning test requested by NGA on MSTAR dataset.')\n parser.add_argument(\"--vae_fname\", type=str, default=\"SAR10_CNNVAE\", help=\"string of CNNVAE model name to use for representations, located in ./models directory. Ensure this is a VAE model by having string 'VAE' in the model name.\")\n parser.add_argument(\"--iters\", type=int, default=100, help=\"number of active learning iterations\")\n parser.add_argument(\"--by_class\", type=bool, default=True, help=\"flag to indicate if select 1 point per (pseudo)class labeling at each active learning iteration\")\n parser.add_argument(\"--M\", type=int, default=200, help=\"number of eigenvalues to use in truncation\")\n parser.add_argument(\"--knn\", type=int, default=20, help=\"number of knn to use in graph construction\")\n parser.add_argument(\"--gamma\", type=float, default=0.5, help=\"gamma constant for Gaussian Regression covariance calculations\")\n parser.add_argument(\"--uncertainty_method\", type=str, default=\"smallest_margin\", help=\"string of uncertainty criterion to use ['smallest_margin', 'largest_margin', 'entropy', 'norm', 'least_confidence']\")\n parser.add_argument(\"--seed\", type=int, default=2, help=\"random number generator seed for train_ind choices\")\n parser.add_argument(\"--num_per_class\", type=int, default=100, help=\"number of initially labeled points per class\")\n parser.add_argument(\"--algorithm\", type=str, default=\"laplace\", help=\"Graphlearning graph-based ssl algorithm to use for accuracy calculations\")\n parser.add_argument(\"--plot\", type=bool, default=False, help=\"Set to True to save plot of accuracy results\")\n parser.add_argument(\"--tsne\", type=bool, default=False, help=\"Set to True to visualize t-sne embedding of selected points\")\n args = parser.parse_args()\n\n print(\"-\"*30)\n print(f\"MSTAR GBSSL Active Learning NGA Tests - Using {args.vae_fname} Representations\")\n print(\"-\"*30)\n print(f\"\\titers = {args.iters}, num_per_class = {args.num_per_class}, by_class = {args.by_class}\")\n print(f\"\\tknn = {args.knn}, M (num evals) = {args.M}, uncertainty_method = {args.uncertainty_method}\")\n print(f\"\\talgorithm = {args.algorithm}, seed = {args.seed}\")\n print(f\"\\tplot = {args.plot}\")\n print()\n\n assert \"vae\" in args.vae_fname.lower() # ensure that we are using VAE computed representations, NOT one of the supervised 
CNN model's representations\n\n # Load MSTAR and CNN models\n hdr, fields, mag, phase = utils.load_MSTAR()\n\n # Get labels and corresponding target names\n train_mask, test_mask, _ = utils.train_test_split(hdr,1)\n labels, target_names = utils.targets_to_labels(hdr)\n\n # Find specified CNNVAE model's filepath\n model_fpath = os.path.join(\"..\", \"models\", args.vae_fname + \".pt\")\n assert os.path.exists(model_fpath)\n\n # Define and make results filepath\n results_fpath = args.vae_fname + f\"_{args.algorithm}_{args.knn}_{args.M}_{args.gamma}_{args.seed}_{args.num_per_class}_{args.iters}_{args.by_class}\"\n if not os.path.exists(os.path.join(RESULTSDIR, results_fpath)):\n os.makedirs(os.path.join(RESULTSDIR, results_fpath))\n print(f\"Saving results to {RESULTSDIR}/{results_fpath}/...\")\n print(\"\\t filename format: {vae_fname}_{algorithm}_{knn}_{M}_{gamma}_{seed}_{num_per_class}_{iters}_{by_class}/\")\n print()\n\n\n # Define dataset name and vae \"metric\" identifier as well as training set indicies\n dataset, metric = args.vae_fname.split(\"_\")\n train_idx_all = np.where(train_mask)[0]\n\n try:\n knn_data = gl.weightmatrix.load_knn_data(dataset,metric=metric)\n except:\n X = utils.encodeMSTAR(model_fpath, use_phase=True)\n knn_data = gl.weightmatrix.knnsearch(X,50,similarity='angular',dataset=dataset,metric=metric)\n\n #Build weight matrix\n W = gl.weightmatrix.knn(None,args.knn,knn_data=knn_data)\n N = W.shape[0]\n\n # Calculate (or load in previously computed) eigenvalues and eigenvectors of\n eig_fpath = os.path.join(EIGDIR, f\"{args.vae_fname}_{args.knn}_{args.M}.npz\")\n if not os.path.exists(eig_fpath):\n # Calculate eigenvalues and eigenvectors of unnormalized graph Laplacian if not previously calculated\n print(\"Calculating Eigenvalues/Eigenvectors...\")\n L = sps.csgraph.laplacian(W, normed=False)\n evals, evecs = sparse.linalg.eigsh(L, k=args.M+1, which='SM')\n evals, evecs = evals.real, evecs.real\n evals, evecs = evals[1:], evecs[:,1:] # we will ignore the first eigenvalue/vector\n\n\n\n # Also compute normalized graph laplacian eigenvectors for use in some GraphLearning graph_ssl functions (e.g. 
\"mbo\")\n n = W.shape[0]\n G = gl.graph(W)\n deg = G.degree_vector()\n m = np.sum(deg)/2\n gamma = 0\n Lnorm = G.laplacian(normalization=\"normalized\")\n def Mnorm(v):\n v = v.flatten()\n return (Lnorm*v).flatten() + (gamma/m)*(deg.T@v)*deg\n Anorm = sparse.linalg.LinearOperator((n,n), matvec=Mnorm)\n vals_norm, vecs_norm = sparse.linalg.eigs(Anorm,k=300,which='SM')\n vals_norm = vals_norm.real; vecs_norm = vecs_norm.real\n\n print(f\"\\tSaved to {eig_fpath}\")\n np.savez(eig_fpath, evals=evals, evecs=evecs, vals_norm=vals_norm, vecs_norm=vecs_norm)\n else:\n print(f\"Found saved eigendata at {eig_fpath}\")\n eigdata = np.load(eig_fpath)\n evals, evecs, vals_norm, vecs_norm = eigdata[\"evals\"], eigdata[\"evecs\"], eigdata[\"vals_norm\"], eigdata[\"vecs_norm\"]\n\n\n print()\n print(\"-\"*30)\n print(\"\\tActive Learning Tests\")\n print(\"-\"*30)\n\n results_df = pd.DataFrame([]) # instantiate pandas dataframe for recording results\n\n for acq in METHODS:\n print(f\"Acquisition Function = {acq.upper()}\")\n\n # Select initial training set -- Should be same for each method\n train_ind = np.array([], dtype=np.int16)\n for c in np.sort(np.unique(labels)):\n c_ind = np.intersect1d(np.where(labels == c)[0], train_idx_all) # ensure the chosen points are in the correct subset of the dataset\n rng = np.random.default_rng(args.seed) # for reproducibility\n train_ind = np.append(train_ind, rng.choice(c_ind, args.num_per_class, replace=False))\n\n\n # Save initially labeled set\n if not os.path.exists(os.path.join(RESULTSDIR, results_fpath, \"init_labeled.npy\")):\n np.save(os.path.join(RESULTSDIR, results_fpath, \"init_labeled.npy\"), train_ind)\n\n\n # Run Active Learning Test for this current acqusition function\n train_ind, accuracy = active_learning_loop(W, evals, evecs, train_ind, labels, args.iters, acq, train_idx_all=train_idx_all, \\\n test_mask=test_mask, gamma=args.gamma, by_class=args.by_class, algorithm=args.algorithm, vals_norm=vals_norm, vecs_norm=vecs_norm)\n\n results_df[acq+\"_choices\"] = np.concatenate(([-1], train_ind[-args.iters:]))\n results_df[acq+\"_acc\"] = accuracy\n\n print(\"\\n\")\n\n results_df.to_csv(os.path.join(RESULTSDIR, results_fpath, \"results.csv\"))\n print(f\"Results saved in directory {os.path.join(RESULTSDIR, results_fpath)}/\")\n\n # Creates t-SNE visualizations of dataset, train/test split, and queried active learning points\n if args.tsne:\n from sklearn.manifold import TSNE\n tsne_data_path = os.path.join(\"..\", \"results\", f\"tsne_{args.vae_fname}.npy\")\n if not os.path.exists(tsne_data_path):\n X = utils.encodeMSTAR(model_fpath, use_phase=True)\n tsne_embedded_data = TSNE(n_components=2, init='pca', learning_rate='auto').fit_transform(X)\n np.save(tsne_data_path, tsne_embedded_data)\n print(f\"tSNE embedding data saved to {tsne_data_path}\")\n else:\n print(f\"Found saved t-SNE embedding at {tsne_data_path}\")\n tsne_embedded_data = np.load(tsne_data_path)\n\n # Plot the t-SNE embedding of MSTAR, if not already exist\n if not os.path.exists(os.path.join(\"..\", \"results\", f\"tsne_{args.vae_fname}.png\")):\n plt.figure()\n plt.scatter(tsne_embedded_data[:,0], tsne_embedded_data[:,1], c=labels, s=.5)\n plt.title(\"t-SNE Embedding of MSTAR Data\")\n plt.savefig(os.path.join(\"..\", \"results\", f\"tsne_{args.vae_fname}.png\"))\n\n # Visualize the train/test split with the t-SNE Embedding\n plt.figure()\n plt.scatter(tsne_embedded_data[train_idx_all,0], tsne_embedded_data[train_idx_all,1], c = 'blue', label = \"Train points\", s=.5)\n 
plt.scatter(tsne_embedded_data[test_mask,0], tsne_embedded_data[test_mask,1], c = 'red', label = \"Test points\", s=.5)\n plt.title(\"t-SNE Embedding of Train Test Split\")\n plt.legend()\n plt.savefig(os.path.join(\"..\", \"results\", f\"tsne_{args.vae_fname}_train_test.png\"))\n\n\n # Visualize the points queried by each active learning method for this test\n for method in METHODS:\n plt.figure()\n indexes_queried = results_df[method + \"_choices\"]\n plt.scatter(tsne_embedded_data[:,0], tsne_embedded_data[:,1], c = labels, s=.5)\n plt.scatter(tsne_embedded_data[indexes_queried, 0], tsne_embedded_data[indexes_queried, 1], c = 'red', marker = '*', label = \"Active learning points\")\n plt.title(f\"Query Points from {method}\")\n plt.legend()\n plt.savefig(os.path.join(RESULTSDIR, results_fpath, \"tsne_\" + method + \"_query_points.png\"))\n\n\n # Plots the accuracies of the tested active learning methods and saves to results folder\n if args.plot:\n\n plt.figure()\n\n if args.by_class:\n num_classes = np.unique(labels).size\n x = np.arange(args.num_per_class, args.num_per_class + args.iters*num_classes + 1, num_classes)\n else:\n x = np.arange(args.num_per_class, args.num_per_class + args.iters + 1)\n\n # General plot settings\n legend_fontsize = 12\n label_fontsize = 16\n fontsize = 16\n # matplotlib.rcParams.update({'font.size': fontsize})\n styles = ['^b-','or-','dg-','pm-','xc-','sk-', '*y-']\n\n skip = 2\n\n for i, method in enumerate(METHODS):\n plt.plot(x[::skip], 100*results_df[method + \"_acc\"][::skip], styles[i], label = method + \" accuracy\")\n\n plt.xlabel(\"Number of Labeled Points\")\n plt.ylabel(\"Accuracy %\")\n plt.title(f\"Active Learning with {args.vae_fname} Representations\")\n plt.legend()\n plt.grid(True)\n plt.tight_layout()\n\n text = \"iters = \" + str(args.iters) + \", num_per_class = \" + str(args.num_per_class) + \", knn = \" + str(args.knn) + \", gamma = \" + str(args.gamma) + \", M (num evals) = \" + str(args.M) + \", algorithm = \" + str(args.algorithm) + \", seed = \" + str(args.seed)\n #plt.figtext(.5, .99, text, wrap= True, horizontalalignment = 'center', fontsize=6) #puts description at top of plot of which parameters were used to help with reproducibility\n\n\n plt.savefig(os.path.join(RESULTSDIR, results_fpath, \"results.png\"))\n plt.close()\n" ]
[ [ "numpy.load", "scipy.sparse.linalg.LinearOperator", "numpy.where", "numpy.unique", "numpy.concatenate", "pandas.DataFrame", "numpy.save", "numpy.arange", "matplotlib.pyplot.tight_layout", "numpy.array", "matplotlib.pyplot.title", "matplotlib.pyplot.close", "sklearn.manifold.TSNE", "matplotlib.pyplot.figure", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "numpy.sum", "matplotlib.pyplot.legend", "numpy.random.default_rng", "matplotlib.pyplot.plot", "scipy.sparse.linalg.eigs", "numpy.savez", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "scipy.sparse.linalg.eigsh", "scipy.sparse.csgraph.laplacian" ] ]
damien911224/maxl
[ "6ac4ee712f0b68bb1f8ea2e3602f75085c08fd25" ]
[ "model_vgg_maxl.py" ]
[ "from collections import OrderedDict\nfrom create_dataset import *\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.utils.data.sampler as sampler\n\nclass LabelGenerator(nn.Module):\n def __init__(self, psi):\n super(LabelGenerator, self).__init__()\n \"\"\"\n label-generation network:\n takes the input and generates auxiliary labels with masked softmax for an auxiliary task.\n \"\"\"\n filter = [64, 128, 256, 512, 512]\n self.class_nb = psi\n\n # define convolution block in VGG-16\n self.block1 = self.conv_layer(3, filter[0], 1)\n self.block2 = self.conv_layer(filter[0], filter[1], 2)\n self.block3 = self.conv_layer(filter[1], filter[2], 3)\n self.block4 = self.conv_layer(filter[2], filter[3], 4)\n self.block5 = self.conv_layer(filter[3], filter[4], 5)\n\n # define fc-layers in VGG-16 (output auxiliary classes \\sum_i\\psi[i])\n self.classifier = nn.Sequential(\n nn.Linear(filter[-1], filter[-1]),\n nn.ReLU(inplace=True),\n nn.Linear(filter[-1], int(np.sum(self.class_nb))),\n )\n\n # apply weight initialisation\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.xavier_normal_(m.weight)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n nn.init.constant_(m.bias, 0)\n\n def conv_layer(self, in_channel, out_channel, index):\n if index < 3:\n conv_block = nn.Sequential(\n nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channel),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channel),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n else:\n conv_block = nn.Sequential(\n nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channel),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channel),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channel),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n return conv_block\n\n # define masked softmax\n def mask_softmax(self, x, mask, dim=1):\n logits = torch.exp(x) * mask / torch.sum(torch.exp(x) * mask, dim=dim, keepdim=True)\n return logits\n\n def forward(self, x, y):\n g_block1 = self.block1(x)\n g_block2 = self.block2(g_block1)\n g_block3 = self.block3(g_block2)\n g_block4 = self.block4(g_block3)\n g_block5 = self.block5(g_block4)\n\n # build a binary mask by psi, we add epsilon=1e-8 to avoid nans\n index = torch.zeros([len(self.class_nb), np.sum(self.class_nb)]) + 1e-8\n for i in range(len(self.class_nb)):\n index[i, int(np.sum(self.class_nb[:i])):np.sum(self.class_nb[:i+1])] = 1\n mask = index[y].to(device)\n\n predict = self.classifier(g_block5.view(g_block5.size(0), -1))\n label_pred = self.mask_softmax(predict, mask, dim=1)\n\n return label_pred\n\n\nclass VGG16(nn.Module):\n def __init__(self, psi):\n super(VGG16, self).__init__()\n \"\"\"\n multi-task network:\n takes the input and predicts primary and auxiliary labels (same network structure as in human)\n \"\"\"\n filter = [64, 128, 256, 512, 512]\n\n # define convolution 
block in VGG-16\n self.block1 = self.conv_layer(3, filter[0], 1)\n self.block2 = self.conv_layer(filter[0], filter[1], 2)\n self.block3 = self.conv_layer(filter[1], filter[2], 3)\n self.block4 = self.conv_layer(filter[2], filter[3], 4)\n self.block5 = self.conv_layer(filter[3], filter[4], 5)\n\n # primary task prediction\n self.classifier1 = nn.Sequential(\n nn.Linear(filter[-1], filter[-1]),\n nn.ReLU(inplace=True),\n nn.Linear(filter[-1], len(psi)),\n nn.Softmax(dim=1)\n )\n\n # auxiliary task prediction\n self.classifier2 = nn.Sequential(\n nn.Linear(filter[-1], filter[-1]),\n nn.ReLU(inplace=True),\n nn.Linear(filter[-1], int(np.sum(psi))),\n nn.Softmax(dim=1)\n )\n\n # apply weight initialisation\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.xavier_uniform_(m.weight)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight)\n nn.init.constant_(m.bias, 0)\n\n def conv_layer(self, in_channel, out_channel, index):\n if index < 3:\n conv_block = nn.Sequential(\n nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channel),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channel),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n else:\n conv_block = nn.Sequential(\n nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channel),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channel),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channel),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n return conv_block\n\n # define forward conv-layer (will be used in second-derivative step)\n def conv_layer_ff(self, input, weights, index):\n if index < 3:\n net = F.conv2d(input, weights['block{:d}.0.weight'.format(index)], weights['block{:d}.0.bias'.format(index)], padding=1)\n net = F.batch_norm(net, torch.zeros(net.data.size()[1]).to(device), torch.ones(net.data.size()[1]).to(device),\n weights['block{:d}.1.weight'.format(index)], weights['block{:d}.1.bias'.format(index)],\n training=True)\n net = F.relu(net, inplace=True)\n net = F.conv2d(net, weights['block{:d}.3.weight'.format(index)], weights['block{:d}.3.bias'.format(index)], padding=1)\n net = F.batch_norm(net, torch.zeros(net.data.size()[1]).to(device), torch.ones(net.data.size()[1]).to(device),\n weights['block{:d}.4.weight'.format(index)], weights['block{:d}.4.bias'.format(index)],\n training=True)\n net = F.relu(net, inplace=True)\n net = F.max_pool2d(net, kernel_size=2, stride=2, )\n else:\n net = F.conv2d(input, weights['block{:d}.0.weight'.format(index)], weights['block{:d}.0.bias'.format(index)], padding=1)\n net = F.batch_norm(net, torch.zeros(net.data.size()[1]).to(device), torch.ones(net.data.size()[1]).to(device),\n weights['block{:d}.1.weight'.format(index)], weights['block{:d}.1.bias'.format(index)],\n training=True)\n net = F.relu(net, inplace=True)\n net = F.conv2d(net, weights['block{:d}.3.weight'.format(index)], weights['block{:d}.3.bias'.format(index)], padding=1)\n net = F.batch_norm(net, torch.zeros(net.data.size()[1]).to(device), 
torch.ones(net.data.size()[1]).to(device),\n weights['block{:d}.4.weight'.format(index)], weights['block{:d}.4.bias'.format(index)],\n training=True)\n net = F.relu(net, inplace=True)\n net = F.conv2d(net, weights['block{:d}.6.weight'.format(index)], weights['block{:d}.6.bias'.format(index)], padding=1)\n net = F.batch_norm(net, torch.zeros(net.data.size()[1]).to(device), torch.ones(net.data.size()[1]).to(device),\n weights['block{:d}.7.weight'.format(index)], weights['block{:d}.7.bias'.format(index)],\n training=True)\n net = F.relu(net, inplace=True)\n net = F.max_pool2d(net, kernel_size=2, stride=2)\n return net\n\n # define forward fc-layer (will be used in second-derivative step)\n def dense_layer_ff(self, input, weights, index):\n net = F.linear(input, weights['classifier{:d}.0.weight'.format(index)], weights['classifier{:d}.0.bias'.format(index)])\n net = F.relu(net, inplace=True)\n net = F.linear(net, weights['classifier{:d}.2.weight'.format(index)], weights['classifier{:d}.2.bias'.format(index)])\n net = F.softmax(net, dim=1)\n return net\n\n def forward(self, x, weights=None):\n \"\"\"\n if no weights given, use the direct training strategy and update network parameters\n else retain the computational graph which will be used in second-derivative step\n \"\"\"\n if weights is None:\n g_block1 = self.block1(x)\n g_block2 = self.block2(g_block1)\n g_block3 = self.block3(g_block2)\n g_block4 = self.block4(g_block3)\n g_block5 = self.block5(g_block4)\n\n t1_pred = self.classifier1(g_block5.view(g_block5.size(0), -1))\n t2_pred = self.classifier2(g_block5.view(g_block5.size(0), -1))\n\n else:\n g_block1 = self.conv_layer_ff(x, weights, 1)\n g_block2 = self.conv_layer_ff(g_block1, weights, 2)\n g_block3 = self.conv_layer_ff(g_block2, weights, 3)\n g_block4 = self.conv_layer_ff(g_block3, weights, 4)\n g_block5 = self.conv_layer_ff(g_block4, weights, 5)\n\n t1_pred = self.dense_layer_ff(g_block5.view(g_block5.size(0), -1), weights, 1)\n t2_pred = self.dense_layer_ff(g_block5.view(g_block5.size(0), -1), weights, 2)\n\n return t1_pred, t2_pred\n\n def model_fit(self, x_pred, x_output, pri=True, num_output=3):\n if not pri:\n # generated auxiliary label is a soft-assignment vector (no need to change into one-hot vector)\n x_output_onehot = x_output\n else:\n # convert a single label into a one-hot vector\n x_output_onehot = torch.zeros((len(x_output), num_output)).to(device)\n x_output_onehot.scatter_(1, x_output.unsqueeze(1), 1)\n\n # apply focal loss\n loss = x_output_onehot * (1 - x_pred)**2 * torch.log(x_pred + 1e-20)\n return torch.sum(-loss, dim=1)\n\n def model_entropy(self, x_pred1):\n # compute entropy loss\n x_pred1 = torch.mean(x_pred1, dim=0)\n loss1 = x_pred1 * torch.log(x_pred1 + 1e-20)\n return torch.sum(loss1)\n\n\n# load CIFAR100 dataset\ntrans_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.2, 0.2, 0.2)),\n\n])\ntrans_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.2, 0.2, 0.2)),\n\n])\n\n# load CIFAR-100 dataset with batch-size 100\n# set keyword download=True at the first time to download the dataset\ncifar100_train_set = CIFAR100(root='dataset', train=True, transform=trans_train, download=False)\ncifar100_test_set = CIFAR100(root='dataset', train=False, transform=trans_test, download=False)\n\nbatch_size = 100\nkwargs = {'num_workers': 1, 'pin_memory': True}\ncifar100_train_loader = 
torch.utils.data.DataLoader(\n dataset=cifar100_train_set,\n batch_size=batch_size,\n shuffle=True)\n\ncifar100_test_loader = torch.utils.data.DataLoader(\n dataset=cifar100_test_set,\n batch_size=batch_size,\n shuffle=True)\n\n# define label-generation model,\n# and optimiser with learning rate 1e-3, drop half for every 50 epochs, weight_decay=5e-4,\npsi = [5]*20 # for each primary class split into 5 auxiliary classes, with total 100 auxiliary classes\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nLabelGenerator = LabelGenerator(psi=psi).to(device)\ngen_optimizer = optim.SGD(LabelGenerator.parameters(), lr=1e-3, weight_decay=5e-4)\ngen_scheduler = optim.lr_scheduler.StepLR(gen_optimizer, step_size=50, gamma=0.5)\n\n# define parameters\ntotal_epoch = 200\ntrain_batch = len(cifar100_train_loader)\ntest_batch = len(cifar100_test_loader)\n\n# define multi-task network, and optimiser with learning rate 0.01, drop half for every 50 epochs\nVGG16_model = VGG16(psi=psi).to(device)\noptimizer = optim.SGD(VGG16_model.parameters(), lr=0.01)\nscheduler = optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.5)\navg_cost = np.zeros([total_epoch, 9], dtype=np.float32)\nvgg_lr = 0.01 # define learning rate for second-derivative step (theta_1^+)\nk = 0\nfor index in range(total_epoch):\n cost = np.zeros(4, dtype=np.float32)\n\n # drop the learning rate with the same strategy in the multi-task network\n # note: not necessary to be consistent with the multi-task network's parameter,\n # it can also be learned directly from the network\n if (index + 1) % 50 == 0:\n vgg_lr = vgg_lr * 0.5\n\n scheduler.step()\n gen_scheduler.step()\n\n # evaluate training data (training-step, update on theta_1)\n VGG16_model.train()\n cifar100_train_dataset = iter(cifar100_train_loader)\n for i in range(train_batch):\n train_data, train_label = cifar100_train_dataset.next()\n train_label = train_label.type(torch.LongTensor)\n train_data, train_label = train_data.to(device), train_label.to(device)\n train_pred1, train_pred2 = VGG16_model(train_data)\n train_pred3 = LabelGenerator(train_data, train_label[:, 2]) # generate auxiliary labels\n\n # reset optimizers with zero gradient\n optimizer.zero_grad()\n gen_optimizer.zero_grad()\n\n # choose level 2/3 hierarchy, 20-class (gt) / 100-class classification (generated by label generator)\n train_loss1 = VGG16_model.model_fit(train_pred1, train_label[:, 2], pri=True, num_output=20)\n train_loss2 = VGG16_model.model_fit(train_pred2, train_pred3, pri=False, num_output=100)\n train_loss3 = VGG16_model.model_entropy(train_pred3)\n\n # compute cosine similarity between gradients from primary and auxiliary loss\n grads1 = torch.autograd.grad(torch.mean(train_loss1), VGG16_model.parameters(), retain_graph=True, allow_unused=True)\n grads2 = torch.autograd.grad(torch.mean(train_loss2), VGG16_model.parameters(), retain_graph=True, allow_unused=True)\n cos_mean = 0\n for j in range(len(grads1) - 8): # only compute on shared representation (ignore task-specific fc-layers); separate index so the global iteration counter k is not clobbered\n cos_mean += torch.mean(F.cosine_similarity(grads1[j], grads2[j], dim=0)) / (len(grads1) - 8)\n # cosine similarity evaluation ends here\n\n train_loss = torch.mean(train_loss1) + torch.mean(train_loss2)\n train_loss.backward()\n\n optimizer.step()\n\n train_predict_label1 = train_pred1.data.max(1)[1]\n train_acc1 = train_predict_label1.eq(train_label[:, 2]).sum().item() / batch_size\n\n cost[0] = torch.mean(train_loss1).item()\n cost[1] = train_acc1\n cost[2] = cos_mean\n k = k + 1\n 
avg_cost[index][0:3] += cost[0:3] / train_batch\n\n # evaluating training data (meta-training step, update on theta_2)\n cifar100_train_dataset = iter(cifar100_train_loader)\n for i in range(train_batch):\n train_data, train_label = cifar100_train_dataset.next()\n train_label = train_label.type(torch.LongTensor)\n train_data, train_label = train_data.to(device), train_label.to(device)\n train_pred1, train_pred2 = VGG16_model(train_data)\n train_pred3 = LabelGenerator(train_data, train_label[:, 2])\n\n # reset optimizer with zero gradient\n optimizer.zero_grad()\n gen_optimizer.zero_grad()\n\n # choose level 2/3 hierarchy, 20-class/100-class classification\n train_loss1 = VGG16_model.model_fit(train_pred1, train_label[:, 2], pri=True, num_output=20)\n train_loss2 = VGG16_model.model_fit(train_pred2, train_pred3, pri=False, num_output=100)\n train_loss3 = VGG16_model.model_entropy(train_pred3)\n\n # multi-task loss\n train_loss = torch.mean(train_loss1) + torch.mean(train_loss2)\n\n # current accuracy on primary task\n train_predict_label1 = train_pred1.data.max(1)[1]\n train_acc1 = train_predict_label1.eq(train_label[:, 2]).sum().item() / batch_size\n cost[0] = torch.mean(train_loss1).item()\n cost[1] = train_acc1\n\n # current theta_1\n fast_weights = OrderedDict((name, param) for (name, param) in VGG16_model.named_parameters())\n\n # create_graph flag for computing second-derivative\n grads = torch.autograd.grad(train_loss, VGG16_model.parameters(), create_graph=True)\n data = [p.data for p in list(VGG16_model.parameters())]\n\n # compute theta_1^+ by applying sgd on multi-task loss\n fast_weights = OrderedDict((name, param - vgg_lr * grad) for ((name, param), grad, data) in zip(fast_weights.items(), grads, data))\n\n # compute primary loss with the updated theta_1^+\n train_pred1, train_pred2 = VGG16_model.forward(train_data, fast_weights)\n train_loss1 = VGG16_model.model_fit(train_pred1, train_label[:, 2], pri=True, num_output=20)\n\n # update theta_2 with primary loss + entropy loss\n (torch.mean(train_loss1) + 0.2*torch.mean(train_loss3)).backward()\n gen_optimizer.step()\n\n train_predict_label1 = train_pred1.data.max(1)[1]\n train_acc1 = train_predict_label1.eq(train_label[:, 2]).sum().item() / batch_size\n\n # loss and accuracy on primary task after one update\n cost[2] = torch.mean(train_loss1).item()\n cost[3] = train_acc1\n avg_cost[index][3:7] += cost[0:4] / train_batch\n\n # evaluate on test data\n VGG16_model.eval()\n with torch.no_grad():\n cifar100_test_dataset = iter(cifar100_test_loader)\n for i in range(test_batch):\n test_data, test_label = cifar100_test_dataset.next()\n test_label = test_label.type(torch.LongTensor)\n test_data, test_label = test_data.to(device), test_label.to(device)\n test_pred1, test_pred2 = VGG16_model(test_data)\n\n test_loss1 = VGG16_model.model_fit(test_pred1, test_label[:, 2], pri=True, num_output=20)\n\n test_predict_label1 = test_pred1.data.max(1)[1]\n test_acc1 = test_predict_label1.eq(test_label[:, 2]).sum().item() / batch_size\n\n cost[0] = torch.mean(test_loss1).item()\n cost[1] = test_acc1\n\n avg_cost[index][7:] += cost[0:2] / test_batch\n\n print('EPOCH: {:04d} Iter {:04d} | TRAIN [LOSS|ACC.]: PRI {:.4f} {:.4f} COSSIM {:.4f} || '\n 'META [LOSS|ACC.]: PRE {:.4f} {:.4f} AFTER {:.4f} {:.4f} || TEST: {:.4f} {:.4f}'\n .format(index, k, avg_cost[index][0], avg_cost[index][1], avg_cost[index][2], avg_cost[index][3],\n avg_cost[index][4], avg_cost[index][5], avg_cost[index][6], avg_cost[index][7], avg_cost[index][8]))\n" ]
[ [ "torch.nn.Linear", "torch.optim.lr_scheduler.StepLR", "torch.nn.BatchNorm2d", "torch.cuda.is_available", "torch.exp", "torch.sum", "torch.nn.Softmax", "torch.nn.init.constant_", "torch.nn.MaxPool2d", "torch.utils.data.DataLoader", "torch.nn.functional.relu", "torch.nn.init.xavier_normal_", "numpy.zeros", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.functional.cosine_similarity", "torch.nn.functional.softmax", "torch.log", "numpy.sum", "torch.no_grad", "torch.nn.init.xavier_uniform_", "torch.nn.functional.max_pool2d", "torch.mean" ] ]
Thomas2512/CS224n-NLP
[ "6c8ad402498600cf0ccc9f2dd75074c044f219c8" ]
[ "Assignments/Assignment 2/assignment2/q2_parser_model.py" ]
[ "import pickle\nimport os\nimport time\nimport tensorflow as tf\n\nfrom model import Model\nfrom q2_initialization import xavier_weight_init\nfrom utils.parser_utils import minibatches, load_and_preprocess_data\n\n\nclass Config(object):\n \"\"\"Holds model hyperparams and data information.\n\n The config class is used to store various hyperparameters and dataset\n information parameters. Model objects are passed a Config() object at\n instantiation. They can then call self.config.<hyperparameter_name> to\n get the hyperparameter settings.\n \"\"\"\n n_features = 36\n n_classes = 3\n dropout = 0.5 # (p_drop in the handout)\n embed_size = 50\n hidden_size = 200\n batch_size = 1024\n n_epochs = 10\n # lr = 0.0005\n lr = 0.005\n\n\nclass ParserModel(Model):\n \"\"\"\n Implements a feedforward neural network with an embedding layer and single hidden layer.\n This network will predict which transition should be applied to a given partial parse\n configuration.\n \"\"\"\n\n def add_placeholders(self):\n \"\"\"Generates placeholder variables to represent the input tensors\n\n These placeholders are used as inputs by the rest of the model building and will be fed\n data during training. Note that when \"None\" is in a placeholder's shape, it's flexible\n (so we can use different batch sizes without rebuilding the model).\n\n Adds following nodes to the computational graph\n\n input_placeholder: Input placeholder tensor of shape (None, n_features), type tf.int32\n labels_placeholder: Labels placeholder tensor of shape (None, n_classes), type tf.float32\n dropout_placeholder: Dropout value placeholder (scalar), type tf.float32\n\n Add these placeholders to self as the instance variables\n self.input_placeholder\n self.labels_placeholder\n self.dropout_placeholder\n\n (Don't change the variable names)\n \"\"\"\n ### YOUR CODE HERE\n\n n_features = self.config.n_features\n n_classes = self.config.n_classes\n\n self.input_placeholder = tf.placeholder(tf.int32, shape=(None, n_features))\n self.labels_placeholder = tf.placeholder(tf.float32, shape=(None, n_classes))\n self.dropout_placeholder = tf.placeholder(tf.float32, shape=[])\n\n ### END YOUR CODE\n\n def create_feed_dict(self, inputs_batch, labels_batch=None, dropout=0):\n \"\"\"Creates the feed_dict for the dependency parser.\n\n A feed_dict takes the form of:\n\n feed_dict = {\n <placeholder>: <tensor of values to be passed for placeholder>,\n ....\n }\n\n\n Hint: The keys for the feed_dict should be a subset of the placeholder\n tensors created in add_placeholders.\n Hint: When an argument is None, don't add it to the feed_dict.\n\n Args:\n inputs_batch: A batch of input data.\n labels_batch: A batch of label data.\n dropout: The dropout rate.\n Returns:\n feed_dict: The feed dictionary mapping from placeholders to values.\n \"\"\"\n ### YOUR CODE HERE\n\n feed_dict = {}\n\n if inputs_batch is not None:\n feed_dict[self.input_placeholder] = inputs_batch\n if labels_batch is not None:\n feed_dict[self.labels_placeholder] = labels_batch\n if dropout is not None:\n feed_dict[self.dropout_placeholder] = dropout\n\n ### END YOUR CODE\n return feed_dict\n\n def add_embedding(self):\n \"\"\"Adds an embedding layer that maps from input tokens (integers) to vectors and then\n concatenates those vectors:\n - Creates a tf.Variable and initializes it with self.pretrained_embeddings.\n - Uses the input_placeholder to index into the embeddings tensor, resulting in a\n tensor of shape (None, n_features, embedding_size).\n - Concatenates the embeddings by 
reshaping the embeddings tensor to shape\n (None, n_features * embedding_size).\n\n Hint: You might find tf.nn.embedding_lookup useful.\n Hint: You can use tf.reshape to concatenate the vectors. See following link to understand\n what -1 in a shape means.\n https://www.tensorflow.org/api_docs/python/tf/reshape\n\n Returns:\n embeddings: tf.Tensor of shape (None, n_features*embed_size)\n \"\"\"\n ### YOUR CODE HERE\n\n n_features = self.config.n_features\n # print(self.pretrained_embeddings.shape[1])\n # embedding_size = self.pretrained_embeddings.shape[1]\n embed_size = self.config.embed_size\n\n pretrained_embeddings = tf.Variable(self.pretrained_embeddings)\n embeddings = tf.nn.embedding_lookup(pretrained_embeddings, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [-1, n_features * embed_size])\n\n ### END YOUR CODE\n return embeddings\n\n def add_prediction_op(self):\n \"\"\"Adds the 1-hidden-layer NN:\n h = Relu(xW + b1)\n h_drop = Dropout(h, dropout_rate)\n pred = h_dropU + b2\n\n Note that we are not applying a softmax to pred. The softmax will instead be done in\n the add_loss_op function, which improves efficiency because we can use\n tf.nn.softmax_cross_entropy_with_logits\n\n Use the initializer from q2_initialization.py to initialize W and U (you can initialize b1\n and b2 with zeros)\n\n Hint: Note that tf.nn.dropout takes the keep probability (1 - p_drop) as an argument.\n Therefore the keep probability should be set to the value of\n (1 - self.dropout_placeholder)\n\n Returns:\n pred: tf.Tensor of shape (batch_size, n_classes)\n \"\"\"\n\n x = self.add_embedding()\n ### YOUR CODE HERE\n\n n_classes = self.config.n_classes\n n_features = self.config.n_features\n hidden_size = self.config.hidden_size\n embed_size = self.config.embed_size\n\n b1 = tf.Variable(tf.zeros([hidden_size]))\n print(\"B1: \", b1.shape)\n print(b1)\n b2 = tf.Variable(tf.zeros([1, n_classes]))\n print(\"B2: \", b2.shape)\n print(b2)\n\n xavier_initializer = xavier_weight_init()\n W = tf.Variable(xavier_initializer([n_features * embed_size, hidden_size]))\n print(\"x: \", x.shape)\n print(\"W: \", W.shape)\n print(\"xW: \", tf.linalg.matmul(x, W).shape)\n U = tf.Variable(xavier_initializer([hidden_size, n_classes]))\n\n h = tf.nn.relu_layer(x, weights=W, biases=b1)\n h_drop = tf.nn.dropout(h, rate=self.dropout_placeholder) # rate is the drop probability p_drop, so keep probability = 1 - p_drop\n pred = tf.linalg.matmul(h_drop, U) + b2\n print(\"n_classes: \", n_classes)\n print(\"pred: \", pred)\n\n ### END YOUR CODE\n return pred\n\n def add_loss_op(self, pred):\n \"\"\"Adds Ops for the loss function to the computational graph.\n In this case we are using cross entropy loss.\n The loss should be averaged over all examples in the current minibatch.\n\n Hint: You can use tf.nn.softmax_cross_entropy_with_logits to simplify your\n implementation. You might find tf.reduce_mean useful.\n Args:\n pred: A tensor of shape (batch_size, n_classes) containing the output of the neural\n network before the softmax layer.\n Returns:\n loss: A 0-d tensor (scalar)\n \"\"\"\n ### YOUR CODE HERE\n\n probs = tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=self.labels_placeholder)\n loss = tf.reduce_mean(probs)\n\n ### END YOUR CODE\n return loss\n\n def add_training_op(self, loss):\n \"\"\"Sets up the training Ops.\n\n Creates an optimizer and applies the gradients to all trainable variables.\n The Op returned by this function is what must be passed to the\n `sess.run()` call to cause the model to train. 
See\n\n https://www.tensorflow.org/api_docs/python/tf/train/Optimizer\n\n for more information.\n\n Use tf.train.AdamOptimizer for this model.\n Use the learning rate from self.config.\n Calling optimizer.minimize() will return a train_op object.\n\n Args:\n loss: Loss tensor, from cross_entropy_loss.\n Returns:\n train_op: The Op for training.\n \"\"\"\n ### YOUR CODE HERE\n\n learning_rate = self.config.lr\n\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss=loss)\n\n ### END YOUR CODE\n return train_op\n\n def train_on_batch(self, sess, inputs_batch, labels_batch):\n feed = self.create_feed_dict(inputs_batch, labels_batch=labels_batch,\n dropout=self.config.dropout)\n _, loss = sess.run([self.train_op, self.loss], feed_dict=feed)\n return loss\n\n def run_epoch(self, sess, parser, train_examples, dev_set):\n n_minibatches = 1 + len(train_examples) // self.config.batch_size\n prog = tf.keras.utils.Progbar(target=n_minibatches)\n for i, (train_x, train_y) in enumerate(minibatches(train_examples, self.config.batch_size)):\n loss = self.train_on_batch(sess, train_x, train_y)\n prog.update(i + 1, [(\"train loss\", loss)]\n # , force=i + 1 == n_minibatches\n )\n print()\n\n print(\"Evaluating on dev set\", )\n dev_UAS, _ = parser.parse(dev_set)\n print(\"dev_UAS: \", dev_UAS)\n print(\"- dev UAS: {:.2f}\".format(dev_UAS * 100.0))\n return dev_UAS\n\n def fit(self, sess, saver, parser, train_examples, dev_set):\n best_dev_UAS = 0\n for epoch in range(self.config.n_epochs):\n print(\"Epoch {:} out of {:}\".format(epoch + 1, self.config.n_epochs))\n dev_UAS = self.run_epoch(sess, parser, train_examples, dev_set)\n if dev_UAS > best_dev_UAS:\n best_dev_UAS = dev_UAS\n if saver:\n print(\"New best dev UAS! Saving model in ./data/weights/parser.weights\")\n saver.save(sess, './data/weights/parser.weights')\n print()\n\n def __init__(self, config, pretrained_embeddings):\n self.pretrained_embeddings = pretrained_embeddings\n self.config = config\n self.build()\n\n\ndef main(debug=True):\n print(80 * \"=\")\n print(\"INITIALIZING\")\n print(80 * \"=\")\n config = Config()\n parser, embeddings, train_examples, dev_set, test_set = load_and_preprocess_data(debug)\n if not os.path.exists('./data/weights/'):\n os.makedirs('./data/weights/')\n\n with tf.Graph().as_default() as graph:\n print(\"Building model...\", )\n start = time.time()\n model = ParserModel(config, embeddings)\n parser.model = model\n init_op = tf.global_variables_initializer()\n saver = None if debug else tf.train.Saver()\n print(\"took {:.2f} seconds\\n\".format(time.time() - start))\n graph.finalize()\n\n with tf.Session(graph=graph) as session:\n parser.session = session\n session.run(init_op)\n\n print(80 * \"=\")\n print(\"TRAINING\")\n print(80 * \"=\")\n model.fit(session, saver, parser, train_examples, dev_set)\n\n if not debug:\n print(80 * \"=\")\n print(\"TESTING\")\n print(80 * \"=\")\n print(\"Restoring the best model weights found on the dev set\")\n saver.restore(session, './data/weights/parser.weights')\n print(\"Final evaluation on test set\", )\n UAS, dependencies = parser.parse(test_set)\n print(\"- test UAS: {:.2f}\".format(UAS * 100.0))\n print(\"Writing predictions\")\n with open('q2_test.predicted.pkl', 'wb') as f:\n pickle.dump(dependencies, f, -1)\n print(\"Done!\")\n\n\nif __name__ == '__main__':\n main(debug=True)\n" ]
[ [ "tensorflow.zeros", "tensorflow.train.AdamOptimizer", "tensorflow.Graph", "tensorflow.Session", "tensorflow.Variable", "tensorflow.train.Saver", "tensorflow.reshape", "tensorflow.linalg.matmul", "tensorflow.placeholder", "tensorflow.nn.relu_layer", "tensorflow.nn.embedding_lookup", "tensorflow.reduce_mean", "tensorflow.global_variables_initializer", "tensorflow.nn.dropout", "tensorflow.keras.utils.Progbar", "tensorflow.nn.softmax_cross_entropy_with_logits_v2" ] ]
tbienhoff/carla-rl
[ "51960c8ce3b7e90cdd6c3ab5e18721d1969e1b50" ]
[ "client/carla/planner/map.py" ]
[ "# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de\n# Barcelona (UAB).\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see <https://opensource.org/licenses/MIT>.\n\n\"\"\"Class used for operating the city map.\"\"\"\n\nimport math\nimport os\n\ntry:\n import numpy as np\nexcept ImportError:\n raise RuntimeError('cannot import numpy, make sure numpy package is installed')\n\ntry:\n from PIL import Image\nexcept ImportError:\n raise RuntimeError('cannot import PIL, make sure pillow package is installed')\n\nfrom carla.planner.graph import Graph\nfrom carla.planner.graph import sldist\nfrom carla.planner.grid import Grid\nfrom carla.planner.converter import Converter\n\n\ndef color_to_angle(color):\n return (float(color) / 255.0) * 2 * math.pi\n\n\nclass CarlaMap(object):\n\n def __init__(self, city, pixel_density, node_density):\n dir_path = os.path.dirname(__file__)\n city_file = os.path.join(dir_path, city + '.txt')\n\n city_map_file = os.path.join(dir_path, city + '.png')\n city_map_file_lanes = os.path.join(dir_path, city + 'Lanes.png')\n city_map_file_center = os.path.join(dir_path, city + 'Central.png')\n\n # The built graph. This is the exact same graph that unreal builds. This\n # is a generic structure used for many cases\n self._graph = Graph(city_file, node_density)\n\n self._pixel_density = pixel_density\n self._grid = Grid(self._graph)\n # The number of game units per pixel. For now this is fixed.\n\n self._converter = Converter(city_file, pixel_density, node_density)\n\n # Load the lanes image\n self.map_image_lanes = Image.open(city_map_file_lanes)\n self.map_image_lanes.load()\n self.map_image_lanes = np.asarray(self.map_image_lanes, dtype=\"int32\")\n # Load the image\n self.map_image = Image.open(city_map_file)\n self.map_image.load()\n self.map_image = np.asarray(self.map_image, dtype=\"int32\")\n\n # Load the lanes image\n self.map_image_center = Image.open(city_map_file_center)\n self.map_image_center.load()\n self.map_image_center = np.asarray(self.map_image_center, dtype=\"int32\")\n\n def get_graph_resolution(self):\n\n return self._graph.get_resolution()\n\n def get_map(self, height=None):\n if height is not None:\n img = Image.fromarray(self.map_image.astype(np.uint8))\n\n aspect_ratio = height / float(self.map_image.shape[0])\n\n img = img.resize((int(aspect_ratio * self.map_image.shape[1]), height), Image.ANTIALIAS)\n img.load()\n return np.asarray(img, dtype=\"int32\")\n return np.fliplr(self.map_image)\n\n def get_map_lanes(self, size=None):\n if size is not None:\n img = Image.fromarray(self.map_image_lanes.astype(np.uint8))\n img = img.resize((size[1], size[0]), Image.ANTIALIAS)\n img.load()\n return np.fliplr(np.asarray(img, dtype=\"int32\"))\n return np.fliplr(self.map_image_lanes)\n\n def get_lane_orientation(self, world):\n \"\"\"Get the lane orientation of a certain world position.\"\"\"\n pixel = self.convert_to_pixel(world)\n\n ori = self.map_image_lanes[int(pixel[1]), int(pixel[0]), 2]\n ori = color_to_angle(ori)\n\n return (-math.cos(ori), -math.sin(ori))\n\n def convert_to_node(self, input_data):\n \"\"\"\n Receives a data type (Can Be Pixel or World )\n :param input_data: position in some coordinate\n :return: A node object\n \"\"\"\n return self._converter.convert_to_node(input_data)\n\n def convert_to_pixel(self, input_data):\n \"\"\"\n Receives a data type (Can Be Node or World )\n :param input_data: position in some coordinate\n :return: A node object\n \"\"\"\n return 
self._converter.convert_to_pixel(input_data)\n\n def convert_to_world(self, input_data):\n \"\"\"\n Receives a data type (Can Be Pixel or Node )\n :param input_data: position in some coordinate\n :return: A node object\n \"\"\"\n return self._converter.convert_to_world(input_data)\n\n def get_walls_directed(self, node_source, source_ori, node_target, target_ori):\n \"\"\"\n This is the most hacky function. Instead of planning on two ways,\n we basically use a one way road and interrupt the other road by adding\n an artificial wall.\n\n \"\"\"\n\n final_walls = self._grid.get_wall_source(node_source, source_ori, node_target)\n\n final_walls = final_walls.union(self._grid.get_wall_target(\n node_target, target_ori, node_source))\n return final_walls\n\n def get_walls(self):\n\n return self._grid.get_walls()\n\n def get_distance_closest_node(self, pos):\n\n distance = []\n for node_iter in self._graph.intersection_nodes():\n distance.append(sldist(node_iter, pos))\n\n return sorted(distance)[0]\n\n def get_intersection_nodes(self):\n return self._graph.intersection_nodes()\n\n def search_on_grid(self,node):\n return self._grid.search_on_grid(node[0], node[1])\n" ]
[ [ "numpy.asarray", "numpy.fliplr" ] ]
KristineYW/DS-Unit-3-Sprint-2-SQL-and-Databases
[ "4a690cd8e651161296d7aec2af86a56c499d6801" ]
[ "module2-sql-for-analysis/insert_titanic.py" ]
[ "import os\nimport json\nfrom dotenv import load_dotenv\nimport psycopg2\nfrom psycopg2.extras import execute_values\nimport pandas as pd\n\n# Load contents of the .env file into the script's environment\nload_dotenv() \n\n# Add credentials for accessing the elephant \nDB_NAME = os.getenv(\"DB_NAME\")\nDB_USER = os.getenv(\"DB_USER\")\nDB_PASSWORD = os.getenv(\"DB_PASSWORD\")\nDB_HOST = os.getenv(\"DB_HOST\")\n\n# Load the dataframe with pandas\ndf = pd.read_csv('https://raw.githubusercontent.com/LambdaSchool/DS-Unit-3-Sprint-2-SQL-and-Databases/master/module2-sql-for-analysis/titanic.csv')\n\n# Create the psychopg2 connection and cursor objections to access the elephant \nconnection = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST)\n\ncursor = connection.cursor()\n\n# Create the titanic table\ncreate_query = \"CREATE TABLE titanic (Survived INT, Class INT, Name VARCHAR(255), Sex CHAR(10), Age FLOAT, Sibling_Spouse INT, Parent_Child INT, Fare FLOAT);\"\n\n# Create placeholder insertion query for the titanic table\n# insertion_query = \"INSERT INTO titanic2 (Survived, Class, Name, Sex, Age, Sibling_Spouse, Parent_Child, Fare) VALUES %s\"\ninsertion_query = f\"INSERT INTO titanic (Survived, Class, Name, Sex, Age, Sibling_Spouse, Parent_Child, Fare)\" \\\n \"VALUES ({Survivor},{Class},{Name},{Sex},{Age},{Sibing_Spouse},{Parent_Child},{Fare})\"\n\n\n# Change the format of database into a list of tuples\nlist_of_tuples = []\nfor row in df.iterrows():\n list_of_tuples.append(row)\nprint(list_of_tuples)\n\nlines = []\nfor i in range(0,len(df)):\n result = []\n for j in range(0,8):\n result.append(list_of_tuples[i][1][j])\n lines.append(tuple(result))\nprint(lines)\n\n# Use execute_values to insert the list of tuples into the titanic table as rows\n\nexecute_values(cursor, insertion_query, lines)\n\n# Save the transactions\nconnection.commit()\ncursor.close()\nconnection.close()\n" ]
[ [ "pandas.read_csv" ] ]
carbon-drive/pyleecan
[ "e89d4fe97f23f6182c19127d2c6a2133614e169d" ]
[ "pyleecan/Methods/Machine/Winding/comp_connection_mat.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom numpy import zeros, swapaxes, sign\n\nfrom ....Methods.Machine.Winding import WindingError\n\n\nfrom swat_em import datamodel\n\n\ndef comp_connection_mat(self, Zs=None, p=None):\n \"\"\"Compute the Winding Matrix for\n\n Parameters\n ----------\n self : Winding\n A: Winding object\n Zs : int\n Number of Slot (Integer >0)\n p : int\n Number of pole pairs (Integer >0)\n\n Returns\n -------\n wind_mat: numpy.ndarray\n Winding Matrix (1, 1, Zs, qs)\n\n Raises\n ------\n WindingT2DefNtError\n Zs/qs/2 must be an integer\n\n \"\"\"\n\n if Zs is None:\n if self.parent is None:\n raise WindingError(\n \"ERROR: The Winding object must be in a Lamination object.\"\n )\n\n if self.parent.slot is None:\n raise WindingError(\n \"ERROR: The Winding object must be in a Lamination object with Slot.\"\n )\n\n Zs = self.parent.slot.Zs\n\n if p is None:\n if self.parent is None:\n raise WindingError(\n \"ERROR: The Winding object must be in a Lamination object.\"\n )\n\n p = self.parent.get_pole_pair_number()\n\n assert Zs > 0, \"Zs must be >0\"\n assert Zs % 1 == 0, \"Zs must be an integer\"\n\n assert p > 0, \"p must be >0\"\n assert p % 1 == 0, \"p must be an integer\"\n\n qs = self.qs # Phase Number\n\n Ntcoil = self.Ntcoil # number of turns per coils\n\n Nlayer = self.Nlayer # number of layers\n\n coil_pitch = self.coil_pitch # coil pitch (coil span)\n\n # generate a datamodel for the winding\n wdg = datamodel()\n\n # generate winding from inputs\n wdg.genwdg(Q=Zs, P=2 * p, m=qs, layers=Nlayer, turns=Ntcoil, w=coil_pitch)\n\n # init connexion matrix\n wind_mat = zeros((Nlayer, 1, Zs, qs))\n\n # get connexion matrix from swat-em\n wind_mat_swat = wdg.get_phases()\n\n # perform checks\n assert p == wdg.get_num_polepairs(), (\n \"number of pole pairs is not as requested (returned \"\n + str(wdg.get_num_polepairs())\n + \" expected \"\n + str(p)\n + \")\"\n )\n assert qs == wdg.get_num_phases(), (\n \"number of phases is not as requested (returned \"\n + str(wdg.get_num_phases())\n + \" expected \"\n + str(qs)\n + \")\"\n )\n\n # convert swat-em connexion matrix to pyleecan connexion matrix\n for qq, phase in enumerate(wind_mat_swat):\n for ll, layer in enumerate(phase):\n if len(layer) > 0:\n for cond in layer:\n wind_mat[Nlayer - ll - 1, 0, abs(cond) - 1, qq] = (\n sign(cond) * Ntcoil\n )\n\n # permute radial and tangential layers if coil span is 1\n if wdg.get_coilspan() == 1:\n wind_mat = swapaxes(wind_mat, 0, 1)\n\n # check that requested number of parallel connections is feasible\n Npcp_list = wdg.get_parallel_connections()\n if self.Npcp is None:\n self.Npcp = Npcp_list[0]\n elif self.Npcp > p:\n self.Npcp = p\n self.get_logger().warning(\n \"Number of parallel circuits per phase must be < p, assign it to: \" + str(p)\n )\n # if self.Npcp not in Npcp_list:\n\n # if self.Npcp is not None:\n # self.get_logger().warning(\n # \"Requested number of parallel circuits per phase is not feasible, assign it to: \"\n # + str(Npcp_list[0])\n # )\n\n # self.Npcp = Npcp_list[0]\n\n # enforce the number of layers if it is not as requested\n Nlayer_actual = wdg.get_num_layers()\n if self.Nlayer != Nlayer_actual:\n self.Nlayer = Nlayer_actual\n self.get_logger().info(\n \"Requested number of layers is not feasible, assign it to: \"\n + str(Nlayer_actual)\n )\n\n # get periodicities\n # self.per_a = wdg.get_periodicity_t()\n # self.is_aper_a = wdg.get_is_symmetric()\n\n # To check periodicities swat-em / pyleecan definitions\n self.per_a, self.is_aper_a = 
self.comp_periodicity(wind_mat=wind_mat)\n # if is_aper_a: # Different def for Anti per\n # per_a = per_a / 2\n # if self.per_a != per_a or self.is_aper_a != is_aper_a:\n # self.get_logger().warning(\n # \"(Anti-)periodicity calculated by pyleecan and SWAT_EM differs\"\n # )\n\n # Set default values\n if self.is_reverse_wind is None:\n self.is_reverse_wind = False\n if self.Nslot_shift_wind is None:\n self.Nslot_shift_wind = 0\n\n return wind_mat\n" ]
[ [ "numpy.sign", "numpy.swapaxes", "numpy.zeros" ] ]
lukaselflein/sarah_folderstructure
[ "a725271db3d8b5b28b24918b3daf0942fa04dcd8" ]
[ ".template_simulation/average_cost.py" ]
[ "\"\"\" Average Cost Functions for Horton to determine Charges for Molecular Dynamics.\nCopyright 2019 Simulation Lab\nUniversity of Freiburg\nAuthor: Lukas Elflein <[email protected]>\n\"\"\"\n\nimport numpy as np\nimport h5py\nimport shutil\nimport warnings\nimport os\nfrom smamp.tools import cd\n\n\ndef find_cost(path='.', cost_function_filename='cost_-5_0.8.h5'):\n \"\"\"\n Find all cost functions in folder structure.\n\n We have multiple snapshots of a molecule, with corresponding cost functions.\n This function explores the folderstructure, and returns all paths leading to cost functions.\n\n Arguments:\n path: path to the folder to search in\n\n Returns:\n list of strings, representing paths to cost functions\n \"\"\"\n\n cost_function_paths = []\n\n # Crawl the directory structure\n for subdir, dirs, files in os.walk(path):\n\n # Exclude template folders from search\n if 'template' in subdir or 'exclude' in subdir:\n continue\n\n # Select the folder with cost functions:\n if 'horton_cost_function/lnrho_sweep' in subdir:\n # The cost file should be in:\n cost_path = os.path.join(subdir, cost_function_filename)\n if os.path.isfile(cost_path):\n # Add the cost function to our collection\n cost_function_paths += [cost_path]\n\n # Maybe the cost function is missing. Print to screen\n else:\n print('\\nWarning: No cost file found in: \"{}\"'.format(subdir))\n print('Filename was assumed to be \"{}\"'.format(cost_function_filename))\n print(cost_function_paths)\n return cost_function_paths\n\ndef read_h5(path, verbose=False):\n \"\"\"\n Import cost functions, convert to numpy arrays.\n\n Argumentis:\n path: the path to the cost function file\n\n Returns:\n A_matrices: a dictionary of matrices, indexed by their timestep.\n B_vectors: a dictionary of vectors, indexed by their timestep.\n \"\"\"\n\n # Extract the values for each timestep\n if verbose:\n print('Loading data: {}'.format(path))\n\n # load the objects (read-only) from HDF5 file\n f = h5py.File(path, 'r')\n # Extract the A matrix\n A_matrix = np.array(f['cost']['A'])\n # Extract the B vector\n B_vector = np.array(f['cost']['B'])\n\n\n return A_matrix, B_vector\n\ndef collect_matrices(paths):\n \"\"\"\n Extract A and B from all cost functions.\n\n Arguments:\n paths: list of strings, pointing to the cost function files\n \n Returns:\n A_list: list of A-matrices\n B_list: list of B-vectors\n \"\"\"\n\n A_list = []\n B_list = []\n for cost_function_path in paths:\n A_matrix, B_vector = read_h5(cost_function_path)\n A_list += [A_matrix]\n B_list += [B_vector]\n\n assert len(A_list) == len(B_list)\n return A_list, B_list\n \n\ndef average(A_matrices, B_vectors):\n \"\"\" \n Average over cost function matrices.\n\n The cost functions contain the three objects of the cost function: A, B, C\n A is a quadratic matrix (97x97), B a vector (d=97), and C is a constant.\n In the end, we are interested in the best-fit charges Q which are the solution to\n Q = A^-1 B\n\n Arguments:\n A_matrices: a list of NxN matrices\n B_vectors: a list of vectors with len N\n\n Returns:\n A: the average of all A_matrices.\n B: the average of all B_matrices.\n \"\"\"\n\n # Initialize empty\n A = A_matrices[0] * 0\n B = B_vectors[0] * 0\n\n # Average by adding all objects and dividing by their number\n for index in range(len(A_matrices)):\n A += A_matrices[index]\n B += B_vectors[index]\n\n # Divide\n number_snapshots = len(A_matrices)\n A /= number_snapshots\n B /= number_snapshots\n\n return A, B\n\ndef export(A, B, template_path='./average_cost.h5'):\n 
\"\"\"\n Export&save numpy-matrices to HDF5 objects.\n \n Arguments:\n A: Averaged A-matrix.\n B: Averaged B-vector.\n\n Keyword Arguments:\n template_path: the path to the template file A and B are written into.\n \"\"\"\n # Open the template file\n f = h5py.File(template_path, 'r+')\n # Load the template A matrix\n A_old = f['cost/A']\n # Assign the averaged A\n A_old[...] = A\n # Do the same for the B-vectors\n B_old = f['cost/B']\n B_old[...] = B\n # Save changes\n f.close() \n\n # Make sure that the changes were written into the template\n f = h5py.File(template_path, 'r')\n assert np.allclose(f['cost/A'][()], A)\n assert np.allclose(f['cost/B'][()], B)\n\n # print('Data has been written to {}\\n'.format(template_path))\n\ndef main():\n \"\"\" Run the script.\"\"\"\n print('This is {}.'.format(__file__))\n # Create a folder for the averaged cost function\n chargepath = './horton_charges'\n if os.path.isdir(chargepath):\n shutil.rmtree(chargepath)\n os.mkdir(chargepath)\n\n # Find the locations of the cost function files\n cost_function_paths = find_cost()\n\n # Extract cost function As and Bs\n A_matrices, B_vectors = collect_matrices(cost_function_paths)\n\n # Average over all matrices & vectors\n average_A, average_B = average(A_matrices, B_vectors)\n\n # keep one HDF5 file as a template for writing into later\n shutil.copyfile(cost_function_paths[0], './horton_charges/costfunction_average.h5')\n\n # Export matrices to hdf5 \n export(average_A, average_B, template_path='./horton_charges/costfunction_average.h5')\n\n print('Done.')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.allclose", "numpy.array" ] ]
nmail-sds/Framework
[ "20ce6767188adbccd9edc15000dbdfa25cfdab70" ]
[ "data/sampling.py" ]
[ "'''\n2019. 1. 21\nSMOTE data sampling\nYihan Kim\n\nusage : import sampling.smote as smote \n\n'''\n\nfrom ds import Pair, Data\nfrom sklearn.datasets import make_classification\nfrom imblearn.over_sampling import SMOTE\n\ndef smote_dataset(dataset: Data):\n # unpack \n X = dataset.train.data \n y = dataset.train.labels\n sm = SMOTE(random_state=42)\n X_res, Y_res = sm.fit_resample(X, y)\n return Data(X_res, Y_res, dataset.test.data, dataset.test.labels)\n\ndef main():\n X, y = make_classification(n_classes=2, class_sep=2,\n weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,\n n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)\n \n p = smote_dataset(Data(X, y, None, None))\n X_ = p.train.data\n y_ = p.train.labels\n print(X_.shape)\n print(y_.shape)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "sklearn.datasets.make_classification" ] ]
sanixa/P3GM_custom
[ "fbd8fb61ca58cc681fae7e8272df1655acf4d804" ]
[ "dp_utils/dp_pca.py" ]
[ "\"\"\" DP Principal Component Analysis\n\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>\n# Olivier Grisel <[email protected]>\n# Mathieu Blondel <[email protected]>\n# Denis A. Engemann <[email protected]>\n# Michael Eickenberg <[email protected]>\n# Giorgio Patrini <[email protected]>\n#\n# License: BSD 3 clause\n\nfrom math import log, sqrt\nimport numbers\n\nimport numpy as np\nimport scipy.stats as ss\nimport sklearn\n\nfrom sklearn.decomposition.base import _BasePCA\n\nclass DP_PCA(_BasePCA):\n\n def __init__(self, eps=1e-2, n_components=None, whiten=False, random_state=None):\n self.n_components = n_components\n self.whiten = whiten\n self.eps = eps\n self.random_state = random_state\n \n def compute_privacy(self):\n return self.eps\n\n def fit(self, X, y=None):\n self.max_norm = np.linalg.norm(X, axis=1).max()\n self._fit(X)\n return self\n\n\n def _fit(self, X):\n \n self.mean_ = np.mean(X, axis=0)\n data = X - self.mean_\n cov = np.dot(data.T, data)\n \n w = ss.wishart(df=data.shape[1]+1, scale=np.matrix(np.eye(data.shape[1]) * 3 * self.max_norm/(2*data.shape[0]*self.eps)))\n noise = w.rvs(1, random_state=self.random_state)\n \n cov = cov + noise\n \n cov = cov/data.shape[0]\n ev, evec = np.linalg.eig(cov)\n evec = evec.T\n self.components_ = evec[:self.n_components]" ]
[ [ "numpy.dot", "numpy.linalg.norm", "numpy.mean", "numpy.eye", "numpy.linalg.eig" ] ]
chihhao428/pytorch-optimizer
[ "e049b0526ccb70317faf0c6f8dd97ec02d04a5b2" ]
[ "tests/test_basic.py" ]
[ "import pytest\nimport torch\n\nimport torch_optimizer as optim\n\n\ndef rosenbrock(tensor):\n x, y = tensor\n return (1 - x) ** 2 + 1 * (y - x ** 2) ** 2\n\n\ndef quadratic(tensor):\n x, y = tensor\n a = 1.0\n b = 1.0\n return (x ** 2) / a + (y ** 2) / b\n\n\ndef beale(tensor):\n x, y = tensor\n f = (\n (1.5 - x + x * y) ** 2\n + (2.25 - x + x * y ** 2) ** 2\n + (2.625 - x + x * y ** 3) ** 2\n )\n return f\n\n\ncases = [\n (rosenbrock, (1.5, 1.5), (1, 1)),\n (quadratic, (1.5, 1.5), (0, 0)),\n (beale, (1.5, 1.5), (3, 0.5)),\n]\n\n\ndef ids(v):\n n = '{} {}'.format(v[0].__name__, v[1:])\n return n\n\n\ndef build_lookahead(*a, **kw):\n base = optim.Yogi(*a, **kw)\n return optim.Lookahead(base)\n\n\noptimizers = [\n (optim.A2GradUni, {'lips': 40, 'beta': 0.0001}, 800),\n (optim.PID, {'lr': 0.002, 'momentum': 0.8, 'weight_decay': 0.0001}, 900),\n (optim.QHM, {'lr': 0.02, 'momentum': 0.95, 'nu': 1}, 900),\n (\n optim.NovoGrad,\n {'lr': 2.9, 'betas': (0.9, 0.999), 'grad_averaging': True},\n 900,\n ),\n (optim.RAdam, {'lr': 0.01, 'betas': (0.9, 0.95), 'eps': 1e-3}, 800),\n (optim.SGDW, {'lr': 0.002, 'momentum': 0.91}, 900),\n (optim.DiffGrad, {'lr': 0.5}, 500),\n (optim.AdaMod, {'lr': 1.0}, 800),\n (optim.AdaBound, {'lr': 1.0}, 800),\n (optim.Yogi, {'lr': 1.0}, 500),\n (optim.AccSGD, {'lr': 0.015}, 800),\n (build_lookahead, {'lr': 1.0}, 500),\n (optim.QHAdam, {'lr': 1.0}, 500),\n (optim.AdamP, {'lr': 0.01, 'betas': (0.9, 0.95), 'eps': 1e-3}, 800),\n (optim.SGDP, {'lr': 0.002, 'momentum': 0.91}, 900),\n (optim.AggMo, {'lr': 0.003}, 1800),\n (optim.SWATS, {'lr': 0.1, 'amsgrad': True, 'nesterov': True}, 900),\n (optim.Adafactor, {'lr': None, 'decay_rate': -0.3, 'beta1': 0.9}, 800),\n (optim.AdaBelief, {'lr': 1.0}, 500),\n (optim.Adahessian, {'lr': 0.15, 'hessian_power': 0.6, 'seed': 0}, 900),\n]\n\n\[email protected]('case', cases, ids=ids)\[email protected]('optimizer_config', optimizers, ids=ids)\ndef test_benchmark_function(case, optimizer_config):\n func, initial_state, min_loc = case\n optimizer_class, config, iterations = optimizer_config\n\n x = torch.Tensor(initial_state).requires_grad_(True)\n x_min = torch.Tensor(min_loc)\n optimizer = optimizer_class([x], **config)\n for _ in range(iterations):\n optimizer.zero_grad()\n f = func(x)\n f.backward(retain_graph=True, create_graph=True)\n optimizer.step()\n assert torch.allclose(x, x_min, atol=0.001)\n\n name = optimizer.__class__.__name__\n assert name in optimizer.__repr__()\n" ]
[ [ "torch.allclose", "torch.Tensor" ] ]
Vandervir/PyLight
[ "694ba1d39010d4474f9fc581011ea8bbc7219b1a" ]
[ "main.py" ]
[ "import pyaudio\nimport numpy as np\nfrom magichome import MagicHomeApi\nimport cv2\nimport time\nfrom screen import grab_screen\nimport colorsysą\n\nregions = [(130, 130, 260, 260), (130, 760, 260, 890), (900, 480, 1120, 600), (1660, 130, 1780, 260),\n (1660, 760, 1780, 890)]\n\n\nclass ColorControl:\n parts = 10\n CHANNELS = 1\n RATE = 44100\n VERY_LOUD_SOUND_LEVEL = 5\n LOUD_SOUND_LEVEL = 3\n NORMAL_SOUND_LEVEL = 1\n QUIET_SOUND_LEVEL = 0.5\n VERY_QUIET_SOUND_LEVEL = 0.125\n\n VERY_LOUD_SOUND_RANGE = 0.4\n LOUD_SOUND_RANGE = 0.2\n NORMAL_SOUND_RANGE = 0.015\n QUIET_SOUND_RANGE = 0.005\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.controller.turn_off()\n\n def __init__(self):\n self.time_sleep = 0.05\n self.timer = 0\n\n self.region = (900, 480, 1120, 600)\n self.region = (0, 0, 1920, 1080)\n self.red_diff = 0\n self.green_diff = 0\n self.blue_diff = 0\n\n self.sound_level = 0.0\n\n self.previous_color = self._get_new_dominant_color()\n self.next_color = self._get_new_dominant_color()\n self._init_led()\n self.p = pyaudio.PyAudio()\n self.stream = self.p.open(format=pyaudio.paFloat32,\n channels=self.CHANNELS,\n rate=self.RATE,\n output=True,\n input=True,\n stream_callback=self._audio_callback)\n pass\n\n def run(self):\n # update colors\n while (True):\n if self._is_time_to_probe():\n print('probe')\n self.do_magic()\n\n self._update_colors()\n\n self.stream.start_stream()\n while self.stream.is_active():\n time.sleep(self.time_sleep)\n self.stream.stop_stream()\n # self.stream.close()\n # write color\n pass\n\n def do_magic(self):\n self.previous_color = self.next_color\n self.next_color = self._get_new_dominant_color()\n self.red_diff = self._split_parts(self.previous_color[0], self.next_color[0])\n self.green_diff = self._split_parts(self.previous_color[1], self.next_color[1])\n self.blue_diff = self._split_parts(self.previous_color[2], self.next_color[2])\n print(self.red_diff, self.green_diff, self.blue_diff, 'sound level {}'.format(self.sound_level))\n\n def _get_red(self):\n return self.previous_color[0] + (self.red_diff * self.timer)\n\n def _get_green(self):\n return self.previous_color[1] + (self.green_diff * self.timer)\n\n def _get_blue(self):\n return self.previous_color[2] + (self.blue_diff * self.timer)\n\n def _is_any_color_change(self):\n return self.red_diff + self.green_diff + self.blue_diff != 0\n\n def _get_white1(self):\n return 0\n\n def _get_white2(self):\n return 0\n\n def _split_parts(self, start, end):\n length = abs(start - end)\n\n end_part = int(length / self.parts)\n\n if start > end:\n return -end_part\n return end_part\n\n def _get_new_dominant_color(self):\n screen = grab_screen(self.region)\n data = np.reshape(screen, (-1, 3))\n data = np.float32(data)\n\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n flags = cv2.KMEANS_RANDOM_CENTERS\n compactness, labels, centers = cv2.kmeans(data, 1, None, criteria, 10, flags)\n\n return centers[0].astype(np.int32)\n\n def _init_led(self):\n # return\n self.controller = MagicHomeApi('10.10.123.3', 1)\n self.controller.get_status()\n self.controller.turn_on()\n\n def _update_colors(self):\n if not self._is_any_color_change():\n return\n # print(self._get_red(), self._get_green(), self._get_blue(), self._get_color_brightest(self._get_red(), self._get_green(), self._get_blue()))\n r, g, b = self._change_saturation_with_sound(self._get_red(), self._get_green(), self._get_blue())\n self.controller.update_device(r, g, b, self._get_white1(), self._get_white2())\n\n def 
_change_saturation_with_sound(self, r, g, b):\n h, s, v = colorsys.rgb_to_hsv(r, g, b)\n r,g,b = colorsys.hsv_to_rgb(h, s * self.sound_level, v)\n print(self.sound_level, abs(int(r)), abs(int(g)), abs(int(b)))\n return abs(int(r)), abs(int(g)), abs(int(b))\n\n def _get_color_brightest(self, red, green, blue):\n # ((Red value X 299) + (Green value X 587) + (Blue value X 114)) / 1000\n return ((red * 299) + (green * 587) + (blue * 114) / 1000)\n\n def get_new_colors(self):\n pass\n\n def _is_time_to_probe(self):\n if self.timer >= self.parts:\n self.timer = 0\n self.do_magic()\n else:\n self.timer += 1\n\n def _audio_callback(self, in_data, frame_count, time_info, flag):\n global b, a, fulldata, dry_data, frames\n audio_data = np.fromstring(in_data, dtype=np.float32)\n self.sound_level = self._parse_sound(audio_data.max())\n return (audio_data, pyaudio.paContinue)\n\n def _parse_sound(self, max_value):\n if max_value > self.VERY_LOUD_SOUND_RANGE:\n return self.VERY_LOUD_SOUND_LEVEL\n elif max_value > self.LOUD_SOUND_RANGE:\n return self.LOUD_SOUND_LEVEL\n elif max_value > self.NORMAL_SOUND_RANGE:\n return self.NORMAL_SOUND_LEVEL\n elif max_value > self.QUIET_SOUND_RANGE:\n return self.QUIET_SOUND_LEVEL\n else:\n return self.VERY_QUIET_SOUND_LEVEL\n\n\ncc = ColorControl()\ncc.run()\n" ]
[ [ "numpy.float32", "numpy.fromstring", "numpy.reshape" ] ]
Kajune/stylegan2-pytorch
[ "8a15338843a814c6d3b7c0151c814c8a6c75c028" ]
[ "stylegan2_pytorch/stylegan2_pytorch.py" ]
[ "import os\nimport sys\nimport math\nimport fire\nimport json\nimport urllib.request\nimport urllib.parse\nimport io\n\nfrom tqdm import tqdm\nfrom math import floor, log2\nfrom random import random\nfrom shutil import rmtree\nfrom functools import partial\nimport multiprocessing\nfrom contextlib import contextmanager, ExitStack\n\nimport numpy as np\n\nimport torch\nfrom torch import nn\nfrom torch.utils import data\nfrom torch.optim import Adam\nimport torch.nn.functional as F\nfrom torch.autograd import grad as torch_grad\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom kornia.filters import filter2D\n\nimport torchvision\nfrom torchvision import transforms\nfrom stylegan2_pytorch.version import __version__\nfrom stylegan2_pytorch.diff_augment import DiffAugment\n\nfrom vector_quantize_pytorch import VectorQuantize\nfrom linear_attention_transformer import ImageLinearAttention\n\nfrom PIL import Image\nfrom pathlib import Path\n\ntry:\n from apex import amp\n APEX_AVAILABLE = True\nexcept:\n APEX_AVAILABLE = False\n\nimport aim\n\nassert torch.cuda.is_available(), 'You need to have an Nvidia GPU with CUDA installed.'\n\n\n# constants\n\nNUM_CORES = multiprocessing.cpu_count()\nEXTS = ['jpg', 'jpeg', 'png']\n\n# helper classes\n\nclass NanException(Exception):\n pass\n\nclass EMA():\n def __init__(self, beta):\n super().__init__()\n self.beta = beta\n def update_average(self, old, new):\n if not exists(old):\n return new\n return old * self.beta + (1 - self.beta) * new\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.reshape(x.shape[0], -1)\n\nclass RandomApply(nn.Module):\n def __init__(self, prob, fn, fn_else = lambda x: x):\n super().__init__()\n self.fn = fn\n self.fn_else = fn_else\n self.prob = prob\n def forward(self, x):\n fn = self.fn if random() < self.prob else self.fn_else\n return fn(x)\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n def forward(self, x):\n return self.fn(x) + x\n\nclass Rezero(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n self.g = nn.Parameter(torch.zeros(1))\n def forward(self, x):\n return self.fn(x) * self.g\n\nclass PermuteToFrom(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n def forward(self, x):\n x = x.permute(0, 2, 3, 1)\n out, loss = self.fn(x)\n out = out.permute(0, 3, 1, 2)\n return out, loss\n\nclass Blur(nn.Module):\n def __init__(self):\n super().__init__()\n f = torch.Tensor([1, 2, 1])\n self.register_buffer('f', f)\n def forward(self, x):\n f = self.f\n f = f[None, None, :] * f [None, :, None]\n return filter2D(x, f, normalized=True)\n\n# one layer of self-attention and feedforward, for images\n\nattn_and_ff = lambda chan: nn.Sequential(*[\n Residual(Rezero(ImageLinearAttention(chan, norm_queries = True))),\n Residual(Rezero(nn.Sequential(nn.Conv2d(chan, chan * 2, 1), leaky_relu(), nn.Conv2d(chan * 2, chan, 1))))\n])\n\n# helpers\n\ndef exists(val):\n return val is not None\n\n@contextmanager\ndef null_context():\n yield\n\ndef combine_contexts(contexts):\n @contextmanager\n def multi_contexts():\n with ExitStack() as stack:\n yield [stack.enter_context(ctx()) for ctx in contexts]\n return multi_contexts\n\ndef default(value, d):\n return value if exists(value) else d\n\ndef cycle(iterable):\n while True:\n for i in iterable:\n yield i\n\ndef cast_list(el):\n return el if isinstance(el, list) else [el]\n\ndef is_empty(t):\n if isinstance(t, 
torch.Tensor):\n return t.nelement() == 0\n return not exists(t)\n\ndef raise_if_nan(t):\n if torch.isnan(t):\n raise NanException\n\ndef gradient_accumulate_contexts(gradient_accumulate_every, is_ddp, ddps):\n if is_ddp:\n num_no_syncs = gradient_accumulate_every - 1\n head = [combine_contexts(map(lambda ddp: ddp.no_sync, ddps))] * num_no_syncs\n tail = [null_context]\n contexts = head + tail\n else:\n contexts = [null_context] * gradient_accumulate_every\n\n for context in contexts:\n with context():\n yield\n\ndef loss_backwards(fp16, loss, optimizer, loss_id, **kwargs):\n if fp16:\n with amp.scale_loss(loss, optimizer, loss_id) as scaled_loss:\n scaled_loss.backward(**kwargs)\n else:\n loss.backward(**kwargs)\n\ndef gradient_penalty(images, output, weight = 10):\n batch_size = images.shape[0]\n gradients = torch_grad(outputs=output, inputs=images,\n grad_outputs=torch.ones(output.size(), device=images.device),\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n\n gradients = gradients.reshape(batch_size, -1)\n return weight * ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n\ndef calc_pl_lengths(styles, images):\n device = images.device\n num_pixels = images.shape[2] * images.shape[3]\n pl_noise = torch.randn(images.shape, device=device) / math.sqrt(num_pixels)\n outputs = (images * pl_noise).sum()\n\n pl_grads = torch_grad(outputs=outputs, inputs=styles,\n grad_outputs=torch.ones(outputs.shape, device=device),\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n\n return (pl_grads ** 2).sum(dim=2).mean(dim=1).sqrt()\n\ndef noise(n, latent_dim, device):\n return torch.randn(n, latent_dim).cuda(device)\n\ndef noise_list(n, layers, latent_dim, device):\n return [(noise(n, latent_dim, device), layers)]\n\ndef mixed_list(n, layers, latent_dim, device):\n tt = int(torch.rand(()).numpy() * layers)\n return noise_list(n, tt, latent_dim, device) + noise_list(n, layers - tt, latent_dim, device)\n\ndef latent_to_w(style_vectorizer, latent_descr):\n return [(style_vectorizer(z), num_layers) for z, num_layers in latent_descr]\n\ndef image_noise(n, im_size, device):\n return torch.FloatTensor(n, im_size, im_size, 1).uniform_(0., 1.).cuda(device)\n\ndef leaky_relu(p=0.2):\n return nn.LeakyReLU(p, inplace=True)\n\ndef evaluate_in_chunks(max_batch_size, model, *args):\n split_args = list(zip(*list(map(lambda x: x.split(max_batch_size, dim=0), args))))\n chunked_outputs = [model(*i) for i in split_args]\n if len(chunked_outputs) == 1:\n return chunked_outputs[0]\n return torch.cat(chunked_outputs, dim=0)\n\ndef styles_def_to_tensor(styles_def):\n return torch.cat([t[:, None, :].expand(-1, n, -1) for t, n in styles_def], dim=1)\n\ndef set_requires_grad(model, bool):\n for p in model.parameters():\n p.requires_grad = bool\n\ndef slerp(val, low, high):\n low_norm = low / torch.norm(low, dim=1, keepdim=True)\n high_norm = high / torch.norm(high, dim=1, keepdim=True)\n omega = torch.acos((low_norm * high_norm).sum(1))\n so = torch.sin(omega)\n res = (torch.sin((1.0 - val) * omega) / so).unsqueeze(1) * low + (torch.sin(val * omega) / so).unsqueeze(1) * high\n return res\n\n# dataset\n\ndef convert_rgb_to_transparent(image):\n if image.mode != 'RGBA':\n return image.convert('RGBA')\n return image\n\ndef convert_transparent_to_rgb(image):\n if image.mode != 'RGB':\n return image.convert('RGB')\n return image\n\nclass expand_greyscale(object):\n def __init__(self, transparent):\n self.transparent = transparent\n\n def __call__(self, tensor):\n channels = tensor.shape[0]\n num_target_channels = 
4 if self.transparent else 3\n\n if channels == num_target_channels:\n return tensor\n\n alpha = None\n if channels == 1:\n color = tensor.expand(3, -1, -1)\n elif channels == 2:\n color = tensor[:1].expand(3, -1, -1)\n alpha = tensor[1:]\n else:\n raise Exception(f'image with invalid number of channels given {channels}')\n\n if not exists(alpha) and self.transparent:\n alpha = torch.ones(1, *tensor.shape[1:], device=tensor.device)\n\n return color if not self.transparent else torch.cat((color, alpha))\n\ndef resize_to_minimum_size(min_size, image):\n if max(*image.size) < min_size:\n return torchvision.transforms.functional.resize(image, min_size)\n return image\n\nclass Dataset(data.Dataset):\n def __init__(self, folder, image_size, transparent = False, aug_prob = 0.):\n super().__init__()\n self.folder = folder\n self.image_size = image_size\n self.paths = [p for ext in EXTS for p in Path(f'{folder}').glob(f'**/*.{ext}')]\n assert len(self.paths) > 0, f'No images were found in {folder} for training'\n\n convert_image_fn = convert_transparent_to_rgb if not transparent else convert_rgb_to_transparent\n num_channels = 3 if not transparent else 4\n\n self.transform = transforms.Compose([\n transforms.Lambda(convert_image_fn),\n transforms.Lambda(partial(resize_to_minimum_size, image_size)),\n transforms.Resize(image_size),\n RandomApply(aug_prob, transforms.RandomResizedCrop(image_size, scale=(0.5, 1.0), ratio=(0.98, 1.02)), transforms.CenterCrop(image_size)),\n transforms.ToTensor(),\n transforms.Lambda(expand_greyscale(transparent))\n ])\n\n def __len__(self):\n return len(self.paths)\n\n def __getitem__(self, index):\n path = self.paths[index]\n img = Image.open(path)\n return self.transform(img)\n\nclass NetworkDataset(data.Dataset):\n def __init__(self, host, image_size, transparent = False, aug_prob = 0.):\n super().__init__()\n\n self.host = host\n self.image_size = image_size\n\n with urllib.request.urlopen(host + 'list') as response:\n self.paths = json.loads(response.read())\n self.paths = [path.replace('\\\\', '/') for path in self.paths]\n assert len(self.paths) > 0, f'No images were found in {host} for training'\n\n convert_image_fn = convert_transparent_to_rgb if not transparent else convert_rgb_to_transparent\n num_channels = 3 if not transparent else 4\n\n self.transform = transforms.Compose([\n transforms.Lambda(convert_image_fn),\n transforms.Lambda(partial(resize_to_minimum_size, image_size)),\n transforms.Resize(image_size),\n RandomApply(aug_prob, transforms.RandomResizedCrop(image_size, scale=(0.5, 1.0), ratio=(0.98, 1.02)), transforms.CenterCrop(image_size)),\n transforms.ToTensor(),\n transforms.Lambda(expand_greyscale(transparent))\n ])\n\n def __len__(self):\n return len(self.paths)\n\n def __getitem__(self, index):\n path = self.paths[index]\n request = urllib.request.urlopen(self.host + urllib.parse.quote(path))\n img = Image.open(io.BytesIO(request.read()))\n return self.transform(img)\n\n# augmentations\n\ndef random_hflip(tensor, prob):\n if prob > random():\n return tensor\n return torch.flip(tensor, dims=(3,))\n\nclass AugWrapper(nn.Module):\n def __init__(self, D, image_size):\n super().__init__()\n self.D = D\n\n def forward(self, images, prob = 0., types = [], detach = False):\n if random() < prob:\n images = random_hflip(images, prob=0.5)\n images = DiffAugment(images, types=types)\n\n if detach:\n images = images.detach()\n\n return self.D(images)\n\n# stylegan2 classes\n\nclass EqualLinear(nn.Module):\n def __init__(self, in_dim, out_dim, lr_mul = 1, 
bias = True):\n super().__init__()\n self.weight = nn.Parameter(torch.randn(out_dim, in_dim))\n if bias:\n self.bias = nn.Parameter(torch.zeros(out_dim))\n\n self.lr_mul = lr_mul\n\n def forward(self, input):\n return F.linear(input, self.weight * self.lr_mul, bias=self.bias * self.lr_mul)\n\nclass StyleVectorizer(nn.Module):\n def __init__(self, emb, depth, lr_mul = 0.1):\n super().__init__()\n\n layers = []\n for i in range(depth):\n layers.extend([EqualLinear(emb, emb, lr_mul), leaky_relu()])\n\n self.net = nn.Sequential(*layers)\n\n def forward(self, x):\n x = F.normalize(x, dim=1)\n return self.net(x)\n\nclass RGBBlock(nn.Module):\n def __init__(self, latent_dim, input_channel, upsample, rgba = False):\n super().__init__()\n self.input_channel = input_channel\n self.to_style = nn.Linear(latent_dim, input_channel)\n\n out_filters = 3 if not rgba else 4\n self.conv = Conv2DMod(input_channel, out_filters, 1, demod=False)\n\n self.upsample = nn.Sequential(\n nn.Upsample(scale_factor = 2, mode='bilinear', align_corners=False),\n Blur()\n ) if upsample else None\n\n def forward(self, x, prev_rgb, istyle):\n b, c, h, w = x.shape\n style = self.to_style(istyle)\n x = self.conv(x, style)\n\n if exists(prev_rgb):\n x = x + prev_rgb\n\n if exists(self.upsample):\n x = self.upsample(x)\n\n return x\n\nclass Conv2DMod(nn.Module):\n def __init__(self, in_chan, out_chan, kernel, demod=True, stride=1, dilation=1, eps = 1e-8, **kwargs):\n super().__init__()\n self.filters = out_chan\n self.demod = demod\n self.kernel = kernel\n self.stride = stride\n self.dilation = dilation\n self.weight = nn.Parameter(torch.randn((out_chan, in_chan, kernel, kernel)))\n self.eps = eps\n nn.init.kaiming_normal_(self.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')\n\n def _get_same_padding(self, size, kernel, dilation, stride):\n return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2\n\n def forward(self, x, y):\n b, c, h, w = x.shape\n\n w1 = y[:, None, :, None, None]\n w2 = self.weight[None, :, :, :, :]\n weights = w2 * (w1 + 1)\n\n if self.demod:\n d = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps)\n weights = weights * d\n\n x = x.reshape(1, -1, h, w)\n\n _, _, *ws = weights.shape\n weights = weights.reshape(b * self.filters, *ws)\n\n padding = self._get_same_padding(h, self.kernel, self.dilation, self.stride)\n x = F.conv2d(x, weights, padding=padding, groups=b)\n\n x = x.reshape(-1, self.filters, h, w)\n return x\n\nclass GeneratorBlock(nn.Module):\n def __init__(self, latent_dim, input_channels, filters, upsample = True, upsample_rgb = True, rgba = False):\n super().__init__()\n self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) if upsample else None\n\n self.to_style1 = nn.Linear(latent_dim, input_channels)\n self.to_noise1 = nn.Linear(1, filters)\n self.conv1 = Conv2DMod(input_channels, filters, 3)\n \n self.to_style2 = nn.Linear(latent_dim, filters)\n self.to_noise2 = nn.Linear(1, filters)\n self.conv2 = Conv2DMod(filters, filters, 3)\n\n self.activation = leaky_relu()\n self.to_rgb = RGBBlock(latent_dim, filters, upsample_rgb, rgba)\n\n def forward(self, x, prev_rgb, istyle, inoise):\n if exists(self.upsample):\n x = self.upsample(x)\n\n inoise = inoise[:, :x.shape[2], :x.shape[3], :]\n noise1 = self.to_noise1(inoise).permute((0, 3, 2, 1))\n noise2 = self.to_noise2(inoise).permute((0, 3, 2, 1))\n\n style1 = self.to_style1(istyle)\n x = self.conv1(x, style1)\n x = self.activation(x + noise1)\n\n style2 = self.to_style2(istyle)\n x = 
self.conv2(x, style2)\n x = self.activation(x + noise2)\n\n rgb = self.to_rgb(x, prev_rgb, istyle)\n return x, rgb\n\nclass DiscriminatorBlock(nn.Module):\n def __init__(self, input_channels, filters, downsample=True):\n super().__init__()\n self.conv_res = nn.Conv2d(input_channels, filters, 1, stride = (2 if downsample else 1))\n\n self.net = nn.Sequential(\n nn.Conv2d(input_channels, filters, 3, padding=1),\n leaky_relu(),\n nn.Conv2d(filters, filters, 3, padding=1),\n leaky_relu()\n )\n\n self.downsample = nn.Sequential(\n Blur(),\n nn.Conv2d(filters, filters, 3, padding = 1, stride = 2)\n ) if downsample else None\n\n def forward(self, x):\n res = self.conv_res(x)\n x = self.net(x)\n if exists(self.downsample):\n x = self.downsample(x)\n x = (x + res) * (1 / math.sqrt(2))\n return x\n\nclass Generator(nn.Module):\n def __init__(self, image_size, latent_dim, network_capacity = 16, transparent = False, attn_layers = [], no_const = False, fmap_max = 512):\n super().__init__()\n self.image_size = image_size\n self.latent_dim = latent_dim\n self.num_layers = int(log2(image_size) - 1)\n\n filters = [network_capacity * (2 ** (i + 1)) for i in range(self.num_layers)][::-1]\n\n set_fmap_max = partial(min, fmap_max)\n filters = list(map(set_fmap_max, filters))\n init_channels = filters[0]\n filters = [init_channels, *filters]\n\n in_out_pairs = zip(filters[:-1], filters[1:])\n self.no_const = no_const\n\n if no_const:\n self.to_initial_block = nn.ConvTranspose2d(latent_dim, init_channels, 4, 1, 0, bias=False)\n else:\n self.initial_block = nn.Parameter(torch.randn((1, init_channels, 4, 4)))\n\n self.initial_conv = nn.Conv2d(filters[0], filters[0], 3, padding=1)\n self.blocks = nn.ModuleList([])\n self.attns = nn.ModuleList([])\n\n for ind, (in_chan, out_chan) in enumerate(in_out_pairs):\n not_first = ind != 0\n not_last = ind != (self.num_layers - 1)\n num_layer = self.num_layers - ind\n\n attn_fn = attn_and_ff(in_chan) if num_layer in attn_layers else None\n\n self.attns.append(attn_fn)\n\n block = GeneratorBlock(\n latent_dim,\n in_chan,\n out_chan,\n upsample = not_first,\n upsample_rgb = not_last,\n rgba = transparent\n )\n self.blocks.append(block)\n\n def forward(self, styles, input_noise):\n batch_size = styles.shape[0]\n image_size = self.image_size\n\n if self.no_const:\n avg_style = styles.mean(dim=1)[:, :, None, None]\n x = self.to_initial_block(avg_style)\n else:\n x = self.initial_block.expand(batch_size, -1, -1, -1)\n\n rgb = None\n styles = styles.transpose(0, 1)\n x = self.initial_conv(x)\n\n for style, block, attn in zip(styles, self.blocks, self.attns):\n if exists(attn):\n x = attn(x)\n x, rgb = block(x, rgb, style, input_noise)\n\n return rgb\n\nclass Discriminator(nn.Module):\n def __init__(self, image_size, network_capacity = 16, fq_layers = [], fq_dict_size = 256, attn_layers = [], transparent = False, fmap_max = 512):\n super().__init__()\n num_layers = int(log2(image_size) - 1)\n num_init_filters = 3 if not transparent else 4\n\n blocks = []\n filters = [num_init_filters] + [(network_capacity * 4) * (2 ** i) for i in range(num_layers + 1)]\n\n set_fmap_max = partial(min, fmap_max)\n filters = list(map(set_fmap_max, filters))\n chan_in_out = list(zip(filters[:-1], filters[1:]))\n\n blocks = []\n attn_blocks = []\n quantize_blocks = []\n\n for ind, (in_chan, out_chan) in enumerate(chan_in_out):\n num_layer = ind + 1\n is_not_last = ind != (len(chan_in_out) - 1)\n\n block = DiscriminatorBlock(in_chan, out_chan, downsample = is_not_last)\n blocks.append(block)\n\n attn_fn = 
attn_and_ff(out_chan) if num_layer in attn_layers else None\n\n attn_blocks.append(attn_fn)\n\n quantize_fn = PermuteToFrom(VectorQuantize(out_chan, fq_dict_size)) if num_layer in fq_layers else None\n quantize_blocks.append(quantize_fn)\n\n self.blocks = nn.ModuleList(blocks)\n self.attn_blocks = nn.ModuleList(attn_blocks)\n self.quantize_blocks = nn.ModuleList(quantize_blocks)\n\n chan_last = filters[-1]\n latent_dim = 2 * 2 * chan_last\n\n self.final_conv = nn.Conv2d(chan_last, chan_last, 3, padding=1)\n self.flatten = Flatten()\n self.to_logit = nn.Linear(latent_dim, 1)\n\n def forward(self, x):\n b, *_ = x.shape\n\n quantize_loss = torch.zeros(1).to(x)\n\n for (block, attn_block, q_block) in zip(self.blocks, self.attn_blocks, self.quantize_blocks):\n x = block(x)\n\n if exists(attn_block):\n x = attn_block(x)\n\n if exists(q_block):\n x, _, loss = q_block(x)\n quantize_loss += loss\n\n x = self.final_conv(x)\n x = self.flatten(x)\n x = self.to_logit(x)\n return x.squeeze(), quantize_loss\n\nclass StyleGAN2(nn.Module):\n def __init__(self, image_size, latent_dim = 512, fmap_max = 512, style_depth = 8, network_capacity = 16, transparent = False, fp16 = False, cl_reg = False, steps = 1, lr = 1e-4, ttur_mult = 2, fq_layers = [], fq_dict_size = 256, attn_layers = [], no_const = False, lr_mlp = 0.1, rank = 0):\n super().__init__()\n self.lr = lr\n self.steps = steps\n self.ema_updater = EMA(0.995)\n\n self.S = StyleVectorizer(latent_dim, style_depth, lr_mul = lr_mlp)\n self.G = Generator(image_size, latent_dim, network_capacity, transparent = transparent, attn_layers = attn_layers, no_const = no_const, fmap_max = fmap_max)\n self.D = Discriminator(image_size, network_capacity, fq_layers = fq_layers, fq_dict_size = fq_dict_size, attn_layers = attn_layers, transparent = transparent, fmap_max = fmap_max)\n\n self.SE = StyleVectorizer(latent_dim, style_depth, lr_mul = lr_mlp)\n self.GE = Generator(image_size, latent_dim, network_capacity, transparent = transparent, attn_layers = attn_layers, no_const = no_const)\n\n self.D_cl = None\n\n if cl_reg:\n from contrastive_learner import ContrastiveLearner\n # experimental contrastive loss discriminator regularization\n assert not transparent, 'contrastive loss regularization does not work with transparent images yet'\n self.D_cl = ContrastiveLearner(self.D, image_size, hidden_layer='flatten')\n\n # wrapper for augmenting all images going into the discriminator\n self.D_aug = AugWrapper(self.D, image_size)\n\n # turn off grad for exponential moving averages\n set_requires_grad(self.SE, False)\n set_requires_grad(self.GE, False)\n\n # init optimizers\n generator_params = list(self.G.parameters()) + list(self.S.parameters())\n self.G_opt = Adam(generator_params, lr = self.lr, betas=(0.5, 0.9))\n self.D_opt = Adam(self.D.parameters(), lr = self.lr * ttur_mult, betas=(0.5, 0.9))\n\n # init weights\n self._init_weights()\n self.reset_parameter_averaging()\n\n self.cuda(rank)\n\n # startup apex mixed precision\n self.fp16 = fp16\n if fp16:\n (self.S, self.G, self.D, self.SE, self.GE), (self.G_opt, self.D_opt) = amp.initialize([self.S, self.G, self.D, self.SE, self.GE], [self.G_opt, self.D_opt], opt_level='O1', num_losses=3)\n\n def _init_weights(self):\n for m in self.modules():\n if type(m) in {nn.Conv2d, nn.Linear}:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')\n\n for block in self.G.blocks:\n nn.init.zeros_(block.to_noise1.weight)\n nn.init.zeros_(block.to_noise2.weight)\n nn.init.zeros_(block.to_noise1.bias)\n 
nn.init.zeros_(block.to_noise2.bias)\n\n def EMA(self):\n def update_moving_average(ma_model, current_model):\n for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):\n old_weight, up_weight = ma_params.data, current_params.data\n ma_params.data = self.ema_updater.update_average(old_weight, up_weight)\n\n update_moving_average(self.SE, self.S)\n update_moving_average(self.GE, self.G)\n\n def reset_parameter_averaging(self):\n self.SE.load_state_dict(self.S.state_dict())\n self.GE.load_state_dict(self.G.state_dict())\n\n def forward(self, x):\n return x\n\nclass Trainer():\n def __init__(\n self,\n name = 'default',\n results_dir = 'results',\n models_dir = 'models',\n base_dir = './',\n image_size = 128,\n network_capacity = 16,\n fmap_max = 512,\n transparent = False,\n batch_size = 4,\n mixed_prob = 0.9,\n gradient_accumulate_every=1,\n lr = 2e-4,\n lr_mlp = 0.1,\n ttur_mult = 2,\n rel_disc_loss = False,\n num_workers = None,\n save_every = 1000,\n evaluate_every = 1000,\n num_image_tiles = 8,\n trunc_psi = 0.6,\n fp16 = False,\n cl_reg = False,\n no_pl_reg = False,\n fq_layers = [],\n fq_dict_size = 256,\n attn_layers = [],\n no_const = False,\n aug_prob = 0.,\n aug_types = ['translation', 'cutout'],\n top_k_training = False,\n generator_top_k_gamma = 0.99,\n generator_top_k_frac = 0.5,\n dataset_aug_prob = 0.,\n calculate_fid_every = None,\n calculate_fid_num_images = 12800,\n clear_fid_cache = False,\n is_ddp = False,\n rank = 0,\n world_size = 1,\n log = False,\n *args,\n **kwargs\n ):\n self.GAN_params = [args, kwargs]\n self.GAN = None\n\n self.name = name\n\n base_dir = Path(base_dir)\n self.base_dir = base_dir\n self.results_dir = base_dir / results_dir\n self.models_dir = base_dir / models_dir\n self.fid_dir = base_dir / 'fid' / name\n self.config_path = self.models_dir / name / '.config.json'\n\n assert log2(image_size).is_integer(), 'image size must be a power of 2 (64, 128, 256, 512, 1024)'\n self.image_size = image_size\n self.network_capacity = network_capacity\n self.fmap_max = fmap_max\n self.transparent = transparent\n\n self.fq_layers = cast_list(fq_layers)\n self.fq_dict_size = fq_dict_size\n self.has_fq = len(self.fq_layers) > 0\n\n self.attn_layers = cast_list(attn_layers)\n self.no_const = no_const\n\n self.aug_prob = aug_prob\n self.aug_types = aug_types\n\n self.lr = lr\n self.lr_mlp = lr_mlp\n self.ttur_mult = ttur_mult\n self.rel_disc_loss = rel_disc_loss\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.mixed_prob = mixed_prob\n\n self.num_image_tiles = num_image_tiles\n self.evaluate_every = evaluate_every\n self.save_every = save_every\n self.steps = 0\n\n self.av = None\n self.trunc_psi = trunc_psi\n\n self.no_pl_reg = no_pl_reg\n self.pl_mean = None\n\n self.gradient_accumulate_every = gradient_accumulate_every\n\n assert not fp16 or fp16 and APEX_AVAILABLE, 'Apex is not available for you to use mixed precision training'\n self.fp16 = fp16\n\n self.cl_reg = cl_reg\n\n self.d_loss = 0\n self.g_loss = 0\n self.q_loss = None\n self.last_gp_loss = None\n self.last_cr_loss = None\n self.last_fid = None\n\n self.pl_length_ma = EMA(0.99)\n self.init_folders()\n\n self.loader = None\n self.dataset_aug_prob = dataset_aug_prob\n\n self.calculate_fid_every = calculate_fid_every\n self.calculate_fid_num_images = calculate_fid_num_images\n self.clear_fid_cache = clear_fid_cache\n\n self.top_k_training = top_k_training\n self.generator_top_k_gamma = generator_top_k_gamma\n self.generator_top_k_frac = 
generator_top_k_frac\n\n assert not (is_ddp and cl_reg), 'Contrastive loss regularization does not work well with multi GPUs yet'\n self.is_ddp = is_ddp\n self.is_main = rank == 0\n self.rank = rank\n self.world_size = world_size\n\n self.logger = aim.Session(experiment=name) if log else None\n\n @property\n def image_extension(self):\n return 'jpg' if not self.transparent else 'png'\n\n @property\n def checkpoint_num(self):\n return floor(self.steps // self.save_every)\n\n @property\n def hparams(self):\n return {'image_size': self.image_size, 'network_capacity': self.network_capacity}\n \n def init_GAN(self):\n args, kwargs = self.GAN_params\n self.GAN = StyleGAN2(lr = self.lr, lr_mlp = self.lr_mlp, ttur_mult = self.ttur_mult, image_size = self.image_size, network_capacity = self.network_capacity, fmap_max = self.fmap_max, transparent = self.transparent, fq_layers = self.fq_layers, fq_dict_size = self.fq_dict_size, attn_layers = self.attn_layers, fp16 = self.fp16, cl_reg = self.cl_reg, no_const = self.no_const, rank = self.rank, *args, **kwargs)\n\n if self.is_ddp:\n ddp_kwargs = {'device_ids': [self.rank]}\n self.S_ddp = DDP(self.GAN.S, **ddp_kwargs)\n self.G_ddp = DDP(self.GAN.G, **ddp_kwargs)\n self.D_ddp = DDP(self.GAN.D, **ddp_kwargs)\n self.D_aug_ddp = DDP(self.GAN.D_aug, **ddp_kwargs)\n\n if exists(self.logger):\n self.logger.set_params(self.hparams)\n\n def write_config(self):\n self.config_path.write_text(json.dumps(self.config()))\n\n def load_config(self):\n config = self.config() if not self.config_path.exists() else json.loads(self.config_path.read_text())\n self.image_size = config['image_size']\n self.network_capacity = config['network_capacity']\n self.transparent = config['transparent']\n self.fq_layers = config['fq_layers']\n self.fq_dict_size = config['fq_dict_size']\n self.fmap_max = config.pop('fmap_max', 512)\n self.attn_layers = config.pop('attn_layers', [])\n self.no_const = config.pop('no_const', False)\n self.lr_mlp = config.pop('lr_mlp', 0.1)\n del self.GAN\n self.init_GAN()\n\n def config(self):\n return {'image_size': self.image_size, 'network_capacity': self.network_capacity, 'lr_mlp': self.lr_mlp, 'transparent': self.transparent, 'fq_layers': self.fq_layers, 'fq_dict_size': self.fq_dict_size, 'attn_layers': self.attn_layers, 'no_const': self.no_const}\n\n def set_data_src(self, folder):\n self.dataset = NetworkDataset(folder, self.image_size, transparent = self.transparent, aug_prob = self.dataset_aug_prob)\n num_workers = num_workers = default(self.num_workers, NUM_CORES if not self.is_ddp else 0)\n sampler = DistributedSampler(self.dataset, rank=self.rank, num_replicas=self.world_size, shuffle=True) if self.is_ddp else None\n dataloader = data.DataLoader(self.dataset, num_workers = num_workers, batch_size = math.ceil(self.batch_size / self.world_size), sampler = sampler, shuffle = not self.is_ddp, drop_last = True, pin_memory = True)\n self.loader = cycle(dataloader)\n\n # auto set augmentation prob for user if dataset is detected to be low\n num_samples = len(self.dataset)\n if not exists(self.aug_prob) and num_samples < 1e5:\n self.aug_prob = min(0.5, (1e5 - num_samples) * 3e-6)\n print(f'autosetting augmentation probability to {round(self.aug_prob * 100)}%')\n\n def train(self):\n assert exists(self.loader), 'You must first initialize the data source with `.set_data_src(<folder of images>)`'\n\n if not exists(self.GAN):\n self.init_GAN()\n\n self.GAN.train()\n total_disc_loss = torch.tensor(0.).cuda(self.rank)\n total_gen_loss = 
torch.tensor(0.).cuda(self.rank)\n\n batch_size = math.ceil(self.batch_size / self.world_size)\n\n image_size = self.GAN.G.image_size\n latent_dim = self.GAN.G.latent_dim\n num_layers = self.GAN.G.num_layers\n\n aug_prob = self.aug_prob\n aug_types = self.aug_types\n aug_kwargs = {'prob': aug_prob, 'types': aug_types}\n\n apply_gradient_penalty = self.steps % 4 == 0\n apply_path_penalty = not self.no_pl_reg and self.steps > 5000 and self.steps % 32 == 0\n apply_cl_reg_to_generated = self.steps > 20000\n\n S = self.GAN.S if not self.is_ddp else self.S_ddp\n G = self.GAN.G if not self.is_ddp else self.G_ddp\n D = self.GAN.D if not self.is_ddp else self.D_ddp\n D_aug = self.GAN.D_aug if not self.is_ddp else self.D_aug_ddp\n\n backwards = partial(loss_backwards, self.fp16)\n\n if exists(self.GAN.D_cl):\n self.GAN.D_opt.zero_grad()\n\n if apply_cl_reg_to_generated:\n for i in range(self.gradient_accumulate_every):\n get_latents_fn = mixed_list if random() < self.mixed_prob else noise_list\n style = get_latents_fn(batch_size, num_layers, latent_dim, device=self.rank)\n noise = image_noise(batch_size, image_size, device=self.rank)\n\n w_space = latent_to_w(self.GAN.S, style)\n w_styles = styles_def_to_tensor(w_space)\n\n generated_images = self.GAN.G(w_styles, noise)\n self.GAN.D_cl(generated_images.clone().detach(), accumulate=True)\n\n for i in range(self.gradient_accumulate_every):\n image_batch = next(self.loader).cuda(self.rank)\n self.GAN.D_cl(image_batch, accumulate=True)\n\n loss = self.GAN.D_cl.calculate_loss()\n self.last_cr_loss = loss.clone().detach().item()\n backwards(loss, self.GAN.D_opt, loss_id = 0)\n\n self.GAN.D_opt.step()\n\n # train discriminator\n\n avg_pl_length = self.pl_mean\n self.GAN.D_opt.zero_grad()\n\n for i in gradient_accumulate_contexts(self.gradient_accumulate_every, self.is_ddp, ddps=[D_aug, S, G]):\n get_latents_fn = mixed_list if random() < self.mixed_prob else noise_list\n style = get_latents_fn(batch_size, num_layers, latent_dim, device=self.rank)\n noise = image_noise(batch_size, image_size, device=self.rank)\n\n w_space = latent_to_w(S, style)\n w_styles = styles_def_to_tensor(w_space)\n\n generated_images = G(w_styles, noise)\n fake_output, fake_q_loss = D_aug(generated_images.clone().detach(), detach = True, **aug_kwargs)\n\n image_batch = next(self.loader).cuda(self.rank)\n image_batch.requires_grad_()\n real_output, real_q_loss = D_aug(image_batch, **aug_kwargs)\n\n real_output_loss = real_output\n fake_output_loss = fake_output\n\n if self.rel_disc_loss:\n real_output_loss = real_output_loss - fake_output.mean()\n fake_output_loss = fake_output_loss - real_output.mean()\n\n divergence = (F.relu(1 + real_output_loss) + F.relu(1 - fake_output_loss)).mean()\n disc_loss = divergence\n\n if self.has_fq:\n quantize_loss = (fake_q_loss + real_q_loss).mean()\n self.q_loss = float(quantize_loss.detach().item())\n\n disc_loss = disc_loss + quantize_loss\n\n if apply_gradient_penalty:\n gp = gradient_penalty(image_batch, real_output)\n self.last_gp_loss = gp.clone().detach().item()\n self.track(self.last_gp_loss, 'GP')\n disc_loss = disc_loss + gp\n\n disc_loss = disc_loss / self.gradient_accumulate_every\n disc_loss.register_hook(raise_if_nan)\n backwards(disc_loss, self.GAN.D_opt, loss_id = 1)\n\n total_disc_loss += divergence.detach().item() / self.gradient_accumulate_every\n\n self.d_loss = float(total_disc_loss)\n self.track(self.d_loss, 'D')\n\n self.GAN.D_opt.step()\n\n # train generator\n\n self.GAN.G_opt.zero_grad()\n\n for i in 
gradient_accumulate_contexts(self.gradient_accumulate_every, self.is_ddp, ddps=[S, G, D_aug]):\n style = get_latents_fn(batch_size, num_layers, latent_dim, device=self.rank)\n noise = image_noise(batch_size, image_size, device=self.rank)\n\n w_space = latent_to_w(S, style)\n w_styles = styles_def_to_tensor(w_space)\n\n generated_images = G(w_styles, noise)\n fake_output, _ = D_aug(generated_images, **aug_kwargs)\n fake_output_loss = fake_output\n\n if self.top_k_training:\n epochs = (self.steps * batch_size * self.gradient_accumulate_every) / len(self.dataset)\n k_frac = max(self.generator_top_k_gamma ** epochs, self.generator_top_k_frac)\n k = math.ceil(batch_size * k_frac)\n\n if k != batch_size:\n fake_output_loss, _ = fake_output_loss.topk(k=k, largest=False)\n\n loss = fake_output_loss.mean()\n gen_loss = loss\n\n if apply_path_penalty:\n pl_lengths = calc_pl_lengths(w_styles, generated_images)\n avg_pl_length = np.mean(pl_lengths.detach().cpu().numpy())\n\n if not is_empty(self.pl_mean):\n pl_loss = ((pl_lengths - self.pl_mean) ** 2).mean()\n if not torch.isnan(pl_loss):\n gen_loss = gen_loss + pl_loss\n\n gen_loss = gen_loss / self.gradient_accumulate_every\n gen_loss.register_hook(raise_if_nan)\n backwards(gen_loss, self.GAN.G_opt, loss_id = 2)\n\n total_gen_loss += loss.detach().item() / self.gradient_accumulate_every\n\n self.g_loss = float(total_gen_loss)\n self.track(self.g_loss, 'G')\n\n self.GAN.G_opt.step()\n\n # calculate moving averages\n\n if apply_path_penalty and not np.isnan(avg_pl_length):\n self.pl_mean = self.pl_length_ma.update_average(self.pl_mean, avg_pl_length)\n self.track(self.pl_mean, 'PL')\n\n if self.is_main and self.steps % 10 == 0 and self.steps > 20000:\n self.GAN.EMA()\n\n if self.is_main and self.steps <= 25000 and self.steps % 1000 == 2:\n self.GAN.reset_parameter_averaging()\n\n # save from NaN errors\n\n if any(torch.isnan(l) for l in (total_gen_loss, total_disc_loss)):\n print(f'NaN detected for generator or discriminator. 
Loading from checkpoint #{self.checkpoint_num}')\n self.load(self.checkpoint_num)\n raise NanException\n\n # periodically save results\n\n if self.is_main:\n if self.steps % self.save_every == 0:\n self.save(self.checkpoint_num)\n\n if self.steps % self.evaluate_every == 0 or (self.steps % 100 == 0 and self.steps < 2500):\n self.evaluate(floor(self.steps / self.evaluate_every))\n\n if exists(self.calculate_fid_every) and self.steps % self.calculate_fid_every == 0 and self.steps != 0:\n num_batches = math.ceil(self.calculate_fid_num_images / self.batch_size)\n fid = self.calculate_fid(num_batches)\n self.last_fid = fid\n\n with open(str(self.results_dir / self.name / f'fid_scores.txt'), 'a') as f:\n f.write(f'{self.steps},{fid}\\n')\n\n self.steps += 1\n self.av = None\n\n @torch.no_grad()\n def evaluate(self, num = 0, trunc = 1.0):\n self.GAN.eval()\n ext = self.image_extension\n num_rows = self.num_image_tiles\n \n latent_dim = self.GAN.G.latent_dim\n image_size = self.GAN.G.image_size\n num_layers = self.GAN.G.num_layers\n\n # latents and noise\n\n latents = noise_list(num_rows ** 2, num_layers, latent_dim, device=self.rank)\n n = image_noise(num_rows ** 2, image_size, device=self.rank)\n\n # regular\n\n generated_images = self.generate_truncated(self.GAN.S, self.GAN.G, latents, n, trunc_psi = self.trunc_psi)\n torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}.{ext}'), nrow=num_rows)\n \n # moving averages\n\n generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, latents, n, trunc_psi = self.trunc_psi)\n torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}-ema.{ext}'), nrow=num_rows)\n\n # mixing regularities\n\n def tile(a, dim, n_tile):\n init_dim = a.size(dim)\n repeat_idx = [1] * a.dim()\n repeat_idx[dim] = n_tile\n a = a.repeat(*(repeat_idx))\n order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)])).cuda(self.rank)\n return torch.index_select(a, dim, order_index)\n\n nn = noise(num_rows, latent_dim, device=self.rank)\n tmp1 = tile(nn, 0, num_rows)\n tmp2 = nn.repeat(num_rows, 1)\n\n tt = int(num_layers / 2)\n mixed_latents = [(tmp1, tt), (tmp2, num_layers - tt)]\n\n generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, mixed_latents, n, trunc_psi = self.trunc_psi)\n torchvision.utils.save_image(generated_images, str(self.results_dir / self.name / f'{str(num)}-mr.{ext}'), nrow=num_rows)\n\n @torch.no_grad()\n def calculate_fid(self, num_batches):\n from pytorch_fid import fid_score\n torch.cuda.empty_cache()\n\n real_path = self.fid_dir / 'real'\n fake_path = self.fid_dir / 'fake'\n\n # remove any existing files used for fid calculation and recreate directories\n\n if not real_path.exists() or self.clear_fid_cache:\n rmtree(real_path, ignore_errors=True)\n os.makedirs(real_path)\n\n for batch_num in tqdm(range(num_batches), desc='calculating FID - saving reals'):\n real_batch = next(self.loader)\n for k, image in enumerate(real_batch.unbind(0)):\n filename = str(k + batch_num * self.batch_size)\n torchvision.utils.save_image(image, str(real_path / f'{filename}.png'))\n\n # generate a bunch of fake images in results / name / fid_fake\n\n rmtree(fake_path, ignore_errors=True)\n os.makedirs(fake_path)\n\n self.GAN.eval()\n ext = self.image_extension\n\n latent_dim = self.GAN.G.latent_dim\n image_size = self.GAN.G.image_size\n num_layers = self.GAN.G.num_layers\n\n for batch_num in tqdm(range(num_batches), 
desc='calculating FID - saving generated'):\n # latents and noise\n latents = noise_list(self.batch_size, num_layers, latent_dim, device=self.rank)\n noise = image_noise(self.batch_size, image_size, device=self.rank)\n\n # moving averages\n generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, latents, noise, trunc_psi = self.trunc_psi)\n\n for j, image in enumerate(generated_images.unbind(0)):\n torchvision.utils.save_image(image, str(fake_path / f'{str(j + batch_num * self.batch_size)}-ema.{ext}'))\n\n return fid_score.calculate_fid_given_paths([str(real_path), str(fake_path)], 256, noise.device, 2048)\n\n @torch.no_grad()\n def truncate_style(self, tensor, trunc_psi = 0.75):\n S = self.GAN.S\n batch_size = self.batch_size\n latent_dim = self.GAN.G.latent_dim\n\n if not exists(self.av):\n z = noise(2000, latent_dim, device=self.rank)\n samples = evaluate_in_chunks(batch_size, S, z).cpu().numpy()\n self.av = np.mean(samples, axis = 0)\n self.av = np.expand_dims(self.av, axis = 0)\n\n av_torch = torch.from_numpy(self.av).cuda(self.rank)\n tensor = trunc_psi * (tensor - av_torch) + av_torch\n return tensor\n\n @torch.no_grad()\n def truncate_style_defs(self, w, trunc_psi = 0.75):\n w_space = []\n for tensor, num_layers in w:\n tensor = self.truncate_style(tensor, trunc_psi = trunc_psi) \n w_space.append((tensor, num_layers))\n return w_space\n\n @torch.no_grad()\n def generate_truncated(self, S, G, style, noi, trunc_psi = 0.75, num_image_tiles = 8):\n w = map(lambda t: (S(t[0]), t[1]), style)\n w_truncated = self.truncate_style_defs(w, trunc_psi = trunc_psi)\n w_styles = styles_def_to_tensor(w_truncated)\n generated_images = evaluate_in_chunks(self.batch_size, G, w_styles, noi)\n return generated_images.clamp_(0., 1.)\n\n @torch.no_grad()\n def generate_interpolation(self, num = 0, num_image_tiles = 8, trunc = 1.0, num_steps = 100, save_frames = False):\n self.GAN.eval()\n ext = self.image_extension\n num_rows = num_image_tiles\n\n latent_dim = self.GAN.G.latent_dim\n image_size = self.GAN.G.image_size\n num_layers = self.GAN.G.num_layers\n\n # latents and noise\n\n latents_low = noise(num_rows ** 2, latent_dim, device=self.rank)\n latents_high = noise(num_rows ** 2, latent_dim, device=self.rank)\n n = image_noise(num_rows ** 2, image_size, device=self.rank)\n\n ratios = torch.linspace(0., 8., num_steps)\n\n frames = []\n for ratio in tqdm(ratios):\n interp_latents = slerp(ratio, latents_low, latents_high)\n latents = [(interp_latents, num_layers)]\n generated_images = self.generate_truncated(self.GAN.SE, self.GAN.GE, latents, n, trunc_psi = self.trunc_psi)\n images_grid = torchvision.utils.make_grid(generated_images, nrow = num_rows)\n pil_image = transforms.ToPILImage()(images_grid.cpu())\n \n if self.transparent:\n background = Image.new(\"RGBA\", pil_image.size, (255, 255, 255))\n pil_image = Image.alpha_composite(background, pil_image)\n \n frames.append(pil_image)\n\n frames[0].save(str(self.results_dir / self.name / f'{str(num)}.gif'), save_all=True, append_images=frames[1:], duration=80, loop=0, optimize=True)\n\n if save_frames:\n folder_path = (self.results_dir / self.name / f'{str(num)}')\n folder_path.mkdir(parents=True, exist_ok=True)\n for ind, frame in enumerate(frames):\n frame.save(str(folder_path / f'{str(ind)}.{ext}'))\n\n def print_log(self):\n data = [\n ('G', self.g_loss),\n ('D', self.d_loss),\n ('GP', self.last_gp_loss),\n ('PL', self.pl_mean),\n ('CR', self.last_cr_loss),\n ('Q', self.q_loss),\n ('FID', self.last_fid)\n ]\n\n data = [d for d in data 
if exists(d[1])]\n log = ' | '.join(map(lambda n: f'{n[0]}: {n[1]:.2f}', data))\n print(log)\n\n def track(self, value, name):\n if not exists(self.logger):\n return\n self.logger.track(value, name = name)\n\n def model_name(self, num):\n return str(self.models_dir / self.name / f'model_{num}.pt')\n\n def init_folders(self):\n (self.results_dir / self.name).mkdir(parents=True, exist_ok=True)\n (self.models_dir / self.name).mkdir(parents=True, exist_ok=True)\n\n def clear(self):\n rmtree(str(self.models_dir / self.name), True)\n rmtree(str(self.results_dir / self.name), True)\n rmtree(str(self.fid_dir), True)\n rmtree(str(self.config_path), True)\n self.init_folders()\n\n def save(self, num):\n save_data = {\n 'GAN': self.GAN.state_dict(),\n 'version': __version__\n }\n\n if self.GAN.fp16:\n save_data['amp'] = amp.state_dict()\n\n torch.save(save_data, self.model_name(num))\n self.write_config()\n\n def load(self, num = -1):\n self.load_config()\n\n name = num\n if num == -1:\n file_paths = [p for p in Path(self.models_dir / self.name).glob('model_*.pt')]\n saved_nums = sorted(map(lambda x: int(x.stem.split('_')[1]), file_paths))\n if len(saved_nums) == 0:\n return\n name = saved_nums[-1]\n print(f'continuing from previous epoch - {name}')\n\n self.steps = name * self.save_every\n\n load_data = torch.load(self.model_name(name))\n\n if 'version' in load_data:\n print(f\"loading from version {load_data['version']}\")\n\n try:\n self.GAN.load_state_dict(load_data['GAN'])\n except Exception as e:\n print('unable to load saved model. please try downgrading the package to the version specified by the saved model')\n raise e\n if self.GAN.fp16 and 'amp' in load_data:\n amp.load_state_dict(load_data['amp'])\n\nclass ModelLoader:\n def __init__(self, *, base_dir, name = 'default', load_from = -1):\n self.model = Trainer(name = name, base_dir = base_dir)\n self.model.load(load_from)\n\n def noise_to_styles(self, noise, trunc_psi = None):\n noise = noise.cuda()\n w = self.model.GAN.SE(noise)\n if exists(trunc_psi):\n w = self.model.truncate_style(w, trunc_psi = trunc_psi)\n return w\n\n def styles_to_images(self, w):\n batch_size, *_ = w.shape\n num_layers = self.model.GAN.GE.num_layers\n image_size = self.model.image_size\n w_def = [(w, num_layers)]\n\n w_tensors = styles_def_to_tensor(w_def)\n noise = image_noise(batch_size, image_size, device = 0)\n\n images = self.model.GAN.GE(w_tensors, noise)\n images.clamp_(0., 1.)\n return images\n" ]
[ [ "torch.cat", "torch.isnan", "numpy.mean", "torch.ones", "torch.cuda.is_available", "torch.flip", "torch.norm", "torch.FloatTensor", "torch.tensor", "numpy.arange", "torch.index_select", "torch.nn.functional.relu", "torch.nn.functional.conv2d", "torch.Tensor", "numpy.expand_dims", "torch.zeros", "torch.linspace", "torch.nn.parallel.DistributedDataParallel", "torch.nn.functional.linear", "torch.cuda.empty_cache", "torch.nn.functional.normalize", "torch.rand", "numpy.isnan", "torch.sin", "torch.no_grad", "torch.optim.Adam", "torch.from_numpy", "torch.utils.data.distributed.DistributedSampler", "torch.randn" ] ]
koldunovn/pyfesom2
[ "68a5473eb84301edb25bbb389093e3d3a9999ffc" ]
[ "pyfesom2/transport.py" ]
[ "\"\"\"\nModule for computing transports across sections from fesom2 output\nAuthor: Finn Heukamp ([email protected])\nInitial version: 23.11.2021\n\"\"\"\n\nimport warnings\nfrom os.path import isfile, isdir\nimport xarray as xr\nimport numpy as np\nimport shapely.geometry as sg\nimport pyproj\nimport pymap3d as pm\nfrom dask.diagnostics import ProgressBar\nfrom tqdm.notebook import tqdm\nfrom .load_mesh_data import load_mesh\nfrom .ut import vec_rotate_r2g, get_no_cyclic, cut_region\n\n\ndef _ProcessInputs(section, data_path, years, n_points):\n '''\n process_inputs.py\n\n Processes inputs and aborts if inputs are wrong\n\n Inputs\n ------\n section (list, str)\n either a list of the form [lon_start, lon_end, lat_start, lat_end] or a string for a preset section\n mesh_path (str)\n directory where the mesh files are stored\n data_path (str)\n directory where the data is stored\n years (np.ndarray)\n years to compute\n mesh_diag_path (str: optional, default=None)\n directory where the mesh_diag file is stored, if None it is assumed to be located in data_path\n n_points (int)\n number of waypoints between start and end of section\n\n Returns\n -------\n mesh (fesom.mesh object)\n fesom mesh\n mesh_diag (xr.dataset)\n fesom mesh diag\n section (dict)\n section dictionary containing additional information\n files (list)\n list of velocity files\n\n\n '''\n\n print('Starting computation...')\n\n # Check the input data types\n if not isinstance(section, list) | isinstance(section, str):\n raise ValueError(\n 'The section must be a list of form [lon_start, lon_end, lat_start, lat_end] or a string for a preset section (\"FS\", \"BSO\", \"BSX\", ...)')\n\n if isinstance(section, list) & (len(section) != 4):\n raise ValueError(\n 'The section must be a list of form [lon_start, lon_end, lat_start, lat_end]')\n\n if not isinstance(n_points, int):\n raise ValueError(\n 'n_points must be an integer!'\n )\n\n # Check for existance of the files\n files_u = [data_path + 'u.fesom.' + str(year) + '.nc' for year in years]\n files_v = [data_path + 'v.fesom.' + str(year) + '.nc' for year in years]\n\n files = files_u + files_v\n\n file_check = []\n for file in files:\n file_check.append(isfile(file))\n\n if not all(file_check):\n raise FileExistsError('One or more of the velocity files do not exist!')\n\n return files\n\ndef _CreateLoadSection(section):\n '''\n Load the section parameters from present or create from custom section_name\n\n Inputs\n ------\n section (list, str)\n either a list of the form [lon_start, lon_end, lat_start, lat_end] or a string for a preset section\n\n Returns\n -------\n section (dict)\n section dictionary\n '''\n\n # Create the section dictionary from preset\n if isinstance(section, str):\n section_name = section\n\n presets = [\"BSO\", \"BSX\", \"ST_ANNA_TROUGH\", \"FRAMSTRAIT\", \"FRAMSTRAIT_FULL\",\n \"BSO_FULL\", \"BS_40E\"]\n\n if not section_name in presets:\n raise ValueError('The chosen preset section does not exist! 
 Choose from:' + str(presets)\n + ' or add your own preset to _CreateLoadSection in pyfesom2/transport.py')\n else:\n if section_name == 'BSO':\n section = {'lon_start': 19.999,\n 'lon_end': 19.999,\n 'lat_start': 74.5,\n 'lat_end': 70.08,\n }\n elif section_name == 'BSO_FULL':\n section = {'lon_start': 19.999,\n 'lon_end': 19.999,\n 'lat_start': 78.8,\n 'lat_end': 70.08,\n }\n\n elif section_name == 'BSX':\n section = {'lon_start': 64,\n 'lon_end': 64,\n 'lat_start': 76,\n 'lat_end': 80.66,\n }\n\n elif section_name == 'FRAMSTRAIT_FULL':\n section = {'lon_start': -18.3,#-6,\n 'lon_end': 10.6,\n 'lat_start': 78.8,\n 'lat_end': 78.8,\n }\n\n elif section_name == 'FRAMSTRAIT':\n section = {'lon_start': -6,\n 'lon_end': 10.6,\n 'lat_start': 78.8,\n 'lat_end': 78.8,\n }\n\n elif section_name == 'ST_ANNA_TROUGH':\n section = {'lon_start': 60,\n 'lon_end': 80,\n 'lat_start': 80,\n 'lat_end': 80,\n }\n\n elif section_name == 'BS_40E':\n section = {'lon_start': 40,\n 'lon_end': 40,\n 'lat_start': 68,\n 'lat_end': 80,\n }\n\n # add more presets here\n\n section['name'] = section_name\n\n # create custom section dict\n elif isinstance(section, list):\n section = {'lon_start': section[0],\n 'lon_end': section[1],\n 'lat_start': section[2],\n 'lat_end': section[3],\n }\n section['name'] = 'not specified'\n\n # Find the orientation of the section\n if section['lon_start'] == section['lon_end']:\n section['orientation'] = 'meridional'\n\n elif (section['lat_start'] == section['lat_end']):\n section['orientation'] = 'zonal'\n\n else:\n section['orientation'] = 'other'\n raise ValueError('Only zonal or meridional sections are currently supported!')\n\n print('\\nYour section: ', section['name'], ': Start: ', section['lon_start'], '°E ', section['lat_start'], '°N ', 'End: ', section['lon_end'], '°E ', section['lat_end'], '°N')\n\n return section\n\ndef _ComputeWaypoints(section, mesh, use_great_circle, n_points):\n '''\n compute_waypoints.py\n\n Computes the waypoints between the section start and end, either along a great circle or along a straight line in lon/lat space\n\n Inputs\n ------\n section (dict)\n section dictionary containing the section start, end and orientation data\n mesh (fesom.mesh object)\n fesom.mesh\n use_great_circle (bool)\n True or False\n n_points (int)\n number of waypoints between start and end of section\n\n\n Returns\n -------\n section_waypoints (list)\n waypoints along the section\n mesh (fesom.mesh object)\n fesom mesh\n section (dict)\n dictionary containing section information\n '''\n\n if use_great_circle:\n # Compute the great circle coordinates along the section\n g = pyproj.Geod(ellps='WGS84')\n\n section_waypoints = g.npts(section['lon_start'],\n section['lat_start'],\n section['lon_end'],\n section['lat_end'],\n n_points\n )\n # bring into the desired shape [[],...,[]]\n section_waypoints = [[section_waypoints[i][0], section_waypoints[i][1]]\n for i in range(len(section_waypoints))]\n\n else:\n # Compute the 'linear' connection between the section start and end\n section_lon = np.linspace(section['lon_start'],\n section['lon_end'],\n n_points\n )\n\n section_lat = np.linspace(section['lat_start'],\n section['lat_end'],\n n_points\n )\n\n # Bring the section coordinates into the desired shape [[],...,[]]\n section_waypoints = [[section_lon[i], section_lat[i]] for i in range(len(section_lat))]\n\n return section_waypoints, mesh, section\n\ndef _ReduceMeshElementNumber(section_waypoints, mesh, section, add_extent):\n '''\n reduce_element_number.py\n\n Reduces the number of elements that are loaded into memory to those nearby the section\n\n
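 A minimal usage sketch (hypothetical variable names, assuming waypoints comes from _ComputeWaypoints):\n\n >>> elem_nods, elem_idx = _ReduceMeshElementNumber(waypoints, mesh, section, add_extent=1.5)\n\n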
 Inputs\n ------\n section_waypoints (list)\n list with all section waypoints [[lon,lat], ... ,[lon,lat]]\n mesh (fesom.mesh object)\n fesom.mesh\n section (dict)\n section dictionary\n add_extent (int, float)\n extent in degrees that is added to the box cutout to contain all elements needed, choose a small value for high-resolution meshes\n\n Returns\n -------\n elem_box_nods (list)\n list of indices that define the three nods of each element that belongs to the box\n elem_box_indices (list)\n list of indices where no_nan_triangles == True (to select the right elements when loading the data)\n '''\n\n # write section longitude and latitude in separate lists\n section_waypoints_lon = [section_waypoints[i][0] for i in range(len(section_waypoints))]\n section_waypoints_lat = [section_waypoints[i][1] for i in range(len(section_waypoints))]\n\n if add_extent < 1:\n warnings.warn(\n 'The extent added to the box is small, this might lead to errors when using low resolution meshes')\n\n # find the maximum and minimum zonal/ meridional extent of the section\n box_mesh = [min(section_waypoints_lon) - add_extent,\n max(section_waypoints_lon) + add_extent,\n min(section_waypoints_lat) - add_extent,\n max(section_waypoints_lat) + add_extent\n ]\n\n # find the elements that are within the extent\n elem_no_nan, no_nan_triangles = cut_region(mesh, box_mesh)\n no_cyclic_elem2 = get_no_cyclic(mesh, elem_no_nan)\n elem_box_nods = elem_no_nan[no_cyclic_elem2]\n\n # create an array containing the indices of the elements that belong to the region\n elem_box_indices = np.arange(mesh.e2d)[no_nan_triangles]\n\n # Compute the distance of each section coordinate to the center of each element to further reduce the amount of polygons needed\n # in case of a meridional or zonal section the chosen box is already small enough to be loaded and no further elements have to be removed\n # in all other cases the rectangular box gets too large and needs further shrinking\n if section['orientation'] == 'other':\n min_dist = add_extent * 100 # minimum distance in km to take element into account\n distance_bool = list()\n\n # compute the center of each element\n element_center_lon = np.mean(mesh.x2[elem_box_nods], axis=1)\n element_center_lat = np.mean(mesh.y2[elem_box_nods], axis=1)\n\n for ii in range(len(element_center_lat)):\n lon_temp = np.repeat(element_center_lon[ii], len(section_waypoints_lon))\n lat_temp = np.repeat(element_center_lat[ii], len(section_waypoints_lat))\n\n distances = _Haversine(lon_temp,\n lat_temp,\n section_waypoints_lon,\n section_waypoints_lat,\n False\n )\n\n if any(distances <= min_dist):\n distance_bool.append(True)\n else:\n distance_bool.append(False)\n\n # remove the elements that are too far away from the section\n elem_box_nods = elem_box_nods[distance_bool]\n elem_box_indices = elem_box_indices[distance_bool]\n\n return elem_box_nods, elem_box_indices\n\ndef _LinePolygonIntersections(mesh, section_waypoints, elem_box_nods, elem_box_indices):\n '''\n line_polygon_intersections.py\n\n Creates shapely polygon and line elements for the section and the mesh elements and computes the intersection coordinates\n\n Inputs\n ------\n mesh (fesom.mesh object)\n mesh object\n section_waypoints (list)\n list containing the waypoints\n elem_box_nods (list)\n list of indices that define the three nods of each element that belongs to the box\n elem_box_indices (list)\n list of indices where no_nan_triangles == True (to select the right elements when loading the data)\n\n Returns\n -------\n elem_box_nods (list)\n 
list of indices that define the three nods of each element that belongs to the box\n elem_box_indices (list)\n list of indices where no_nan_triangles == True (to select the right elements when loading the data)\n cell_intersections (list)\n list with all intersections between the line element and the polygons\n line_section (shapely.line)\n shapely line element that represents the section\n '''\n # CREATE SHAPELY LINE AND POLYGON ELEMENTS\n line_section = sg.LineString(section_waypoints)\n\n polygon_list = list()\n\n print('\\nConverting grid cells to Polygons... (If this takes very long try to reduce the add_extent parameter)')\n for ii in tqdm(range(elem_box_nods.shape[0])):\n polygon_list.append(\n sg.Polygon(\n [\n (mesh.x2[elem_box_nods][ii, 0], mesh.y2[elem_box_nods][ii, 0]),\n (mesh.x2[elem_box_nods][ii, 1], mesh.y2[elem_box_nods][ii, 1]),\n (mesh.x2[elem_box_nods][ii, 2], mesh.y2[elem_box_nods][ii, 2]),\n ]\n )\n )\n\n ###################\n # Returns\n # line_section: shapely line element that contains the section coordinates\n # polygon_list: list of shapely polygons that contains all nearby elements\n ###################\n\n # COMPUTE THE INTERSECTION COORDINATES OF THE POLYGONS AND THE LINE ELEMENT\n intersection_bool = list()\n intersection_coords = list()\n intersection_points = list()\n\n # check for intersections\n print('Looking for intersected grid cells...')\n for ii in tqdm(range(len(polygon_list))):\n intersection = polygon_list[ii].intersection(line_section).coords\n\n # if no intersections (coords == [])\n if not intersection:\n intersection_bool.append(False) # fill boolean array with False (no intersects)\n\n # if exist intersections (coords != [] )\n else:\n intersection_bool.append(True) # fill boolean array with True (intersects exists)\n # fill the intersection coordinates list with the shapely intersection coordinates object\n intersection_coords.append(intersection)\n\n # remove all intersections that are not at the edge of the elements but inside (only first and last intersection coordinates are considered)\n cell_intersections = list()\n\n for intersection in intersection_coords:\n cell_intersections.append([[list(intersection)[0]], [list(intersection)[-1]]])\n\n # remove indices of elements that are not intersected\n elem_box_nods = elem_box_nods[intersection_bool]\n elem_box_indices = elem_box_indices[intersection_bool]\n\n return elem_box_nods, elem_box_indices, cell_intersections, line_section\n\ndef _FindIntersectedEdges(mesh, elem_box_nods, elem_box_indices, line_section, cell_intersections):\n '''\n Find the two intersected edges of each mesh element along the section (2 out of three). In case the start/ end point is in the ocean only one edge is\n intersected. 
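(A regular interior element is entered through one edge and left through another, so exactly two of the three edges intersect the section.)\n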
 In this case the associated mesh element is dropped.\n\n Inputs\n ------\n mesh (fesom.mesh object)\n mesh object\n elem_box_nods (list)\n list of indices that define the three nods of each element that belongs to the box\n elem_box_indices (list)\n list of indices where no_nan_triangles == True (to select the right elements when loading the data)\n cell_intersections (list)\n list with all intersections between the line element and the polygons\n line_section (shapely.line)\n shapely line element that represents the section\n\n Returns\n -------\n intersected_edge (np.ndarray)\n boolean array, True if edge of element is intersected, False otherwise\n midpoints_edge (np.ndarray)\n centers of the three edges associated to each single mesh element\n elem_centers (np.ndarray)\n center of the mesh element\n elem_box_nods (list)\n list of indices that define the three nods of each element that belongs to the box\n elem_box_indices (list)\n list of indices where no_nan_triangles == True (to select the right elements when loading the data)\n cell_intersections (list)\n list with all intersections between the line element and the polygons\n\n '''\n # array with the lons and lats of the three nods forming one element\n lon_elems = mesh.x2[elem_box_nods]\n lat_elems = mesh.y2[elem_box_nods]\n\n # array with the centers of the cells\n lon_centers, lat_centers = np.mean(mesh.x2[elem_box_nods], axis=1), np.mean(mesh.y2[elem_box_nods], axis=1)\n elem_centers = np.array([[lon_centers[i], lat_centers[i]] for i in range(len(lon_centers))])\n\n # Find the element edges that are intersected (2 of 3 regular, 1 of 3 with land)\n intersected_edge = np.ones((len(elem_centers),3), dtype=bool)\n midpoints_edge = np.zeros((len(elem_centers),3,2)) # elem, edge, (lon,lat)\n\n # iterate over all intersected elements\n for ii in range(len(elem_centers)):\n # extract the coordinates of the nods forming one element\n lon1, lon2, lon3 = lon_elems[ii][0], lon_elems[ii][1], lon_elems[ii][2]\n lat1, lat2, lat3 = lat_elems[ii][0], lat_elems[ii][1], lat_elems[ii][2]\n\n # compute the midpoints of the element edges\n midpoints_edge[ii,0,0] = (lon1+lon2)/2\n midpoints_edge[ii,1,0] = (lon2+lon3)/2\n midpoints_edge[ii,2,0] = (lon3+lon1)/2\n\n midpoints_edge[ii,0,1] = (lat1+lat2)/2\n midpoints_edge[ii,1,1] = (lat2+lat3)/2\n midpoints_edge[ii,2,1] = (lat3+lat1)/2\n\n # create shapely line elements for each of the element edges\n line12 = sg.LineString([[lon1,lat1], [lon2,lat2]])\n line23 = sg.LineString([[lon2,lat2], [lon3,lat3]])\n line31 = sg.LineString([[lon3,lat3], [lon1,lat1]])\n\n # find the element edges that intersect with the section\n if not list(line12.intersection(line_section).coords):\n intersected_edge[ii,0] = False\n if not list(line23.intersection(line_section).coords):\n intersected_edge[ii,1] = False\n if not list(line31.intersection(line_section).coords):\n intersected_edge[ii,2] = False\n\n # when only one edge of the element is hit, set all intersections to False and drop the element later\n if sum(intersected_edge[ii,:]) == 1:\n intersected_edge[ii,:] = False\n\n zeros_in_intersected_edge = np.where(intersected_edge.sum(axis=1) == 0)[0]\n #if len(zeros_in_intersected_edge) == 2:\n # print('The section starts and ends in the ocean. Those elements that contain the start and end coordinate of the section are dropped.')\n #elif len(zeros_in_intersected_edge) == 1:\n # print('The section is land-ocean/ ocean-land.
 Those elements that contain the start and end coordinate of the section are dropped.')\n #elif len(zeros_in_intersected_edge) == 0:\n # print('The section is land to land')\n if len(zeros_in_intersected_edge) > 2:\n raise ValueError('Your section contains too many cell edges that were intersected only once. Only 0, 1 or 2 are allowed.')\n\n # Now drop those elements in the arrays\n elem_box_nods = np.delete(elem_box_nods, zeros_in_intersected_edge, axis=0)\n elem_box_indices = np.delete(elem_box_indices, zeros_in_intersected_edge)\n midpoints_edge = np.delete(midpoints_edge, zeros_in_intersected_edge, axis=0)\n elem_centers = np.delete(elem_centers, zeros_in_intersected_edge, axis=0)\n intersected_edge = np.delete(intersected_edge, zeros_in_intersected_edge, axis=0)\n cell_intersections = np.delete(np.array(cell_intersections).squeeze(), zeros_in_intersected_edge, axis=0)\n\n return intersected_edge, midpoints_edge, elem_centers, elem_box_indices, elem_box_nods, cell_intersections\n\ndef _BringIntoAlongPathOrder(midpoints_edge, intersected_edge, elem_centers, section):\n '''\n Brings the mesh elements and segment vectors into an along-section order (eastwards/ northwards).\n\n Inputs\n ------\n intersected_edge (np.ndarray)\n boolean array, True if edge of element is intersected, False otherwise\n midpoints_edge (np.ndarray)\n centers of the three edges associated to each single mesh element\n elem_centers (np.ndarray)\n center of the mesh element\n section (dict)\n section dictionary\n\n Returns\n -------\n c_lon (list)\n center longitude of mesh element\n c_lat (list)\n center latitude of mesh element\n f_lon (list)\n first edge midpoint longitude of the element\n f_lat (list)\n first edge midpoint latitude of the element\n s_lon (list)\n second edge midpoint longitude of the element\n s_lat (list)\n second edge midpoint latitude of the element\n elem_order (list)\n indices of the elements in ascending along-path order\n\n\n '''\n #### FIND THE FIRST POINT OF THE SECTION\n\n if section['orientation'] == 'zonal':\n # find the westernmost intersected edge midpoint\n start_ind = np.argmin(midpoints_edge[intersected_edge,0])\n start_value = midpoints_edge[intersected_edge,0][start_ind]\n\n # create list for already used elements\n first_element = list()\n\n for ii in range(midpoints_edge.shape[0]):\n\n # for each single midpoint tuple, check if the longitude is the same (then this is the first element)\n if start_value in midpoints_edge[ii,intersected_edge[ii,:],0]:\n first_element.append(ii)\n #print(first_element)\n\n #if len(first_element) > 1:\n #raise ValueError('Something is wrong here...')\n\n # now look which of the two intersected midpoints of the first element is intersected first\n ind_first = np.where(midpoints_edge[first_element[0],intersected_edge[first_element[0],:],0] == start_value)[0]\n\n # write the coordinates in the right order into lists (first_value, centroid, second_value, (lon,lat)) for each element\n f_lon, f_lat, s_lon, s_lat, c_lon, c_lat, elem_order = list(), list(), list(), list(), list(), list(), list()\n\n c_lon.append(elem_centers[first_element[0],0])\n c_lat.append(elem_centers[first_element[0],1])\n f_lon.append(midpoints_edge[first_element[0], intersected_edge[first_element[0],:], 0][ind_first][0])\n f_lat.append(midpoints_edge[first_element[0], intersected_edge[first_element[0],:], 1][ind_first][0])\n s_lon.append(midpoints_edge[first_element[0], intersected_edge[first_element[0],:], 0][ind_first-1][0])# if ind_first =0 --> -1 which is the same index as 1\n s_lat.append(midpoints_edge[first_element[0], intersected_edge[first_element[0],:], 1][ind_first-1][0])\n\n elem_order.append(first_element[0])\n\n\n ###### Bring all the elements into the right order\n\n for jj in range(elem_centers.shape[0]-1):\n # Now we repeat this procedure for the second value of the previous element\n matching_element = list()\n for ii in range(midpoints_edge.shape[0]):\n # for each single midpoint tuple, check if the longitude is the same (then this is the next element)\n if s_lon[-1] in midpoints_edge[ii,intersected_edge[ii,:],0]:\n matching_element.append(ii)\n #print(jj, matching_element)\n\n # apply some tests, the matching element has to have len() == 2 and the previous element must also be contained\n if (len(matching_element) != 2) | (elem_order[-1] not in matching_element):\n raise ValueError('Either your section hit an island or your add_extent parameter was chosen too small! ' +\n 'Increase the add_extent parameter as it might be too small for your mesh resolution! ' +\n 'Otherwise, the last working gridcell was at: ' +\n str(c_lon[-1]) + '°E, ' + str(c_lat[-1]) + '°N. ' +\n 'Please use this coordinate tuple as the new start or end of the section! '\n )\n\n # find the matching element that's not the previous one, this is the next one\n if elem_order[-1] == matching_element[0]:\n ind = 1\n else:\n ind = 0\n\n # now look which of the two intersected midpoints of the element is the same as the last second value\n ind_first = np.where(midpoints_edge[matching_element[ind],intersected_edge[matching_element[ind],:],0] == s_lon[-1])[0]\n\n # append to list in right order\n c_lon.append(elem_centers[matching_element[ind],0])\n c_lat.append(elem_centers[matching_element[ind],1])\n f_lon.append(midpoints_edge[matching_element[ind], intersected_edge[matching_element[ind],:], 0][ind_first][0])\n f_lat.append(midpoints_edge[matching_element[ind], intersected_edge[matching_element[ind],:], 1][ind_first][0])\n s_lon.append(midpoints_edge[matching_element[ind], intersected_edge[matching_element[ind],:], 0][ind_first-1][0])# if ind_first =0 --> -1 which is the same index as 1\n s_lat.append(midpoints_edge[matching_element[ind], intersected_edge[matching_element[ind],:], 1][ind_first-1][0])\n\n elem_order.append(matching_element[ind])\n\n elif section['orientation'] == 'meridional':\n\n # find the southernmost intersected edge midpoint\n start_ind = np.argmin(midpoints_edge[intersected_edge,1])\n start_value = midpoints_edge[intersected_edge,1][start_ind]\n #print(start_ind, start_value)\n\n # create list for already used elements\n first_element = list()\n\n for ii in range(midpoints_edge.shape[0]):\n\n # for each single midpoint tuple, check if the latitude is the same (then this is the first element)\n if start_value in midpoints_edge[ii,intersected_edge[ii,:],1]:\n first_element.append(ii)\n #print(first_element)\n\n #if len(first_element) > 1:\n #raise ValueError('Something is wrong here...')\n\n # now look which of the two intersected midpoints of the first element is intersected first\n ind_first = np.where(midpoints_edge[first_element[0],intersected_edge[first_element[0],:],1] == start_value)[0]\n\n # write the coordinates in the right order into lists (first_value, centroid, second_value, (lon,lat)) for each element\n f_lon, f_lat, s_lon, s_lat, c_lon, c_lat, elem_order = list(), list(), list(), list(), list(), list(), list()\n\n c_lon.append(elem_centers[first_element[0],0])\n c_lat.append(elem_centers[first_element[0],1])\n 
f_lon.append(midpoints_edge[first_element[0], intersected_edge[first_element[0],:], 0][ind_first][0])\n f_lat.append(midpoints_edge[first_element[0], intersected_edge[first_element[0],:], 1][ind_first][0])\n s_lon.append(midpoints_edge[first_element[0], intersected_edge[first_element[0],:], 0][ind_first-1][0])# if ind_first =0 --> -1 which is the same index as 1\n s_lat.append(midpoints_edge[first_element[0], intersected_edge[first_element[0],:], 1][ind_first-1][0])\n\n elem_order.append(first_element[0])\n\n\n ###### Bring all the elements into the right order\n\n for jj in range(elem_centers.shape[0]-1):\n # Now we repeat this procedure for the second value of the previous element\n matching_element = list()\n for ii in range(midpoints_edge.shape[0]):\n # for each single midpoint tuple, check if the longitude is the same (then this is the next element)\n if s_lat[-1] in midpoints_edge[ii,intersected_edge[ii,:],1]:\n matching_element.append(ii)\n #print(jj, matching_element)\n\n # apply some tests, the matching element has to have len() == 2 and the previous element must also be contained\n if (len(matching_element) != 2) | (elem_order[-1] not in matching_element):\n raise ValueError('Either your section hit an island or your add_extent parameter was chosen too small! ' +\n 'Increase the add_extent parameter as it might be too small for your mesh resolution! ' +\n 'Otherwise, the last working gridcell was at: ' +\n str(c_lon[-1]) + '°E, ' + str(c_lat[-1]) + '°N. ' +\n 'Please use this coordinate tuple as the new start or end of the section! '\n )\n\n # find the matching element that's not the previous one, this is the next one\n if elem_order[-1] == matching_element[0]:\n ind = 1\n else:\n ind = 0\n\n # now look which of the two intersected midpoints of the element is the same as the last second value\n ind_first = np.where(midpoints_edge[matching_element[ind],intersected_edge[matching_element[ind],:],0] == s_lon[-1])[0]\n\n # append to list in right order\n c_lon.append(elem_centers[matching_element[ind],0])\n c_lat.append(elem_centers[matching_element[ind],1])\n f_lon.append(midpoints_edge[matching_element[ind], intersected_edge[matching_element[ind],:], 0][ind_first][0])\n f_lat.append(midpoints_edge[matching_element[ind], intersected_edge[matching_element[ind],:], 1][ind_first][0])\n s_lon.append(midpoints_edge[matching_element[ind], intersected_edge[matching_element[ind],:], 0][ind_first-1][0])# if ind_first =0 --> -1 which is the same index as 1\n s_lat.append(midpoints_edge[matching_element[ind], intersected_edge[matching_element[ind],:], 1][ind_first-1][0])\n\n elem_order.append(matching_element[ind])\n\n #check if the no element appears twice\n for i in elem_order:\n if elem_order.count(i) > 1:\n raise ValueError('An element appeared twice while sorting...' + str(i))\n if len(elem_order) != elem_centers.shape[0]:\n raise ValueError('Wrong number of elements while sorting along path...')\n\n # Add the segments to the section dictionary\n section['f_lon'] = f_lon\n section['c_lon'] = c_lon\n section['s_lon'] = s_lon\n section['f_lat'] = f_lat\n section['c_lat'] = c_lat\n section['s_lat'] = s_lat\n\n\n return c_lon, c_lat, f_lon, f_lat, s_lon, s_lat, elem_order\n\ndef _ComputeBrokenLineSegments(f_lat, f_lon, s_lat, s_lon, c_lat, c_lon, section):\n '''\n Compute the two broken line segments that connect the intersected edge midpoints to the center of the mesh element\n in local cartesian coordinates. 
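(The local frame here is an east-north-up tangent plane centered on the element midpoint; the conversion below uses pymap3d.geodetic2enu on the WGS84 ellipsoid.)\n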
Afterwards compute the effective length of the two segments in x and y direction.\n\n Inputs\n ------\n c_lon (list)\n center longitude of mesh element\n c_lat (list)\n center latitude of mesh element\n f_lon (list)\n first edge midpoint latitude of the element\n f_lat (list)\n first edge midpoint longitude of the element\n s_lon (list)\n second edge midpoint latitude of the element\n s_lat (list)\n second edge midpoint longitude of the element\n section (dict)\n section dictionary\n\n Returns\n -------\n effective_dx (np.ndarray)\n the effective length of the two segment elements in x direction to compute transport with v\n effective_dy (np.ndarray)\n the effective length of the two segment elements in y direction to compute transport with u\n '''\n\n # create an array for the segment vectors (2 for each element) with the shape (elem, (dlon,dlat))\n first_segment_vector = np.ones((len(f_lon), 2))\n second_segment_vector = np.ones_like(first_segment_vector)\n for ii in range(len(f_lon)):\n\n # FIRST VECTOR OF THE ELEMENT\n # switch to a local cartesian coordinate system (centered at the element midpoint) and compute the vector connecting\n # the center of the intersected edge with the center of the element (always pointing outwards from the center of the element)\n dx, dy, dz = pm.geodetic2enu(lat0=c_lat[ii],\n lon0=c_lon[ii],\n h0=0,\n lat=f_lat[ii],\n lon=f_lon[ii],\n h=0,\n ell=pm.utils.Ellipsoid('wgs84')\n )\n\n first_segment_vector[ii,0], first_segment_vector[ii,1] = -dx, -dy # turn the vector to point towards the center, in the direction of the section\n\n # SECOND VECTOR OF THE ELEMENT\n dx, dy, dz = pm.geodetic2enu(lat0=c_lat[ii],\n lon0=c_lon[ii],\n h0=0,\n lat=s_lat[ii],\n lon=s_lon[ii],\n h=0,\n ell=pm.utils.Ellipsoid('wgs84')\n )\n\n second_segment_vector[ii,0], second_segment_vector[ii,1] = dx, dy\n\n # define the sign of the segment length\n if section['orientation'] == 'zonal':\n\n effective_dx = first_segment_vector[:,0] + second_segment_vector[:,0]\n effective_dy = -first_segment_vector[:,1] - second_segment_vector[:,1]\n\n\n if section['orientation'] == 'meridional':\n\n effective_dx = -first_segment_vector[:,0] - second_segment_vector[:,0]\n effective_dy = first_segment_vector[:,1] + second_segment_vector[:,1]\n\n\n return effective_dx, effective_dy\n\ndef _CreateVerticalGrid(effective_dx, effective_dy, mesh_diag):\n '''\n Creates the vertical grid to compute transports through the section\n\n Inputs\n ------\n effective_dx (np.ndarray)\n the effective length of the two segment elements in x direction to compute transport with v\n effective_dy (np.ndarray)\n the effective length of the two segment elements in y direction to compute transport with u\n\n Returns\n -------\n vertical_cell_area_dx (np.ndarray)\n the cell area for each mesh element to be multiplied by the meridional velocity\n vertical_cell_area_dy (np.ndarray)\n the cell area for each mesh element to be multiplied by the zonal velocity\n\n '''\n # take the layer thickness\n # old mesh_diag: zbar, new mesh_diag: nz\n try:\n layer_thickness = np.abs(np.diff(mesh_diag.zbar))\n except:\n layer_thickness = np.abs(np.diff(mesh_diag.nz))\n\n # compute the vertical area for dx and dy\n vertical_cell_area_dx = layer_thickness[:,np.newaxis] * effective_dx[np.newaxis,:]\n vertical_cell_area_dy = layer_thickness[:,np.newaxis] * effective_dy[np.newaxis,:]\n\n return vertical_cell_area_dx, vertical_cell_area_dy\n\ndef _AddMetaData(ds, elem_box_indices, elem_box_nods, effective_dx, effective_dy, vertical_cell_area_dx, 
vertical_cell_area_dy, c_lon, c_lat):\n '''\n Add some meta-data to the dataset.\n '''\n\n # ADD SOME FURTHER VARIABLES\n # assign_coords returns a new dataset, so the result must be assigned back\n ds = ds.assign_coords({'triple': (\"triple\", [1, 2, 3])})\n\n # elem_indices\n ds['elem_indices'] = (('elem'), elem_box_indices)\n ds.elem_indices.attrs['description'] = 'indices of the elements that belong to the section relative to the global data field'\n\n # elem_nods\n ds['elem_nods'] = (('elem', 'triple'), elem_box_nods)\n ds.elem_nods.attrs['description'] = 'indices of the 3 nods that represent the elements that belong to the section relative to the global data field'\n\n # horizontal_distances\n ds['zonal_distance'] = (('elem'), effective_dx)\n ds.zonal_distance.attrs['description'] = 'width of the two broken lines in each element in west-east direction'\n ds.zonal_distance.attrs['units'] = 'm'\n ds['meridional_distance'] = (('elem'), effective_dy)\n ds.meridional_distance.attrs['description'] = 'width of the two broken lines in each element in south-north direction'\n ds.meridional_distance.attrs['units'] = 'm'\n\n # vertical_cell_area\n ds['vertical_cell_area_dx'] = (('elem', 'nz1'), np.transpose(vertical_cell_area_dx))\n ds.vertical_cell_area_dx.attrs['description'] = 'cell area of the single intersected elements in east-west direction'\n ds.vertical_cell_area_dx.attrs['units'] = 'm^2'\n\n ds['vertical_cell_area_dy'] = (('elem', 'nz1'), np.transpose(vertical_cell_area_dy))\n ds.vertical_cell_area_dy.attrs['description'] = 'cell area of the single intersected elements in south-north direction'\n ds.vertical_cell_area_dy.attrs['units'] = 'm^2'\n\n # lon lat\n ds['lon_center'] = (('elem'), c_lon)\n ds.lon_center.attrs['description'] = 'longitude of the element centers'\n ds.lon_center.attrs['units'] = '°E'\n\n ds['lat_center'] = (('elem'), c_lat)\n ds.lat_center.attrs['description'] = 'latitude of the element centers'\n ds.lat_center.attrs['units'] = '°N'\n\n return ds\n\ndef _UnrotateLoadVelocity(how, files, elem_box_indices, elem_box_nods, vertical_cell_area_dx, vertical_cell_area_dy, c_lon, c_lat, effective_dx, effective_dy, elem_order, chunks, mesh, abg):\n '''\n Load and unrotate the fesom velocity files. 
Additionally bring the mesh elements into the right order (according to the section)\n\n Inputs\n ------\n how (str)\n mean or ori\n files (list)\n list of strings contianing the files to load\n elem_box_nods (list)\n list of indices that defines the three nods of each element that belongs to the box\n elem_box_indices (list)\n list of indices where no_nan_triangles == True (to select the right elements when loading the data)\n vertical_cell_area_dx (np.ndarray)\n the cell area for each mesh element to be multiplied by the meridional velocity\n vertical_cell_area_dy (np.ndarray)\n the cell area for each mesh element to be multiplied by the zonal velocity\n c_lon (list)\n center longitude of mesh element\n c_lat (list)\n center latitude of mesh element\n effective_dx (np.ndarray)\n the effective length of the two segment elements in x direction to compute transport with v\n effective_dy (np.ndarray)\n the effective length of the two segment elements in y direction to compute transport with u\n chunks (dict)\n chunks for dask (default: {'elem': 1e5}\n mesh (fesom.mesh object)\n fesom.mesh\n abg (list)\n mesh rotation [50 15 -90]\n\n Returns\n -------\n ds (xr.Dataset)\n dataset containing all variables\n\n\n '''\n\n print('Loading the data into memory...')\n # decide on the loading strategy, for small datasets combine the data to one dataset, for large datasets load files individually\n overload = xr.open_dataset(files[0]).nbytes * 1e-9 * len(files) >= 25\n if overload:\n print('A lot of velocity data (' + str(np.round(xr.open_dataset(files[0]).nbytes * 1e-9 * len(files), decimals=2)) + 'GB)... This will take some time...')\n\n # Load and merge at the same time\n ProgressBar().register()\n ds = xr.open_mfdataset(files, combine='by_coords', chunks=chunks).isel(\n elem=elem_box_indices).load()\n\n ds = _AddMetaData(ds, elem_box_indices, elem_box_nods, effective_dx, effective_dy, vertical_cell_area_dx, vertical_cell_area_dy, c_lon, c_lat)\n\n # rename u and v to u_rot, v_rot\n ds = ds.rename({'u': 'u_rot'})\n ds = ds.rename({'v': 'v_rot'})\n # UNROTATE\n lon_elem_center = np.mean(mesh.x2[ds.elem_nods], axis=1)\n lat_elem_center = np.mean(mesh.y2[ds.elem_nods], axis=1)\n u, v = vec_rotate_r2g(abg[0], abg[1], abg[2], lon_elem_center[np.newaxis, :, np.newaxis],\n lat_elem_center[np.newaxis, :, np.newaxis], ds.u_rot.values, ds.v_rot.values, flag=1)\n\n ds['u'] = (('time', 'elem', 'nz1'), u)\n ds['v'] = (('time', 'elem', 'nz1'), v)\n\n ds = ds.drop_vars(['u_rot','v_rot'])\n\n # bring u and v into the right order\n ds['u'] = ds.u.isel(elem=elem_order)\n ds['v'] = ds.v.isel(elem=elem_order)\n\n if how == 'mean':\n ds = ds.mean(dim='time')\n\n return ds\n\ndef _TransportAcross(ds):\n '''\n Compute the transport across the broken line elements\n\n Inputs\n ------\n ds (xr.Dataset)\n dataset\n\n Returns\n -------\n ds (xr.Dataset)\n updated dataset\n\n\n '''\n ds['transport_across'] = ds.u * ds.vertical_cell_area_dy + ds.v * ds.vertical_cell_area_dx\n\n return ds\n\ndef _AddTempSalt(section, ds, data_path, mesh, years, elem_order):\n '''\n _AddTempSalt.py\n\n Adds temperature and salinity values to the section. 
The temperature and salinity is converted from nods to elements by taking the average\n of the three nods that form the element.\n\n Inputs\n ------\n section (dict)\n section dictionary\n ds (xarray.Dataset)\n dataset containing the velocities etc.\n data_path (str)\n directory where the fesom output is stored\n mesh (fesom mesh file)\n fesom mesh file\n\n Returns\n -------\n\n ds (xr.Dataset)\n final dataset\n\n\n '''\n\n # Check for existance of the files\n files_temp = [data_path + 'temp.fesom.' + str(year) + '.nc' for year in years]\n files_salt = [data_path + 'salt.fesom.' + str(year) + '.nc' for year in years]\n files = files_temp + files_salt\n\n file_check = []\n for file in files:\n file_check.append(isfile(file))\n\n if not all(file_check):\n raise FileExistsError('One or more of the temperature/ salinity files do not exist!')\n\n overload = xr.open_dataset(files[0]).nbytes * 1e-9 * len(files) >= 25\n if overload:\n print('A lot of TS data (' + str(np.round(xr.open_dataset(files[0]).nbytes * 1e-9 * len(files), decimals=2)) + 'GB)... This will take some time...')\n\n # Open files\n ds_ts = xr.open_mfdataset(files, combine='by_coords', chunks={'nod2': 1e4})\n\n # Only load the nods that belong to elements that are part of the section\n # Flatten the triplets first\n ds_ts = ds_ts.isel(nod2=ds.elem_nods.values.flatten()).load()\n\n # Reshape to triplets again and average all three values to obtain an estimate of the elements properties\n temp = ds_ts.temp.values.reshape(len(ds.time), len(ds.elem_nods), 3, mesh.nlev - 1).mean(axis=2)\n salt = ds_ts.salt.values.reshape(len(ds.time), len(ds.elem_nods), 3, mesh.nlev - 1).mean(axis=2)\n\n # Add to dataset\n ds['temp'] = (('time', 'elem', 'nz1'), temp)\n ds['salt'] = (('time', 'elem', 'nz1'), salt)\n\n # bring temp and sal into the right order\n ds['temp'] = ds.temp.isel(elem=elem_order)\n ds['salt'] = ds.salt.isel(elem=elem_order)\n\n return ds\n\ndef _OrderIndices(ds, elem_order):\n '''Brings the indices into the right order.\n\n Inputs\n ------\n ds (xr.dataset)\n dataset containing transport\n elem_order (list)\n order\n '''\n\n ds['elem_indices'] = ds.elem_indices.isel(elem=elem_order)\n ds['elem_nods'] = ds.elem_nods.isel(elem=elem_order)\n\n print('\\n Done!')\n return ds\n\ndef cross_section_transport(section, mesh, data_path, years, mesh_diag, how='mean', add_extent=1, abg=[50, 15, -90], add_TS=False, chunks={'elem': 1e4}, use_great_circle=False, n_points=1000):\n '''\n Inputs\n ------\n section (list, str)\n either a list of the form [lon_start, lon_end, lat_start, lat_end] or a string for a preset section: 'FRAMSTRAIT', 'BSO'\n mesh (fesom.mesh file)\n fesom.mesh file\n data_path (str)\n directory where the data is stored\n mesh_diag (xr.Dataset)\n fesom.mesh.diag file\n use_great_circle (bool)\n compute the section waypoints along a great great circle (default=True)\n how (str)\n either 'mean' for time mean transport or 'ori' for original data (default='mean')\n add_extent (int, float)\n the additional extent of the cutoutbox [lon_start, lon_end, lat_start, lat_end],\n choose as small as possible (small for high resolution meshes and large for low resolution meshes)\n this will impove the speed of the function (default = 1°)\n abg (list)\n rotation of the velocity data (default=[50,15,-90])\n add_TS (bool)\n add temperature and salinity to the section (default=False)\n chunks (dict)\n chunks for parallelising the velocity data (default: chunks={'elem': 1e4})\n n_points (int)\n number of waypoints between start and end of 
section\n\n Returns\n -------\n ds (xarray.Dataset)\n dataset containing all output variables\n section (dict)\n dictionary containing all section information\n\n '''\n # Wrap up all the subroutines to a main function\n files = _ProcessInputs(section, data_path, years, n_points)\n\n section = _CreateLoadSection(section)\n\n section_waypoints, mesh, section = _ComputeWaypoints(section, mesh, use_great_circle, n_points)\n\n elem_box_nods, elem_box_indices = _ReduceMeshElementNumber(section_waypoints, mesh, section, add_extent)\n\n elem_box_nods, elem_box_indices, cell_intersections, line_section = _LinePolygonIntersections(mesh, section_waypoints, elem_box_nods, elem_box_indices)\n\n intersected_edge, midpoints_edge, elem_centers, elem_box_indices, elem_box_nods, cell_intersections = _FindIntersectedEdges(mesh, elem_box_nods, elem_box_indices, line_section, cell_intersections)\n\n c_lon, c_lat, f_lon, f_lat, s_lon, s_lat, elem_order = _BringIntoAlongPathOrder(midpoints_edge, intersected_edge, elem_centers, section)\n\n effective_dx, effective_dy = _ComputeBrokenLineSegments(f_lat, f_lon, s_lat, s_lon, c_lat, c_lon, section)\n\n vertical_cell_area_dx, vertical_cell_area_dy = _CreateVerticalGrid(effective_dx, effective_dy, mesh_diag)\n\n ds = _UnrotateLoadVelocity(how, files, elem_box_indices, elem_box_nods, vertical_cell_area_dx, vertical_cell_area_dy, c_lon, c_lat, effective_dx, effective_dy, elem_order, chunks, mesh, abg)\n\n ds = _TransportAcross(ds)\n\n if add_TS:\n ds = _AddTempSalt(section, ds, data_path, mesh, years, elem_order)\n\n ds = _OrderIndices(ds, elem_order)\n\n return ds, section\n" ]
[ [ "numpy.delete", "numpy.ones_like", "numpy.array", "numpy.argmin", "numpy.mean", "numpy.diff", "numpy.where", "numpy.arange", "numpy.transpose", "numpy.linspace" ] ]
ArvinSiChuan/keras-yolo3
[ "4eebe12e59d73e550d894c0d33b3ca15b57c0e64" ]
[ "yolo.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nClass definition of YOLO_v3 style detection model on image and video\n\"\"\"\n\nimport colorsys\nimport os\nfrom timeit import default_timer as timer\n\nimport numpy as np\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom keras.layers import Input\nfrom PIL import Image, ImageFont, ImageDraw\n\nfrom yolo3.model import yolo_eval, yolo_body, tiny_yolo_body\nfrom yolo3.utils import letterbox_image\nimport os\nfrom keras.utils import multi_gpu_model\n\nclass YOLO(object):\n _defaults = {\n \"model_path\": 'model_data/yolo.h5',\n \"anchors_path\": 'model_data/yolo_anchors.txt',\n \"classes_path\": 'model_data/coco_classes.txt',\n \"score\" : 0.3,\n \"iou\" : 0.45,\n \"model_image_size\" : (416, 416),\n \"gpu_num\" : 1,\n }\n\n @classmethod\n def get_defaults(cls, n):\n if n in cls._defaults:\n return cls._defaults[n]\n else:\n return \"Unrecognized attribute name '\" + n + \"'\"\n\n def __init__(self, **kwargs):\n self.__dict__.update(self._defaults) # set up default values\n self.__dict__.update(kwargs) # and update with user overrides\n self.class_names = self._get_class()\n self.anchors = self._get_anchors()\n self.sess = K.get_session()\n self.boxes, self.scores, self.classes = self.generate()\n\n def _get_class(self):\n classes_path = os.path.expanduser(self.classes_path)\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n def _get_anchors(self):\n anchors_path = os.path.expanduser(self.anchors_path)\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n def generate(self):\n model_path = os.path.expanduser(self.model_path)\n assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'\n\n # Load model, or construct model and load weights.\n num_anchors = len(self.anchors)\n num_classes = len(self.class_names)\n is_tiny_version = num_anchors==6 # default setting\n try:\n self.yolo_model = load_model(model_path, compile=False)\n except:\n self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \\\n if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)\n self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match\n else:\n assert self.yolo_model.layers[-1].output_shape[-1] == \\\n num_anchors/len(self.yolo_model.output) * (num_classes + 5), \\\n 'Mismatch between model and given anchor and class sizes'\n\n print('{} model, anchors, and classes loaded.'.format(model_path))\n\n # Generate colors for drawing bounding boxes.\n hsv_tuples = [(x / len(self.class_names), 1., 1.)\n for x in range(len(self.class_names))]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(\n map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),\n self.colors))\n np.random.seed(10101) # Fixed seed for consistent colors across runs.\n np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.\n np.random.seed(None) # Reset seed to default.\n\n # Generate output tensor targets for filtered bounding boxes.\n self.input_image_shape = K.placeholder(shape=(2, ))\n if self.gpu_num>=2:\n self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)\n boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,\n len(self.class_names), self.input_image_shape,\n 
score_threshold=self.score, iou_threshold=self.iou)\n return boxes, scores, classes\n\n def detect_image(self, image):\n start = timer()\n\n if self.model_image_size != (None, None):\n assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'\n assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'\n boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))\n else:\n new_image_size = (image.width - (image.width % 32),\n image.height - (image.height % 32))\n boxed_image = letterbox_image(image, new_image_size)\n image_data = np.array(boxed_image, dtype='float32')\n\n \n image_data /= 255.\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n\n out_boxes, out_scores, out_classes = self.sess.run(\n [self.boxes, self.scores, self.classes],\n feed_dict={\n self.yolo_model.input: image_data,\n self.input_image_shape: [image.size[1], image.size[0]],\n K.learning_phase(): 0\n })\n\n \n\n font = ImageFont.truetype(font='font/FiraMono-Medium.otf',\n size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = (image.size[0] + image.size[1]) // 300\n\n for i, c in reversed(list(enumerate(out_classes))):\n predicted_class = self.class_names[c]\n box = out_boxes[i]\n score = out_scores[i]\n\n label = '{} {:.2f}'.format(predicted_class, score)\n draw = ImageDraw.Draw(image)\n label_size = draw.textsize(label, font)\n\n top, left, bottom, right = box\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))\n right = min(image.size[0], np.floor(right + 0.5).astype('int32'))\n \n\n if top - label_size[1] >= 0:\n text_origin = np.array([left, top - label_size[1]])\n else:\n text_origin = np.array([left, top + 1])\n\n # My kingdom for a good redistributable image drawing library.\n for i in range(thickness):\n draw.rectangle(\n [left + i, top + i, right - i, bottom - i],\n outline=self.colors[c])\n draw.rectangle(\n [tuple(text_origin), tuple(text_origin + label_size)],\n fill=self.colors[c])\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n del draw\n\n end = timer()\n return image\n\n def close_session(self):\n self.sess.close()\n\ndef detect_video(yolo, video_path, output_path=\"\"):\n import cv2\n vid = cv2.VideoCapture(video_path)\n if not vid.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_FourCC = cv2.VideoWriter_fourcc(*'MP4V')\n video_fps = vid.get(cv2.CAP_PROP_FPS)\n video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n isOutput = True if output_path != \"\" else False\n if isOutput:\n print(\"!!! TYPE:\", type(output_path),video_size, type(video_FourCC), type(video_fps), type(video_size))\n out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)\n accum_time = 0\n curr_fps = 0\n fps = \"FPS: ??\"\n prev_time = timer()\n while True:\n return_value, frame = vid.read()\n if not return_value: \n break\n image = Image.fromarray(frame)\n image = yolo.detect_image(image)\n result = np.asarray(image)\n curr_time = timer()\n exec_time = curr_time - prev_time\n prev_time = curr_time\n accum_time = accum_time + exec_time\n curr_fps = curr_fps + 1\n if accum_time > 1:\n accum_time = accum_time - 1\n fps = \"FPS: \" + str(curr_fps)\n curr_fps = 0\n cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.50, color=(255, 0, 0), thickness=2)\n if isOutput:\n out.write(result)\n\n\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.random.seed", "numpy.random.shuffle", "numpy.expand_dims", "numpy.floor" ] ]
cristobalfuenzalida/grafica
[ "cf7bb90c4c5c34ee56d328188111c917a0d10389" ]
[ "grafica/scene_graph.py" ]
[ "# coding=utf-8\n\"\"\"A simple scene graph class and functionality\"\"\"\n\nfrom OpenGL.GL import *\nimport OpenGL.GL.shaders\nimport numpy as np\nimport grafica.transformations as tr\nimport grafica.gpu_shape as gs\n\n__author__ = \"Daniel Calderon\"\n__license__ = \"MIT\"\n\n\nclass SceneGraphNode:\n \"\"\"\n A simple class to handle a scene graph\n Each node represents a group of objects\n Each leaf represents a basic figure (GPUShape)\n To identify each node properly, it MUST have a unique name\n \"\"\"\n def __init__(self, name):\n self.name = name\n self.transform = tr.identity()\n self.childs = []\n\n def clear(self):\n \"\"\"Freeing GPU memory\"\"\"\n\n for child in self.childs:\n child.clear()\n\n \n\n \ndef findNode(node, name):\n\n # The name was not found in this path\n if isinstance(node, gs.GPUShape):\n return None\n\n # This is the requested node\n if node.name == name:\n return node\n \n # All childs are checked for the requested name\n for child in node.childs:\n foundNode = findNode(child, name)\n if foundNode != None:\n return foundNode\n\n # No child of this node had the requested name\n return None\n\n\ndef findTransform(node, name, parentTransform=tr.identity()):\n\n # The name was not found in this path\n if isinstance(node, gs.GPUShape):\n return None\n\n newTransform = np.matmul(parentTransform, node.transform)\n\n # This is the requested node\n if node.name == name:\n return newTransform\n \n # All childs are checked for the requested name\n for child in node.childs:\n foundTransform = findTransform(child, name, newTransform)\n if isinstance(foundTransform, (np.ndarray, np.generic) ):\n return foundTransform\n\n # No child of this node had the requested name\n return None\n\n\ndef findPosition(node, name, parentTransform=tr.identity()):\n foundTransform = findTransform(node, name, parentTransform)\n\n if isinstance(foundTransform, (np.ndarray, np.generic) ):\n zero = np.array([[0,0,0,1]], dtype=np.float32).T\n foundPosition = np.matmul(foundTransform, zero)\n return foundPosition\n\n return None\n\n\ndef drawSceneGraphNode(node, pipeline, transformName, parentTransform=tr.identity()):\n assert(isinstance(node, SceneGraphNode))\n\n # Composing the transformations through this path\n newTransform = np.matmul(parentTransform, node.transform)\n\n # If the child node is a leaf, it should be a GPUShape.\n # Hence, it can be drawn with drawCall\n if len(node.childs) == 1 and isinstance(node.childs[0], gs.GPUShape):\n leaf = node.childs[0]\n glUniformMatrix4fv(glGetUniformLocation(pipeline.shaderProgram, transformName), 1, GL_TRUE, newTransform)\n pipeline.drawCall(leaf)\n\n # If the child node is not a leaf, it MUST be a SceneGraphNode,\n # so this draw function is called recursively\n else:\n for child in node.childs:\n drawSceneGraphNode(child, pipeline, transformName, newTransform)\n\n" ]
[ [ "numpy.array", "numpy.matmul" ] ]
m1ka05/tensiga
[ "32a2e5310aff649d9f911d28d49d4ba65c2a4304" ]
[ "tensiga/utils/mat_dot_sp.py" ]
[ "import numpy as np\n\ndef mat_dot_sp(A, B, axis=-1):\n if len(A.shape) == 1:\n return A @ B\n\n A = np.moveaxis(A, axis, -1)\n Ashape = A.shape\n\n # this is equivalent to contracting A with B along the given axis\n A = A.reshape(np.prod(A.shape[:-1]), A.shape[-1])\n A = A @ B\n\n A = A.reshape(Ashape[:-1] + B.shape[1:])\n A = np.moveaxis(A, -1, axis)\n\n return A\n" ]
[ [ "numpy.moveaxis", "numpy.prod" ] ]
Dragon-M-Ren/grad_code
[ "d814b81adaec709d5dffd737f0c350953cc361fd" ]
[ "code/utils.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom load_data import create_raw_input\nfrom scipy.sparse.linalg.eigen.arpack import eigsh\n\n\ndef create_load_sparse(sparse_mat):\n '''\n The sparse matrix is saved as a scipy sparse matrix\n It cannot be directly used by TF\n This will change the sparse matrix to tuple representation\n index, values, shape\n '''\n if not sp.isspmatrix_coo(sparse_mat):\n sparse_mat = sparse_mat.tocoo()\n indexs = np.vstack((sparse_mat.row, sparse_mat.col)).transpose()\n data = sparse_mat.data\n shape = sparse_mat.shape\n\n #Type cast\n indexs = indexs.astype(np.int64)\n data = data.astype(np.float32)\n shape = np.array(list(shape))\n shape = shape.astype(np.int64)\n\n return [indexs, data, shape]\n\ndef symmetric_normalized_laplacian(adjancy):\n '''\n Given a Lapllacian Matrix\n Compute its symmetric normalized form\n Arguments:\n L: Laplacian Matrix\n is_sparse: If L is sparse\n Return:\n D^-0.5 L D^-0.5\n '''\n #convert to coo matrix for computation\n adjancy = sp.coo_matrix(adjancy)\n rowsum = np.array(adjancy.sum(1))\n\n #Compute D\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n #If it is inf(The inverse of 0, then set it to 0)\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n\n normalized = adjancy.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n\n return normalized\n \n\n\ndef row_normalized(mat):\n '''\n row normalized feature\n '''\n #Compute row sum and its inverse\n #If the inverse is inf, set it to 0\n row_sum = np.array(mat.sum(1))\n rs_inv = np.power(row_sum, -1).flatten()\n rs_inv[np.isinf(rs_inv)] = 0\n r_inv_diag = sp.diags(rs_inv)\n \n mat = r_inv_diag.dot(mat)\n\n return mat\n\n\n\ndef pre_GCN(directed, undirected):\n '''\n Preprocess adjancy matrix for GCN\n '''\n sys_norm_directed = symmetric_normalized_laplacian(directed)\n sys_norm_undirected = symmetric_normalized_laplacian(undirected)\n\n return sys_norm_directed, sys_norm_undirected\n\ndef pre_DCNN(directed, undirected):\n '''\n Preprocess for dcnn\n Build degree normalized transition matrix\n '''\n directed = row_normalized(directed)\n undirected = row_normalized(undirected)\n\n return directed, undirected\n\ndef create_cheb_series(adjancy, poly_order, self_loop=True):\n '''\n Used to build ChebNet input\n compute cheb_series\n Used in ChebNet\n input is sparse matrix\n this should be processed before it is feeded into the model\n using numpy\n self_loop: if the adjancy matrix has a selfloop\n '''\n\n ##Normalize adjancy\n ##L= D - W\n ##D is row_sum\n W = symmetric_normalized_laplacian(adjancy)\n W = sp.coo_matrix(adjancy)\n D = np.array(W.sum(1))\n D = D.flatten()\n shape = W.shape\n\n L = sp.diags(D) - W\n\n if self_loop:\n L = L + sp.eye(shape[0])\n\n #Get the largest eigenvalue\n l_ev = eigsh(L + L.T,1,which='LA')[0]\n l_ev = l_ev[0]\n print(l_ev)\n #exit()\n #l_ev = 1\n\n L_hat = (2*L)/l_ev - sp.eye(shape[1])\n\n cheb_series = []\n\n cheb_series.append(sp.eye(shape[0]))\n cheb_series.append(L_hat)\n\n for i in range(2, poly_order):\n L_cp = sp.csr_matrix(L, copy=True)\n res = 2*L_cp.dot(cheb_series[-1]) - cheb_series[-2]\n cheb_series.append(res)\n\n undirected = [create_load_sparse(item) for item in cheb_series]\n\n return undirected\n\ndef create_mean_pool_adj_info(adjancy):\n '''\n Create the neighborhood informaion for GraphSage\n Used by mean pool\n '''\n adjancy = sp.coo_matrix(adjancy)\n\n row = adjancy.row\n col = adjancy.col\n\n return row, col\n\ndef create_neighbor_matrix(adjancy, 
num_nodes, maxdegree):\n '''\n Create the neighborhood matrix\n '''\n adjancy = sp.coo_matrix(adjancy)\n\n neigh = np.zeros((num_nodes, maxdegree), dtype=np.int32)\n loc = np.zeros((num_nodes), dtype=np.int32)\n\n #get row and column index\n row = adjancy.row\n col = adjancy.col\n\n for index in zip(row, col):\n node = index[0]\n value = index[1]\n locate = loc[node]\n\n #update neighborhood information\n neigh[node][locate] = value \n\n #update location\n loc[node] = locate + 1\n\n return neigh\n\n\n\n\n\ndef create_input(model_name, path, dataset_name, index, train_num, val_num, test_num = None):\n '''\n This will create the input that can be directly fed to the neural network\n '''\n directed, undirected, features, y_train, y_val, y_test, train_mask, val_mask, test_mask,\\\n info = create_raw_input(path, dataset_name, index, train_num, 500, None)\n\n #preprocess features\n norm_features = row_normalized(features)\n\n #information\n node_num = directed.shape[0]\n input_dim = features.shape[1]\n output_dim = y_train.shape[1]\n\n #return value\n dataset = {}\n info = {}\n\n #create degrees\n binary_value = undirected.astype(bool)\n binary_value = binary_value.astype(np.int32)\n degrees = np.array(binary_value.sum(1))\n maxdegree = np.max(degrees)\n\n #create neigh_info, used by graphsage max pool\n neigh_info = create_neighbor_matrix(undirected, node_num, maxdegree)\n row, col = create_mean_pool_adj_info(undirected)\n\n #Preprocess adjancy for different models\n if 'gcn' == model_name or 'firstcheb' == model_name:\n directed, undirected = pre_GCN(directed, undirected)\n elif 'dcnn' == model_name:\n directed, undirected = pre_DCNN(directed, undirected)\n elif 'spectralcnn' == model_name:\n #Convert to dense matrix\n #only the undirected matrix is computed\n #Since the directed adjancy is not used in any model\n dense_undirected = sp.csr_matrix.todense(undirected)\n\n #compute eigenvalue decomposition\n undirected_evalues, undirected_evectors = np.linalg.eigh(dense_undirected)\n undirected = undirected_evectors\n elif 'chebnet' == model_name:\n pass\n elif 'gat' == model_name:\n dataset['row'] = row\n dataset['col'] = col\n indices = zip(row, col)\n indices = [ind for ind in indices]\n dataset['indices'] = indices\n elif 'graphsage' == model_name:\n dataset['degrees'] = degrees\n elif 'graphsage_maxpool' == model_name:\n info['max_degree'] = maxdegree\n dataset['degrees'] = degrees\n dataset['neigh_info'] = neigh_info\n elif 'graphsage_meanpool' == model_name:\n dataset['degrees'] = degrees\n dataset['row'] = row\n dataset['col'] = col\n elif 'mlp' == model_name:\n pass\n else:\n raise ValueError('There is no model named: ' + model_name)\n\n #Change scipy sparse matrix to the format that can be directly used by\n #the model\n if 'spectralcnn' == model_name:\n #Adjancy matrix is not used in these models\n #The eigenvector is used\n #directed = None\n #undirected = [undirected_evalues, undirected_evectors]\n pass\n elif 'chebnet' == model_name:\n pass\n else:\n directed = create_load_sparse(directed)\n undirected = create_load_sparse(undirected)\n\n features = create_load_sparse(features)\n\n\n dataset.update({\n 'directed': directed,\n 'undirected': undirected,\n 'features': features,\n 'train_label': y_train,\n 'val_label': y_val,\n 'test_label': y_test,\n 'train_mask': train_mask,\n 'val_mask': val_mask,\n 'test_mask': test_mask\n })\n\n info.update({\n 'input_dim': input_dim,\n 'output_dim': output_dim,\n 'node_num': 
node_num,\n 'cate_num': output_dim\n })\n\n\n return dataset, info\n\n\n\n\n" ]
[ [ "numpy.max", "scipy.sparse.coo_matrix", "numpy.isinf", "scipy.sparse.diags", "numpy.zeros", "scipy.sparse.csr_matrix.todense", "numpy.linalg.eigh", "scipy.sparse.eye", "numpy.power", "scipy.sparse.linalg.eigen.arpack.eigsh", "scipy.sparse.isspmatrix_coo", "scipy.sparse.csr_matrix", "numpy.vstack" ] ]
vemonet/fuji
[ "92aabcb58d76a58981c677bcf0da8e57309c6096" ]
[ "fuji_server/controllers/fair_check.py" ]
[ "# -*- coding: utf-8 -*-\n\n# MIT License\n#\n# Copyright (c) 2020 PANGAEA (https://www.pangaea.de/)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport io\nimport logging, logging.handlers\nimport mimetypes\nimport os\nimport re\nimport sys\nimport urllib\nimport urllib.request as urllib\nfrom typing import List, Any\nfrom urllib.parse import urlparse, urljoin\nimport pandas as pd\n\nimport Levenshtein\nimport idutils\nimport lxml\nimport rdflib\nfrom rdflib.exceptions import ParserError\nfrom rdflib.namespace import RDF\nfrom rdflib.namespace import DCTERMS\nfrom rdflib.namespace import DC\nfrom rapidfuzz import fuzz\nfrom rapidfuzz import process\nfrom tika import parser\nimport hashlib\n\nfrom tldextract import extract\n\nfrom fuji_server.evaluators.fair_evaluator_license import FAIREvaluatorLicense\nfrom fuji_server.evaluators.fair_evaluator_data_access_level import FAIREvaluatorDataAccessLevel\nfrom fuji_server.evaluators.fair_evaluator_persistent_identifier import FAIREvaluatorPersistentIdentifier\nfrom fuji_server.evaluators.fair_evaluator_unique_identifier import FAIREvaluatorUniqueIdentifier\nfrom fuji_server.evaluators.fair_evaluator_minimal_metadata import FAIREvaluatorCoreMetadata\nfrom fuji_server.evaluators.fair_evaluator_content_included import FAIREvaluatorContentIncluded\nfrom fuji_server.evaluators.fair_evaluator_related_resources import FAIREvaluatorRelatedResources\nfrom fuji_server.evaluators.fair_evaluator_searchable import FAIREvaluatorSearchable\nfrom fuji_server.evaluators.fair_evaluator_file_format import FAIREvaluatorFileFormat\nfrom fuji_server.evaluators.fair_evaluator_data_provenance import FAIREvaluatorDataProvenance\nfrom fuji_server.evaluators.fair_evaluator_data_content_metadata import FAIREvaluatorDataContentMetadata\nfrom fuji_server.evaluators.fair_evaluator_formal_metadata import FAIREvaluatorFormalMetadata\nfrom fuji_server.evaluators.fair_evaluator_semantic_vocabulary import FAIREvaluatorSemanticVocabulary\nfrom fuji_server.evaluators.fair_evaluator_metadata_preservation import FAIREvaluatorMetadataPreserved\nfrom fuji_server.evaluators.fair_evaluator_community_metadata import FAIREvaluatorCommunityMetadata\nfrom fuji_server.evaluators.fair_evaluator_standardised_protocol_data import FAIREvaluatorStandardisedProtocolData\nfrom fuji_server.evaluators.fair_evaluator_standardised_protocol_metadata import FAIREvaluatorStandardisedProtocolMetadata\n\nfrom fuji_server.helper.metadata_collector import MetaDataCollector\nfrom 
fuji_server.helper.metadata_collector_datacite import MetaDataCollectorDatacite\nfrom fuji_server.helper.metadata_collector_dublincore import MetaDataCollectorDublinCore\nfrom fuji_server.helper.metadata_collector_microdata import MetaDataCollectorMicroData\nfrom fuji_server.helper.metadata_collector_opengraph import MetaDataCollectorOpenGraph\nfrom fuji_server.helper.metadata_collector_ore_atom import MetaDataCollectorOreAtom\nfrom fuji_server.helper.metadata_collector_rdf import MetaDataCollectorRdf\nfrom fuji_server.helper.metadata_collector_schemaorg import MetaDataCollectorSchemaOrg\nfrom fuji_server.helper.metadata_collector_xml import MetaDataCollectorXML\nfrom fuji_server.helper.metadata_mapper import Mapper\nfrom fuji_server.helper.metadata_provider_csw import OGCCSWMetadataProvider\nfrom fuji_server.helper.metadata_provider_oai import OAIMetadataProvider\nfrom fuji_server.helper.metadata_provider_sparql import SPARQLMetadataProvider\nfrom fuji_server.helper.metadata_provider_rss_atom import RSSAtomMetadataProvider\nfrom fuji_server.helper.preprocessor import Preprocessor\nfrom fuji_server.helper.repository_helper import RepositoryHelper\nfrom fuji_server.helper.identifier_helper import IdentifierHelper\n\nclass FAIRCheck:\n METRICS = None\n SPDX_LICENSES = None\n SPDX_LICENSE_NAMES = None\n COMMUNITY_STANDARDS_NAMES = None\n COMMUNITY_METADATA_STANDARDS_URIS = None\n COMMUNITY_METADATA_STANDARDS_URIS_LIST = None\n COMMUNITY_STANDARDS = None\n SCIENCE_FILE_FORMATS = None\n LONG_TERM_FILE_FORMATS = None\n OPEN_FILE_FORMATS = None\n DEFAULT_NAMESPACES = None\n VOCAB_NAMESPACES = None\n ARCHIVE_MIMETYPES = Mapper.ARCHIVE_COMPRESS_MIMETYPES.value\n STANDARD_PROTOCOLS = None\n SCHEMA_ORG_CONTEXT = []\n FILES_LIMIT = None\n LOG_SUCCESS = 25\n VALID_RESOURCE_TYPES = []\n IDENTIFIERS_ORG_DATA = {}\n GOOGLE_DATA_DOI_CACHE =[]\n GOOGLE_DATA_URL_CACHE = []\n FUJI_VERSION = 'v1.3.8'\n\n def __init__(self, uid, test_debug=False, metadata_service_url=None, metadata_service_type =None,use_datacite=True, oaipmh_endpoint = None):\n uid_bytes = uid.encode('utf-8')\n self.test_id = hashlib.sha1(uid_bytes).hexdigest()\n #str(base64.urlsafe_b64encode(uid_bytes), \"utf-8\") # an id we can use for caching etc\n if isinstance(uid,str):\n uid = uid.strip()\n self.id = self.input_id = uid\n self.metadata_service_url = metadata_service_url\n self.metadata_service_type = metadata_service_type\n self.oaipmh_endpoint = oaipmh_endpoint\n self.csw_endpoint = None\n self.sparql_endpoint = None\n if self.oaipmh_endpoint:\n self.metadata_service_url = self.oaipmh_endpoint\n self.metadata_service_type = 'oai_pmh'\n if self.metadata_service_type == 'oai_pmh':\n self.oaipmh_endpoint = self.metadata_service_url\n elif self.metadata_service_type == 'ogc_csw':\n self.csw_endpoint = self.metadata_service_url\n elif self.metadata_service_type == 'sparql':\n self.sparql_endpoint = self.metadata_service_url\n self.pid_url = None # full pid # e.g., \"https://doi.org/10.1594/pangaea.906092 or url (non-pid)\n self.landing_url = None # url of the landing page of self.pid_url\n self.origin_url = None #the url from where all starts - in case of redirection we'll need this later on\n self.landing_html = None\n self.landing_content_type= None\n self.landing_origin = None # schema + authority of the landing page e.g. 
https://www.pangaea.de\n self.signposting_header_links = []\n self.pid_scheme = None\n self.id_scheme= None\n self.logger = logging.getLogger(self.test_id)\n self.metadata_sources = []\n self.isDebug = test_debug\n self.isMetadataAccessible = None\n self.metadata_merged = {}\n self.content_identifier=[]\n self.community_standards = []\n self.community_standards_uri = {}\n self.namespace_uri=[]\n self.reference_elements = Mapper.REFERENCE_METADATA_LIST.value.copy() # all metadata elements required for FUJI metrics\n self.related_resources = []\n # self.test_data_content_text = None# a helper to check metadata against content\n self.rdf_graph = None\n\n self.rdf_collector = None\n self.use_datacite = use_datacite\n self.repeat_pid_check = False\n self.logger_message_stream = io.StringIO()\n logging.addLevelName(self.LOG_SUCCESS, 'SUCCESS')\n # in case log messages shall be sent to a remote server\n self.remoteLogPath = None\n self.remoteLogHost = None\n if self.isDebug:\n self.logStreamHandler = logging.StreamHandler(self.logger_message_stream)\n formatter = logging.Formatter('%(message)s|%(levelname)s')\n self.logStreamHandler.setFormatter(formatter)\n self.logger.propagate = False\n self.logger.setLevel(logging.INFO) # set to debug in testing environment\n self.logger.addHandler(self.logStreamHandler)\n\n self.count = 0\n self.embedded_retrieved = False\n FAIRCheck.load_predata()\n self.extruct = None\n self.extruct_result = {}\n self.tika_content_types_list = []\n\n\n @classmethod\n def load_predata(cls):\n cls.FILES_LIMIT = Preprocessor.data_files_limit\n if not cls.METRICS:\n cls.METRICS = Preprocessor.get_custom_metrics(['metric_name', 'total_score','metric_tests','metric_number'])\n if not cls.SPDX_LICENSES:\n # cls.SPDX_LICENSES, cls.SPDX_LICENSE_NAMES, cls.SPDX_LICENSE_URLS = Preprocessor.get_licenses()\n cls.SPDX_LICENSES, cls.SPDX_LICENSE_NAMES = Preprocessor.get_licenses()\n if not cls.COMMUNITY_METADATA_STANDARDS_URIS:\n cls.COMMUNITY_METADATA_STANDARDS_URIS = Preprocessor.get_metadata_standards_uris()\n cls.COMMUNITY_METADATA_STANDARDS_URIS_LIST = list(cls.COMMUNITY_METADATA_STANDARDS_URIS.keys())\n if not cls.COMMUNITY_STANDARDS:\n cls.COMMUNITY_STANDARDS = Preprocessor.get_metadata_standards()\n cls.COMMUNITY_STANDARDS_NAMES = list(cls.COMMUNITY_STANDARDS.keys())\n if not cls.SCIENCE_FILE_FORMATS:\n cls.SCIENCE_FILE_FORMATS = Preprocessor.get_science_file_formats()\n if not cls.LONG_TERM_FILE_FORMATS:\n cls.LONG_TERM_FILE_FORMATS = Preprocessor.get_long_term_file_formats()\n if not cls.OPEN_FILE_FORMATS:\n cls.OPEN_FILE_FORMATS = Preprocessor.get_open_file_formats()\n if not cls.DEFAULT_NAMESPACES:\n cls.DEFAULT_NAMESPACES = Preprocessor.getDefaultNamespaces()\n if not cls.VOCAB_NAMESPACES:\n cls.VOCAB_NAMESPACES = Preprocessor.getLinkedVocabs()\n if not cls.STANDARD_PROTOCOLS:\n cls.STANDARD_PROTOCOLS = Preprocessor.get_standard_protocols()\n if not cls.SCHEMA_ORG_CONTEXT:\n cls.SCHEMA_ORG_CONTEXT = Preprocessor.get_schema_org_context()\n if not cls.VALID_RESOURCE_TYPES:\n cls.VALID_RESOURCE_TYPES = Preprocessor.get_resource_types()\n if not cls.IDENTIFIERS_ORG_DATA:\n cls.IDENTIFIERS_ORG_DATA = Preprocessor.get_identifiers_org_data()\n #not needed locally ... 
but init class variable\n Preprocessor.get_google_data_dois()\n Preprocessor.get_google_data_urls()\n\n\n @staticmethod\n def uri_validator(u): # TODO integrate into request_helper.py\n try:\n r = urlparse(u)\n return all([r.scheme, r.netloc])\n except:\n return False\n\n def set_remote_logging_target(self, host, path):\n if host and path:\n isHostUp = False\n try:\n if urllib.urlopen('http://'+host+''+path).getcode() == 200:\n isHostUp = True\n except Exception as e:\n print('Remote logging not possible, please check config.ini, host not reachable: http://'+str(host)+'/'+str(path))\n print(e)\n if isHostUp:\n try:\n weblogger = logging.handlers.HTTPHandler(host,\n path + '?testid=' + str(self.test_id), method='POST')\n webformatter = logging.Formatter('%(levelname)s - %(message)s \\r\\n')\n weblogger.setFormatter(webformatter)\n self.logger.addHandler(weblogger)\n except Exception as e:\n print(e)\n\n\n\n def validate_service_url(self):\n # checks if service url and landing page url have same domain in order to avoid manipulations\n if self.metadata_service_url:\n service_url_parts = extract(self.metadata_service_url)\n landing_url_parts = extract(self.landing_url)\n service_domain = service_url_parts.domain+'.'+service_url_parts.suffix\n landing_domain = landing_url_parts.domain+'.'+landing_url_parts.suffix\n if landing_domain == service_domain:\n return True\n else:\n self.logger.warning('FsF-R1.3-01M : Service URL domain/subdomain does not match with landing page domain -: {}'.format(service_domain,landing_domain))\n self.metadata_service_url, self.csw_endpoint, self.oaipmh_endpoint ,self.sparql_endpoint = None, None, None, None\n return False\n else:\n return False\n\n def retrieve_metadata(self, extruct_metadata):\n embedded_exists={}\n if isinstance(extruct_metadata, dict):\n embedded_exists = {k: v for k, v in extruct_metadata.items() if v}\n self.extruct = embedded_exists.copy()\n '''\n if embedded_exists: # retrieve metadata from landing page\n self.logger.info(\n 'FsF-F2-01M : Formats of structured metadata embedded in HTML markup detected by extruct - {}'.format(\n list(embedded_exists.keys())))\n #self.retrieve_metadata_embedded(embedded_exists)\n else:\n self.logger.warning('FsF-F2-01M : NO structured metadata embedded in HTML')\n '''\n #if self.reference_elements: # this will be always true as we need datacite client id\n # if include_embedded ==True:\n # self.retrieve_metadata_embedded(embedded_exists)\n # self.retrieve_metadata_external()\n\n # ========= clean merged metadata, delete all entries which are None or ''\n data_objects = self.metadata_merged.get('object_content_identifier')\n if data_objects == {'url': None} or data_objects == [None]:\n data_objects = self.metadata_merged['object_content_identifier'] = None\n if data_objects is not None:\n if not isinstance(data_objects, list):\n self.metadata_merged['object_content_identifier']=[data_objects]\n\n # TODO quick-fix to merge size information - should do it at mapper\n if 'object_content_identifier' in self.metadata_merged:\n if self.metadata_merged.get('object_content_identifier'):\n for c in self.metadata_merged['object_content_identifier']:\n if not c.get('size') and self.metadata_merged.get('object_size'):\n c['size'] = self.metadata_merged.get('object_size')\n\n for mk, mv in list(self.metadata_merged.items()):\n if mv == '' or mv is None:\n del self.metadata_merged[mk]\n\n self.logger.info('FsF-F2-01M : Type of object described by the metadata -: {}'.format(self.metadata_merged.get('object_type')))\n\n # 
detect api and standards\n self.retrieve_apis_standards()\n\n # remove duplicates\n if self.namespace_uri:\n self.namespace_uri = list(set(self.namespace_uri))\n\n def retrieve_apis_standards(self):\n if self.landing_url is not None:\n self.logger.info('FsF-R1.3-01M : Retrieving API and Standards')\n if self.use_datacite:\n client_id = self.metadata_merged.get('datacite_client')\n self.logger.info('FsF-R1.3-01M : re3data/datacite client id -: {}'.format(client_id))\n else:\n client_id = None\n self.logger.warning('{} : Datacite support disabled, therefore skipping standards identification using in re3data record'.format(\n 'FsF-R1.3-01M', ))\n\n if self.metadata_service_url not in [None,'']:\n self.logger.info('FsF-R1.3-01M : Metadata service endpoint ('+str(self.metadata_service_type)+') provided as part of the request -: '+str(self.metadata_service_url))\n #else:\n #check re3data always instead...\n if self.use_datacite:\n self.logger.info('FsF-R1.3-01M : Trying to retrieve metadata info from re3data/datacite services using client id -: '+str(client_id))\n #find endpoint via datacite/re3data if pid is provided\n #print(client_id ,self.pid_scheme)\n if client_id and self.pid_scheme:\n repoHelper = RepositoryHelper(client_id, self.pid_scheme, logger= self.logger.name)\n repoHelper.lookup_re3data()\n if not self.metadata_service_url:\n self.logger.info('{} : Inferring endpoint information through re3data/datacite services'.format('FsF-R1.3-01M'))\n self.oaipmh_endpoint = repoHelper.getRe3MetadataAPIs().get('OAI-PMH')\n self.sparql_endpoint = repoHelper.getRe3MetadataAPIs().get('SPARQL')\n self.community_standards.extend(repoHelper.getRe3MetadataStandards())\n self.logger.info('{} : Metadata standards listed in re3data record -: {}'.format('FsF-R1.3-01M', self.community_standards ))\n else:\n self.logger.info('FsF-R1.3-01M : Skipped re3data metadata standards query since Datacite support is disabled by user')\n # verify the service url by domain matching\n self.validate_service_url()\n # retrieve metadata standards info from oai-pmh\n if self.oaipmh_endpoint:\n self.logger.info('{} : Use OAI-PMH endpoint to retrieve standards used by the repository -: {}'.format('FsF-R1.3-01M',self.oaipmh_endpoint))\n if (self.uri_validator(self.oaipmh_endpoint)):\n oai_provider = OAIMetadataProvider(endpoint=self.oaipmh_endpoint, logger=self.logger,metric_id='FsF-R1.3-01M')\n self.community_standards_uri = oai_provider.getMetadataStandards()\n self.namespace_uri.extend(oai_provider.getNamespaces())\n stds = None\n if self.community_standards_uri:\n stds = list(self.community_standards_uri.keys())\n self.logger.log(self.LOG_SUCCESS,'{} : Found disciplinary standards that are listed in OAI-PMH endpoint -: {}'.format('FsF-R1.3-01M',stds ))\n else:\n self.logger.info('{} : Invalid endpoint'.format('FsF-R1.3-01M'))\n else:\n self.logger.warning('{} : NO valid OAI-PMH endpoint found'.format('FsF-R1.3-01M'))\n\n # retrieve metadata standards info from OGC CSW\n if self.csw_endpoint:\n self.logger.info('{} : Use OGC CSW endpoint to retrieve standards used by the repository -: {}'.format('FsF-R1.3-01M',self.oaipmh_endpoint))\n if (self.uri_validator(self.csw_endpoint)):\n csw_provider = OGCCSWMetadataProvider(endpoint=self.csw_endpoint, logger=self.logger,metric_id='FsF-R1.3-01M')\n self.community_standards_uri = csw_provider.getMetadataStandards()\n self.namespace_uri.extend(csw_provider.getNamespaces())\n stds = None\n if self.community_standards_uri:\n stds = list(self.community_standards_uri.keys())\n 
self.logger.log(self.LOG_SUCCESS,\n '{} : Found disciplinary standards that are listed in OGC CSW endpoint -: {}'.format(\n 'FsF-R1.3-01M', stds))\n else:\n self.logger.info('{} : Invalid OGC CSW endpoint'.format('FsF-R1.3-01M'))\n\n # retrieve metadata standards info from SPARQL endpoint\n if self.sparql_endpoint:\n self.logger.info('{} : Use SPARQL endpoint to retrieve standards used by the repository -: {}'.format('FsF-R1.3-01M',self.oaipmh_endpoint))\n if (self.uri_validator(self.sparql_endpoint)):\n sparql_provider = SPARQLMetadataProvider(endpoint=self.sparql_endpoint, logger=self.logger,metric_id='FsF-R1.3-01M')\n self.community_standards_uri = sparql_provider.getMetadataStandards()\n self.namespace_uri.extend(sparql_provider.getNamespaces())\n stds = None\n if self.community_standards_uri:\n stds = list(self.community_standards_uri.keys())\n self.logger.log(self.LOG_SUCCESS,\n '{} : Found disciplinary standards that are listed in SPARQL endpoint -: {}'.format(\n 'FsF-R1.3-01M', stds))\n else:\n self.logger.info('{} : Invalid SPARQL endpoint'.format('FsF-R1.3-01M'))\n\n\n else:\n self.logger.warning('{} : Skipped external ressources (e.g. OAI, re3data) checks since landing page could not be resolved'.format('FsF-R1.3-01M'))\n\n def retrieve_metadata_embedded(self, extruct_metadata ={}):\n isPid = False\n if self.pid_scheme:\n isPid = True\n self.embedded_retrieved = True\n if self.landing_url:\n self.logger.info('FsF-F2-01M : Starting to identify EMBEDDED metadata at -: ' + str(self.landing_url))\n #test if content is html otherwise skip embedded tests\n #print(self.landing_content_type)\n if 'html' in str(self.landing_content_type):\n\n # ========= retrieve schema.org (embedded, or from via content-negotiation if pid provided) =========\n ext_meta = extruct_metadata.get('json-ld')\n #print(ext_meta)\n self.logger.info('FsF-F2-01M : Trying to retrieve schema.org JSON-LD metadata from html page')\n\n schemaorg_collector = MetaDataCollectorSchemaOrg(loggerinst=self.logger, sourcemetadata=ext_meta,\n mapping=Mapper.SCHEMAORG_MAPPING, pidurl=None)\n source_schemaorg, schemaorg_dict = schemaorg_collector.parse_metadata()\n schemaorg_dict = self.exclude_null(schemaorg_dict)\n if schemaorg_dict:\n self.namespace_uri.extend(schemaorg_collector.namespaces)\n self.metadata_sources.append((source_schemaorg,'embedded'))\n if schemaorg_dict.get('related_resources'):\n self.related_resources.extend(schemaorg_dict.get('related_resources'))\n if schemaorg_dict.get('object_content_identifier'):\n self.logger.info('FsF-F3-01M : Found data links in Schema.org metadata -: ' + str(schemaorg_dict.get('object_content_identifier')))\n # add object type for future reference\n for i in schemaorg_dict.keys():\n if i in self.reference_elements:\n self.metadata_merged[i] = schemaorg_dict[i]\n self.reference_elements.remove(i)\n self.logger.log(self.LOG_SUCCESS, 'FsF-F2-01M : Found schema.org JSON-LD metadata in html page -: '+str(schemaorg_dict.keys()))\n else:\n self.logger.info('FsF-F2-01M : schema.org JSON-LD metadata in html page UNAVAILABLE')\n\n # ========= retrieve dublin core embedded in html page =========\n if self.reference_elements:\n self.logger.info('FsF-F2-01M : Trying to retrieve Dublin Core metadata from html page')\n dc_collector = MetaDataCollectorDublinCore(loggerinst=self.logger, sourcemetadata=self.landing_html,\n mapping=Mapper.DC_MAPPING)\n source_dc, dc_dict = dc_collector.parse_metadata()\n dc_dict = self.exclude_null(dc_dict)\n if dc_dict:\n 
self.namespace_uri.extend(dc_collector.namespaces)\n #not_null_dc = [k for k, v in dc_dict.items() if v is not None]\n self.metadata_sources.append((source_dc,'embedded'))\n if dc_dict.get('related_resources'):\n self.related_resources.extend(dc_dict.get('related_resources'))\n for d in dc_dict.keys():\n if d in self.reference_elements:\n self.metadata_merged[d] = dc_dict[d]\n self.reference_elements.remove(d)\n self.logger.log(self.LOG_SUCCESS, 'FsF-F2-01M : Found DublinCore metadata -: '+str(dc_dict.keys()))\n else:\n self.logger.info('FsF-F2-01M : DublinCore metadata UNAVAILABLE')\n # ========= retrieve embedded rdfa and microdata metadata ========\n self.logger.info('FsF-F2-01M : Trying to retrieve Microdata metadata from html page')\n\n micro_meta = extruct_metadata.get('microdata')\n microdata_collector = MetaDataCollectorMicroData(loggerinst=self.logger, sourcemetadata=micro_meta,\n mapping=Mapper.MICRODATA_MAPPING)\n source_micro, micro_dict = microdata_collector.parse_metadata()\n if micro_dict:\n self.metadata_sources.append((source_micro,'embedded'))\n self.namespace_uri.extend(microdata_collector.getNamespaces())\n micro_dict = self.exclude_null(micro_dict)\n for i in micro_dict.keys():\n if i in self.reference_elements:\n self.metadata_merged[i] = micro_dict[i]\n self.reference_elements.remove(i)\n self.logger.log(self.LOG_SUCCESS, 'FsF-F2-01M : Found microdata metadata -: '+str(micro_dict.keys()))\n\n #================== RDFa\n self.logger.info('FsF-F2-01M : Trying to retrieve RDFa metadata from html page')\n\n RDFA_ns = rdflib.Namespace(\"http://www.w3.org/ns/rdfa#\")\n rdfasource = MetaDataCollector.Sources.RDFA.value\n rdfagraph = None\n errors=[]\n try:\n rdflib_logger = logging.getLogger('rdflib')\n rdflib_logger.setLevel(logging.ERROR)\n rdfagraph = rdflib.Graph()\n rdfagraph.parse(data=self.landing_html, format='rdfa')\n rdfa_collector = MetaDataCollectorRdf(loggerinst=self.logger, target_url=self.landing_url, source=rdfasource,\n rdf_graph=rdfagraph)\n source_rdfa, rdfa_dict = rdfa_collector.parse_metadata()\n if(len(rdfa_dict) > 0):\n self.metadata_sources.append((rdfasource,'embedded'))\n self.namespace_uri.extend(rdfa_collector.getNamespaces())\n #rdfa_dict['object_identifier']=self.pid_url\n rdfa_dict = self.exclude_null(rdfa_dict)\n for i in rdfa_dict.keys():\n if i in self.reference_elements:\n self.metadata_merged[i] = rdfa_dict[i]\n self.reference_elements.remove(i)\n self.logger.log(self.LOG_SUCCESS, 'FsF-F2-01M : Found RDFa metadata -: '+str(rdfa_dict.keys()))\n except Exception as e:\n self.logger.info('FsF-F2-01M : RDFa metadata parsing exception, probably no RDFa embedded in HTML -:'+str(e))\n\n\n # ======== retrieve OpenGraph metadata\n self.logger.info('FsF-F2-01M : Trying to retrieve OpenGraph metadata from html page')\n\n ext_meta = extruct_metadata.get('opengraph')\n opengraph_collector = MetaDataCollectorOpenGraph(loggerinst=self.logger, sourcemetadata=ext_meta,\n mapping=Mapper.OG_MAPPING)\n source_opengraph, opengraph_dict = opengraph_collector.parse_metadata()\n opengraph_dict = self.exclude_null(opengraph_dict)\n if opengraph_dict:\n self.namespace_uri.extend(opengraph_collector.namespaces)\n self.metadata_sources.append((source_opengraph,'embedded'))\n for i in opengraph_dict.keys():\n if i in self.reference_elements:\n self.metadata_merged[i] = opengraph_dict[i]\n self.reference_elements.remove(i)\n self.logger.log(self.LOG_SUCCESS, 'FsF-F2-01M : Found OpenGraph metadata -: ' + str(opengraph_dict.keys()))\n else:\n self.logger.info('FsF-F2-01M 
: OpenGraph metadata UNAVAILABLE')\n\n #========= retrieve signposting data links\n self.logger.info('FsF-F2-01M : Trying to identify Typed Links in html page')\n\n data_sign_links = self.get_signposting_links('item')\n if data_sign_links:\n self.logger.info('FsF-F3-01M : Found data links in response header (signposting) -: ' + str(len(data_sign_links)))\n if self.metadata_merged.get('object_content_identifier') is None:\n self.metadata_merged['object_content_identifier'] = data_sign_links\n\n\n\n #======== retrieve OpenSearch links\n search_links = self.get_html_typed_links(rel='search')\n for search in search_links:\n if search.get('type') in ['application/opensearchdescription+xml']:\n self.logger.info('FsF-R1.3-01M : Found OpenSearch link in HTML head (link rel=search) -: ' + str(search['url']))\n self.namespace_uri.append('http://a9.com/-/spec/opensearch/1.1/')\n\n #========= retrieve atom, GeoRSS links\n #TODO: do somethin useful with this..\n feed_links = self.get_html_typed_links(rel='alternate')\n for feed in feed_links:\n if feed.get('type') in ['application/rss+xml']:\n self.logger.info('FsF-R1.3-01M : Found atom/rss/georss feed link in HTML head (link rel=alternate) -: ' + str(feed.get('url')))\n feed_helper = RSSAtomMetadataProvider(self.logger,feed['url'],'FsF-R1.3-01M')\n feed_helper.getMetadataStandards()\n self.namespace_uri.extend(feed_helper.getNamespaces())\n #========= retrieve typed data object links =========\n\n data_meta_links = self.get_html_typed_links(rel='item')\n if data_meta_links:\n self.logger.info('FsF-F3-01M : Found data links in HTML head (link rel=item) -: ' + str(len(data_meta_links)))\n if self.metadata_merged.get('object_content_identifier') is None:\n self.metadata_merged['object_content_identifier'] = data_meta_links\n # self.metadata_sources.append((MetaDataCollector.Sources.TYPED_LINK.value,'linked'))\n #Now if an identifier has been detected in the metadata, potentially check for persistent identifier has to be repeated..\n self.check_pidtest_repeat()\n else:\n self.logger.warning('FsF-F2-01M : Skipped EMBEDDED metadata identification of landing page at -: ' + str(self.landing_url)+' expected html content but received: '+str(self.landing_content_type))\n else:\n self.logger.warning('FsF-F2-01M : Skipped EMBEDDED metadata identification, no landing page URL could be determined' )\n\n\n def check_pidtest_repeat(self):\n if self.metadata_merged.get('object_identifier'):\n if isinstance(self.metadata_merged.get('object_identifier'), list):\n identifiertotest = self.metadata_merged.get('object_identifier')\n else:\n identifiertotest = [self.metadata_merged.get('object_identifier')]\n if self.pid_scheme is None:\n found_pids={}\n for pidcandidate in identifiertotest:\n idhelper = IdentifierHelper(pidcandidate)\n found_id_scheme = idhelper.preferred_schema\n if idhelper.is_persistent:\n found_pids[found_id_scheme] = pidcandidate\n if len(found_pids) >=1 and self.repeat_pid_check == False:\n self.logger.info(\n 'FsF-F2-01M : Found object identifier in metadata, repeating PID check for FsF-F1-02D')\n self.logger.log(self.LOG_SUCCESS,\n 'FsF-F1-02D : Found object identifier in metadata during FsF-F2-01M, PID check was repeated')\n self.repeat_pid_check = True\n\n if 'doi' in found_pids:\n self.id = found_pids['doi']\n self.pid_scheme = 'doi'\n else:\n self.pid_scheme, self.id = next(iter(found_pids.items()))\n\n # Comment: not sure if we really need a separate class as proposed below. 
Instead we can use a dictionary\n # TODO (important) separate class to represent https://www.iana.org/assignments/link-relations/link-relations.xhtml\n # use IANA relations for extracting metadata and meaningful links\n def get_html_typed_links(self, rel=\"item\"):\n # Use Typed Links in HTTP Link headers to help machines find the resources that make up a publication.\n # Use links to find domains specific metadata\n datalinks = []\n if isinstance(self.landing_html, str):\n if self.landing_html:\n try:\n dom = lxml.html.fromstring(self.landing_html.encode('utf8'))\n links=dom.xpath('/*/head/link[@rel=\"'+rel+'\"]')\n for l in links:\n href=l.attrib.get('href')\n #handle relative paths\n if urlparse(href).scheme == '':\n href = urljoin(self.landing_url,href)\n #landingpath = urlparse(self.landing_url).path\n #landingdir, landingfile = os.path.split(landingpath)\n #href= landingdir+'/'+href\n datalinks.append({'url': href, 'type': l.attrib.get('type'), 'rel': l.attrib.get('rel'), 'profile': l.attrib.get('format')})\n except:\n self.logger.info('FsF-F2-01M : Typed links identification failed -:')\n else:\n self.logger.info('FsF-F2-01M : Expected HTML to check for typed links but received empty string ')\n\n return datalinks\n\n def get_signposting_links(self, rel=\"item\"):\n signlinks =[]\n for signposting_links in self.signposting_header_links:\n if signposting_links.get('rel') == rel:\n signlinks.append(signposting_links)\n return signlinks\n\n def get_guessed_xml_link(self):\n # in case object landing page URL ends with '.html' or '/html'\n # try to find out if there is some xml content if suffix is replaced by 'xml\n datalink = None\n if self.landing_url is not None:\n suff_res = re.search(r\".*[\\.\\/](html?)?$\", self.landing_url)\n if suff_res is not None:\n if suff_res[1] is not None:\n guessed_link = self.landing_url.replace(suff_res[1],'xml')\n try:\n response=urllib.urlopen(guessed_link)\n if response.getheader('Content-Type') in ['text/xml','application/rdf+xml']:\n datalink={'source':'guessed','url': guessed_link, 'type': response.getheader('Content-Type'), 'rel': 'alternate'}\n self.logger.log(self.LOG_SUCCESS, 'FsF-F2-01M : Found XML content at -: '+guessed_link)\n\n except:\n self.logger.info('FsF-F2-01M : Guessed XML retrieval failed for -: '+guessed_link)\n return datalink\n\n def retrieve_metadata_external(self):\n test_content_negotiation = False\n test_typed_links = False\n test_signposting = False\n test_embedded = False\n self.logger.info('FsF-F2-01M : Starting to identify EXTERNAL metadata through content negotiation or typed links')\n\n # ========= retrieve xml metadata namespaces by content negotiation ========\n if self.landing_url:\n if self.use_datacite is True:\n target_url = self.pid_url\n else:\n target_url = self.landing_url\n if target_url is None:\n target_url = self.origin_url\n\n self.logger.info('FsF-F2-01M : Trying to retrieve XML metadata through content negotiation')\n negotiated_xml_collector = MetaDataCollectorXML(loggerinst=self.logger,target_url=self.landing_url, link_type='negotiated')\n source_neg_xml, metadata_neg_dict = negotiated_xml_collector.parse_metadata()\n #print('### ',metadata_neg_dict)\n metadata_neg_dict = self.exclude_null(metadata_neg_dict)\n\n if metadata_neg_dict:\n self.metadata_sources.append((source_neg_xml, 'negotiated'))\n if metadata_neg_dict.get('related_resources'):\n self.related_resources.extend(metadata_neg_dict.get('related_resources'))\n if metadata_neg_dict.get('object_content_identifier'):\n 
self.logger.info('FsF-F3-01M : Found data links in XML metadata -: ' + str(\n metadata_neg_dict.get('object_content_identifier')))\n # add object type for future reference\n for i in metadata_neg_dict.keys():\n if i in self.reference_elements:\n self.metadata_merged[i] = metadata_neg_dict[i]\n self.reference_elements.remove(i)\n self.logger.log(self.LOG_SUCCESS,\n 'FsF-F2-01M : Found XML metadata through content negotiation-: ' + str(metadata_neg_dict.keys()))\n self.namespace_uri.extend(negotiated_xml_collector.getNamespaces())\n #TODO: Finish this ...\n\n # ========= retrieve json-ld/schema.org metadata namespaces by content negotiation ========\n self.logger.info('FsF-F2-01M : Trying to retrieve schema.org JSON-LD metadata through content negotiation')\n schemaorg_collector = MetaDataCollectorSchemaOrg(loggerinst=self.logger, sourcemetadata=None,\n mapping=Mapper.SCHEMAORG_MAPPING, pidurl=target_url)\n source_schemaorg, schemaorg_dict = schemaorg_collector.parse_metadata()\n schemaorg_dict = self.exclude_null(schemaorg_dict)\n if schemaorg_dict:\n self.namespace_uri.extend(schemaorg_collector.namespaces)\n self.metadata_sources.append((source_schemaorg, 'negotiated'))\n if schemaorg_dict.get('related_resources'):\n self.related_resources.extend(schemaorg_dict.get('related_resources'))\n if schemaorg_dict.get('object_content_identifier'):\n self.logger.info('FsF-F3-01M : Found data links in Schema.org metadata -: ' + str(\n schemaorg_dict.get('object_content_identifier')))\n # add object type for future reference\n for i in schemaorg_dict.keys():\n if i in self.reference_elements:\n self.metadata_merged[i] = schemaorg_dict[i]\n self.reference_elements.remove(i)\n self.logger.log(self.LOG_SUCCESS,\n 'FsF-F2-01M : Found Schema.org metadata through content negotiation-: ' + str(schemaorg_dict.keys()))\n else:\n self.logger.info('FsF-F2-01M : Schema.org metadata through content negotiation UNAVAILABLE')\n\n # ========= retrieve rdf metadata namespaces by content negotiation ========\n self.logger.info('FsF-F2-01M : Trying to retrieve RDF metadata through content negotiation')\n source = MetaDataCollector.Sources.LINKED_DATA.value\n #TODO: handle this the same way as with datacite based content negotiation->use the use_datacite switch\n if self.pid_scheme == 'purl':\n targeturl = self.pid_url\n else:\n targeturl = self.landing_url\n\n neg_rdf_collector = MetaDataCollectorRdf(loggerinst=self.logger, target_url=targeturl,\n source=source)\n if neg_rdf_collector is not None:\n source_rdf, rdf_dict = neg_rdf_collector.parse_metadata()\n # in case F-UJi was redirected and the landing page content negotiation doesnt return anything try the origin URL\n if not rdf_dict:\n if self.origin_url is not None and self.origin_url != targeturl:\n neg_rdf_collector.target_url = self.origin_url\n source_rdf, rdf_dict = neg_rdf_collector.parse_metadata()\n self.namespace_uri.extend(neg_rdf_collector.getNamespaces())\n rdf_dict = self.exclude_null(rdf_dict)\n if rdf_dict:\n if rdf_dict.get('object_content_identifier'):\n self.logger.info('FsF-F3-01M : Found data links in RDF metadata -: ' + str(\n len(rdf_dict.get('object_content_identifier'))))\n\n test_content_negotiation = True\n self.logger.log(self.LOG_SUCCESS,\n 'FsF-F2-01M : Found Linked Data metadata -: {}'.format(str(rdf_dict.keys())))\n self.metadata_sources.append((source_rdf,'negotiated'))\n\n for r in rdf_dict.keys():\n if r in self.reference_elements:\n self.metadata_merged[r] = rdf_dict[r]\n self.reference_elements.remove(r)\n if 
rdf_dict.get('related_resources'):\n self.related_resources.extend(rdf_dict.get('related_resources'))\n else:\n self.logger.info('FsF-F2-01M : Linked Data metadata UNAVAILABLE')\n\n\n\n # ========= retrieve datacite json metadata based on pid =========\n if self.pid_scheme:\n # ================= datacite by content negotiation ===========\n # in case use_datacite id false use the landing page URL for content negotiation, otherwise the pid url\n if self.use_datacite is True:\n datacite_target_url = self.pid_url\n else:\n datacite_target_url = self.landing_url\n dcite_collector = MetaDataCollectorDatacite(mapping=Mapper.DATACITE_JSON_MAPPING, loggerinst=self.logger,\n pid_url=datacite_target_url)\n source_dcitejsn, dcitejsn_dict = dcite_collector.parse_metadata()\n dcitejsn_dict = self.exclude_null(dcitejsn_dict)\n if dcitejsn_dict:\n test_content_negotiation = True\n # not_null_dcite = [k for k, v in dcitejsn_dict.items() if v is not None]\n self.metadata_sources.append((source_dcitejsn,'negotiated'))\n self.logger.log(self.LOG_SUCCESS,'FsF-F2-01M : Found Datacite metadata -: {}'.format(str(dcitejsn_dict.keys())))\n if dcitejsn_dict.get('object_content_identifier'):\n self.logger.info('FsF-F3-01M : Found data links in Datacite metadata -: ' + str(\n dcitejsn_dict.get('object_content_identifier')))\n if dcitejsn_dict.get('related_resources'):\n self.related_resources.extend(dcitejsn_dict.get('related_resources'))\n self.namespace_uri.extend(dcite_collector.getNamespaces())\n for r in dcitejsn_dict.keys():\n # only merge when the value cannot be retrived from embedded metadata\n if r in self.reference_elements:\n self.metadata_merged[r] = dcitejsn_dict[r]\n self.reference_elements.remove(r)\n else:\n self.logger.info('FsF-F2-01M : Datacite metadata UNAVAILABLE')\n else:\n self.logger.info('FsF-F2-01M : Not a PID, therefore Datacite metadata (json) not requested.')\n sign_header_links = []\n rel_meta_links = []\n sign_meta_links = []\n #signposting header links\n if self.get_signposting_links('describedby'):\n sign_header_links = self.get_signposting_links('describedby')\n self.metadata_sources.append((MetaDataCollector.Sources.SIGN_POSTING.value, 'signposting'))\n typed_metadata_links=[]\n if self.landing_html:\n #dcat style meta links\n typed_metadata_links = self.get_html_typed_links(rel='alternate')\n #ddi style meta links\n rel_meta_links = self.get_html_typed_links(rel='meta')\n #signposting style meta links\n sign_meta_links = self.get_html_typed_links(rel='describedby')\n else:\n self.logger.info('FsF-F2-01M : Expected HTML to check for typed links but received empty string ')\n\n typed_metadata_links.extend(sign_meta_links)\n typed_metadata_links.extend(rel_meta_links)\n typed_metadata_links.extend(sign_header_links)\n guessed_metadata_link = self.get_guessed_xml_link()\n\n if guessed_metadata_link is not None:\n typed_metadata_links.append(guessed_metadata_link)\n\n if typed_metadata_links is not None:\n typed_rdf_collector = None\n #unique entries for typed links\n typed_metadata_links = [dict(t) for t in {tuple(d.items()) for d in typed_metadata_links}]\n for metadata_link in typed_metadata_links:\n if metadata_link['type'] in ['application/rdf+xml','text/n3','text/ttl','application/ld+json']:\n self.logger.info('FsF-F2-01M : Found e.g. 
Typed Links in HTML Header linking to RDF Metadata -: ('+str(metadata_link['type'])+' '+str(metadata_link['url'])+')')\n found_metadata_link=True\n source = MetaDataCollector.Sources.RDF_TYPED_LINKS.value\n typed_rdf_collector = MetaDataCollectorRdf(loggerinst=self.logger, target_url=metadata_link['url'], source=source )\n elif metadata_link['type'] in ['application/atom+xml'] and metadata_link['rel'] == 'resourcemap':\n self.logger.info('FsF-F2-01M : Found e.g. Typed Links in HTML Header linking to OAI ORE (atom) Metadata -: (' + str(\n metadata_link['type'] + ')'))\n ore_atom_collector = MetaDataCollectorOreAtom(loggerinst=self.logger, target_url=metadata_link['url'] )\n source_ore, ore_dict = ore_atom_collector.parse_metadata()\n ore_dict = self.exclude_null(ore_dict)\n if ore_dict:\n self.logger.log(self.LOG_SUCCESS,\n 'FsF-F2-01M : Found OAI ORE metadata -: {}'.format(str(ore_dict.keys())))\n self.metadata_sources.append((source_ore, 'linked'))\n for r in ore_dict.keys():\n if r in self.reference_elements:\n self.metadata_merged[r] = ore_dict[r]\n self.reference_elements.remove(r)\n elif re.search(r'[+\\/]xml$', str(metadata_link['type'])):\n #elif metadata_link['type'] in ['text/xml', 'application/xml', 'application/x-ddi-l+xml',\n # 'application/x-ddametadata+xml']:\n self.logger.info(\n 'FsF-F2-01M : Found e.g. Typed Links in HTML Header linking to XML Metadata -: (' + str(\n metadata_link['type'] + ')'))\n linked_xml_collector = MetaDataCollectorXML(loggerinst=self.logger,\n target_url=metadata_link['url'],\n link_type=metadata_link.get('source'))\n if linked_xml_collector is not None:\n source_linked_xml, linked_xml_dict = linked_xml_collector.parse_metadata()\n if linked_xml_dict:\n self.metadata_sources.append((source_linked_xml, 'linked'))\n if linked_xml_dict.get('related_resources'):\n self.related_resources.extend(linked_xml_dict.get('related_resources'))\n if linked_xml_dict.get('object_content_identifier'):\n self.logger.info('FsF-F3-01M : Found data links in XML metadata -: ' + str(\n linked_xml_dict.get('object_content_identifier')))\n # add object type for future reference\n for i in linked_xml_dict.keys():\n if i in self.reference_elements:\n self.metadata_merged[i] = linked_xml_dict[i]\n self.reference_elements.remove(i)\n self.logger.log(self.LOG_SUCCESS,\n 'FsF-F2-01M : Found XML metadata through typed links-: ' + str(\n linked_xml_dict.keys()))\n self.namespace_uri.extend(linked_xml_collector.getNamespaces())\n\n if typed_rdf_collector is not None:\n source_rdf, rdf_dict = typed_rdf_collector.parse_metadata()\n self.namespace_uri.extend(typed_rdf_collector.getNamespaces())\n rdf_dict = self.exclude_null(rdf_dict)\n if rdf_dict:\n test_typed_links = True\n self.logger.log(self.LOG_SUCCESS,'FsF-F2-01M : Found Linked Data metadata -: {}'.format(str(rdf_dict.keys())))\n self.metadata_sources.append((source_rdf,'linked'))\n for r in rdf_dict.keys():\n if r in self.reference_elements:\n self.metadata_merged[r] = rdf_dict[r]\n self.reference_elements.remove(r)\n if rdf_dict.get('related_resources'):\n self.related_resources.extend(rdf_dict.get('related_resources'))\n else:\n self.logger.info('FsF-F2-01M : Linked Data metadata UNAVAILABLE')\n\n if self.reference_elements:\n self.logger.debug('FsF-F2-01M : Reference metadata elements NOT FOUND -: {}'.format(self.reference_elements))\n else:\n self.logger.debug('FsF-F2-01M : ALL reference metadata elements available')\n # Now if an identifier has been detected in the metadata, potentially check for persistent identifier has 
to be repeated..\n self.check_pidtest_repeat()\n\n def exclude_null(self, dt):\n if type(dt) is dict:\n return dict((k, self.exclude_null(v)) for k, v in dt.items() if v and self.exclude_null(v))\n elif type(dt) is list:\n return [self.exclude_null(v) for v in dt if v and self.exclude_null(v)]\n else:\n return dt\n\n def lookup_metadatastandard_by_name(self, value):\n found = None\n # get standard name with the highest matching percentage using fuzzywuzzy\n highest = process.extractOne(value, FAIRCheck.COMMUNITY_STANDARDS_NAMES, scorer=fuzz.token_sort_ratio)\n if highest[1] > 80:\n found = highest[0]\n return found\n\n def lookup_metadatastandard_by_uri(self, value):\n found = None\n # get standard uri with the highest matching percentage using fuzzywuzzy\n highest = process.extractOne(value, FAIRCheck.COMMUNITY_METADATA_STANDARDS_URIS_LIST,\n scorer=fuzz.token_sort_ratio)\n if highest[1] > 90:\n found = highest[0]\n return found\n\n\n def check_unique_identifier(self):\n unique_identifier_check = FAIREvaluatorUniqueIdentifier(self)\n unique_identifier_check.set_metric('FsF-F1-01D', metrics=FAIRCheck.METRICS)\n return unique_identifier_check.getResult()\n\n def check_persistent_identifier(self):\n persistent_identifier_check = FAIREvaluatorPersistentIdentifier(self)\n persistent_identifier_check.set_metric('FsF-F1-02D', metrics=FAIRCheck.METRICS)\n return persistent_identifier_check.getResult()\n\n def check_unique_persistent(self):\n return self.check_unique_identifier(), self.check_persistent_identifier()\n\n def check_minimal_metatadata(self,include_embedded = True):\n core_metadata_check = FAIREvaluatorCoreMetadata(self)\n core_metadata_check.set_metric('FsF-F2-01M', metrics=FAIRCheck.METRICS)\n return core_metadata_check.getResult()\n\n def check_content_identifier_included(self):\n content_included_check = FAIREvaluatorContentIncluded(self)\n content_included_check.set_metric('FsF-F3-01M', metrics=FAIRCheck.METRICS)\n return content_included_check.getResult()\n\n def check_data_access_level(self):\n data_access_level_check = FAIREvaluatorDataAccessLevel(self)\n data_access_level_check.set_metric('FsF-A1-01M', metrics=FAIRCheck.METRICS)\n return data_access_level_check.getResult()\n\n def check_license(self):\n license_check = FAIREvaluatorLicense(self)\n license_check.set_metric('FsF-R1.1-01M', metrics=FAIRCheck.METRICS)\n return license_check.getResult()\n\n def check_relatedresources(self):\n related_check = FAIREvaluatorRelatedResources(self)\n related_check.set_metric('FsF-I3-01M', metrics=FAIRCheck.METRICS)\n return related_check.getResult()\n\n def check_searchable(self):\n searchable_check = FAIREvaluatorSearchable(self)\n searchable_check.set_metric('FsF-F4-01M', metrics=FAIRCheck.METRICS)\n return searchable_check.getResult()\n\n def check_data_file_format(self):\n data_file_check = FAIREvaluatorFileFormat(self)\n data_file_check.set_metric('FsF-R1.3-02D', metrics=FAIRCheck.METRICS)\n return data_file_check.getResult()\n\n def check_community_metadatastandards(self):\n community_metadata_check = FAIREvaluatorCommunityMetadata(self)\n community_metadata_check.set_metric('FsF-R1.3-01M', metrics=FAIRCheck.METRICS)\n return community_metadata_check.getResult()\n\n def check_data_provenance(self):\n data_prov_check = FAIREvaluatorDataProvenance(self)\n data_prov_check.set_metric('FsF-R1.2-01M', metrics=FAIRCheck.METRICS)\n return data_prov_check.getResult()\n\n def check_data_content_metadata(self):\n data_content_metadata_check = FAIREvaluatorDataContentMetadata(self)\n 
data_content_metadata_check.set_metric('FsF-R1-01MD', metrics=FAIRCheck.METRICS)\n return data_content_metadata_check.getResult()\n\n def check_formal_metadata(self):\n formal_metadata_check = FAIREvaluatorFormalMetadata(self)\n formal_metadata_check.set_metric('FsF-I1-01M', metrics=FAIRCheck.METRICS)\n return formal_metadata_check.getResult()\n\n def check_semantic_vocabulary(self):\n semantic_vocabulary_check = FAIREvaluatorSemanticVocabulary(self)\n semantic_vocabulary_check.set_metric('FsF-I1-02M', metrics=FAIRCheck.METRICS)\n return semantic_vocabulary_check.getResult()\n\n def check_metadata_preservation(self):\n metadata_preserved_check = FAIREvaluatorMetadataPreserved(self)\n metadata_preserved_check.set_metric('FsF-A2-01M', metrics=FAIRCheck.METRICS)\n return metadata_preserved_check.getResult()\n\n def check_standardised_protocol_data(self):\n standardised_protocol_check = FAIREvaluatorStandardisedProtocolData(self)\n standardised_protocol_check.set_metric('FsF-A1-03D', metrics=FAIRCheck.METRICS)\n return standardised_protocol_check.getResult()\n\n def check_standardised_protocol_metadata(self):\n standardised_protocol_metadata_check = FAIREvaluatorStandardisedProtocolMetadata(self)\n standardised_protocol_metadata_check.set_metric('FsF-A1-02M', metrics=FAIRCheck.METRICS)\n return standardised_protocol_metadata_check.getResult()\n\n def get_log_messages_dict(self):\n logger_messages ={}\n self.logger_message_stream.seek(0)\n for log_message in self.logger_message_stream.readlines():\n if log_message.startswith('FsF-'):\n m = log_message.split(\":\", 1)\n metric = m[0].strip()\n message_n_level = m[1].strip().split(\"|\",1)\n if len(message_n_level) >1:\n level = message_n_level[1]\n else:\n level ='INFO'\n message = message_n_level[0]\n if metric not in logger_messages:\n logger_messages[metric] =[]\n if message not in logger_messages[metric]:\n logger_messages[metric].append(level.replace('\\n', '')+': '+message.strip())\n\n return logger_messages\n\n def get_assessment_summary(self, results):\n status_dict = {'pass':1, 'fail':0}\n maturity_dict = Mapper.MATURITY_LEVELS.value\n summary_dict={'fair_category':[], 'fair_principle':[],'score_earned':[],'score_total':[], 'maturity':[],'status':[]}\n for res_k, res_v in enumerate(results):\n metric_match = re.search(r'^FsF-(([FAIR])[0-9](\\.[0-9])?)-',res_v['metric_identifier'])\n if metric_match.group(2) is not None:\n fair_principle = metric_match[1]\n fair_category = metric_match[2]\n earned_maturity = res_v['maturity']\n #earned_maturity = [k for k, v in maturity_dict.items() if v == res_v['maturity']][0]\n summary_dict['fair_category'].append(fair_category)\n summary_dict['fair_principle'].append(fair_principle)\n #An easter egg for Mustapha\n if self.input_id == 'https://www.rd-alliance.org/users/mustapha-mokrane':\n summary_dict['score_earned'].append(res_v['score']['total'])\n summary_dict['maturity'].append(3)\n summary_dict['status'].append(1)\n else:\n summary_dict['score_earned'].append(res_v['score']['earned'])\n summary_dict['maturity'].append(earned_maturity)\n summary_dict['status'].append(status_dict.get(res_v['test_status']))\n summary_dict['score_total'] .append(res_v['score']['total'])\n\n sf = pd.DataFrame(summary_dict)\n summary = {'score_earned':{},'score_total':{},'score_percent':{}, 'status_total':{},'status_passed':{}}\n\n summary['score_earned'] = sf.groupby(by='fair_category')['score_earned'].sum().to_dict()\n summary['score_earned'].update(sf.groupby(by='fair_principle')['score_earned'].sum().to_dict())\n 
summary['score_earned']['FAIR'] = round(float(sf['score_earned'].sum()),2)\n\n summary['score_total'] = sf.groupby(by='fair_category')['score_total'].sum().to_dict()\n summary['score_total'].update(sf.groupby(by='fair_principle')['score_total'].sum().to_dict())\n summary['score_total']['FAIR'] = round(float(sf['score_total'].sum()),2)\n\n summary['score_percent'] = (round(sf.groupby(by='fair_category')['score_earned'].sum()/sf.groupby(by='fair_category')['score_total'].sum()*100,2)).to_dict()\n summary['score_percent'].update((round(sf.groupby(by='fair_principle')['score_earned'].sum()/sf.groupby(by='fair_principle')['score_total'].sum()*100,2)).to_dict())\n summary['score_percent']['FAIR'] = round(float(sf['score_earned'].sum()/sf['score_total'].sum()*100),2)\n\n summary['maturity'] = sf.groupby(by='fair_category')['maturity'].apply(lambda x: 1 if x.mean() < 1 and x.mean() > 0 else round(x.mean())).to_dict()\n summary['maturity'].update(sf.groupby(by='fair_principle')['maturity'].apply(lambda x: 1 if x.mean() < 1 and x.mean() > 0 else round(x.mean())).to_dict())\n total_maturity = 0\n for fair_index in ['F','A','I','R']:\n total_maturity += summary['maturity'][fair_index]\n summary['maturity']['FAIR'] = round(float(1 if total_maturity/4 < 1 and total_maturity/4 > 0 else total_maturity/4),2)\n\n summary['status_total'] = sf.groupby(by='fair_principle')['status'].count().to_dict()\n summary['status_total'].update(sf.groupby(by='fair_category')['status'].count().to_dict())\n summary['status_total']['FAIR'] = int(sf['status'].count())\n\n summary['status_passed'] = sf.groupby(by='fair_principle')['status'].sum().to_dict()\n summary['status_passed'].update(sf.groupby(by='fair_category')['status'].sum().to_dict())\n summary['status_passed']['FAIR'] = int(sf['status'].sum())\n return summary" ]
[ [ "pandas.DataFrame" ] ]
czarmanu/deeplab-lakeice-webcams
[ "fc5e4152f73755b6f2ed0fd1ef2dbd5e10186bf9" ]
[ "eval.py" ]
[ "# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# \"\"\"Evaluation script for the DeepLab model.\n\n# See model.py for more details and usage.\n# \"\"\"\n\n\n\nimport os\nimport tensorflow as tf\nimport common\nimport model\nfrom datasets import data_generator\nimport confusion_matrix\nimport my_metrics\nfrom tensorboard import summary as summary_lib\nimport numpy as np\nimport keras.backend as K\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n#import wandb\n#wandb.init(project=\"deeplab\", sync_tensorboard=True)\n\nflags.DEFINE_string('master', '', 'BNS name of the tensorflow server')\n\n# Settings for log directories.\n\nflags.DEFINE_string('eval_logdir', None, 'Where to write the event logs.')\n\nflags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.')\n\n# Settings for evaluating the model.\n\nflags.DEFINE_integer('eval_batch_size', 1,\n 'The number of images in each batch during evaluation.')\n\nflags.DEFINE_list('eval_crop_size', '513,513',\n 'Image crop size [height, width] for evaluation.')\n\nflags.DEFINE_integer('eval_interval_secs', 600 * 5,\n 'How often (in seconds) to run evaluation.')\n\n# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or\n# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note\n# one could use different atrous_rates/output_stride during training/evaluation.\nflags.DEFINE_multi_integer('atrous_rates', None,\n 'Atrous rates for atrous spatial pyramid pooling.')\n\nflags.DEFINE_integer('output_stride', 16,\n 'The ratio of input to output spatial resolution.')\n\n# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test.\nflags.DEFINE_multi_float('eval_scales', [1.0],\n 'The scales to resize images for evaluation.')\n\n# Change to True for adding flipped images during test.\nflags.DEFINE_bool('add_flipped_images', False,\n 'Add flipped images for evaluation or not.')\n\nflags.DEFINE_integer(\n 'quantize_delay_step', -1,\n 'Steps to start quantized training. If < 0, will not quantize model.')\n\n# Dataset settings.\n\nflags.DEFINE_string('dataset', 'lake',\n 'Name of the segmentation dataset.')\n\nflags.DEFINE_string('eval_split', 'val',\n 'Which split of the dataset used for evaluation')\n\nflags.DEFINE_string('dataset_dir', None, 'Where the dataset reside.')\n\nflags.DEFINE_integer('max_number_of_evaluations', 0,\n 'Maximum number of eval iterations. Will loop '\n 'indefinitely upon nonpositive values.')\n\nflags.DEFINE_integer('skips', 0,\n 'Do you want extra skips layers from encoder to decoder? 
0 for no and 1 for yes')\n\n\ndef main(unused_argv):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n dataset = data_generator.Dataset(\n dataset_name=FLAGS.dataset,\n split_name=FLAGS.eval_split,\n dataset_dir=FLAGS.dataset_dir,\n batch_size=FLAGS.eval_batch_size,\n crop_size=[int(sz) for sz in FLAGS.eval_crop_size],\n min_resize_value=FLAGS.min_resize_value,\n max_resize_value=FLAGS.max_resize_value,\n resize_factor=FLAGS.resize_factor,\n model_variant=FLAGS.model_variant,\n num_readers=2,\n is_training=False,\n should_shuffle=False,\n should_repeat=False)\n\n tf.gfile.MakeDirs(FLAGS.eval_logdir)\n tf.logging.info('Evaluating on %s set', FLAGS.eval_split)\n\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)\n\n session_config = tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)\n\n #session_config.gpu_options.allow_growth = True\n\n \n\n with tf.Graph().as_default():\n samples = dataset.get_one_shot_iterator().get_next()\n #print(samples[common.IMAGE_NAME])\n\n model_options = common.ModelOptions(\n outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_of_classes},\n crop_size=[int(sz) for sz in FLAGS.eval_crop_size],\n atrous_rates=FLAGS.atrous_rates,\n output_stride=FLAGS.output_stride)\n\n # Set shape in order for tf.contrib.tfprof.model_analyzer to work properly.\n\n \n samples[common.IMAGE].set_shape(\n [FLAGS.eval_batch_size,\n int(FLAGS.eval_crop_size[0]),\n int(FLAGS.eval_crop_size[1]),\n 3])\n if tuple(FLAGS.eval_scales) == (1.0,):\n \n tf.logging.info('Performing single-scale test.')\n predictions, logits = model.predict_labels(samples[common.IMAGE], model_options,\n image_pyramid=FLAGS.image_pyramid, skips=FLAGS.skips)\n \n else:\n tf.logging.info('Performing multi-scale test.')\n if FLAGS.quantize_delay_step >= 0:\n raise ValueError(\n 'Quantize mode is not supported with multi-scale test.')\n\n predictions = model.predict_labels_multi_scale(\n samples[common.IMAGE],\n model_options=model_options,\n skips=FLAGS.skips,\n eval_scales=FLAGS.eval_scales,\n add_flipped_images=FLAGS.add_flipped_images)\n predictions = predictions[common.OUTPUT_TYPE]\n predictions = tf.reshape(predictions, shape=[-1])\n labels = tf.reshape(samples[common.LABEL], shape=[-1])\n weights = tf.to_float(tf.not_equal(labels, dataset.ignore_label))\n \n # Set ignore_label regions to label 0, because metrics.mean_iou requires\n # range of labels = [0, dataset.num_classes). 
Note the ignore_label regions\n # are not evaluated since the corresponding regions contain weights = 0.\n labels = tf.where(\n tf.equal(labels, dataset.ignore_label), tf.zeros_like(labels), labels)\n\n predictions_tag = 'miou'\n for eval_scale in FLAGS.eval_scales:\n predictions_tag += '_' + str(eval_scale)\n if FLAGS.add_flipped_images:\n predictions_tag += '_flipped'\n\n # Define the evaluation metric.\n \n\n metric_map = {}\n\n # to remove \"predictions out of bound error\"\n indices = tf.squeeze(tf.where(tf.less_equal(\n labels, dataset.num_of_classes - 1)), 1)\n labels_ind = tf.cast(tf.gather(labels, indices), tf.int32)\n predictions_ind = tf.gather(predictions, indices)\n # end of insert\n\n miou, update_miou = tf.metrics.mean_iou(\n labels_ind, predictions_ind, dataset.num_of_classes, weights=weights, name=\"mean_iou\")\n tf.summary.scalar(predictions_tag, miou)\n\n # Define the evaluation metric IOU for individual classes\n iou_v, update_op = my_metrics.iou(\n labels_ind, predictions_ind, dataset.num_of_classes, weights=weights)\n for index in range(0, dataset.num_of_classes):\n metric_map['class_' + str(index) + '_iou'] = (iou_v[index], update_op[index])\n tf.summary.scalar('class_' + str(index) + '_iou', iou_v[index])\n\n # Confusion matrix save hook. It updates the confusion matrix on tensorboard at the end of eval loop.\n confusionMatrixSaveHook = confusion_matrix.SaverHook(\n labels=['BG', 'water', 'ice', 'snow', 'clutter' ],\n confusion_matrix_tensor_name='mean_iou/total_confusion_matrix',\n summary_writer=tf.summary.FileWriterCache.get(str(FLAGS.eval_logdir))\n )\n\n \n summary_op = tf.summary.merge_all()\n\n summary_hook = tf.contrib.training.SummaryAtEndHook(\n log_dir=FLAGS.eval_logdir, summary_op=summary_op)\n hooks = [summary_hook, confusionMatrixSaveHook]\n\n\n num_eval_iters = None\n if FLAGS.max_number_of_evaluations > 0:\n num_eval_iters = FLAGS.max_number_of_evaluations\n\n if FLAGS.quantize_delay_step >= 0:\n tf.contrib.quantize.create_eval_graph()\n\n tf.contrib.training.evaluate_repeatedly(\n master=FLAGS.master,\n checkpoint_dir=FLAGS.checkpoint_dir,\n eval_ops=[update_miou, update_op],\n max_number_of_evaluations=num_eval_iters,\n hooks=hooks,\n eval_interval_secs=FLAGS.eval_interval_secs)\n\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.reshape", "tensorflow.zeros_like", "tensorflow.logging.info", "tensorflow.contrib.training.SummaryAtEndHook", "tensorflow.ConfigProto", "tensorflow.gfile.MakeDirs", "tensorflow.less_equal", "tensorflow.app.run", "tensorflow.metrics.mean_iou", "tensorflow.logging.set_verbosity", "tensorflow.summary.scalar", "tensorflow.summary.merge_all", "tensorflow.not_equal", "tensorflow.contrib.training.evaluate_repeatedly", "tensorflow.equal", "tensorflow.Graph", "tensorflow.gather", "tensorflow.contrib.quantize.create_eval_graph", "tensorflow.GPUOptions" ] ]
Jiggu07/Tic-tac-toe
[ "0e7ab80a59720cedc4c0c52390aab49bd4c216be" ]
[ "training_(AIvsAI)_ReinforcementLearning.py" ]
[ "import numpy as np\nfrom math import inf as infinity\nimport itertools\nimport random\nimport time\n\ngame_state = [[' ',' ',' '],\n [' ',' ',' '],\n [' ',' ',' ']]\nplayers = ['X','O']\n\ndef play_move(state, player, block_num):\n if state[int((block_num-1)/3)][(block_num-1)%3] == ' ':\n state[int((block_num-1)/3)][(block_num-1)%3] = player\n else:\n block_num = int(input(\"Block is not empty, ya blockhead! Choose again: \"))\n play_move(state, player, block_num)\n \ndef copy_game_state(state):\n new_state = [[' ',' ',' '],[' ',' ',' '],[' ',' ',' ']]\n for i in range(3):\n for j in range(3):\n new_state[i][j] = state[i][j]\n return new_state\n \ndef check_current_state(game_state): \n # Check horizontals\n if (game_state[0][0] == game_state[0][1] and game_state[0][1] == game_state[0][2] and game_state[0][0] != ' '):\n return game_state[0][0], \"Done\"\n if (game_state[1][0] == game_state[1][1] and game_state[1][1] == game_state[1][2] and game_state[1][0] != ' '):\n return game_state[1][0], \"Done\"\n if (game_state[2][0] == game_state[2][1] and game_state[2][1] == game_state[2][2] and game_state[2][0] != ' '):\n return game_state[2][0], \"Done\"\n \n # Check verticals\n if (game_state[0][0] == game_state[1][0] and game_state[1][0] == game_state[2][0] and game_state[0][0] != ' '):\n return game_state[0][0], \"Done\"\n if (game_state[0][1] == game_state[1][1] and game_state[1][1] == game_state[2][1] and game_state[0][1] != ' '):\n return game_state[0][1], \"Done\"\n if (game_state[0][2] == game_state[1][2] and game_state[1][2] == game_state[2][2] and game_state[0][2] != ' '):\n return game_state[0][2], \"Done\"\n \n # Check diagonals\n if (game_state[0][0] == game_state[1][1] and game_state[1][1] == game_state[2][2] and game_state[0][0] != ' '):\n return game_state[1][1], \"Done\"\n if (game_state[2][0] == game_state[1][1] and game_state[1][1] == game_state[0][2] and game_state[2][0] != ' '):\n return game_state[1][1], \"Done\"\n \n # Check if draw\n draw_flag = 0\n for i in range(3):\n for j in range(3):\n if game_state[i][j] == ' ':\n draw_flag = 1\n if draw_flag == 0:\n return None, \"Draw\"\n \n return None, \"Not Done\"\n\ndef print_board(game_state):\n print('----------------')\n print('| ' + str(game_state[0][0]) + ' || ' + str(game_state[0][1]) + ' || ' + str(game_state[0][2]) + ' |')\n print('----------------')\n print('| ' + str(game_state[1][0]) + ' || ' + str(game_state[1][1]) + ' || ' + str(game_state[1][2]) + ' |')\n print('----------------')\n print('| ' + str(game_state[2][0]) + ' || ' + str(game_state[2][1]) + ' || ' + str(game_state[2][2]) + ' |')\n print('----------------')\n \n \n# Initialize state values\nplayer = ['X','O',' ']\nstates_dict = {}\nall_possible_states = [[list(i[0:3]),list(i[3:6]),list(i[6:9])] for i in itertools.product(player, repeat = 9)]\nn_states = len(all_possible_states) # 2 players, 9 spaces\nn_actions = 9 # 9 spaces\nstate_values_for_AI_O = np.full((n_states),0.0)\nstate_values_for_AI_X = np.full((n_states),0.0)\nprint(\"n_states = %i \\nn_actions = %i\"%(n_states, n_actions))\n\n# State values for AI 'O'\nfor i in range(n_states):\n states_dict[i] = all_possible_states[i]\n winner, _ = check_current_state(states_dict[i])\n if winner == 'O': # AI won\n state_values_for_AI_O[i] = 1\n elif winner == 'X': # AI lost\n state_values_for_AI_O[i] = -1\n \n# State values for AI 'X' \nfor i in range(n_states):\n winner, _ = check_current_state(states_dict[i])\n if winner == 'O': # AI lost\n state_values_for_AI_X[i] = -1\n elif winner == 'X': # AI won\n state_values_for_AI_X[i] = 1\n\ndef update_state_value_O(curr_state_idx, next_state_idx, learning_rate):\n new_value = state_values_for_AI_O[curr_state_idx] + learning_rate*(state_values_for_AI_O[next_state_idx] - state_values_for_AI_O[curr_state_idx])\n state_values_for_AI_O[curr_state_idx] = new_value\n \ndef update_state_value_X(curr_state_idx, next_state_idx, learning_rate):\n new_value = state_values_for_AI_X[curr_state_idx] + learning_rate*(state_values_for_AI_X[next_state_idx] - state_values_for_AI_X[curr_state_idx])\n state_values_for_AI_X[curr_state_idx] = new_value\n\ndef getBestMove(state, player, epsilon):\n '''\n Reinforcement Learning Algorithm\n ''' \n moves = []\n curr_state_values = []\n empty_cells = []\n for i in range(3):\n for j in range(3):\n if state[i][j] == ' ':\n empty_cells.append(i*3 + (j+1))\n \n for empty_cell in empty_cells:\n moves.append(empty_cell)\n new_state = copy_game_state(state)\n play_move(new_state, player, empty_cell)\n next_state_idx = list(states_dict.keys())[list(states_dict.values()).index(new_state)]\n if player == 'X':\n curr_state_values.append(state_values_for_AI_X[next_state_idx])\n else:\n curr_state_values.append(state_values_for_AI_O[next_state_idx])\n \n print('Possible moves = ' + str(moves))\n print('Move values = ' + str(curr_state_values)) \n best_move_idx = np.argmax(curr_state_values)\n \n if np.random.uniform(0,1) <= epsilon: # Exploration\n best_move = random.choice(empty_cells)\n print('Agent decides to explore! Takes action = ' + str(best_move))\n epsilon *= 0.99\n else: # Exploitation\n best_move = moves[best_move_idx]\n print('Agent decides to exploit! Takes action = ' + str(best_move))\n return best_move\n\n# Playing\n\n#LOAD TRAINED STATE VALUES\nstate_values_for_AI_X = np.loadtxt('trained_state_values_X.txt', dtype=np.float64)\nstate_values_for_AI_O = np.loadtxt('trained_state_values_O.txt', dtype=np.float64)\n\nlearning_rate = 0.2\nepsilon = 0.2\nnum_iterations = 10000\nfor iteration in range(num_iterations):\n game_state = [[' ',' ',' '],\n [' ',' ',' '],\n [' ',' ',' ']]\n current_state = \"Not Done\"\n print(\"\\nIteration \" + str(iteration) + \"!\")\n print_board(game_state)\n winner = None\n current_player_idx = random.choice([0,1])\n \n while current_state == \"Not Done\":\n curr_state_idx = list(states_dict.keys())[list(states_dict.values()).index(game_state)]\n if current_player_idx == 0: # AI_X's turn\n print(\"\\nAI X's turn!\") \n block_choice = getBestMove(game_state, players[current_player_idx], epsilon)\n play_move(game_state ,players[current_player_idx], block_choice)\n new_state_idx = list(states_dict.keys())[list(states_dict.values()).index(game_state)]\n \n else: # AI_O's turn\n print(\"\\nAI O's turn!\") \n block_choice = getBestMove(game_state, players[current_player_idx], epsilon)\n play_move(game_state ,players[current_player_idx], block_choice)\n new_state_idx = list(states_dict.keys())[list(states_dict.values()).index(game_state)]\n \n print_board(game_state)\n #print('State value = ' + str(state_values_for_AI[new_state_idx]))\n update_state_value_O(curr_state_idx, new_state_idx, learning_rate)\n update_state_value_X(curr_state_idx, new_state_idx, learning_rate)\n winner, current_state = check_current_state(game_state)\n if winner is not None:\n print(str(winner) + \" won!\")\n else:\n current_player_idx = (current_player_idx + 1)%2\n \n if current_state == \"Draw\":\n print(\"Draw!\")\n \n #time.sleep(1)\nprint('Training Complete!') \n\n# Save state values 
for future use\nnp.savetxt('trained_state_values_X.txt', state_values_for_AI_X, fmt = '%.6f')\nnp.savetxt('trained_state_values_O.txt', state_values_for_AI_O, fmt = '%.6f')\n" ]
[ [ "numpy.full", "numpy.savetxt", "numpy.loadtxt", "numpy.argmax", "numpy.random.uniform" ] ]
hconanb/hep_ml
[ "5f13d27a737b4d83f4346c80d3623c93cc91e716" ]
[ "tests/test_reweight.py" ]
[ "from __future__ import division, print_function, absolute_import\n\nimport numpy\n\nfrom hep_ml.reweight import BinsReweighter, GBReweighter, FoldingReweighter\nfrom hep_ml.metrics_utils import ks_2samp_weighted\n\n__author__ = 'Alex Rogozhnikov'\n\n\ndef weighted_covariance(data, weights):\n if len(data.shape) == 1:\n data = data[:, numpy.newaxis]\n data = data - numpy.mean(data, axis=0, keepdims=True)\n weights = weights * 1. / weights.sum()\n return numpy.einsum('ij, ik, i -> jk', data, data, weights)\n\n\ndef check_reweighter(n_dimensions, n_samples, reweighter, folding=False):\n mean_original = numpy.random.normal(size=n_dimensions)\n cov_original = numpy.diag([1.] * n_dimensions)\n\n mean_target = numpy.random.mtrand.multivariate_normal(mean=mean_original, cov=cov_original)\n cov_target = cov_original * 0.4 + numpy.ones([n_dimensions, n_dimensions]) * 0.2\n\n original = numpy.random.mtrand.multivariate_normal(mean=mean_original, cov=cov_original, size=n_samples + 1)\n original_weight = numpy.ones(n_samples + 1)\n\n target = numpy.random.mtrand.multivariate_normal(mean=mean_target, cov=cov_target, size=n_samples)\n target_weight = numpy.ones(n_samples)\n\n reweighter.fit(original, target, original_weight=original_weight, target_weight=target_weight)\n new_weights_array = []\n new_weights_array.append(reweighter.predict_weights(original, original_weight=original_weight))\n if folding:\n def mean_vote(x):\n return numpy.mean(x, axis=0)\n\n new_weights_array.append(reweighter.predict_weights(original, original_weight=original_weight,\n vote_function=mean_vote))\n\n for new_weights in new_weights_array:\n av_orig = numpy.average(original, weights=original_weight, axis=0)\n print('WAS', av_orig)\n av_now = numpy.average(original, weights=new_weights, axis=0)\n print('NOW:', av_now)\n av_ideal = numpy.average(target, weights=target_weight, axis=0)\n print('IDEAL:', av_ideal)\n\n print('COVARIANCE')\n print('WAS', weighted_covariance(original, original_weight))\n print('NOW', weighted_covariance(original, new_weights))\n print('IDEAL', weighted_covariance(target, target_weight))\n\n assert numpy.all(abs(av_now - av_ideal) < abs(av_orig - av_ideal)), 'averages are wrong'\n for dim in range(n_dimensions):\n diff1 = ks_2samp_weighted(original[:, dim], target[:, dim], original_weight, target_weight)\n diff2 = ks_2samp_weighted(original[:, dim], target[:, dim], new_weights, target_weight)\n print('KS', diff1, diff2)\n assert diff2 < diff1, 'Differences {} {}'.format(diff1, diff2)\n\n\ndef test_reweighter_1d():\n reweighter = BinsReweighter(n_bins=200, n_neighs=2)\n check_reweighter(n_dimensions=1, n_samples=100000, reweighter=reweighter)\n\n\ndef test_gb_reweighter_1d():\n reweighter = GBReweighter(n_estimators=100, max_depth=2)\n check_reweighter(n_dimensions=1, n_samples=100000, reweighter=reweighter)\n\n\ndef test_reweighter_2d():\n reweighter = BinsReweighter(n_bins=20, n_neighs=2)\n check_reweighter(n_dimensions=2, n_samples=1000000, reweighter=reweighter)\n\n\ndef test_gb_reweighter_2d():\n reweighter = GBReweighter(max_depth=3, n_estimators=30, learning_rate=0.3, gb_args=dict(subsample=0.3))\n check_reweighter(n_dimensions=2, n_samples=200000, reweighter=reweighter)\n\n\ndef test_folding_gb_reweighter():\n reweighter = FoldingReweighter(GBReweighter(n_estimators=20, max_depth=2, learning_rate=0.1), n_folds=3)\n check_reweighter(n_dimensions=2, n_samples=200000, reweighter=reweighter, folding=True)\n\n\ndef test_folding_bins_reweighter():\n reweighter = 
FoldingReweighter(BinsReweighter(n_bins=20, n_neighs=2), n_folds=3)\n check_reweighter(n_dimensions=2, n_samples=1000000, reweighter=reweighter, folding=True)\n" ]
[ [ "numpy.random.normal", "numpy.random.mtrand.multivariate_normal", "numpy.ones", "numpy.mean", "numpy.einsum", "numpy.average", "numpy.diag" ] ]
agganu/motion_illusions
[ "a5343bada7678827a53551e637e21fcd1a189a0d" ]
[ "motion_illusions/utils/rate_limit.py" ]
[ "###############################################################################\n#\n# File: rate_limit.py\n#\n# Accurate rate limiter\n#\n# History:\n# 02-13-20 - Levi Burner - Created file\n#\n###############################################################################\n\nimport time\nimport numpy as np\n\nclass RateLimit(object):\n def __init__(self, limit_hz=None, limit_period_s=None, collect_stats=False, num_stat_elements=100):\n self._t_last = time.perf_counter()\n\n if limit_hz is not None:\n self._min_period_s = (1.0 / limit_hz)\n\n if limit_period_s is not None:\n self._min_period_s = limit_period_s\n\n self._collect_stats = collect_stats\n self._num_stat_elements = num_stat_elements\n\n if self._collect_stats:\n self._stat_index = 0\n self._past_timestamps = np.zeros((num_stat_elements, 3))\n\n def sleep(self):\n t = time.perf_counter()\n client_delta_s = t - self._t_last\n if client_delta_s < self._min_period_s:\n sleep_duration = self._min_period_s - client_delta_s\n time.sleep(sleep_duration)\n\n true_delta_s = time.perf_counter() - self._t_last\n self._t_last = self._t_last + self._min_period_s\n else:\n sleep_duration = 0.0\n t = time.perf_counter()\n true_delta_s = t - self._t_last\n self._t_last = t\n\n if self._collect_stats:\n self._past_timestamps[self._stat_index, :] = (client_delta_s, sleep_duration, true_delta_s)\n self._stat_index = (self._stat_index + 1) % self._num_stat_elements\n\n def print_stats(self):\n avg_ms = 1000*np.average(self._past_timestamps, axis=0)\n std_dev_ms = 1000*np.sqrt(np.var(self._past_timestamps, axis=0))\n print('Client avg: %d ms std dev: %d ms' % (avg_ms[0], std_dev_ms[0]))\n print('Sleep avg: %d ms std dev: %d ms' % (avg_ms[1], std_dev_ms[1]))\n print('True avg: %d ms std dev: %d ms' % (avg_ms[2], std_dev_ms[2]))\n" ]
[ [ "numpy.average", "numpy.var", "numpy.zeros" ] ]
chromy/PyOP2
[ "8a1955c628b795019485c9771709c338a806e661" ]
[ "test/unit/test_linalg.py" ]
[ "# This file is part of PyOP2\n#\n# PyOP2 is Copyright (c) 2012, Imperial College London and\n# others. Please see the AUTHORS file in the main source directory for\n# a full list of copyright holders. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * The name of Imperial College London or that of other\n# contributors may not be used to endorse or promote products\n# derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTERS\n# ''AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED\n# OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport pytest\nimport numpy as np\n\nfrom pyop2 import op2\n\nnelems = 8\n\n\[email protected]\ndef set():\n return op2.Set(nelems)\n\n\[email protected]\ndef dset(set):\n return op2.DataSet(set, 1)\n\n\[email protected]\ndef x(dset):\n return op2.Dat(dset, None, np.float64, \"x\")\n\n\[email protected]\ndef y(dset):\n return op2.Dat(dset, np.arange(1, nelems + 1), np.float64, \"y\")\n\n\[email protected]\ndef yi(dset):\n return op2.Dat(dset, np.arange(1, nelems + 1), np.int64, \"y\")\n\n\[email protected]\ndef x2():\n s = op2.Set(nelems, \"s1\")\n return op2.Dat(s ** (1, 2), np.zeros(2 * nelems), np.float64, \"x\")\n\n\[email protected]\ndef y2():\n s = op2.Set(nelems, \"s2\")\n return op2.Dat(s ** (2, 1), np.zeros(2 * nelems), np.float64, \"y\")\n\n\nclass TestLinAlgOp:\n\n \"\"\"\n Tests of linear algebra operators returning a new Dat.\n \"\"\"\n\n def test_add(self, backend, x, y):\n x._data = 2 * y.data\n assert all((x + y).data == 3 * y.data)\n\n def test_sub(self, backend, x, y):\n x._data = 2 * y.data\n assert all((x - y).data == y.data)\n\n def test_mul(self, backend, x, y):\n x._data = 2 * y.data\n assert all((x * y).data == 2 * y.data * y.data)\n\n def test_div(self, backend, x, y):\n x._data = 2 * y.data\n assert all((x / y).data == 2.0)\n\n def test_add_shape_mismatch(self, backend, x2, y2):\n with pytest.raises(ValueError):\n x2 + y2\n\n def test_sub_shape_mismatch(self, backend, x2, y2):\n with pytest.raises(ValueError):\n x2 - y2\n\n def test_mul_shape_mismatch(self, backend, x2, y2):\n with pytest.raises(ValueError):\n x2 * y2\n\n def test_div_shape_mismatch(self, backend, x2, y2):\n with pytest.raises(ValueError):\n x2 / y2\n\n def test_add_scalar(self, backend, x, y):\n x._data = y.data + 1.0\n assert all(x.data == (y + 1.0).data)\n\n def test_radd_scalar(self, backend, x, 
y):\n x._data = y.data + 1.0\n assert all(x.data == (1.0 + y).data)\n\n def test_pos_copies(self, backend, y):\n z = +y\n assert all(z.data == y.data)\n assert z is not y\n\n def test_neg_copies(self, backend, y):\n z = -y\n assert all(z.data == -y.data)\n assert z is not y\n\n def test_sub_scalar(self, backend, x, y):\n x._data = y.data - 1.0\n assert all(x.data == (y - 1.0).data)\n\n def test_rsub_scalar(self, backend, x, y):\n x._data = 1.0 - y.data\n assert all(x.data == (1.0 - y).data)\n\n def test_mul_scalar(self, backend, x, y):\n x._data = 2 * y.data\n assert all(x.data == (y * 2.0).data)\n\n def test_rmul_scalar(self, backend, x, y):\n x._data = 2 * y.data\n assert all(x.data == (2.0 * y).data)\n\n def test_div_scalar(self, backend, x, y):\n x._data = 2 * y.data\n assert all((x / 2.0).data == y.data)\n\n def test_add_ftype(self, backend, y, yi):\n x = y + yi\n assert x.data.dtype == np.float64\n\n def test_sub_ftype(self, backend, y, yi):\n x = y - yi\n assert x.data.dtype == np.float64\n\n def test_mul_ftype(self, backend, y, yi):\n x = y * yi\n assert x.data.dtype == np.float64\n\n def test_div_ftype(self, backend, y, yi):\n x = y / yi\n assert x.data.dtype == np.float64\n\n def test_add_itype(self, backend, y, yi):\n xi = yi + y\n assert xi.data.dtype == np.int64\n\n def test_sub_itype(self, backend, y, yi):\n xi = yi - y\n assert xi.data.dtype == np.int64\n\n def test_mul_itype(self, backend, y, yi):\n xi = yi * y\n assert xi.data.dtype == np.int64\n\n def test_div_itype(self, backend, y, yi):\n xi = yi / y\n assert xi.data.dtype == np.int64\n\n def test_linalg_and_parloop(self, backend, x, y):\n \"\"\"Linear algebra operators should force computation\"\"\"\n x._data = np.zeros(x.dataset.total_size, dtype=np.float64)\n k = op2.Kernel('void k(double *x) { *x = 1.0; }', 'k')\n op2.par_loop(k, x.dataset.set, x(op2.WRITE))\n z = x + y\n assert all(z.data == y.data + 1)\n\n\nclass TestLinAlgIop:\n\n \"\"\"\n Tests of linear algebra operators modifying a Dat in place.\n \"\"\"\n\n def test_iadd(self, backend, x, y):\n x._data = 2 * y.data\n x += y\n assert all(x.data == 3 * y.data)\n\n def test_isub(self, backend, x, y):\n x._data = 2 * y.data\n x -= y\n assert all(x.data == y.data)\n\n def test_imul(self, backend, x, y):\n x._data = 2 * y.data\n x *= y\n assert all(x.data == 2 * y.data * y.data)\n\n def test_idiv(self, backend, x, y):\n x._data = 2 * y.data\n x /= y\n assert all(x.data == 2.0)\n\n def test_iadd_shape_mismatch(self, backend, x2, y2):\n with pytest.raises(ValueError):\n x2 += y2\n\n def test_isub_shape_mismatch(self, backend, x2, y2):\n with pytest.raises(ValueError):\n x2 -= y2\n\n def test_imul_shape_mismatch(self, backend, x2, y2):\n with pytest.raises(ValueError):\n x2 *= y2\n\n def test_idiv_shape_mismatch(self, backend, x2, y2):\n with pytest.raises(ValueError):\n x2 /= y2\n\n def test_iadd_scalar(self, backend, x, y):\n x._data = y.data + 1.0\n y += 1.0\n assert all(x.data == y.data)\n\n def test_isub_scalar(self, backend, x, y):\n x._data = y.data - 1.0\n y -= 1.0\n assert all(x.data == y.data)\n\n def test_imul_scalar(self, backend, x, y):\n x._data = 2 * y.data\n y *= 2.0\n assert all(x.data == y.data)\n\n def test_idiv_scalar(self, backend, x, y):\n x._data = 2 * y.data\n x /= 2.0\n assert all(x.data == y.data)\n\n def test_iadd_ftype(self, backend, y, yi):\n y += yi\n assert y.data.dtype == np.float64\n\n def test_isub_ftype(self, backend, y, yi):\n y -= yi\n assert y.data.dtype == np.float64\n\n def test_imul_ftype(self, backend, y, yi):\n y *= yi\n 
assert y.data.dtype == np.float64\n\n def test_idiv_ftype(self, backend, y, yi):\n y /= yi\n assert y.data.dtype == np.float64\n\n def test_iadd_itype(self, backend, y, yi):\n yi += y\n assert yi.data.dtype == np.int64\n\n def test_isub_itype(self, backend, y, yi):\n yi -= y\n assert yi.data.dtype == np.int64\n\n def test_imul_itype(self, backend, y, yi):\n yi *= y\n assert yi.data.dtype == np.int64\n\n def test_idiv_itype(self, backend, y, yi):\n yi /= y\n assert yi.data.dtype == np.int64\n\n\nclass TestLinAlgScalar:\n\n \"\"\"\n Tests of linear algebra operators returning a scalar.\n \"\"\"\n\n def test_norm(self, backend):\n s = op2.Set(2)\n n = op2.Dat(s, [3, 4], np.float64, \"n\")\n assert abs(n.norm - 5) < 1e-12\n\n def test_inner(self, backend):\n s = op2.Set(2)\n n = op2.Dat(s, [3, 4], np.float64)\n o = op2.Dat(s, [4, 5], np.float64)\n\n ret = n.inner(o)\n\n assert abs(ret - 32) < 1e-12\n\n ret = o.inner(n)\n\n assert abs(ret - 32) < 1e-12\n\n def test_norm_mixed(self, backend):\n s = op2.Set(1)\n\n n = op2.Dat(s, [3], np.float64)\n o = op2.Dat(s, [4], np.float64)\n\n md = op2.MixedDat([n, o])\n\n assert abs(md.norm - 5) < 1e-12\n\n def test_inner_mixed(self, backend):\n s = op2.Set(1)\n\n n = op2.Dat(s, [3], np.float64)\n o = op2.Dat(s, [4], np.float64)\n\n md = op2.MixedDat([n, o])\n\n n1 = op2.Dat(s, [4], np.float64)\n o1 = op2.Dat(s, [5], np.float64)\n\n md1 = op2.MixedDat([n1, o1])\n\n ret = md.inner(md1)\n\n assert abs(ret - 32) < 1e-12\n\n ret = md1.inner(md)\n\n assert abs(ret - 32) < 1e-12\n" ]
[ [ "numpy.arange", "numpy.zeros" ] ]
dby-tmwctw/nlp-architect
[ "6a865201fdef15042d731cf53f54dcf981c32aea" ]
[ "examples/supervised_sentiment/customized_reviews.py" ]
[ "import json\n\nimport pandas as pd\n\nfrom nlp_architect.utils.generic import normalize, balance\n\nimport csv\n\ngood_columns = [\"overall\", \"reviewText\", \"summary\"]\n\n\ndef review_to_sentiment(review):\n # Review is coming in as overall (the rating, reviewText, and summary)\n # this then cleans the summary and review and gives it a positive or negative value\n norm_text = normalize(review[1])\n review_sent = [\"neutral\", norm_text]\n score = float(review[0])\n if score > 0:\n review_sent = [\"positive\", norm_text]\n elif score < 0:\n review_sent = [\"negative\", norm_text]\n\n return review_sent\n\n\nclass Customized_Reviews(object):\n \"\"\"\n Takes CSV file from the NLP input team and process it into usable object by LTSM model\n \"\"\"\n\n def __init__(self, review_file, run_balance=True):\n self.run_balance = run_balance\n\n print(\"Parsing and processing json file\")\n data = []\n\n # with open(review_file, \"r\") as f:\n # for line in f:\n # data_line = json.loads(line)\n # selected_row = []\n # for item in good_columns:\n # selected_row.append(data_line[item])\n # # as we read in, clean\n # data.append(review_to_sentiment(selected_row))\n \n with open(review_file, newline='\\n') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\\\"')\n for row in reader:\n data.append(review_to_sentiment(row))\n\n # Not sure how to easily balance outside of pandas...but should replace eventually\n self.amazon = pd.DataFrame(data, columns=[\"Sentiment\", \"clean_text\"])\n self.all_text = self.amazon[\"clean_text\"]\n self.labels_0 = pd.get_dummies(self.amazon[\"Sentiment\"])\n self.labels = self.labels_0.values\n self.text = self.amazon[\"clean_text\"].values\n\n def process(self):\n self.amazon = self.amazon[self.amazon[\"Sentiment\"].isin([\"positive\", \"negative\"])]\n\n if self.run_balance:\n # balance it out\n self.amazon = balance(self.amazon)\n\n print(\"Sample Data\")\n print(self.amazon[[\"Sentiment\", \"clean_text\"]].head())\n\n # mapping of the labels with dummies (has headers)\n self.labels_0 = pd.get_dummies(self.amazon[\"Sentiment\"])\n self.labels = self.labels_0.values\n self.text = self.amazon[\"clean_text\"].values" ]
[ [ "pandas.DataFrame", "pandas.get_dummies" ] ]
XingzhiZhou/mmselfsup
[ "fc69e380095a7eda7632beada5611db527b57672" ]
[ "tests/test_runtime/test_extract_process.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nfrom unittest.mock import MagicMock\n\nimport pytest\nimport torch\nimport torch.nn as nn\nfrom mmcv.parallel import MMDataParallel\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom mmselfsup.models.utils import ExtractProcess\n\n\nclass ExampleDataset(Dataset):\n\n def __getitem__(self, idx):\n results = dict(img=torch.tensor([1]), img_metas=dict())\n return results\n\n def __len__(self):\n return 1\n\n\nclass ExampleModel(nn.Module):\n\n def __init__(self):\n super(ExampleModel, self).__init__()\n self.conv = nn.Conv2d(3, 3, 3)\n\n def forward(self, img, test_mode=False, **kwargs):\n return [\n torch.rand((1, 32, 112, 112)),\n torch.rand((1, 64, 56, 56)),\n torch.rand((1, 128, 28, 28)),\n ]\n\n def train_step(self, data_batch, optimizer):\n loss = self.forward(**data_batch)\n return dict(loss=loss)\n\n\ndef test_extract_process():\n with pytest.raises(AssertionError):\n process = ExtractProcess(\n pool_type='specified', backbone='resnet50', layer_indices=(-1, ))\n\n test_dataset = ExampleDataset()\n test_dataset.evaluate = MagicMock(return_value=dict(test='success'))\n data_loader = DataLoader(\n test_dataset, batch_size=1, sampler=None, num_workers=0, shuffle=False)\n model = MMDataParallel(ExampleModel())\n\n process = ExtractProcess(\n pool_type='specified', backbone='resnet50', layer_indices=(0, 1, 2))\n\n results = process.extract(model, data_loader)\n assert 'feat1' in results\n assert 'feat2' in results\n assert 'feat3' in results\n assert results['feat1'].shape == (1, 32 * 12 * 12)\n assert results['feat2'].shape == (1, 64 * 6 * 6)\n assert results['feat3'].shape == (1, 128 * 4 * 4)\n" ]
[ [ "torch.rand", "torch.nn.Conv2d", "torch.tensor", "torch.utils.data.DataLoader" ] ]
jacobson15p/CenterPoint
[ "aee99be4545641864026ebc1dd6e92684580560c" ]
[ "det3d/models/bbox_heads/center_head.py" ]
[ "# ------------------------------------------------------------------------------\n# Portions of this code are from\n# det3d (https://github.com/poodarchu/Det3D/tree/56402d4761a5b73acd23080f537599b0888cce07)\n# Copyright (c) 2019 朱本金\n# Licensed under the MIT License\n# ------------------------------------------------------------------------------\n\nimport logging\nfrom collections import defaultdict\nfrom det3d.core import box_torch_ops\nimport torch\nfrom det3d.torchie.cnn import kaiming_init\nfrom torch import double, nn\nfrom det3d.models.losses.centernet_loss import FastFocalLoss, RegLoss\nfrom det3d.models.utils import Sequential\nfrom ..registry import HEADS\nimport copy \ntry:\n from det3d.ops.dcn import DeformConv\nexcept:\n print(\"Deformable Convolution not built!\")\n\nfrom det3d.core.utils.circle_nms_jit import circle_nms\n\nimport matplotlib.pyplot as plt\n\n\nclass FeatureAdaption(nn.Module):\n \"\"\"Feature Adaption Module.\n\n Feature Adaption Module is implemented based on DCN v1.\n It uses anchor shape prediction rather than feature map to\n predict offsets of deformable conv layer.\n\n Args:\n in_channels (int): Number of channels in the input feature map.\n out_channels (int): Number of channels in the output feature map.\n kernel_size (int): Deformable conv kernel size.\n deformable_groups (int): Deformable conv group size.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size=3,\n deformable_groups=4):\n super(FeatureAdaption, self).__init__()\n offset_channels = kernel_size * kernel_size * 2\n self.conv_offset = nn.Conv2d(\n in_channels, deformable_groups * offset_channels, 1, bias=True)\n self.conv_adaption = DeformConv(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n padding=(kernel_size - 1) // 2,\n deformable_groups=deformable_groups)\n self.relu = nn.ReLU(inplace=True)\n self.init_offset()\n\n def init_offset(self):\n self.conv_offset.weight.data.zero_()\n\n def forward(self, x,):\n offset = self.conv_offset(x)\n x = self.relu(self.conv_adaption(x, offset))\n return x\n\nclass SepHead(nn.Module):\n def __init__(\n self,\n in_channels, #share_channels means the last layer before they seperate, YZ Notes \n heads,\n head_conv=64,\n final_kernel=1,\n bn=False,\n init_bias=-2.19,\n **kwargs,\n ):\n super(SepHead, self).__init__(**kwargs)\n\n self.heads = heads \n for head in self.heads:\n classes, num_conv = self.heads[head]\n\n fc = Sequential()\n for i in range(num_conv-1): #Twice, 0, 1 \n fc.add(nn.Conv2d(in_channels, head_conv,\n kernel_size=final_kernel, stride=1, \n padding=final_kernel // 2, bias=True))\n if bn:\n fc.add(nn.BatchNorm2d(head_conv))\n fc.add(nn.ReLU())\n\n fc.add(nn.Conv2d(head_conv, classes,\n kernel_size=final_kernel, stride=1, \n padding=final_kernel // 2, bias=True)) \n\n if 'hm' in head:\n fc[-1].bias.data.fill_(init_bias)\n else:\n for m in fc.modules():\n if isinstance(m, nn.Conv2d):\n kaiming_init(m)\n\n self.__setattr__(head, fc)\n \n\n def forward(self, x):\n ret_dict = dict() \n for head in self.heads:\n ret_dict[head] = self.__getattr__(head)(x)\n\n return ret_dict\n\nclass DCNSepHead(nn.Module):\n def __init__(\n self,\n in_channels,\n num_cls,\n heads,\n head_conv=64,\n final_kernel=1,\n bn=False,\n init_bias=-2.19,\n **kwargs,\n ):\n super(DCNSepHead, self).__init__(**kwargs)\n\n # feature adaptation with dcn\n # use separate features for classification / regression\n self.feature_adapt_cls = FeatureAdaption(\n in_channels,\n in_channels,\n kernel_size=3,\n deformable_groups=4) \n \n 
self.feature_adapt_reg = FeatureAdaption(\n in_channels,\n in_channels,\n kernel_size=3,\n deformable_groups=4) \n\n # heatmap prediction head \n self.cls_head = Sequential(\n nn.Conv2d(in_channels, head_conv,\n kernel_size=3, padding=1, bias=True),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(head_conv, num_cls,\n kernel_size=3, stride=1, \n padding=1, bias=True)\n )\n self.cls_head[-1].bias.data.fill_(init_bias)\n\n # other regression target \n self.task_head = SepHead(in_channels, heads, head_conv=head_conv, bn=bn, final_kernel=final_kernel)\n\n\n def forward(self, x): \n center_feat = self.feature_adapt_cls(x)\n reg_feat = self.feature_adapt_reg(x)\n\n cls_score = self.cls_head(center_feat)\n ret = self.task_head(reg_feat)\n ret['hm'] = cls_score\n\n return ret\n\n\[email protected]_module\nclass CenterHead(nn.Module):\n def __init__(\n self,\n in_channels=[128,],\n tasks=[],\n dataset='nuscenes',\n weight=0.25,\n code_weights=[],\n common_heads=dict(),\n logger=None,\n init_bias=-2.19,\n share_conv_channel=64,\n num_hm_conv=2,\n dcn_head=False,\n ):\n super(CenterHead, self).__init__()\n\n num_classes = [len(t[\"class_names\"]) for t in tasks]\n self.class_names = [t[\"class_names\"] for t in tasks]\n self.code_weights = code_weights \n self.weight = weight # weight between hm loss and loc loss\n self.dataset = dataset\n\n self.in_channels = in_channels\n self.num_classes = num_classes\n\n self.crit = FastFocalLoss()\n self.crit_reg = RegLoss()\n\n # YZ Notes \n # common_heads is the \n # common_heads={'reg': (2, 2), 'height': (1, 2), 'dim':(3, 2), 'rot':(2, 2), 'vel':(2,2)}, # (output_channel, num_conv)\n self.box_n_dim = 9 if 'vel' in common_heads else 7 \n self.use_direction_classifier = False \n\n if not logger:\n logger = logging.getLogger(\"CenterHead\")\n self.logger = logger\n\n logger.info(\n f\"num_classes: {num_classes}\"\n )\n\n # a shared convolution \n self.shared_conv = nn.Sequential(\n nn.Conv2d(in_channels, share_conv_channel,\n kernel_size=3, padding=1, bias=True),\n nn.BatchNorm2d(share_conv_channel),\n nn.ReLU(inplace=True)\n )\n\n self.tasks = nn.ModuleList() # YZ Notes, different from the task list, this is a module list for training purposes. \n print(\"Use HM Bias: \", init_bias)\n\n if dcn_head:\n print(\"Use Deformable Convolution in the CenterHead!\")\n\n for num_cls in num_classes:\n heads = copy.deepcopy(common_heads) # Creating a head for each class \n if not dcn_head:\n heads.update(dict(hm=(num_cls, num_hm_conv))) #Here, added the head map \n self.tasks.append(\n SepHead(share_conv_channel, heads, bn=True, init_bias=init_bias, final_kernel=3)\n )\n else:\n self.tasks.append(\n DCNSepHead(share_conv_channel, num_cls, heads, bn=True, init_bias=init_bias, final_kernel=3)\n )\n\n logger.info(\"Finish CenterHead Initialization\")\n\n def forward(self, x, *kwargs):\n ret_dicts = []\n\n x = self.shared_conv(x)\n\n for task in self.tasks:\n ret_dicts.append(task(x)) #ret_dict is a dictionary of each head. 
\n return ret_dicts, x\n\n def _sigmoid(self, x):\n y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)\n return y\n\n def loss(self, example, preds_dicts, test_cfg, **kwargs):\n rets = []\n for task_id, preds_dict in enumerate(preds_dicts):\n # heatmap focal loss\n preds_dict['hm'] = self._sigmoid(preds_dict['hm'])\n\n #print(preds_dict['hm'].shape,example['hm'][task_id].shape, example['ind'][task_id].shape, example['mask'][task_id].shape, example['cat'][task_id].shape)\n\n hm_loss = self.crit(preds_dict['hm'], example['hm'][task_id], example['ind'][task_id], example['mask'][task_id], example['cat'][task_id])\n #ind (batch x max_objects) get the object location, and only work with the category \n\n target_box = example['anno_box'][task_id]\n # reconstruct the anno_box from multiple reg heads\n if self.dataset in ['waymo', 'nuscenes']:\n if 'vel' in preds_dict:\n preds_dict['anno_box'] = torch.cat((preds_dict['reg'], preds_dict['height'], preds_dict['dim'],\n preds_dict['vel'], preds_dict['rot']), dim=1) \n else:\n preds_dict['anno_box'] = torch.cat((preds_dict['reg'], preds_dict['height'], preds_dict['dim'],\n preds_dict['rot']), dim=1) \n target_box = target_box[..., [0, 1, 2, 3, 4, 5, -2, -1]] # remove vel target \n else:\n raise NotImplementedError()\n\n ret = {}\n \n # Regression loss for dimension, offset, height, rotation \n box_loss = self.crit_reg(preds_dict['anno_box'], example['mask'][task_id], example['ind'][task_id], target_box)\n\n loc_loss = (box_loss*box_loss.new_tensor(self.code_weights)).sum()\n\n loss = hm_loss + self.weight*loc_loss\n\n ret.update({'loss': loss, 'hm_loss': hm_loss.detach().cpu(), 'loc_loss':loc_loss, 'loc_loss_elem': box_loss.detach().cpu(), 'num_positive': example['mask'][task_id].float().sum()})\n\n rets.append(ret)\n \n \"\"\"convert batch-key to key-batch\n \"\"\"\n rets_merged = defaultdict(list)\n for ret in rets:\n for k, v in ret.items():\n rets_merged[k].append(v)\n\n return rets_merged\n\n @torch.no_grad()\n def predict(self, example, preds_dicts, test_cfg, **kwargs):\n \"\"\"decode, nms, then return the detection result. Additionaly support double flip testing \n \"\"\"\n # get loss info\n rets = []\n metas = []\n\n double_flip = test_cfg.get('double_flip', False)\n\n post_center_range = test_cfg.post_center_limit_range\n if len(post_center_range) > 0:\n post_center_range = torch.tensor(\n post_center_range,\n dtype=preds_dicts[0]['hm'].dtype,\n device=preds_dicts[0]['hm'].device,\n )\n\n for task_id, preds_dict in enumerate(preds_dicts):\n # convert N C H W to N H W C \n for key, val in preds_dict.items():\n preds_dict[key] = val.permute(0, 2, 3, 1).contiguous()\n\n batch_size = preds_dict['hm'].shape[0]\n\n if double_flip:\n assert batch_size % 4 == 0, print(batch_size)\n batch_size = int(batch_size / 4)\n for k in preds_dict.keys():\n # transform the prediction map back to their original coordinate befor flipping\n # the flipped predictions are ordered in a group of 4. The first one is the original pointcloud\n # the second one is X flip pointcloud(y=-y), the third one is Y flip pointcloud(x=-x), and the last one is \n # X and Y flip pointcloud(x=-x, y=-y).\n # Also please note that pytorch's flip function is defined on higher dimensional space, so dims=[2] means that\n # it is flipping along the axis with H length(which is normaly the Y axis), however in our traditional word, it is flipping along\n # the X axis. 
The below flip follows pytorch's definition yflip(y=-y) xflip(x=-x)\n _, H, W, C = preds_dict[k].shape\n preds_dict[k] = preds_dict[k].reshape(int(batch_size), 4, H, W, C)\n preds_dict[k][:, 1] = torch.flip(preds_dict[k][:, 1], dims=[1]) \n preds_dict[k][:, 2] = torch.flip(preds_dict[k][:, 2], dims=[2])\n preds_dict[k][:, 3] = torch.flip(preds_dict[k][:, 3], dims=[1, 2])\n\n if \"metadata\" not in example or len(example[\"metadata\"]) == 0:\n meta_list = [None] * batch_size\n else:\n meta_list = example[\"metadata\"]\n if double_flip:\n meta_list = meta_list[:4*int(batch_size):4]\n\n batch_hm = torch.sigmoid(preds_dict['hm'])\n\n batch_dim = torch.exp(preds_dict['dim'])\n\n batch_rots = preds_dict['rot'][..., 0:1]\n batch_rotc = preds_dict['rot'][..., 1:2]\n batch_reg = preds_dict['reg']\n batch_hei = preds_dict['height']\n\n if double_flip:\n batch_hm = batch_hm.mean(dim=1)\n batch_hei = batch_hei.mean(dim=1)\n batch_dim = batch_dim.mean(dim=1)\n\n # y = -y reg_y = 1-reg_y\n batch_reg[:, 1, ..., 1] = 1 - batch_reg[:, 1, ..., 1]\n batch_reg[:, 2, ..., 0] = 1 - batch_reg[:, 2, ..., 0]\n\n batch_reg[:, 3, ..., 0] = 1 - batch_reg[:, 3, ..., 0]\n batch_reg[:, 3, ..., 1] = 1 - batch_reg[:, 3, ..., 1]\n batch_reg = batch_reg.mean(dim=1)\n\n # first yflip \n # y = -y theta = pi -theta\n # sin(pi-theta) = sin(theta) cos(pi-theta) = -cos(theta)\n # batch_rots[:, 1] the same\n batch_rotc[:, 1] *= -1\n\n # then xflip x = -x theta = 2pi - theta\n # sin(2pi - theta) = -sin(theta) cos(2pi - theta) = cos(theta)\n # batch_rots[:, 2] the same\n batch_rots[:, 2] *= -1\n\n # double flip \n batch_rots[:, 3] *= -1\n batch_rotc[:, 3] *= -1\n\n batch_rotc = batch_rotc.mean(dim=1)\n batch_rots = batch_rots.mean(dim=1)\n\n batch_rot = torch.atan2(batch_rots, batch_rotc)\n\n batch, H, W, num_cls = batch_hm.size()\n\n batch_reg = batch_reg.reshape(batch, H*W, 2)\n batch_hei = batch_hei.reshape(batch, H*W, 1)\n\n batch_rot = batch_rot.reshape(batch, H*W, 1)\n batch_dim = batch_dim.reshape(batch, H*W, 3)\n batch_hm = batch_hm.reshape(batch, H*W, num_cls)\n\n ys, xs = torch.meshgrid([torch.arange(0, H), torch.arange(0, W)])\n ys = ys.view(1, H, W).repeat(batch, 1, 1).to(batch_hm)\n xs = xs.view(1, H, W).repeat(batch, 1, 1).to(batch_hm)\n\n xs = xs.view(batch, -1, 1) + batch_reg[:, :, 0:1]\n ys = ys.view(batch, -1, 1) + batch_reg[:, :, 1:2]\n\n xs = xs * test_cfg.out_size_factor * test_cfg.voxel_size[0] + test_cfg.pc_range[0]\n ys = ys * test_cfg.out_size_factor * test_cfg.voxel_size[1] + test_cfg.pc_range[1]\n\n if 'vel' in preds_dict:\n batch_vel = preds_dict['vel']\n\n if double_flip:\n # flip vy\n batch_vel[:, 1, ..., 1] *= -1\n # flip vx\n batch_vel[:, 2, ..., 0] *= -1\n\n batch_vel[:, 3] *= -1\n \n batch_vel = batch_vel.mean(dim=1)\n\n batch_vel = batch_vel.reshape(batch, H*W, 2)\n batch_box_preds = torch.cat([xs, ys, batch_hei, batch_dim, batch_vel, batch_rot], dim=2)\n else: \n batch_box_preds = torch.cat([xs, ys, batch_hei, batch_dim, batch_rot], dim=2)\n\n metas.append(meta_list)\n\n if test_cfg.get('per_class_nms', False):\n pass \n else:\n rets.append(self.post_processing(batch_box_preds, batch_hm, test_cfg, post_center_range, task_id)) \n\n # Merge branches results\n ret_list = []\n num_samples = len(rets[0])\n\n ret_list = []\n for i in range(num_samples):\n ret = {}\n for k in rets[0][i].keys():\n if k in [\"box3d_lidar\", \"scores\"]:\n ret[k] = torch.cat([ret[i][k] for ret in rets])\n elif k in [\"label_preds\"]:\n flag = 0\n for j, num_class in enumerate(self.num_classes):\n rets[j][i][k] += flag\n 
flag += num_class\n ret[k] = torch.cat([ret[i][k] for ret in rets])\n\n ret['metadata'] = metas[0][i]\n ret['hm'] = torch.sigmoid(preds_dict['hm'])\n ret_list.append(ret)\n\n return ret_list \n\n\n def Unproject(points, Z, intrinsic, distortion):\n f_x = intrinsic[0, 0]\n f_y = intrinsic[1, 1]\n c_x = intrinsic[0, 2]\n c_y = intrinsic[1, 2]\n # This was an error before\n # c_x = intrinsic[0, 3]\n # c_y = intrinsic[1, 3]\n\n # Step 1. Undistort.\n points_undistorted = np.array([])\n if len(points) > 0:\n points_undistorted = cv2.undistortPoints(np.expand_dims(points, axis=1), intrinsic, distortion, P=intrinsic)\n points_undistorted = np.squeeze(points_undistorted, axis=1)\n\n # Step 2. Reproject.\n result = []\n for idx in range(points_undistorted.shape[0]):\n z = Z[0] if len(Z) == 1 else Z[idx]\n x = (points_undistorted[idx, 0] - c_x) / f_x * z\n y = (points_undistorted[idx, 1] - c_y) / f_y * z\n result.append([x, y, z])\n return result\n\n\n @torch.no_grad()\n def predict_with_fusion(self, example, preds_dicts, image_out, test_cfg, **kwargs):\n \"\"\"decode, nms, then return the detection result. Additionaly support double flip testing\n Added image_output as well for fusion in heat map (hm) and possibly other features \n \"\"\"\n\n\n\n \n\n front_intrinsics= example['calib']['FRONT_INTRINSIC']\n front_extrinsics= example['calib']['FRONT_EXTRINSIC']\n\n # USE THIS FOR RESULT \n batch_idx=0\n for cam_hm, cam_dep, front_intrinsic, front_extrinsic, viewrangemeters, feature_map_size, hm_pixel_size in zip(image_out['results']['hm'].cpu(), \n image_out['results']['dep'].cpu().numpy(), front_intrinsics.cpu().numpy(), front_extrinsics.cpu().numpy(),\n example['range'], example['feature_map_size'], example['hm_pixel_size']):\n\n # USE THIS FOR EXAMPLE TESTING \n # for cam_hm, cam_dep, front_intrinsic, front_extrinsic, viewrangemeters, feature_map_size, hm_pixel_size in zip(example['hm_cam'][0].cpu().numpy(), \n # example['dep_map'], front_intrinsics.cpu().numpy(), front_extrinsics.cpu().numpy(),\n # example['range'], example['feature_map_size'], example['hm_pixel_size']):\n\n # Assuming the down ratio is 4. 
\n front_intrinsic/=4\n # USE THIS FOR RESULT \n pixel_unproject= np.array(np.meshgrid(np.linspace(1,cam_dep.shape[2],cam_dep.shape[2]),\n np.linspace(1,cam_dep.shape[1],cam_dep.shape[1]))).T.reshape(-1, 2).astype(int)\n\n # USE THIS FOR EXAMPLE TESTING \n # pixel_unproject= np.array(np.meshgrid(np.linspace(1,cam_dep.shape[1],cam_dep.shape[1]),\n # np.linspace(1,cam_dep.shape[0],cam_dep.shape[0]))).T.reshape(-1, 2)\n\n\n pixel_depth= cam_dep.T.reshape(-1,1)\n pixel_unproject[:,0]= (pixel_unproject[:,0]- front_intrinsic[2]) / front_intrinsic[0]\n pixel_unproject[:,1]= (pixel_unproject[:,1]- front_intrinsic[3]) / front_intrinsic[1]\n for i in range(pixel_unproject.shape[0]):\n if pixel_depth[i] is not None:\n pixel_unproject[i] = pixel_unproject[i] * pixel_depth[i][0]\n else:\n pixel_unproject[i] = pixel_unproject[i] * 999\n pixel_unproject= np.hstack((pixel_unproject,pixel_depth))\n\n rotation_axis= np.array([[0,0,1],\n [1,0,0],\n [0,1,0]])\n pixel_unproject= (rotation_axis @ pixel_unproject.T)\n pixel_unproject= ((front_extrinsic[0:3,0:3] @ pixel_unproject) + np.expand_dims(front_extrinsic[0:3,3], axis=1)).T \n\n\n # Now x is pointing towards forward, y to left, and z up \n\n pixel_unproject/=hm_pixel_size\n hm_new= np.zeros((cam_hm.shape[0], feature_map_size[0], feature_map_size[1]))\n index_hm_new=0\n for task_hm in cam_hm:\n pixel_with_taskHM= task_hm.T.reshape(-1,1)\n pixel_unproject_withHM= np.hstack((pixel_unproject,pixel_with_taskHM))\n pixel_unproject_withHM= pixel_unproject_withHM[pixel_unproject_withHM[:,0]< feature_map_size[0]/2]\n pixel_unproject_withHM= pixel_unproject_withHM[pixel_unproject_withHM[:,0]> 0]\n pixel_unproject_withHM= pixel_unproject_withHM[pixel_unproject_withHM[:,1]< feature_map_size[1]/2]\n pixel_unproject_withHM= pixel_unproject_withHM[pixel_unproject_withHM[:,1]> -feature_map_size[1]/2]\n\n for entry in pixel_unproject_withHM:\n hm_new[index_hm_new][int(feature_map_size[0]/2-entry[0]), int(feature_map_size[1]/2 + entry[1])] = entry[3]\n index_hm_new+=1\n \n\n\n # Visualization code part\n \n imdep= cam_dep\n print(np.max(imdep))\n fig = plt.figure(figsize=(6, 3.2))\n ax = fig.add_subplot(111)\n ax.set_title('Depth')\n plt.imshow(imdep.transpose(1,2,0))\n ax.set_aspect('equal')\n cax = fig.add_axes([0.12, 0.1, 0.78, 0.8])\n cax.get_xaxis().set_visible(False)\n cax.get_yaxis().set_visible(False)\n cax.patch.set_alpha(0)\n cax.set_frame_on(False)\n plt.colorbar(orientation='vertical')\n plt.savefig('/code/CenterPoint/DepthImage.jpeg')\n\n #imimg= np.moveaxis(cam_hm, 0, -1)\n imimg = cam_hm.permute(1,2,0)\n print(\"IMAGE SHAPE HM \")\n print(imimg.shape)\n fig = plt.figure(figsize=(6, 3.2))\n ax = fig.add_subplot(111)\n ax.set_title('Image')\n plt.imshow(imimg)\n ax.set_aspect('equal')\n cax = fig.add_axes([0.12, 0.1, 0.78, 0.8])\n cax.get_xaxis().set_visible(False)\n cax.get_yaxis().set_visible(False)\n cax.patch.set_alpha(0)\n cax.set_frame_on(False)\n plt.savefig(\"/code/CenterPoint/Image.jpeg\")\n\n #imimg= np.moveaxis(hm_new,0, -1)\n imimg = hm_new.transpose(1,2,0)\n print(\"IMAGE SHAPE HM \")\n print(imimg.shape)\n fig = plt.figure(figsize=(6, 3.2))\n ax = fig.add_subplot(111)\n ax.set_title('BEV Image')\n plt.imshow(imimg)\n ax.set_aspect('equal')\n cax = fig.add_axes([0.12, 0.1, 0.78, 0.8])\n cax.get_xaxis().set_visible(False)\n cax.get_yaxis().set_visible(False)\n cax.patch.set_alpha(0)\n cax.set_frame_on(False)\n plt.savefig(\"/code/CenterPoint/Image_BEV.jpeg\")\n \n \n\n preds_dicts[0]['hm'][batch_idx]= torch.add(preds_dicts[0]['hm'][batch_idx],\n 
torch.tensor(hm_new, dtype=preds_dicts[0]['hm'].dtype, device=preds_dicts[0]['hm'].device))\n batch_idx+=1\n # get loss info\n rets = []\n metas = []\n\n double_flip = test_cfg.get('double_flip', False)\n\n post_center_range = test_cfg.post_center_limit_range\n if len(post_center_range) > 0:\n post_center_range = torch.tensor(\n post_center_range,\n dtype=preds_dicts[0]['hm'].dtype,\n device=preds_dicts[0]['hm'].device,\n )\n\n for task_id, preds_dict in enumerate(preds_dicts):\n # convert N C H W to N H W C \n for key, val in preds_dict.items():\n preds_dict[key] = val.permute(0, 2, 3, 1).contiguous()\n\n batch_size = preds_dict['hm'].shape[0]\n\n if double_flip:\n assert batch_size % 4 == 0, print(batch_size)\n batch_size = int(batch_size / 4)\n for k in preds_dict.keys():\n # transform the prediction map back to their original coordinate befor flipping\n # the flipped predictions are ordered in a group of 4. The first one is the original pointcloud\n # the second one is X flip pointcloud(y=-y), the third one is Y flip pointcloud(x=-x), and the last one is \n # X and Y flip pointcloud(x=-x, y=-y).\n # Also please note that pytorch's flip function is defined on higher dimensional space, so dims=[2] means that\n # it is flipping along the axis with H length(which is normaly the Y axis), however in our traditional word, it is flipping along\n # the X axis. The below flip follows pytorch's definition yflip(y=-y) xflip(x=-x)\n _, H, W, C = preds_dict[k].shape\n preds_dict[k] = preds_dict[k].reshape(int(batch_size), 4, H, W, C)\n preds_dict[k][:, 1] = torch.flip(preds_dict[k][:, 1], dims=[1]) \n preds_dict[k][:, 2] = torch.flip(preds_dict[k][:, 2], dims=[2])\n preds_dict[k][:, 3] = torch.flip(preds_dict[k][:, 3], dims=[1, 2])\n\n if \"metadata\" not in example or len(example[\"metadata\"]) == 0:\n meta_list = [None] * batch_size\n else:\n meta_list = example[\"metadata\"]\n if double_flip:\n meta_list = meta_list[:4*int(batch_size):4]\n\n batch_hm = torch.sigmoid(preds_dict['hm'])\n\n batch_dim = torch.exp(preds_dict['dim'])\n\n batch_rots = preds_dict['rot'][..., 0:1]\n batch_rotc = preds_dict['rot'][..., 1:2]\n batch_reg = preds_dict['reg']\n batch_hei = preds_dict['height']\n\n if double_flip:\n batch_hm = batch_hm.mean(dim=1)\n batch_hei = batch_hei.mean(dim=1)\n batch_dim = batch_dim.mean(dim=1)\n\n # y = -y reg_y = 1-reg_y\n batch_reg[:, 1, ..., 1] = 1 - batch_reg[:, 1, ..., 1]\n batch_reg[:, 2, ..., 0] = 1 - batch_reg[:, 2, ..., 0]\n\n batch_reg[:, 3, ..., 0] = 1 - batch_reg[:, 3, ..., 0]\n batch_reg[:, 3, ..., 1] = 1 - batch_reg[:, 3, ..., 1]\n batch_reg = batch_reg.mean(dim=1)\n\n # first yflip \n # y = -y theta = pi -theta\n # sin(pi-theta) = sin(theta) cos(pi-theta) = -cos(theta)\n # batch_rots[:, 1] the same\n batch_rotc[:, 1] *= -1\n\n # then xflip x = -x theta = 2pi - theta\n # sin(2pi - theta) = -sin(theta) cos(2pi - theta) = cos(theta)\n # batch_rots[:, 2] the same\n batch_rots[:, 2] *= -1\n\n # double flip \n batch_rots[:, 3] *= -1\n batch_rotc[:, 3] *= -1\n\n batch_rotc = batch_rotc.mean(dim=1)\n batch_rots = batch_rots.mean(dim=1)\n\n batch_rot = torch.atan2(batch_rots, batch_rotc)\n\n batch, H, W, num_cls = batch_hm.size()\n\n batch_reg = batch_reg.reshape(batch, H*W, 2)\n batch_hei = batch_hei.reshape(batch, H*W, 1)\n\n batch_rot = batch_rot.reshape(batch, H*W, 1)\n batch_dim = batch_dim.reshape(batch, H*W, 3)\n batch_hm = batch_hm.reshape(batch, H*W, num_cls)\n\n ys, xs = torch.meshgrid([torch.arange(0, H), torch.arange(0, W)])\n ys = ys.view(1, H, W).repeat(batch, 1, 
1).to(batch_hm)\n xs = xs.view(1, H, W).repeat(batch, 1, 1).to(batch_hm)\n\n xs = xs.view(batch, -1, 1) + batch_reg[:, :, 0:1]\n ys = ys.view(batch, -1, 1) + batch_reg[:, :, 1:2]\n\n xs = xs * test_cfg.out_size_factor * test_cfg.voxel_size[0] + test_cfg.pc_range[0]\n ys = ys * test_cfg.out_size_factor * test_cfg.voxel_size[1] + test_cfg.pc_range[1]\n\n if 'vel' in preds_dict:\n batch_vel = preds_dict['vel']\n\n if double_flip:\n # flip vy\n batch_vel[:, 1, ..., 1] *= -1\n # flip vx\n batch_vel[:, 2, ..., 0] *= -1\n\n batch_vel[:, 3] *= -1\n \n batch_vel = batch_vel.mean(dim=1)\n\n batch_vel = batch_vel.reshape(batch, H*W, 2)\n batch_box_preds = torch.cat([xs, ys, batch_hei, batch_dim, batch_vel, batch_rot], dim=2)\n else: \n batch_box_preds = torch.cat([xs, ys, batch_hei, batch_dim, batch_rot], dim=2)\n\n metas.append(meta_list)\n\n if test_cfg.get('per_class_nms', False):\n pass \n else:\n rets.append(self.post_processing(batch_box_preds, batch_hm, test_cfg, post_center_range, task_id)) \n\n # Merge branches results\n ret_list = []\n num_samples = len(rets[0])\n\n ret_list = []\n for i in range(num_samples):\n ret = {}\n for k in rets[0][i].keys():\n if k in [\"box3d_lidar\", \"scores\"]:\n ret[k] = torch.cat([ret[i][k] for ret in rets])\n elif k in [\"label_preds\"]:\n flag = 0\n for j, num_class in enumerate(self.num_classes):\n rets[j][i][k] += flag\n flag += num_class\n ret[k] = torch.cat([ret[i][k] for ret in rets])\n\n ret['metadata'] = metas[0][i]\n ret_list.append(ret)\n\n return ret_list \n\n\n\n\n\n @torch.no_grad()\n def post_processing(self, batch_box_preds, batch_hm, test_cfg, post_center_range, task_id):\n batch_size = len(batch_hm)\n\n prediction_dicts = []\n for i in range(batch_size):\n box_preds = batch_box_preds[i]\n hm_preds = batch_hm[i]\n\n scores, labels = torch.max(hm_preds, dim=-1)\n\n score_mask = scores > test_cfg.score_threshold\n distance_mask = (box_preds[..., :3] >= post_center_range[:3]).all(1) \\\n & (box_preds[..., :3] <= post_center_range[3:]).all(1)\n\n mask = distance_mask & score_mask \n\n box_preds = box_preds[mask]\n scores = scores[mask]\n labels = labels[mask]\n\n boxes_for_nms = box_preds[:, [0, 1, 2, 3, 4, 5, -1]]\n\n if test_cfg.get('circular_nms', False):\n centers = boxes_for_nms[:, [0, 1]] \n boxes = torch.cat([centers, scores.view(-1, 1)], dim=1)\n selected = _circle_nms(boxes, min_radius=test_cfg.min_radius[task_id], post_max_size=test_cfg.nms.nms_post_max_size) \n else:\n selected = box_torch_ops.rotate_nms_pcdet(boxes_for_nms.float(), scores.float(), \n thresh=test_cfg.nms.nms_iou_threshold,\n pre_maxsize=test_cfg.nms.nms_pre_max_size,\n post_max_size=test_cfg.nms.nms_post_max_size)\n\n selected_boxes = box_preds[selected]\n selected_scores = scores[selected]\n selected_labels = labels[selected]\n\n prediction_dict = {\n 'box3d_lidar': selected_boxes,\n 'scores': selected_scores,\n 'label_preds': selected_labels\n }\n\n prediction_dicts.append(prediction_dict)\n\n return prediction_dicts \n\nimport numpy as np \ndef _circle_nms(boxes, min_radius, post_max_size=83):\n \"\"\"\n NMS according to center distance\n \"\"\"\n keep = np.array(circle_nms(boxes.cpu().numpy(), thresh=min_radius))[:post_max_size]\n\n keep = torch.from_numpy(keep).long().to(boxes.device)\n\n return keep " ]
[ [ "torch.cat", "torch.nn.ModuleList", "torch.nn.BatchNorm2d", "torch.exp", "torch.flip", "numpy.max", "torch.sigmoid", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.savefig", "torch.tensor", "numpy.expand_dims", "numpy.array", "numpy.zeros", "torch.max", "matplotlib.pyplot.figure", "torch.nn.ReLU", "torch.nn.Conv2d", "numpy.hstack", "numpy.squeeze", "torch.arange", "torch.no_grad", "numpy.linspace", "torch.from_numpy", "torch.atan2", "matplotlib.pyplot.imshow" ] ]
malininae/ESMValTool
[ "9a1bf70a153135ebe2698e2275f6d6b1251e4d30" ]
[ "esmvaltool/diag_scripts/seaice/ipcc_sea_ice_diag_tools.py" ]
[ "import iris\nfrom iris.experimental.equalise_cubes import equalise_attributes\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom scipy import stats, special\nimport sys\n\n# import internal esmvaltool modules here\nfrom esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools\n\n# This part sends debug statements to stdout\nlogger = logging.getLogger(os.path.basename(__file__))\nlogging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n\n\ndef select_months(cubelist,month):\n\n month_constr = iris.Constraint(time=lambda cell: cell.point.month == month)\n\n cropped_cubelist = iris.cube.CubeList()\n\n for cube in cubelist:\n cropped_cube=cube.extract(month_constr)\n cropped_cubelist.append(cropped_cube)\n\n return (cropped_cubelist)\n\ndef select_latitudes(cubelist, start_lat=-90, end_lat=90):\n\n # possibly add here a warning about start_ and end_lat\n\n lat_constr=iris.Constraint(latitude=lambda cell: start_lat < cell <= end_lat)\n\n cropped_cubelist = iris.cube.CubeList()\n\n for n, cube in enumerate(cubelist):\n cropped_cube=cube.extract(lat_constr)\n cropped_cubelist.append(cropped_cube)\n\n return (cropped_cubelist)\n\n\ndef load_cubelist(filenames):\n\n cubelist = iris.cube.CubeList()\n\n for n, filename in enumerate(filenames):\n cube=iris.load_cube(filename)\n cubelist.append(cube)\n\n return (cubelist)\n\n\ndef calculate_siextent(cubelist, threshold=15):\n\n # calculates siextent for the hemisphere\n # creates a cubelist with only one dimension: 'time'\n\n for n, cube in enumerate(cubelist):\n\n if cube is None:\n continue\n\n area=iris.analysis.cartography.area_weights(cube,normalize=False)\n time = cube.coord('time')\n\n mask = (cube.data.mask) | (cube.data.data<=threshold)\n\n area=np.ma.array(area,mask=mask)\n\n # not beautiful but it works: here we create a new cube, where data is a sum of area covered by\n # sea ice in the whole space that was provided. The only coordinate is time, taken from the original\n # cube. 
Might be corrected in the future.\n area=(area.sum(axis=(1, 2))/(1000**2))/1000000\n #for now passing paren cube attributes, clean before merging!!!\n new_cube = iris.cube.Cube(area, standard_name='sea_ice_extent',long_name='sea ice extent', var_name='siextent', units= \"10^6km2\", attributes=cube.attributes, dim_coords_and_dims=[(time, 0)])\n\n if n==0:\n conv_cubelist=iris.cube.CubeList([new_cube])\n else:\n conv_cubelist.append(new_cube)\n\n return (conv_cubelist)\n\ndef calculate_siarea(cubelist):\n\n sia_cubelist = iris.cube.CubeList()\n for cube in cubelist:\n area = iris.analysis.cartography.area_weights(cube, normalize=False)\n time = cube.coord('time')\n\n siconc = cube.data / 100 # since the data is in %, it has to be converted into fraction\n area = np.ma.array(area, mask=cube.data.mask)\n\n sia_arr = siconc * area\n\n sia = (sia_arr.sum(axis=(1, 2)) / (1000 ** 2)) / 1000000 # iris provides area in m, converting it to 10^6km2\n # for now passing parent cube attributes, clean before merging!!!\n sia_cube = iris.cube.Cube(sia, standard_name='sea_ice_area', long_name='sea ice area', var_name='siarea',\n units=\"1e6 km2\", attributes=cube.attributes, dim_coords_and_dims=[(time, 0)])\n sia_cubelist.append(sia_cube)\n\n return (sia_cubelist)\n\ndef calculate_siparam(cubelist, siext=True):\n # function which determines if sea ice extent or sea ice are should be calculated\n\n if siext:\n cubelist=calculate_siextent(cubelist)\n else:\n cubelist=calculate_siarea(cubelist)\n\n return (cubelist)\n\ndef n_year_mean(cubelist, n):\n\n # the idea behind it is that we pass the cubelist with the same time coords\n\n n_aver_cubelist = iris.cube.CubeList()\n\n dcoord = create_coords(cubelist, n)\n\n for cube in cubelist:\n n_t = len(cube.coord('time').points)\n if n_t%n!=0:\n # add here a warning that the last is an average of n_t%n==0 years\n logger.info('The n of years is not divisible by %s last %s years were not taken into account',\n str(n), str(n_t%n))\n if len(cube.data.shape) == 1:\n data = [np.average(cube.data[n*i:n*i + n]) for i in range(0, int(n_t / n))]\n n_aver_cube = iris.cube.Cube(np.asarray(data), long_name=cube.long_name + ', ' + str(n) + 'y mean',\n var_name=cube.var_name, units=cube.units,\n attributes=cube.attributes, dim_coords_and_dims=[(dcoord, 0)])\n elif len(cube.data.shape) == 2:\n data = np.asarray([np.average(cube.data[n * i:n * i + n, :], axis=0) for i in range(0, int(n_t / n))])\n n_aver_cube =iris.cube.Cube(data, long_name=cube.long_name + ', ' + str(n) + 'y mean',\n var_name=cube.var_name, units=cube.units, attributes=cube.attributes,\n dim_coords_and_dims=[(dcoord,0), (cube.coords()[1],1)])\n elif len(cube.data.shape) == 3:\n data = np.asarray([np.average(cube.data[n * i:n * i + n, :, :], axis=0) for i in range(0, int(n_t / n))])\n n_aver_cube = iris.cube.Cube(data, long_name=cube.long_name + ', ' + str(n) + 'y mean',\n var_name=cube.var_name, units=cube.units, attributes=cube.attributes,\n dim_coords_and_dims=[(dcoord, 0), (cube.coords()[1], 1), (cube.coords()[2],2)])\n\n n_aver_cubelist.append(n_aver_cube)\n\n return (n_aver_cubelist)\n\ndef create_coords(cubelist, year_n):\n # dirty trick, we try to unify time to merge the cubelist in the end\n\n cb = cubelist [0]\n\n n_t = len(cb.coord('time').points)\n coord = [np.average(cb.coord('time').points[year_n*i:year_n*i + year_n]) for i in range(0, int(n_t / year_n))]\n bnds = [[cb.coord('time').bounds[year_n*i][0], cb.coord('time').bounds[year_n*i + (year_n - 1)][1]] for i in\n range(0, int(n_t / year_n))]\n if 
n_t%year_n != 0:\n # raise warning\n logger.info('The n of years is not divisible by %s', str(year_n))\n # coord.append(np.average(cb.coord('time').points[int(n_t / year_n):-1]))\n # bnds.append([cb.coord('time').bounds[int(n_t / year_n) * year_n][0], cb.coord('time').bounds[-1][1]])\n\n dcoord = iris.coords.DimCoord(np.asarray(coord), bounds=np.asarray(bnds),\n standard_name=cb.coord('time').standard_name,\n units=cb.coord('time').units, long_name=cb.coord('time').long_name,\n var_name=cb.coord('time').var_name)\n\n return (dcoord)\n\n\ndef substract_ref_period(cubelist, ref_period):\n\n constr = iris.Constraint(time=lambda cell: ref_period[0] <= cell.point.year <= ref_period[1])\n\n upd_cubelist = iris.cube.CubeList()\n\n for cube in cubelist:\n mean = cube.extract(constr).collapsed('time', iris.analysis.MEAN)\n upd_cube = cube - mean\n upd_cube.attributes = cube.attributes\n upd_cube.long_name = cube.long_name + ' anomaly'\n upd_cube.var_name = cube.var_name + '_ano'\n upd_cubelist.append(upd_cube)\n\n return(upd_cubelist)\n\ndef figure_handling(cfg, name = 'plot', img_ext=None):\n\n if cfg['write_plots']:\n\n if img_ext == None:\n img_ext = diagtools.get_image_format(cfg)\n\n path=os.path.join(cfg['plot_dir'], name + img_ext)\n\n logger.info('Saving plots to %s', path)\n plt.savefig(path)\n\n else:\n\n plt.show()\n\n return\n" ]
[ [ "numpy.asarray", "matplotlib.pyplot.savefig", "numpy.ma.array", "numpy.average", "matplotlib.pyplot.show" ] ]
alhadidi/PiccoloKitchen
[ "9b64e809a90937b24b34701e1899b8a6a4add94b" ]
[ "Sensors/Computer codes/udpESP8266_csv.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\n@author: Alejandro Garcia\r\n\"\"\"\r\n\r\nimport socket\r\nimport pandas as pd\r\n#import csv\r\nimport time\r\nfrom datetime import datetime\r\n\r\ndata_kit=pd.DataFrame(columns=['Time',\r\n 'Sound1','Sound 1 Level', 'Sound2','Sound 2 Level',\r\n 'Distance sonic','Distance Laser','User position', #Add 'Distance Sonar' if sensor included.\r\n 'Back Cab Position','Back Movements','Back Interactions in Position','Back Interactions in Total',\r\n 'Right Cab Position','Right Movements','Right Interactions in Position','Right Interactions in Total',\r\n 'Left Cab Position','Left Movements','Left Interactions in Position','Left Interactions in Total'])\r\n\r\nfile_name='KitchenData_'+datetime.now().strftime('%m_%d_%H_%M_%S')+'.csv'\r\n\r\n####### MODIFY THE FOLLOWING TWO LINES ##########################################################\r\nUDP_IP = \"\" #Use the same address that was specified on the UDP Settings.\r\nUDP_PORT = 8888 #Use the same port that was specified on the UDP Settings.\r\n#################################################################################################\r\n\r\nprint(\"Connecting...\")\r\ntime.sleep(2) # Wait for the NodeMCU to connect to the internet.\r\n\r\ntry:\r\n sock = socket.socket(socket.AF_INET, # Internet\r\n socket.SOCK_DGRAM) # UDP\r\nexcept:\r\n print('Not able to connect over UDP')\r\n\r\nwhile True:\r\n message='Hello Node'\r\n try:\r\n sock.sendto(message.encode(),(UDP_IP, UDP_PORT))\r\n # sock.bind((UDP_IP, UDP_PORT))\r\n data,addr= sock.recvfrom(2024)\r\n print(data.decode())\r\n data=data.decode().split(',')\r\n try:\r\n data_kit=data_kit.append({'Time':datetime.now().strftime('%H:%M:%S'),\r\n 'Sound1':data[0],'Sound 1 Level':data[1], 'Sound2':data[2],'Sound 2 Level':data[3],\r\n 'Distance sonic':data[4],'Distance Laser':data[5],'User position':data[6], #Add 'Distance Sonar':data[5] and reorder number if Distance Sonar included.\r\n 'Back Cab Position':data[7],'Back Movements':data[8],'Back Interactions in Position':data[9],'Back Interactions in Total':data[10],\r\n 'Right Cab Position':data[11],'Right Movements':data[12],'Right Interactions in Position':data[13],'Right Interactions in Total':data[14],\r\n 'Left Cab Position':data[15],'Left Movements':data[16],'Left Interactions in Position':data[17],'Left Interactions in Total':data[18]},ignore_index=True)\r\n data_kit.to_csv(file_name)\r\n except:\r\n print('bad row')\r\n \r\n except:\r\n print('connection problem')\r\n time.sleep(0.1)\r\n \r\n\r\n" ]
[ [ "pandas.DataFrame" ] ]
PuneethRegonda/realtime_surveillance_system
[ "bf0f7a80c8a0f8bbaaf3dcb6fd7ee419708a5f4f" ]
[ "facemap_flask_backend/facemap_notebook.py" ]
[ "\nimport sys\nimport requests\nimport socketio\nimport cv2\nimport os\nimport numpy as np\nfrom PIL import Image\nimport pickle\nimport random\n\nprint(cv2.__file__)\n\nTOKEN =\"\"\n\n\n\nclass Recognizer:\n\n def __init__(self,socket):\n self.socket =socket\n self.stop = False\n print(\"initializing Recognizer\")\n pass\n \n def disconnect(self):\n self.socket.disconnect()\n\n def train(self):\n try:\n print(\"training \")\n # BASE_DIR = os.path.dirname(os.path.abspath(__file__)) this says where is this notebook exactly is..\n\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n\n faces_got,ids = self.getImageAndLabels(\"./dataset/\")\n recognizer.train(faces_got, np.array(ids))\n\n # Save the model into recognizers/trainer.yml\n recognizer.write('recognizers/trainner.yml')\n\n print(\"Saved the model into recognizers/trainer.yml\")\n except Exception as e:\n print(e)\n \n def getImageAndLabels(self,path):\n face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_default.xml')\n \n print(\"getting all face_vectors and face_labels\")\n imagePaths = [os.path.join(path,f) for f in os.listdir(path)] \n # print(len(imagePaths))\n faceSamples=[]\n ids = []\n for root, dirs, files_names in os.walk(path):\n total_files = 0\n detected_faces = 0\n if(root==path):\n continue\n for file in files_names:\n if file.endswith(\"png\") or file.endswith(\"jpg\") or file.endswith(\"JPG\"):\n total_files = total_files+1\n image_path = os.path.join(root, file)\n PIL_img = Image.open(image_path).convert('L') # convert it to grayscale\n img_numpy = np.array(PIL_img,'uint8')\n\n # print(os.path.split(file)) ('', '1.194.jpg')\n # os.path.split(file)[-1].split(\".\") --->>> ['1', '194', 'jpg']\n\n label_id = int(os.path.split(file)[-1].split(\".\")[0])\n faces = face_cascade.detectMultiScale(img_numpy) \n\n for (x,y,w,h) in faces:\n detected_faces = detected_faces+1\n faceSamples.append(img_numpy[y:y+h,x:x+w])\n ids.append(label_id)\n print(\"root:\",root,end='\\n')\n print(\"DIR:\",len(dirs),end='\\n')\n print(\"Files:\",len(files_names))\n print(\"total images:\"+str(total_files))\n print(\"detected_faces:\" +str(detected_faces))\n return faceSamples,ids\n \n\n \n def recognize(self):\n \n try:\n recognizer = cv2.face.LBPHFaceRecognizer_create()\n# recognizer.read('recognizers/trainner.yml')\n recognizer.read('recognizers/trainner_3_but_better.yml')\n\n faceCascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt.xml')\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n id = 0\n names = ['', '', 'Priyanka Chopra',\"Puneeth\",\"Tiger Shroff\",\"Vicky Kaushal\",\"Shah_Rukh_Khan\"]\n# url = input()\n cam = cv2.VideoCapture(0)\n cam.set(3, 640) # set video widht\n cam.set(4, 480) # set video height\n\n while True:\n if self.stop:\n self.socket.disconnect()\n break\n\n ret, img =cam.read()\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale( \n gray,\n scaleFactor = 1.2,\n minNeighbors = 3,\n )\n \n# after detecting faces from the frame its time for predicting the faces\n for(x,y,w,h) in faces:\n cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)\n id, confidence = recognizer.predict(gray[y:y+h,x:x+w])\n\n # If confidence is less then 100 ==> \"0\" : perfect match \n if (confidence < 100):\n sio.emit('push_faces', {'location': random.choice(locations),'name':names[id]}, namespace='/facemap') \n id = names[id]\n confidence = \" {0}%\".format(round(100 - confidence))\n\n else:\n id = \"unknown\"\n confidence = \" {0}%\".format(round(100 - confidence))\n\n 
cv2.putText(\n img, \n str(id), \n (x+5,y-5), \n font, \n 1, \n (255,255,255), \n 2\n )\n cv2.putText(\n img, \n str(confidence), \n (x+5,y+h-5), \n font, \n 1, \n (255,255,0), \n 1\n ) \n\n cv2.imshow('camera',img) \n if cv2.waitKey(10) & 0xff==ord(\"q\"):\n self.socket.disconnect()\n break\n # before ending its complusory we release resources \n\n except Exception as e:\n print(e)\n \n cam.release()\n cv2.destroyAllWindows()\n \n\n\n# In[7]:\n\n\n\n\n# Socket Manager\n\n# sio = socketio.Client(logger=True)\nsio = socketio.Client()\n\n\[email protected]\ndef connect_error():\n print(\"The connection failed!\") \n \[email protected]\ndef disconnect():\n print(\"disconnected\")\n \n# just for testing\[email protected]('new_faces', namespace='/facemap')\ndef new_faces(data):\n print(\"new faces\")\n print(data)\n return \"OK\", 123\n\[email protected]('old_faces', namespace='/facemap')\ndef old_faces(data):\n print(\"old faces\")\n print(data)\n return \"OK\", 123\n\[email protected]('param_error', namespace='/facemap')\ndef param_error(data):\n print(\"param_error\")\n print(data)\n return \"OK\", 123\n\[email protected]('connect', namespace='/facemap')\ndef on_connect():\n print(\"Start Pushing the faces and Locations of people\")\n\[email protected]('disconnect', namespace='/facemap')\ndef on_disconnect():\n print(\"on disconnect\")\n _host\nsio.connect(_host,headers={'Authorization':'TOKEN '+TOKEN}, namespaces=['/facemap']) \n# sio.connect('https://face-map-node-server.herokuapp.com',headers={'Authorization':'TOKEN '+TOKEN}, namespaces=['/facemap'])\n\n\n# In[8]:\n\n\n# Recognizer\n# sio.emit('push_faces', {'location': 'FoodCourt'}, namespace='/facemap')\n\n\n# Recognizer has \n# * Train \n# * Recognize\n# \n\n# In[9]:\n\n\nrecognizer = Recognizer(sio)\n# recognizer.train()\nrecognizer.recognize()\n# recognizer.capture()\n# rtsp://192.168.0.5:8080/h264_pcm.sdp\n\n\n# In[10]:\n\n\n# recognizer.disconnect()\n\n\n# In[129]:\n\n\n#print(help(cv2.face))\n\n\n# In[22]:\n\n\nrecognizer.recognize()\n\n\n# In[10]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "numpy.array" ] ]
jdayton3/Confounded
[ "907ed5b751f2a7b8905772b0912884e1a746424d" ]
[ "confounded/network.py" ]
[ "\"\"\"Definitions for the neural networks in Confounded.\n\"\"\"\n\n# pylint: disable=E1129\n\nimport functools\nimport tensorflow as tf\nfrom tensorflow.contrib.layers import fully_connected, batch_norm # pylint: disable=E0611\nfrom math import ceil\n\ndef is_square(n):\n sqrt = n**0.5\n return int(sqrt) == sqrt\n\ndef var_scope(scope):\n \"\"\"Decorator to wrap a function in a tensorflow variable scope\n\n Arguments:\n scope {str} -- Name of the variable scope\n\n Returns:\n function -- The decorated function wrapped in the variable scope\n \"\"\"\n def decorator_var_scope(func):\n @functools.wraps(func)\n def wrapper_var_scope(*args, **kwargs):\n with tf.variable_scope(scope):\n return func(*args, **kwargs)\n return wrapper_var_scope\n return decorator_var_scope\n\ndef show_before_and_after_images(func):\n @functools.wraps(func)\n def wrapper_show_images(*args, **kwargs):\n inputs = args[0]\n show_image(inputs, name=\"inputs\")\n outputs = func(*args, **kwargs)\n if isinstance(outputs, tuple):\n # The autoencoder functions might return (outputs, loss)\n show_image(outputs[0], name=\"outputs\")\n else:\n show_image(outputs, name=\"outputs\")\n return outputs\n return wrapper_show_images\n\n@var_scope(\"vae\")\n@show_before_and_after_images\ndef variational_autoencoder(inputs, code_size=20):\n \"\"\"Creates a variational autoencoder based on \"Hands-On Machine\n Learning with Scikit-Learn and TensorFlow by Aurélien Géron\n (O’Reilly). Copyright 2017 Aurélien Géron, 978-1-491-96229-9.\"\n\n Arguments:\n input_size {int} -- Size of the input to the autoencoder\n\n Returns:\n input {Tensor} -- The input tensor\n output {Tensor} -- The output tensor\n loss {Tensor} -- The loss operation\n \"\"\"\n layer_sizes = [500, 500]\n activations = [tf.nn.elu for _ in layer_sizes]\n input_size = get_layer_size(inputs)\n\n encoding = make_layers(inputs, layer_sizes, activations)\n code_mean, code_gamma, code = vae_code_layer(encoding, code_size)\n decoding = make_layers(code, layer_sizes, activations)\n logits = fully_connected(decoding, input_size, activation_fn=None)\n outputs = tf.sigmoid(logits)\n\n reconstruction_loss = tf.losses.mean_squared_error(inputs, outputs)\n xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=inputs, logits=logits)\n reconstruction_loss = tf.reduce_sum(xentropy)\n latent_loss = kl_divergence(code_gamma, code_mean) #* 0.01 / code_size\n loss = reconstruction_loss + latent_loss\n\n loss = loss / input_size\n\n return outputs, loss\n\ndef get_layer_size(layer):\n dimensions = layer.shape[1:]\n size = 1\n for dimension in dimensions:\n size *= int(dimension) # must be converted from Dimension to int\n return size\n\ndef make_layers(inputs, layer_sizes, activations=None, keep_prob=1.0, do_batch_norm=False):\n if not activations:\n activations = [tf.nn.relu for _ in layer_sizes]\n current_layer = inputs\n for layer_size, activation in zip(layer_sizes, activations):\n current_layer = fully_connected(current_layer, layer_size, activation_fn=activation)\n current_layer = tf.nn.dropout(current_layer, keep_prob)\n if do_batch_norm:\n current_layer = batch_norm(current_layer)\n return current_layer\n\ndef vae_code_layer(inputs, code_size):\n code_mean = fully_connected(inputs, code_size, activation_fn=None)\n code_gamma = fully_connected(inputs, code_size, activation_fn=None)\n noise = tf.random_normal(tf.shape(code_gamma), dtype=tf.float32)\n code = code_mean + tf.exp(0.5 * code_gamma) * noise\n return code_mean, code_gamma, code\n\ndef kl_divergence(gamma, mean):\n return 
0.5 * tf.reduce_sum(tf.exp(gamma) + tf.square(mean) - 1 - gamma)\n\ndef show_image(x, name=\"image\"):\n input_size = get_layer_size(x)\n if is_square(input_size):\n width_height = int(input_size**0.5)\n img = tf.reshape(x, [-1, width_height, width_height, 1])\n tf.summary.image(name, img, max_outputs=1)\n\nclass Confounded(object):\n def __init__(self,\n input_size,\n code_size,\n num_targets,\n discriminator_layers=2,\n autoencoder_layers=2,\n activation=tf.nn.relu,\n disc_weghting=1.0,\n learning_rate=0.0001):\n self.sess = tf.Session()\n\n self.input_size = input_size\n self.code_size = code_size\n self.num_targets = num_targets\n self.discriminator_layers = discriminator_layers\n self.autoencoder_layers = autoencoder_layers\n self.activation = activation\n self.disc_weighting = disc_weghting\n self.learning_rate = learning_rate\n\n self.inputs = None\n self.code = None\n self.outputs = None\n self.targets = None\n self.logits = None\n self.classification = None\n\n self.d_loss = None\n self.ae_loss = None\n self.loss = None\n self.optimizer = None\n self.d_optimizer = None\n\n self._setup_networks()\n\n def _setup_networks(self):\n self._setup_autoencoder()\n self._setup_discriminator()\n self._setup_loss_functions()\n\n @var_scope(\"autoencoder\")\n def _setup_autoencoder(self):\n self.inputs = tf.placeholder(tf.float32, [None, self.input_size])\n self.outputs, self.ae_loss = variational_autoencoder(self.inputs, code_size=self.code_size)\n\n @var_scope(\"discriminator\")\n def _setup_discriminator(self):\n self.targets = tf.placeholder(tf.float32, [None, self.num_targets])\n inputs = batch_norm(self.outputs)\n layer_size = 512\n layer_sizes = [int(ceil(layer_size / 2**n)) for n in range(self.discriminator_layers)]\n layer_sizes = [1024, 512, 512, 128]\n penultimate_layer = make_layers(self.outputs, layer_sizes, keep_prob=0.5, do_batch_norm=True)\n with tf.variable_scope(\"do_not_save\"):\n self.logits = fully_connected(penultimate_layer, self.num_targets, activation_fn=None)\n self.classification = tf.nn.sigmoid(self.logits)\n\n @var_scope(\"discriminator\")\n @var_scope(\"optimizer\")\n def _setup_disc_loss(self):\n xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.targets, logits=self.logits)\n self.d_loss = tf.reduce_sum(xentropy) / self.num_targets\n tf.summary.scalar(\"d_loss\", self.d_loss)\n discriminator_vars = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES,\n \"discriminator\"\n )\n self.d_optimizer = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate,\n name=\"discriminator\"\n ).minimize(self.d_loss, var_list=discriminator_vars)\n\n @var_scope(\"autoencoder\")\n @var_scope(\"optimizer\")\n def _setup_ae_loss(self):\n tf.summary.scalar(\"ae_loss\", self.ae_loss)\n autoencoder_vars = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES,\n \"autoencoder\"\n )\n self.ae_optimizer = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate,\n name=\"ae\"\n ).minimize(self.ae_loss, var_list=autoencoder_vars)\n\n @var_scope(\"autoencoder\")\n @var_scope(\"optimizer\")\n def _setup_dual_loss(self):\n autoencoder_vars = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES,\n \"autoencoder\"\n )\n self.loss = self.ae_loss - self.disc_weighting * self.d_loss\n tf.summary.scalar(\"dual_loss\", self.loss)\n self.optimizer = tf.train.AdamOptimizer(\n learning_rate=self.learning_rate,\n name=\"dual\"\n ).minimize(self.loss, var_list=autoencoder_vars)\n\n def _setup_loss_functions(self):\n self._setup_disc_loss()\n self._setup_ae_loss()\n 
self._setup_dual_loss()\n" ]
[ [ "tensorflow.exp", "tensorflow.contrib.layers.batch_norm", "tensorflow.contrib.layers.fully_connected", "tensorflow.reshape", "tensorflow.losses.mean_squared_error", "tensorflow.shape", "tensorflow.sigmoid", "tensorflow.variable_scope", "tensorflow.nn.sigmoid", "tensorflow.get_collection", "tensorflow.nn.dropout", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.Session", "tensorflow.placeholder", "tensorflow.reduce_sum", "tensorflow.summary.image", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.square" ] ]
ianrh125/hilbert-drawing
[ "2d3fbaf80b454f66de5b505a74f2e9533f385a6b" ]
[ "hilbert drawing.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 13 10:35:41 2019\r\n\r\n@author: ianrh\r\n\"\"\"\r\n\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\nimageLayers = []\r\n\r\n\"\"\"\r\nScaleUp affects the scale of the hilbert curve itself, effectively reducing the number of iterations by this value\r\nimageScale determines how the image will be scaled before the curve is drawn.\r\nminCurve detemines the minimum density of the curve at any given point\r\n\"\"\"\r\nScaleUp = 0\r\nimageScale = 1\r\nminCurve = 16\r\n\r\n#Function for creating the list of points for the curve to follow\r\ndef ListPoints(y, x, level, direction, bonus):\r\n \r\n # evaluating whether the specific section of the curve is detailed enough to match the shading in the relevant section of image\r\n if(level <= ScaleUp or imageLayers[level][y][x]+bonus < 2**(level+9+ScaleUp)):\r\n return [[y * 2**level, x * 2**level, 2**level]]\r\n else:\r\n \r\n #creating four points to increase the section of the curve to the next iteration\r\n output = []\r\n addBonus = 2**(level+5 + ScaleUp)\r\n if(direction == 0):\r\n output.extend(ListPoints(y*2, x*2, level-1, 1, bonus/8))\r\n output.extend(ListPoints(y*2+1, x*2, level-1, 0, bonus/8+addBonus))\r\n output.extend(ListPoints(y*2+1, x*2+1, level-1, 0, bonus/8+addBonus*2))\r\n output.extend(ListPoints(y*2, x*2+1, level-1, 3, bonus/8+addBonus*3))\r\n elif(direction == 1):\r\n output.extend(ListPoints(y*2, x*2, level-1, 0, bonus/8+0))\r\n output.extend(ListPoints(y*2, x*2+1, level-1, 1, bonus/8+addBonus))\r\n output.extend(ListPoints(y*2+1, x*2+1, level-1, 1, bonus/8+addBonus*2))\r\n output.extend(ListPoints(y*2+1, x*2, level-1, 2, bonus/8+addBonus*3))\r\n elif(direction == 2):\r\n output.extend(ListPoints(y*2+1, x*2+1, level-1, 3, bonus/8+0))\r\n output.extend(ListPoints(y*2, x*2+1, level-1, 2, bonus/8+addBonus))\r\n output.extend(ListPoints(y*2, x*2, level-1, 2, bonus/8+addBonus*2))\r\n output.extend(ListPoints(y*2+1, x*2, level-1, 1, bonus/8+addBonus*3))\r\n elif(direction == 3):\r\n output.extend(ListPoints(y*2+1, x*2+1, level-1, 2, 0))\r\n output.extend(ListPoints(y*2+1, x*2, level-1, 3, bonus/8+addBonus))\r\n output.extend(ListPoints(y*2, x*2, level-1, 3, bonus/8+addBonus*2))\r\n output.extend(ListPoints(y*2, x*2+1, level-1, 0, bonus/8+addBonus*3))\r\n return output\r\n \r\nim = Image.open(\"C:/Image_filepath\")\r\nnp_im = np.array(im)\r\nprint(\"input size:\",np_im.shape)\r\nprint(\"square size:\",max(np_im.shape[0], np_im.shape[1]))\r\nsize = 2 ** math.ceil(math.log(np_im.shape[0]*imageScale,2))\r\ngrayscale = np.zeros((size, size))\r\n\r\n# converting the image to grayscale\r\nfor i in range(size):\r\n for j in range(size):\r\n \r\n # various systems for handling images with grayscale, RBG, or RBG and opacity data\r\n try:\r\n referencePixel = np_im[int(i/imageScale)][int(j/imageScale)]\r\n if(len(np_im.shape) == 3):\r\n if(len(referencePixel) == 4):\r\n grayscale[i][j] = (255 - int(math.sqrt(referencePixel[0]**2*0.21 + referencePixel[1]**2*0.72 + referencePixel[2]**2*0.07))) * referencePixel[3] / 255\r\n if(i == 0 and j == 0):\r\n print(\"foo\")\r\n else:\r\n grayscale[i][j] = 255 - int(math.sqrt(referencePixel[0]**2*0.21 + referencePixel[1]**2*0.72 + referencePixel[2]**2*0.07))\r\n if(i == 0 and j == 0):\r\n print(\"bar\")\r\n else:\r\n grayscale[i][j] = 255 - referencePixel\r\n if(i == 0 and j == 0):\r\n print(\"boo\")\r\n except:\r\n grayscale[i][j] = 0\r\n if(i == 0 and j == 0):\r\n print(\"far\")\r\n 
grayscale[i][j] = max(grayscale[i][j],minCurve)\r\n \r\n# scaling the grayscale to the power of 2 closest to the imageScale parameter\r\nprint(\"final size:\",grayscale.shape)\r\nimageLayers.append(grayscale)\r\n\r\n# creating copies of the original image, each scaled down 50% from the last\r\nwhile(len(imageLayers[-1]) > 1):\r\n layerSize = int(len(imageLayers[-1])/2)\r\n newLayer = np.zeros((layerSize,layerSize))\r\n for i in range(layerSize):\r\n for j in range(layerSize):\r\n newLayer[i][j] = imageLayers[-1][i*2][j*2]+imageLayers[-1][i*2+1][j*2]+imageLayers[-1][i*2][j*2+1]+imageLayers[-1][i*2+1][j*2+1]\r\n imageLayers.append(newLayer)\r\n \r\n# running function to generate a list of points for the curve to follow\r\npointsList = ListPoints(0, 0, len(imageLayers)-1,0,0)\r\nxPositions = []\r\nyPositions = []\r\nxLevels = []\r\nyLevels = []\r\nfor i in pointsList:\r\n yPositions.append(i[0]+i[2]/2)\r\n xPositions.append(i[1]+i[2]/2)\r\n yLevels.append(i[2])\r\n xLevels.append(i[2])\r\noriginalX = xPositions[:]\r\noriginalY = yPositions[:]\r\n# tweaking posion values of points to snap to 90 degree angles with points of higher iteration levels\r\nfor a in range(-1,8):\r\n for i in range(len(pointsList)-1):\r\n if(xPositions[i] != xPositions[i+1] and yPositions[i] != yPositions[i+1]):\r\n if(abs(originalX[i]-originalX[i+1]) > abs(originalY[i]-originalY[i+1])):\r\n if(yLevels[i] > yLevels[i+1]):\r\n yPositions[i] = yPositions[i+1]\r\n yLevels[i] = yLevels[i+1]\r\n elif(yLevels[i] < yLevels[i+1]):\r\n yPositions[i+1] = yPositions[i]\r\n yLevels[i+1] = yLevels[i]\r\n else:\r\n print(\"Same Levels Y\")\r\n elif(abs(originalX[i]-originalX[i+1]) < abs(originalY[i]-originalY[i+1])):\r\n if(xLevels[i] > xLevels[i+1]):\r\n xPositions[i] = xPositions[i+1]\r\n xLevels[i] = xLevels[i+1]\r\n elif(xLevels[i] < xLevels[i+1]):\r\n xPositions[i+1] = xPositions[i]\r\n xLevels[i+1] = xLevels[i]\r\n else:\r\n print(\"Same Levels X\")\r\n else:\r\n print(\"Diagonal\")\r\n\r\nduplicates = 0\r\n# scanning the list for items that don't line up. 
Used for finding errors in the previous pass\r\nfor i in range(len(pointsList) - 1):\r\n if(xPositions[i] != xPositions[i+1] and yPositions[i] != yPositions[i+1]):\r\n duplicates += 1\r\n print(\"X1:\", xPositions[i])\r\n print(\"Y1:\", yPositions[i])\r\n print(\"X2:\", xPositions[i+1])\r\n print(\"Y2:\", yPositions[i+1])\r\n if(duplicates < 5):\r\n plt.figure(figsize=(5,5))\r\n plt.plot(xPositions[(i-10):(i+10)], yPositions[(i-10):(i+10)], marker=',', markersize=2, mfc='black')\r\nprint(\"duplicates:\", duplicates)\r\n\r\nprint(\"Size:\", size)\r\n\r\n# reducing the position values to integers to work with rendering \r\nfor i in range(len(xPositions)):\r\n    xPositions[i] = int(xPositions[i])\r\n    yPositions[i] = int(yPositions[i]) \r\n\r\nif(duplicates == 0):\r\n    printArray = np.zeros((size,size))\r\n    \r\n    xDraw = xPositions[0]\r\n    yDraw = yPositions[0]\r\n    # drawing the Hilbert curve, taking 1-pixel steps towards the next point and marking the corresponding pixel black\r\n    for i in range(len(xPositions)):\r\n        while(xDraw != xPositions[i] or yDraw != yPositions[i]):\r\n            printArray[int(yDraw)][int(xDraw)] = 1\r\n            if(xDraw == xPositions[i]):\r\n                yDraw -= (yDraw-yPositions[i])/abs(yDraw-yPositions[i])\r\n            elif(yDraw == yPositions[i]):\r\n                xDraw -= (xDraw-xPositions[i])/abs(xDraw-xPositions[i])\r\n    # plotting the array to an image\r\n    dpi = 80\r\n    width = size/dpi\r\n    height = size/dpi\r\n    fig = plt.figure(figsize=(width, height), dpi=dpi)\r\n    ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect=1)\r\n    ax.imshow(printArray, cmap='Greys', interpolation = 'none')\r\nelse:\r\n    plt.figure(figsize=(10,10))\r\n    plt.plot(xPositions, yPositions, marker=',', markersize=2, mfc='black')" ]
[ [ "numpy.array", "matplotlib.pyplot.plot", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
colincqian/SPIN
[ "4e34f4fdaac65a44026f7d5d244850d35cae46b9", "4e34f4fdaac65a44026f7d5d244850d35cae46b9" ]
[ "datasets/preprocess/mpi_inf_3dhp.py", "datasets/mixed_dataset.py" ]
[ "import os\nimport sys\nimport cv2\nimport glob\nimport h5py\nimport json\nimport numpy as np\nimport scipy.io as sio\nimport scipy.misc\nfrom .read_openpose import read_openpose\n\ndef read_calibration(calib_file, vid_list):\n Ks, Rs, Ts = [], [], []\n file = open(calib_file, 'r')\n content = file.readlines()\n for vid_i in vid_list:\n K = np.array([float(s) for s in content[vid_i*7+5][11:-2].split()])\n K = np.reshape(K, (4, 4))\n RT = np.array([float(s) for s in content[vid_i*7+6][11:-2].split()])\n RT = np.reshape(RT, (4, 4))\n R = RT[:3,:3]\n T = RT[:3,3]/1000\n Ks.append(K)\n Rs.append(R)\n Ts.append(T)\n return Ks, Rs, Ts\n \ndef train_data(dataset_path, openpose_path, out_path, joints_idx, scaleFactor, extract_img=False, fits_3d=None):\n\n joints17_idx = [4, 18, 19, 20, 23, 24, 25, 3, 5, 6, 7, 9, 10, 11, 14, 15, 16]\n\n h, w = 2048, 2048\n imgnames_, scales_, centers_ = [], [], []\n parts_, Ss_, openposes_ = [], [], []\n\n # training data\n user_list = range(1,9)\n seq_list = range(1,3)\n vid_list = list(range(3)) + list(range(4,9))\n\n counter = 0\n\n for user_i in user_list:\n for seq_i in seq_list:\n seq_path = os.path.join(dataset_path,\n 'S' + str(user_i),\n 'Seq' + str(seq_i))\n # mat file with annotations\n annot_file = os.path.join(seq_path, 'annot.mat')\n annot2 = sio.loadmat(annot_file)['annot2']\n annot3 = sio.loadmat(annot_file)['annot3']\n # calibration file and camera parameters\n calib_file = os.path.join(seq_path, 'camera.calibration')\n Ks, Rs, Ts = read_calibration(calib_file, vid_list)\n\n for j, vid_i in enumerate(vid_list):\n\n # image folder\n imgs_path = os.path.join(seq_path, \n 'imageFrames',\n 'video_' + str(vid_i))\n\n # extract frames from video file\n if extract_img:\n\n # if doesn't exist\n if not os.path.isdir(imgs_path):\n os.makedirs(imgs_path)\n\n # video file\n vid_file = os.path.join(seq_path,\n 'imageSequence',\n 'video_' + str(vid_i) + '.avi')\n\n vidcap = cv2.VideoCapture(vid_file)\n\n # process video\n frame = 0\n while 1:\n # extract all frames\n success, image = vidcap.read()\n if not success:\n break\n frame += 1\n # image name\n imgname = os.path.join(imgs_path,\n 'frame_%06d.jpg' % frame)\n # save image\n cv2.imwrite(imgname, image)\n\n\n\n # # per frame\n # cam_aa = cv2.Rodrigues(Rs[j])[0].T[0]\n # pattern = os.path.join(imgs_path, '*.jpg')\n # img_list = glob.glob(pattern)\n # for i, img_i in enumerate(img_list):\n #\n # # for each image we store the relevant annotations\n # img_name = img_i.split('/')[-1]\n # img_view = os.path.join('S' + str(user_i),\n # 'Seq' + str(seq_i),\n # 'imageFrames',\n # 'video_' + str(vid_i),\n # img_name)\n # joints = np.reshape(annot2[vid_i][0][i], (28, 2))[joints17_idx]\n # S17 = np.reshape(annot3[vid_i][0][i], (28, 3))/1000\n # S17 = S17[joints17_idx] - S17[4] # 4 is the root\n # bbox = [min(joints[:,0]), min(joints[:,1]),\n # max(joints[:,0]), max(joints[:,1])]\n # center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]\n # scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200\n #\n # # check that all joints are visible\n # x_in = np.logical_and(joints[:, 0] < w, joints[:, 0] >= 0)\n # y_in = np.logical_and(joints[:, 1] < h, joints[:, 1] >= 0)\n # ok_pts = np.logical_and(x_in, y_in)\n # if np.sum(ok_pts) < len(joints_idx):\n # continue\n #\n # part = np.zeros([24,3])\n # part[joints_idx] = np.hstack([joints, np.ones([17,1])])\n # json_file = os.path.join(openpose_path, 'mpi_inf_3dhp',\n # img_view.replace('.jpg', '_keypoints.json'))\n # openpose = read_openpose(json_file, part, 'mpi_inf_3dhp')\n 
#\n # S = np.zeros([24,4])\n # S[joints_idx] = np.hstack([S17, np.ones([17,1])])\n #\n # # because of the dataset size, we only keep every 10th frame\n # counter += 1\n # if counter % 10 != 1:\n # continue\n #\n # # store the data\n # imgnames_.append(img_view)\n # centers_.append(center)\n # scales_.append(scale)\n # parts_.append(part)\n # Ss_.append(S)\n # openposes_.append(openpose)\n #\n # # store the data struct\n # if not os.path.isdir(out_path):\n # os.makedirs(out_path)\n # out_file = os.path.join(out_path, 'mpi_inf_3dhp_train.npz')\n # if fits_3d is not None:\n # fits_3d = np.load(fits_3d)\n # np.savez(out_file, imgname=imgnames_,\n # center=centers_,\n # scale=scales_,\n # part=parts_,\n # pose=fits_3d['pose'],\n # shape=fits_3d['shape'],\n # has_smpl=fits_3d['has_smpl'],\n # S=Ss_,\n # openpose=openposes_)\n # else:\n # np.savez(out_file, imgname=imgnames_,\n # center=centers_,\n # scale=scales_,\n # part=parts_,\n # S=Ss_,\n # openpose=openposes_)\n \n \ndef test_data(dataset_path, out_path, joints_idx, scaleFactor):\n\n joints17_idx = [14, 11, 12, 13, 8, 9, 10, 15, 1, 16, 0, 5, 6, 7, 2, 3, 4]\n\n imgnames_, scales_, centers_, parts_, Ss_ = [], [], [], [], []\n\n # training data\n user_list = range(1,7)\n\n for user_i in user_list:\n seq_path = os.path.join(dataset_path,\n 'mpi_inf_3dhp_test_set',\n 'TS' + str(user_i))\n # mat file with annotations\n annot_file = os.path.join(seq_path, 'annot_data.mat')\n mat_as_h5 = h5py.File(annot_file, 'r')\n annot2 = np.array(mat_as_h5['annot2'])\n annot3 = np.array(mat_as_h5['univ_annot3'])\n valid = np.array(mat_as_h5['valid_frame'])\n for frame_i, valid_i in enumerate(valid):\n if valid_i == 0:\n continue\n img_name = os.path.join('mpi_inf_3dhp_test_set',\n 'TS' + str(user_i),\n 'imageSequence',\n 'img_' + str(frame_i+1).zfill(6) + '.jpg')\n\n joints = annot2[frame_i,0,joints17_idx,:]\n S17 = annot3[frame_i,0,joints17_idx,:]/1000\n S17 = S17 - S17[0]\n\n bbox = [min(joints[:,0]), min(joints[:,1]),\n max(joints[:,0]), max(joints[:,1])]\n center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]\n scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200\n\n # check that all joints are visible\n img_file = os.path.join(dataset_path, img_name)\n I = scipy.misc.imread(img_file)\n h, w, _ = I.shape\n x_in = np.logical_and(joints[:, 0] < w, joints[:, 0] >= 0)\n y_in = np.logical_and(joints[:, 1] < h, joints[:, 1] >= 0)\n ok_pts = np.logical_and(x_in, y_in)\n if np.sum(ok_pts) < len(joints_idx):\n continue\n\n part = np.zeros([24,3])\n part[joints_idx] = np.hstack([joints, np.ones([17,1])])\n\n S = np.zeros([24,4])\n S[joints_idx] = np.hstack([S17, np.ones([17,1])])\n\n # store the data\n imgnames_.append(img_name)\n centers_.append(center)\n scales_.append(scale)\n parts_.append(part)\n Ss_.append(S)\n\n # store the data struct\n if not os.path.isdir(out_path):\n os.makedirs(out_path)\n out_file = os.path.join(out_path, 'mpi_inf_3dhp_test.npz')\n np.savez(out_file, imgname=imgnames_,\n center=centers_,\n scale=scales_,\n part=parts_,\n S=Ss_) \n\ndef mpi_inf_3dhp_extract(dataset_path, openpose_path, out_path, mode, extract_img=False, static_fits=None):\n\n scaleFactor = 1.2\n joints_idx = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]\n \n if static_fits is not None:\n fits_3d = os.path.join(static_fits, \n 'mpi-inf-3dhp_mview_fits.npz')\n else:\n fits_3d = None\n \n if mode == 'train':\n train_data(dataset_path, openpose_path, out_path, \n joints_idx, scaleFactor, extract_img=extract_img, fits_3d=fits_3d)\n elif mode == 'test':\n 
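# extract and save the test split; any mode other than 'train' or 'test' falls through silently\n        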
test_data(dataset_path, out_path, joints_idx, scaleFactor)\n", "\"\"\"\nThis file contains the definition of different heterogeneous datasets used for training\n\"\"\"\nimport torch\nimport numpy as np\n\nfrom .base_dataset import BaseDataset\n\nclass MixedDataset(torch.utils.data.Dataset):\n\n # def __init__(self, options, **kwargs):\n # self.dataset_list = ['h36m', 'lsp-orig', 'mpii', 'lspet', 'coco', 'mpi-inf-3dhp']\n # self.dataset_dict = {'h36m': 0, 'lsp-orig': 1, 'mpii': 2, 'lspet': 3, 'coco': 4, 'mpi-inf-3dhp': 5}\n # self.datasets = [BaseDataset(options, ds, **kwargs) for ds in self.dataset_list]\n # total_length = sum([len(ds) for ds in self.datasets])\n # length_itw = sum([len(ds) for ds in self.datasets[1:-1]])\n # self.length = max([len(ds) for ds in self.datasets])\n # \"\"\"\n # Data distribution inside each batch:\n # 30% H36M - 60% ITW - 10% MPI-INF\n # \"\"\"\n # self.partition = [.3, .6*len(self.datasets[1])/length_itw,\n # .6*len(self.datasets[2])/length_itw,\n # .6*len(self.datasets[3])/length_itw,\n # .6*len(self.datasets[4])/length_itw,\n # 0.1]\n # self.partition = np.array(self.partition).cumsum()\n #\n # def __getitem__(self, index):\n # p = np.random.rand()\n # for i in range(6):\n # if p <= self.partition[i]:\n # return self.datasets[i][index % len(self.datasets[i])]\n #\n # def __len__(self):\n # return self.length\n\n #training without H3.6M\n def __init__(self, options, **kwargs):\n self.dataset_list = ['lsp-orig', 'mpii', 'lspet', 'coco']#, 'mpi-inf-3dhp']\n self.dataset_dict = {'lsp-orig': 0, 'mpii': 1, 'lspet': 2, 'coco': 3}#, 'mpi-inf-3dhp': 4}\n self.datasets = [BaseDataset(options, ds, **kwargs) for ds in self.dataset_list]\n total_length = sum([len(ds) for ds in self.datasets])\n length_itw = sum([len(ds) for ds in self.datasets[0:-1]])\n self.length = max([len(ds) for ds in self.datasets])\n \"\"\"\n Data distribution inside each batch:\n 90% ITW - 10% MPI-INF\n \"\"\"\n self.partition = [.9*len(self.datasets[0])/length_itw,\n .9*len(self.datasets[1])/length_itw,\n .9*len(self.datasets[2])/length_itw,\n .9*len(self.datasets[3])/length_itw,\n 0.1]\n self.partition = np.array(self.partition).cumsum()\n\n def __getitem__(self, index):\n p = np.random.rand()\n for i in range(len( self.dataset_list)):\n if p <= self.partition[i]:\n return self.datasets[i][index % len(self.datasets[i])]\n\n def __len__(self):\n return self.length" ]
[ [ "numpy.array", "numpy.reshape", "numpy.zeros", "numpy.sum", "scipy.io.loadmat", "numpy.ones", "numpy.logical_and", "numpy.savez" ], [ "numpy.array", "numpy.random.rand" ] ]
clararehmann/epistasis
[ "431d30504d1ef3f5a5ae0c8ee56844de2d6ababb" ]
[ "epistasis/simulate/linear.py" ]
[ "__description__ = \\\n\"\"\"\nClass for simulating genotype phenotype map with linear epistasis model.\n\"\"\"\n__author__ = \"Zach Sailer\"\n\nfrom epistasis.simulate.base import BaseSimulation\n\nimport numpy as np\n\nclass LinearSimulation(BaseSimulation):\n \"\"\"\n Construct an genotype-phenotype from linear building blocks and\n epistatic coefficients.\n\n Example\n -------\n Phenotype = b0 + b1 + b2 + b3 + b12 + b13 + b13 + b123\n\n Parameters\n ---------\n wildtype : str\n Wildtype genotype\n mutations : dict\n Mapping for each site to its alphabet\n order : int\n Order of epistasis in simulated genotype-phenotype map\n betas : array-like\n values of epistatic coefficients (must be positive for this function\n to work. Log is taken)\n model_type : str\n Use a local or global (i.e. Walsh space) epistasis model to construct\n phenotypes\n \"\"\"\n\n def __init__(self, wildtype, mutations,\n model_type='global',\n **kwargs):\n\n # Construct epistasis mapping objects (empty)\n super(LinearSimulation, self).__init__(\n wildtype,\n mutations,\n **kwargs)\n self.model_type = model_type\n\n def build(self):\n \"\"\"\n Build the phenotype map from epistatic interactions.\n \"\"\"\n X = self.add_X()\n\n # Get model type:\n self.data.loc[:,'phenotype'] = np.dot(X, self.epistasis.values)\n" ]
[ [ "numpy.dot" ] ]
psyeon990/osmnx
[ "aaa5c3edf4a563c3e8a2c4067d15e7292ce307cc" ]
[ "osmnx/speed.py" ]
[ "\"\"\"Calculate graph edge speeds and travel times.\"\"\"\n\nimport re\n\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\n\nfrom . import utils_graph\n\n\ndef add_edge_speeds(G, hwy_speeds=None, fallback=None, precision=1):\n \"\"\"\n Add edge speeds (km per hour) to graph as new `speed_kph` edge attributes.\n\n Imputes free-flow travel speeds for all edges based on mean `maxspeed`\n value of edges, per highway type. For highway types in graph that have no\n `maxspeed` value on any edge, function assigns the mean of all `maxspeed`\n values in graph.\n\n This mean-imputation can obviously be imprecise, and the caller can\n override it by passing in `hwy_speeds` and/or `fallback` arguments that\n correspond to local speed limit standards.\n\n If edge `maxspeed` attribute has \"mph\" in it, value will automatically be\n converted from miles per hour to km per hour. Any other speed units should\n be manually converted to km per hour prior to running this function,\n otherwise there could be unexpected results. If \"mph\" does not appear in\n the edge's maxspeed attribute string, then function assumes kph, per OSM\n guidelines: https://wiki.openstreetmap.org/wiki/Map_Features/Units\n\n Parameters\n ----------\n G : networkx.MultiDiGraph\n input graph\n hwy_speeds : dict\n dict keys = OSM highway types and values = typical speeds (km per\n hour) to assign to edges of that highway type for any edges missing\n speed data. Any edges with highway type not in `hwy_speeds` will be\n assigned the mean preexisting speed value of all edges of that highway\n type.\n fallback : numeric\n default speed value (km per hour) to assign to edges whose highway\n type did not appear in `hwy_speeds` and had no preexisting speed\n values on any edge\n precision : int\n decimal precision to round speed_kph\n\n Returns\n -------\n G : networkx.MultiDiGraph\n graph with speed_kph attributes on all edges\n \"\"\"\n if fallback is None:\n fallback = np.nan\n\n edges = utils_graph.graph_to_gdfs(G, nodes=False, fill_edge_geometry=False)\n\n # collapse any highway lists (can happen during graph simplification)\n # into string values simply by keeping just the first element of the list\n edges[\"highway\"] = edges[\"highway\"].map(lambda x: x[0] if isinstance(x, list) else x)\n\n if \"maxspeed\" in edges.columns:\n # collapse any maxspeed lists (can happen during graph simplification)\n # into a single value\n edges[\"maxspeed\"] = edges[\"maxspeed\"].map(_collapse_multiple_maxspeed_values)\n\n # create speed_kph by cleaning maxspeed strings and converting mph to\n # kph if necessary\n edges[\"speed_kph\"] = edges[\"maxspeed\"].astype(str).map(_clean_maxspeed).astype(float)\n else:\n # if no edges in graph had a maxspeed attribute\n edges[\"speed_kph\"] = None\n\n # if user provided hwy_speeds, use them as default values, otherwise\n # initialize an empty series to populate with values\n if hwy_speeds is None:\n hwy_speed_avg = pd.Series(dtype=float)\n else:\n hwy_speed_avg = pd.Series(hwy_speeds).dropna()\n\n # for each highway type that caller did not provide in hwy_speeds, impute\n # speed of type by taking the mean of the preexisting speed values of that\n # highway type\n for hwy, group in edges.groupby(\"highway\"):\n if hwy not in hwy_speed_avg:\n hwy_speed_avg.loc[hwy] = group[\"speed_kph\"].mean()\n\n # if any highway types had no preexisting speed values, impute their speed\n # with fallback value provided by caller. 
if fallback=np.nan, impute speed\n # as the mean speed of all highway types that did have preexisting values\n hwy_speed_avg = hwy_speed_avg.fillna(fallback).fillna(hwy_speed_avg.mean())\n\n # for each edge missing speed data, assign it the imputed value for its\n # highway type\n speed_kph = (\n edges[[\"highway\", \"speed_kph\"]].set_index(\"highway\").iloc[:, 0].fillna(hwy_speed_avg)\n )\n\n # all speeds will be null if edges had no preexisting maxspeed data and\n # caller did not pass in hwy_speeds or fallback arguments\n if pd.isnull(speed_kph).all():\n raise ValueError(\n (\n \"this graph's edges have no preexisting `maxspeed` \"\n \"attribute values so you must pass `hwy_speeds` or \"\n \"`fallback` arguments.\"\n )\n )\n\n # add speed kph attribute to graph edges\n edges[\"speed_kph\"] = speed_kph.round(precision).values\n edge_speed_kph = edges[[\"u\", \"v\", \"key\", \"speed_kph\"]].set_index([\"u\", \"v\", \"key\"]).iloc[:, 0]\n nx.set_edge_attributes(G, values=edge_speed_kph, name=\"speed_kph\")\n\n return G\n\n\ndef add_edge_travel_times(G, precision=1):\n \"\"\"\n Add edge travel time (seconds) to graph as new `travel_time` edge attributes.\n\n Calculates free-flow travel time along each edge, based on `length` and\n `speed_kph` attributes. Note: run `add_edge_speeds` first to generate the\n `speed_kph` attribute. All edges must have `length` and `speed_kph`\n attributes and all their values must be non-null.\n\n Parameters\n ----------\n G : networkx.MultiDiGraph\n input graph\n precision : int\n decimal precision to round travel_time\n\n Returns\n -------\n G : networkx.MultiDiGraph\n graph with travel_time attributes on all edges\n \"\"\"\n edges = utils_graph.graph_to_gdfs(G, nodes=False)\n\n # verify edge length and speed_kph attributes exist and contain no nulls\n if not (\"length\" in edges.columns and \"speed_kph\" in edges.columns):\n raise KeyError(\"all edges must have `length` and `speed_kph` attributes.\")\n else:\n if pd.isnull(edges[\"length\"]).any() or pd.isnull(edges[\"speed_kph\"]).any():\n raise ValueError(\"edge `length` and `speed_kph` values must be non-null.\")\n\n # convert edge length from meters to km, and speed from km per hour to km per second\n distance_km = edges[\"length\"] / 1000\n speed_km_sec = edges[\"speed_kph\"] / (60 * 60)\n\n # calculate edge travel time in seconds\n travel_time = distance_km / speed_km_sec\n\n # add travel time attribute to graph edges\n edges[\"travel_time\"] = travel_time.round(precision).values\n edge_times = edges[[\"u\", \"v\", \"key\", \"travel_time\"]].set_index([\"u\", \"v\", \"key\"]).iloc[:, 0]\n nx.set_edge_attributes(G, values=edge_times, name=\"travel_time\")\n\n return G\n\n\ndef _clean_maxspeed(value, convert_mph=True):\n \"\"\"\n Clean a maxspeed string and convert mph to kph if necessary.\n\n Parameters\n ----------\n value : string\n an OSM way maxspeed value\n convert_mph : bool\n if True, convert mph to kph\n\n Returns\n -------\n value_clean : float\n \"\"\"\n MPH_TO_KPH = 1.60934\n pattern = re.compile(r\"[^\\d\\.,;]\")\n\n try:\n # strip out everything but numbers, periods, commas, semicolons\n value_clean = float(re.sub(pattern, \"\", value).replace(\",\", \".\"))\n if convert_mph and \"mph\" in value.lower():\n value_clean = value_clean * MPH_TO_KPH\n return value_clean\n\n except ValueError:\n return None\n\n\ndef _collapse_multiple_maxspeed_values(value):\n \"\"\"\n Collapse a list of maxspeed values into its mean value.\n\n Parameters\n ----------\n value : list or string\n an OSM way maxspeed value, or 
a list of them\n\n Returns\n -------\n mean_value : int\n an integer representation of the mean value in the list, converted\n to kph if original value was in mph.\n \"\"\"\n # if this isn't a list, just return it right back to the caller\n if not isinstance(value, list):\n return value\n\n else:\n try:\n # clean each value in list and convert to kph if it is mph then\n # return mean value\n values = [_clean_maxspeed(x) for x in value]\n mean_value = int(pd.Series(values).dropna().mean())\n return mean_value\n except ValueError:\n return None\n" ]
[ [ "pandas.isnull", "pandas.Series" ] ]
fpwg/pyFAI
[ "cebf935b79333c9289a78b74ee99d550ebcdc549" ]
[ "pyFAI/units.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Project: Azimuthal integration\n# https://github.com/silx-kit/pyFAI\n#\n# Copyright (C) 2012-2018 European Synchrotron Radiation Facility, Grenoble, France\n#\n# Principal author: Picca Frédéric-Emmanuel <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"Manages the different units\n\nNota for developers: this module is used a singleton to store all units in a\nunique manner. This explains the number of top-level variables on the one\nhand and their CAPITALIZATION on the other.\n\"\"\"\n\n\nfrom __future__ import division, print_function\n\n\n__authors__ = [\"Picca Frédéric-Emmanuel\", \"Jérôme Kieffer\"]\n__contact__ = \"[email protected]\"\n__license__ = \"MIT\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n__date__ = \"13/12/2018\"\n__status__ = \"production\"\n__docformat__ = 'restructuredtext'\n\nimport logging\nlogger = logging.getLogger(__name__)\nimport numpy\nfrom numpy import pi\nimport scipy.constants\n\nfrom .third_party import six\n\n################################################################################\n# A few physical constants\n################################################################################\n\nhc = CONST_hc = scipy.constants.c * scipy.constants.h / scipy.constants.e * 1e7\n\"\"\"Product of h the Planck constant, and c the speed of light in vacuum\nin Angstrom.KeV. 
It is approximativly equal to 12.398419292004204.\"\"\"\n\nCONST_q = 1.602176565e-19\n\"\"\"One electron-volt is equal to 1.602176565⋅10-19 joules\"\"\"\n\n\nclass Unit(object):\n \"\"\"Represents a unit.\n\n It has at least a name and a scale (in SI-unit)\n \"\"\"\n def __init__(self, name, scale=1, label=None, equation=None,\n center=None, corner=None, delta=None, short_name=None, unit_symbol=None):\n \"\"\"Constructor of a unit.\n\n :param str name: name of the unit\n :param float scale: scale of th unit to go to SI\n :param string label: label for nice representation in matplotlib,\n can use latex representation\n :param func equation: equation to calculate the value from coordinates\n (x,y,z) in detector space.\n Parameters of the function are x, y, z, lambda\n :param str center: name of the fast-path function\n :param str unit_symbol: Symbol used to display values of this unit\n \"\"\"\n self.name = name\n self.scale = scale\n self.label = label if label is not None else name\n self.corner = corner\n self.center = center\n self.delta = delta\n self.equation = equation\n self.short_name = short_name\n self.unit_symbol = unit_symbol\n\n def get(self, key):\n \"\"\"Mimic the dictionary interface\n\n :param (str) key: key wanted\n :return: self.key\n \"\"\"\n res = None\n if key in dir(self):\n res = self.__getattribute__(key)\n return res\n\n def __repr__(self):\n return self.name\n\n # ensures hashability\n def __hash__(self):\n return self.name.__hash__()\n\n\nRADIAL_UNITS = {}\n\n\ndef register_radial_unit(name, scale=1, label=None, equation=None,\n center=None, corner=None, delta=None, short_name=None, unit_symbol=None):\n RADIAL_UNITS[name] = Unit(name, scale, label, equation, center, corner, delta, short_name, unit_symbol)\n\n\ndef eq_r(x, y, z=None, wavelength=None):\n \"\"\"Calculates the radius\n\n :param x: horizontal position, towards the center of the ring, from sample position\n :param y: Vertical position, to the roof, from sample position\n :param z: distance from sample along the beam\n :param wavelength: in meter\n \"\"\"\n return numpy.sqrt(x * x + y * y)\n\n\ndef eq_2th(x, y, z, wavelength=None):\n \"\"\"Calculates the 2theta aperture of the cone\n\n :param x: horizontal position, towards the center of the ring, from sample position\n :param y: Vertical position, to the roof, from sample position\n :param z: distance from sample along the beam\n :param wavelength: in meter\n \"\"\"\n return numpy.arctan2(eq_r(x, y), z)\n\n\ndef eq_q(x, y, z, wavelength):\n \"\"\"Calculates the modulus of the scattering vector\n\n :param x: horizontal position, towards the center of the ring, from sample position\n :param y: Vertical position, to the roof, from sample position\n :param z: distance from sample along the beam\n :param wavelength: in meter\n \"\"\"\n return 4.0e-9 * numpy.pi * numpy.sin(eq_2th(x, y, z) / 2.0) / wavelength\n\n\nregister_radial_unit(\"r_mm\",\n center=\"rArray\",\n delta=\"deltaR\",\n scale=1000.0,\n label=r\"Radius $r$ ($mm$)\",\n equation=eq_r,\n short_name=\"r\",\n unit_symbol=\"mm\")\n\nregister_radial_unit(\"r_m\",\n center=\"rArray\",\n delta=\"deltaR\",\n scale=1.0,\n label=r\"Radius $r$ ($m$)\",\n equation=eq_r,\n short_name=\"r\",\n unit_symbol=\"m\")\n\nregister_radial_unit(\"2th_deg\", scale=180.0 / numpy.pi,\n center=\"twoThetaArray\",\n delta=\"delta2Theta\",\n label=r\"Scattering angle $2\\theta$ ($^{o}$)\",\n equation=eq_2th,\n short_name=r\"2\\theta\",\n unit_symbol=\"deg\")\n\nregister_radial_unit(\"2th_rad\",\n center=\"twoThetaArray\",\n 
delta=\"delta2Theta\",\n scale=1.0,\n label=r\"Scattering angle $2\\theta$ ($rad$)\",\n equation=eq_2th,\n short_name=r\"2\\theta\",\n unit_symbol=\"rad\")\n\nregister_radial_unit(\"q_nm^-1\",\n center=\"qArray\",\n delta=\"deltaQ\",\n scale=1.0,\n label=r\"Scattering vector $q$ ($nm^{-1}$)\",\n equation=eq_q,\n short_name=\"q\",\n unit_symbol=\"nm^{-1}\")\n\nregister_radial_unit(\"q_A^-1\",\n center=\"qArray\",\n delta=\"deltaQ\",\n scale=0.1,\n label=r\"Scattering vector $q$ ($\\AA^{-1}$)\",\n equation=eq_q,\n short_name=\"q\",\n unit_symbol=r\"\\AA^{-1}\")\n\nregister_radial_unit(\"d*2_A^-2\",\n center=\"rd2Array\",\n delta=\"deltaRd2\",\n scale=0.01,\n label=r\"Reciprocal spacing squared $d^{*2}$ ($\\AA^{-2}$)\",\n equation=lambda x, y, z, wavelength: (eq_q(x, y, z, wavelength) / (2.0 * numpy.pi)) ** 2,\n short_name=\"d^{*2}\",\n unit_symbol=r\"\\AA^{-2}\")\n\nregister_radial_unit(\"d*2_nm^-2\",\n center=\"rd2Array\",\n delta=\"deltaRd2\",\n scale=1.0,\n label=r\"Reciprocal spacing squared $d^{*2}$ ($nm^{-2}$)\",\n equation=lambda x, y, z, wavelength: (eq_q(x, y, z, wavelength) / (2.0 * numpy.pi)) ** 2,\n short_name=\"d^{*2}\",\n unit_symbol=\"nm^{-2}\")\n\nregister_radial_unit(\"log10(q.m)_None\",\n scale=1.0,\n label=r\"log10($q$.m)\",\n equation=lambda x, y, z, wavelength: numpy.log10(1e9 * eq_q(x, y, z, wavelength)),\n short_name=\"log10(q.m)\",\n unit_symbol=\"?\")\n\nregister_radial_unit(\"log(q.nm)_None\",\n scale=1.0,\n label=r\"log($q$.nm)\",\n equation=lambda x, y, z, wavelength: numpy.log(eq_q(x, y, z, wavelength)),\n short_name=\"log(q.nm)\",\n unit_symbol=\"?\")\n\nregister_radial_unit(\"log(1+q.nm)_None\",\n scale=1.0,\n label=r\"log(1+$q$.nm)\",\n equation=lambda x, y, z, wavelength: numpy.log1p(eq_q(x, y, z, wavelength)),\n short_name=\"log(1+q.nm)\",\n unit_symbol=\"?\")\n\nregister_radial_unit(\"log(1+q.A)_None\",\n scale=1.0,\n label=r\"log(1+$q$.\\AA)\",\n equation=lambda x, y, z, wavelength: numpy.log1p(0.1 * eq_q(x, y, z, wavelength)),\n short_name=r\"log(1+q.\\AA)\",\n unit_symbol=\"?\")\n\nregister_radial_unit(\"arcsinh(q.nm)_None\",\n scale=1.0,\n label=r\"arcsinh($q$.nm)\",\n equation=lambda x, y, z, wavelength: numpy.arcsinh(eq_q(x, y, z, wavelength)),\n short_name=\"arcsinh(q.nm)\",\n unit_symbol=\"?\")\n\nregister_radial_unit(\"arcsinh(q.A)_None\",\n scale=1.0,\n label=r\"arcsinh($q$.\\AA)\",\n equation=lambda x, y, z, wavelength: numpy.arcsinh(0.1 * eq_q(x, y, z, wavelength)),\n short_name=r\"arcsinh(q.\\AA)\",\n unit_symbol=\"?\")\n\n\nLENGTH_UNITS = {\"m\": Unit(\"m\", scale=1., label=r\"length $l$ ($m$)\"),\n \"mm\": Unit(\"mm\", scale=1e3, label=r\"length $l$ ($mm$)\"),\n \"cm\": Unit(\"cm\", scale=1e2, label=r\"length $l$ ($cm$)\"),\n \"micron\": Unit(\"micron\", scale=1e6, label=r\"length $l$ ($\\mu m$)\"),\n \"nm\": Unit(\"nm\", scale=1e9, label=r\"length $l$ ($nm$)\"),\n \"A\": Unit(\"A\", scale=1e10, label=r\"length $l$ ($\\AA$)\"),\n }\n\n\nANGLE_UNITS = {\"deg\": Unit(\"deg\", scale=180.0 / pi, label=r\"angle $\\alpha$ ($^{o}$)\"),\n \"rad\": Unit(\"rad\", scale=1.0, label=r\"angle $\\alpha$ ($rad$)\"),\n }\n\nAZIMUTHAL_UNITS = {\"chi_rad\": Unit(\"chi_rad\", scale=1.0, label=r\"Azimuthal angle $\\chi$ ($rad$)\"),\n \"chi_deg\": Unit(\"chi_deg\", scale=180 / pi, label=r\"Azimuthal angle $\\chi$ ($^{o}$)\")}\n\n\ndef to_unit(obj, type_=None):\n if type_ is None:\n type_ = RADIAL_UNITS\n rad_unit = None\n if isinstance(obj, six.string_types):\n rad_unit = type_.get(obj)\n elif isinstance(obj, Unit):\n rad_unit = obj\n if rad_unit is None:\n 
logger.error(\"Unable to recognize this type unit '%s' of type %s. \"\n \"Valid units are %s\" % (obj, type(obj), \", \".join([i for i in type_])))\n return rad_unit\n\n\n# To ensure the compatibility with former code:\nQ = Q_NM = RADIAL_UNITS[\"q_nm^-1\"]\nQ_A = RADIAL_UNITS[\"q_A^-1\"]\nTTH_RAD = RADIAL_UNITS[\"2th_rad\"]\nTTH_DEG = TTH = RADIAL_UNITS[\"2th_deg\"]\nR = R_MM = RADIAL_UNITS[\"r_mm\"]\nR_M = RADIAL_UNITS[\"r_m\"]\nRecD2_NM = RADIAL_UNITS[\"d*2_nm^-2\"]\nl_m = LENGTH_UNITS[\"m\"]\nA_rad = ANGLE_UNITS[\"rad\"]\nCHI_DEG = AZIMUTHAL_UNITS[\"chi_deg\"]\nCHI_RAD = AZIMUTHAL_UNITS[\"chi_rad\"]\n" ]
[ [ "numpy.sqrt" ] ]
kasper93/Halide
[ "35a78df088eddeffa56a62cf552c00c3d01f2db4" ]
[ "python_bindings/apps/interpolate.py" ]
[ "\"\"\"\nFast image interpolation using a pyramid.\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport time, sys\nimport halide as hl\n\nfrom datetime import datetime\nfrom imageio import imread, imsave\nimport numpy as np\nimport os.path\n\nint_t = hl.Int(32)\nfloat_t = hl.Float(32)\n\ndef get_interpolate(input, levels):\n \"\"\"\n Build function, schedules it, and invokes jit compiler\n :return: halide.hl.Func\n \"\"\"\n\n # THE ALGORITHM\n\n downsampled = [hl.Func('downsampled%d'%i) for i in range(levels)]\n downx = [hl.Func('downx%d'%l) for l in range(levels)]\n interpolated = [hl.Func('interpolated%d'%i) for i in range(levels)]\n# level_widths = [hl.Param(int_t,'level_widths%d'%i) for i in range(levels)]\n# level_heights = [hl.Param(int_t,'level_heights%d'%i) for i in range(levels)]\n upsampled = [hl.Func('upsampled%d'%l) for l in range(levels)]\n upsampledx = [hl.Func('upsampledx%d'%l) for l in range(levels)]\n x = hl.Var('x')\n y = hl.Var('y')\n c = hl.Var('c')\n\n clamped = hl.Func('clamped')\n clamped[x, y, c] = input[hl.clamp(x, 0, input.width()-1), hl.clamp(y, 0, input.height()-1), c]\n\n # This triggers a bug in llvm 3.3 (3.2 and trunk are fine), so we\n # rewrite it in a way that doesn't trigger the bug. The rewritten\n # form assumes the input alpha is zero or one.\n # downsampled[0][x, y, c] = hl.select(c < 3, clamped[x, y, c] * clamped[x, y, 3], clamped[x, y, 3])\n downsampled[0][x,y,c] = clamped[x, y, c] * clamped[x, y, 3]\n\n for l in range(1, levels):\n prev = hl.Func()\n prev = downsampled[l-1]\n\n if l == 4:\n # Also add a boundary condition at a middle pyramid level\n # to prevent the footprint of the downsamplings to extend\n # too far off the base image. Otherwise we look 512\n # pixels off each edge.\n w = input.width()/(1 << l)\n h = input.height()/(1 << l)\n prev = hl.lambda_func(x, y, c, prev[hl.clamp(x, 0, w), hl.clamp(y, 0, h), c])\n\n downx[l][x,y,c] = (prev[x*2-1,y,c] + 2.0 * prev[x*2,y,c] + prev[x*2+1,y,c]) * 0.25\n downsampled[l][x,y,c] = (downx[l][x,y*2-1,c] + 2.0 * downx[l][x,y*2,c] + downx[l][x,y*2+1,c]) * 0.25\n\n\n interpolated[levels-1][x,y,c] = downsampled[levels-1][x,y,c]\n for l in range(levels-1)[::-1]:\n upsampledx[l][x,y,c] = (interpolated[l+1][x/2, y, c] + interpolated[l+1][(x+1)/2, y, c]) / 2.0\n upsampled[l][x,y,c] = (upsampledx[l][x, y/2, c] + upsampledx[l][x, (y+1)/2, c]) / 2.0\n interpolated[l][x,y,c] = downsampled[l][x,y,c] + (1.0 - downsampled[l][x,y,3]) * upsampled[l][x,y,c]\n\n normalize = hl.Func('normalize')\n normalize[x,y,c] = interpolated[0][x, y, c] / interpolated[0][x, y, 3]\n\n final = hl.Func('final')\n final[x,y,c] = normalize[x,y,c]\n\n print(\"Finished function setup.\")\n\n # THE SCHEDULE\n\n sched = 2\n target = hl.get_target_from_environment()\n if target.has_gpu_feature():\n sched = 4\n else:\n sched = 2\n\n if sched == 0:\n print (\"Flat schedule.\")\n for l in range(levels):\n downsampled[l].compute_root()\n interpolated[l].compute_root()\n\n final.compute_root()\n\n elif sched == 1:\n print(\"Flat schedule with vectorization.\")\n for l in range(levels):\n downsampled[l].compute_root().vectorize(x, 4)\n interpolated[l].compute_root().vectorize(x, 4)\n\n final.compute_root()\n\n elif sched == 2:\n print(\"Flat schedule with parallelization + vectorization\")\n xi, yi = hl.Var('xi'), hl.Var('yi')\n clamped.compute_root().parallel(y).bound(c, 0, 4).reorder(c, x, y).reorder_storage(c, x, y).vectorize(c, 4)\n for l in range(1, levels - 1):\n if l > 0:\n 
downsampled[l].compute_root().parallel(y).reorder(c, x, y).reorder_storage(c, x, y).vectorize(c, 4)\n interpolated[l].compute_root().parallel(y).reorder(c, x, y).reorder_storage(c, x, y).vectorize(c, 4)\n interpolated[l].unroll(x, 2).unroll(y, 2)\n\n final.reorder(c, x, y).bound(c, 0, 3).parallel(y)\n final.tile(x, y, xi, yi, 2, 2).unroll(xi).unroll(yi)\n final.bound(x, 0, input.width())\n final.bound(y, 0, input.height())\n\n elif sched == 3:\n print(\"Flat schedule with vectorization sometimes.\")\n for l in range(levels):\n if l + 4 < levels:\n yo, yi = hl.Var('yo'), hl.Var('yi')\n downsampled[l].compute_root().vectorize(x, 4)\n interpolated[l].compute_root().vectorize(x, 4)\n else:\n downsampled[l].compute_root()\n interpolated[l].compute_root()\n\n final.compute_root()\n\n elif sched == 4:\n print(\"GPU schedule.\")\n\n # Some GPUs don't have enough memory to process the entire\n # image, so we process the image in tiles.\n yo, yi, xo, xi, ci = hl.Var('yo'), hl.Var('yi'), hl.Var('xo'), hl.Var('xi'), hl.Var('ci')\n final.reorder(c, x, y).bound(c, 0, 3).vectorize(x, 4)\n final.tile(x, y, xo, yo, xi, yi, input.width()/4, input.height()/4)\n normalize.compute_at(final, xo).reorder(c, x, y).gpu_tile(x, y, xi, yi, 16, 16, GPU_Default).unroll(c)\n\n # Start from level 1 to save memory - level zero will be computed on demand\n for l in range(1, levels):\n tile_size = 32 >> l\n if tile_size < 1: tile_size = 1\n if tile_size > 16: tile_size = 16\n downsampled[l].compute_root().gpu_tile(x, y, c, xi, yi, ci, tile_size, tile_size, 4, GPU_Default)\n interpolated[l].compute_at(final, xo).gpu_tile(x, y, c, xi, yi, ci, tile_size, tile_size, 4, GPU_Default)\n\n else:\n print(\"No schedule with this number.\")\n exit(1)\n\n # JIT compile the pipeline eagerly, so we don't interfere with timing\n final.compile_jit(target)\n\n return final\n\ndef get_input_data():\n\n image_path = os.path.join(os.path.dirname(__file__), \"../../apps/images/rgba.png\")\n assert os.path.exists(image_path), \"Could not find %s\" % image_path\n rgba_data = imread(image_path)\n #print(\"rgba_data\", type(rgba_data), rgba_data.shape, rgba_data.dtype)\n\n input_data = np.copy(rgba_data, order=\"F\").astype(np.float32) / 255.0\n # input data is in range [0, 1]\n #print(\"input_data\", type(input_data), input_data.shape, input_data.dtype)\n\n return input_data\n\n\ndef main():\n\n input = hl.ImageParam(float_t, 3, \"input\")\n levels = 10\n\n interpolate = get_interpolate(input, levels)\n\n # preparing input and output memory buffers (numpy ndarrays)\n input_data = get_input_data()\n assert input_data.shape[2] == 4\n input_image = hl.Buffer(input_data)\n input.set(input_image)\n\n input_width, input_height = input_data.shape[:2]\n\n t0 = datetime.now()\n output_image = interpolate.realize(input_width, input_height, 3)\n t1 = datetime.now()\n print('Interpolated in %.5f secs' % (t1-t0).total_seconds())\n\n output_data = np.array(output_image, copy = False)\n\n # save results\n input_path = \"interpolate_input.png\"\n output_path = \"interpolate_result.png\"\n imsave(input_path, input_data)\n imsave(output_path, output_data)\n print(\"\\ninterpolation realized on output image.\",\n \"Result saved at\", output_path,\n \"( input data copy at\", input_path, \")\")\n\n print(\"\\nEnd of game. Have a nice day!\")\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "numpy.copy" ] ]
jrclimer/Projects
[ "a9d4395a98a79fb0a700a99168cd358ab7494fdf" ]
[ "Basic_ML/Exploratory_Data_Analysis/exploratory_data_analysis.py" ]
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nForbes2000 = pd.read_csv(\"Forbes2000.csv\", sep=',', usecols=range(0,9))\nForbes2000 = Forbes2000[1:]\n\n#companies by market value and profits\nfig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 8))\nmarketv = axes[0].hist(np.array(Forbes2000['Market Value'].astype(float)), 50, range=[0,100], facecolor='green', alpha=0.5)\naxes[0].set_title('2014 Forbes 2000 Company Market Values')\naxes[0].set_ylabel('# of Companies')\naxes[0].set_xlabel('Market Value in Billion $')\naxes[0].set_xticks(np.arange(0,101,10))\n\nprofits = axes[1].hist(np.array(Forbes2000['Profits'].astype(float)), 50, range=[-5,15], facecolor='green', alpha=0.5)\naxes[1].set_title('2014 Forbes 2000 Company Profits')\naxes[1].set_ylabel('# of Companies')\naxes[1].set_xlabel('Profit in Billion $')\naxes[1].set_xticks(np.arange(-4,15,2))\n\nplt.savefig('f1.png')\nplt.show()\n\n#separate into sectors\nFinancials = Forbes2000[Forbes2000.Sector==\"Financials\"]\nEnergy = Forbes2000[Forbes2000.Sector==\"Energy\"]\nIndustrials = Forbes2000[Forbes2000.Sector==\"Industrials\"]\nIT = Forbes2000[Forbes2000.Sector==\"Information Technology\"]\nConsumerD = Forbes2000[Forbes2000.Sector==\"Consumer Discretionary\"]\nConsumerS = Forbes2000[Forbes2000.Sector==\"Consumer Staples\"]\nHealth = Forbes2000[Forbes2000.Sector==\"Health Care\"]\nUtilities = Forbes2000[Forbes2000.Sector==\"Utilities\"]\nTelecom = Forbes2000[Forbes2000.Sector==\"Telecommunication Services\"]\nMaterials = Forbes2000[Forbes2000.Sector==\"Materials\"]\n\n#companies by sector\nxnames = ['Financials', 'Energy', 'Industrials', 'Information Tech.', 'Cons. Discretionary', 'Cons. Staples', 'Health Care', 'Utilities', 'Telecommunications', 'Materials']\ncolors = ['lightgreen', 'cornflowerblue', 'lightgrey', 'steelblue', 'plum', 'sandybrown', 'tomato', 'silver', 'violet', 'skyblue']\nplt.figure(figsize=(12, 8))\nplt.pie([sector.count()[0] for sector in [Financials, Energy, Industrials, IT, ConsumerD, ConsumerS, Health, Utilities, Telecom, Materials]], labels=xnames, colors=colors, autopct='%1.1f%%', shadow=True, startangle=90)\nplt.axis('equal')\nplt.title(\"Forbes 2000 Companies by Sector\", y=1.08)\nplt.savefig('f2.png')\nplt.show()\n\n#market value and profits by sector\nfig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 10))\n\nmarketv = axes[0].boxplot([np.array(sector['Market Value'].astype(float)) for sector in [Financials, Energy, Industrials, IT, ConsumerD, ConsumerS, Health, Utilities, Telecom, Materials]], showmeans=True)\naxes[0].set_ylabel('Market Value in Billion $')\naxes[0].set_ylim(0, 200)\naxes[0].set_title('2014 Forbes 2000 Market Value by Sector')\naxes[0].set_yticks(np.arange(0,200,10))\naxes[0].set_xticklabels(xnames, rotation=45, fontsize=8, ha=\"right\")\naxes[0].yaxis.grid(True, linestyle='-', color='lightgrey', alpha=0.5)\n\nprofits = axes[1].boxplot([np.array(sector.Profits.astype(float)) for sector in [Financials, Energy, Industrials, IT, ConsumerD, ConsumerS, Health, Utilities, Telecom, Materials]], showmeans=True)\naxes[1].set_ylabel('Profits in Billion $')\naxes[1].set_ylim(-4, 20)\naxes[1].set_title('2014 Forbes 2000 Profits by Sector')\naxes[1].set_yticks(np.arange(-4,20,2))\naxes[1].set_xticklabels(xnames, rotation=45, fontsize=8, ha=\"right\")\naxes[1].yaxis.grid(True, linestyle='-', color='lightgrey', alpha=0.5)\n\nplt.savefig('f3.png')\nplt.show()\n\n#separate by continent\nNA = Forbes2000[Forbes2000.Continent==\"North America\"]\nSA = 
Forbes2000[Forbes2000.Continent==\"South America\"]\nEurope = Forbes2000[Forbes2000.Continent==\"Europe\"]\nAsia = Forbes2000[Forbes2000.Continent==\"Asia\"]\nAustralia = Forbes2000[Forbes2000.Continent==\"Australia\"]\nAfrica = Forbes2000[Forbes2000.Continent==\"Africa\"]\n\n#companies by continent\nxnames = ['North America', 'South America', 'Europe', 'Australia', 'Asia', 'Africa']\ncolors = ['cornflowerblue', 'tomato', 'violet', 'gold', 'palegreen', 'sandybrown']\nplt.figure(figsize=(12, 8))\nplt.pie([continent.count()[0] for continent in [NA, SA, Europe, Australia, Asia, Africa]], labels=xnames, colors=colors, autopct='%1.1f%%', shadow=True, startangle=30)\nplt.axis('equal')\nplt.title(\"Forbes 2000 Companies by Continent\", y=1.08)\nplt.savefig('f4.png')\nplt.show()\n\n#market value and profits by continent\nfig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 10))\n\nmarketv = axes[0].boxplot([np.array(continent['Market Value'].astype(float)) for continent in [NA, SA, Europe, Australia, Asia, Africa]], showmeans=True)\naxes[0].set_ylabel('Market Value in Billion $')\naxes[0].set_ylim(0, 300)\naxes[0].set_title('2014 Forbes 2000 Market Value by Continent')\naxes[0].set_yticks(np.arange(0,300,20))\naxes[0].set_xticklabels(xnames, rotation=45, fontsize=8, ha=\"right\")\naxes[0].yaxis.grid(True, linestyle='-', color='lightgrey', alpha=0.5)\n\nprofits = axes[1].boxplot([np.array(continent.Profits.astype(float)) for continent in [NA, SA, Europe, Australia, Asia, Africa]], showmeans=True)\naxes[1].set_ylabel('Profits in Billion $')\naxes[1].set_ylim(-5, 30)\naxes[1].set_title('2014 Forbes 2000 Profits by Continent')\naxes[1].set_yticks(np.arange(-5,30,5))\naxes[1].set_xticklabels(xnames, rotation=45, fontsize=8, ha=\"right\")\naxes[1].yaxis.grid(True, linestyle='-', color='lightgrey', alpha=0.5)\n\nplt.savefig('f5.png')\nplt.show()\n\n#relationship between profits and market value\nplt.figure(figsize=(12, 8))\nmarketv = np.array(Forbes2000['Market Value'].astype(float))\nprofits = np.array(Forbes2000['Profits'].astype(float))\nplt.scatter(marketv, profits, alpha=0.5)\nplt.title(\"Relationship Between Market Value and Profits\", y=1.08)\nplt.xlabel('Market Value in Billion $')\nplt.ylabel('Profit in Billion $')\nplt.xlim(-20, 500)\nplt.savefig('f6.png')\nplt.show()" ]
[ [ "matplotlib.pyplot.xlim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show", "matplotlib.pyplot.axis" ] ]
SebastianoF/bruker2nif
[ "4fbac970e9125f3e53dd5e31d63b971ad739a6d2" ]
[ "bruker2nifti/_utils.py" ]
[ "import numpy as np\nimport os\nimport nibabel as nib\nimport re\nimport warnings\nfrom os.path import join as jph\n\n\n# --- text-files utils ---\n\n\ndef unique_words_in_string(in_string):\n ulist = []\n [ulist.append(s) for s in in_string.split() if s not in ulist]\n return ulist[0]\n\n\ndef indians_file_parser(s, sh=None):\n \"\"\"\n An here-called indians file is a string obtained from a sequence of rows from a Bruker parameter file\n whose shape needs to be changed, in function of its content and according to an optional parameter sh\n that defines the shape of the output.\n This function transform the indian file in a data structure,\n according to the information that can be parsed in the file:\n A - list of vectors transformed into a list\n B - list of numbers, transformed into a np.ndarray, or single number stored as a float.\n B bis - string of 'inf' repeated n times that will be transformed in a numpy array of 'inf'.\n C - list of strings separated by <>.\n D - everything else becomes a string.\n\n :param s: string indian file\n :param sh: shape related\n :return: parsed indian file of adequate output.\n \"\"\"\n\n s = s.strip() # removes initial and final spaces.\n\n # A\n if (\"(\" in s) and (\")\" in s):\n s = s[1:-1] # removes initial and final ( )\n a = [\"(\" + v + \")\" for v in s.split(\") (\")]\n # B\n elif (\n s.replace(\"-\", \"\").replace(\".\", \"\").replace(\" \", \"\").replace(\"e\", \"\").isdigit()\n ):\n if \" \" in s:\n a = np.array([float(x) for x in s.split()])\n if sh is not None:\n a = a.reshape(sh)\n else:\n a = float(s)\n # B-bis\n elif \"inf\" in s:\n if \"inf\" == unique_words_in_string(s):\n num_occurrences = sum(\"inf\" == word for word in s.split())\n a = [np.inf] * num_occurrences\n else:\n a = s[:]\n # C\n elif (\"<\" in s) and (\">\" in s):\n s = s[1:-1] # removes initial and final < >\n a = [v for v in s.split(\"> <\")]\n # D\n else:\n a = s[:]\n\n # added to work with ParaVision vers 6.0.1:\n if isinstance(a, list):\n if len(a) == 1:\n a = a[0]\n\n return a\n\n\ndef var_name_clean(line_in):\n \"\"\"\n Removes #, $ and PVM_ from line_in, where line in is a string from a Bruker parameter list file.\n :param line_in: input string\n :return: output string cleaned from #, $ and PVM_\n \"\"\"\n line_out = line_in.replace(\"#\", \"\").replace(\"$\", \"\").replace(\"PVM_\", \"\").strip()\n return line_out\n\n\ndef from_dict_to_txt_sorted(dict_input, pfi_output):\n \"\"\"\n Simple auxiliary to save the information contained in a dictionary into a txt file\n at the specified path to file (pfi).\n :param dict_input: input structure dictionary\n :param pfi_output: path to file.\n :return:\n \"\"\"\n sorted_keys = sorted(dict_input.keys())\n\n with open(pfi_output, \"w\") as f:\n f.writelines(\"{0} = {1} \\n\".format(k, dict_input[k]) for k in sorted_keys)\n\n\ndef bruker_read_files(param_file, data_path, sub_scan_num=\"1\"):\n \"\"\"\n Reads parameters files of from Bruker raw data imaging format.\n It parses the files 'acqp', 'method', 'reco', 'visu_pars' and 'subject'.\n Even if only 'visu_pars' is relevant for the conversion to nifti, having a more general parser has turned out\n to be useful in many cases (e.g. 
in PV5.1 to check).\n :param param_file: file parameter, must be a string in the list ['acqp', 'method', 'reco', 'visu_pars', 'subject'].\n :param data_path: path to data.\n :param sub_scan_num: number of the sub-scan folder where usually the 'reco' and 'visu_pars' parameter files\n are stored.\n :return: dict_info dictionary with the parsed information from the input file.\n \"\"\"\n if param_file.lower() == \"reco\":\n if os.path.exists(jph(data_path, \"pdata\", str(sub_scan_num), \"reco\")):\n f = open(jph(data_path, \"pdata\", str(sub_scan_num), \"reco\"), \"r\")\n else:\n print(\n \"File {} does not exist\".format(\n jph(data_path, \"pdata\", str(sub_scan_num), \"reco\")\n )\n )\n return {}\n elif param_file.lower() == \"acqp\":\n if os.path.exists(jph(data_path, \"acqp\")):\n f = open(jph(data_path, \"acqp\"), \"r\")\n else:\n print(\"File {} does not exist\".format(jph(data_path, \"acqp\")))\n return {}\n elif param_file.lower() == \"method\":\n if os.path.exists(jph(data_path, \"method\")):\n f = open(jph(data_path, \"method\"), \"r\")\n else:\n print(\"File {} does not exist\".format(jph(data_path, \"method\")))\n return {}\n elif param_file.lower() == \"visu_pars\":\n if os.path.exists(jph(data_path, \"pdata\", str(sub_scan_num), \"visu_pars\")):\n f = open(jph(data_path, \"pdata\", str(sub_scan_num), \"visu_pars\"), \"r\")\n elif os.path.exists(\n jph(data_path, str(sub_scan_num), \"pdata\", \"1\", \"visu_pars\")\n ):\n f = open(jph(data_path, str(sub_scan_num), \"pdata\", \"1\", \"visu_pars\"), \"r\")\n else:\n print(\n \"File {} does not exist\".format(\n jph(data_path, \"pdata\", str(sub_scan_num), \"visu_pars\")\n )\n )\n return {}\n elif param_file.lower() == \"subject\":\n if os.path.exists(jph(data_path, \"subject\")):\n f = open(jph(data_path, \"subject\"), \"r\")\n else:\n print(\"File {} does not exist\".format(jph(data_path, \"subject\")))\n return {}\n else:\n raise IOError(\n \"param_file input must be the string 'reco', 'acqp', 'method', 'visu_pars' or 'subject'\"\n )\n\n dict_info = {}\n lines = f.readlines()\n\n for line_num in range(len(lines)):\n \"\"\"\n Relevant information are in the lines with '##'.\n For the parameters that have arrays values specified between (), with values in the next line.\n Values in the next line can be parsed in lists or np.ndarray when they contains also characters or numbers.\n \"\"\"\n\n line_in = lines[line_num]\n\n if \"##\" in line_in:\n\n if (\"$\" in line_in) and (\"(\" in line_in) and (\"<\" not in line_in):\n # A:\n splitted_line = line_in.split(\"=\")\n # name of the variable contained in the row, and shape:\n var_name = var_name_clean(splitted_line[0][3:])\n\n done = False\n indian_file = \"\"\n pos = line_num\n sh = splitted_line[1]\n # this is not the shape of the vector but the beginning of a full vector.\n if sh.replace(\" \", \"\").endswith(\",\\n\"):\n sh = sh.replace(\"(\", \"\").replace(\")\", \"\").replace(\"\\n\", \"\").strip()\n indian_file += sh\n sh = None\n # this is not the shape of the vector but a full vector.\n elif sh.replace(\" \", \"\").endswith(\")\\n\") and \".\" in sh:\n sh = sh.replace(\"(\", \"\").replace(\")\", \"\").replace(\"\\n\", \"\").strip()\n indian_file += sh\n sh = None\n # this is finally the shape of the vector that will start in the next line.\n else:\n sh = sh.replace(\"(\", \"\").replace(\")\", \"\").replace(\"\\n\", \"\").strip()\n sh = [int(num) for num in sh.split(\",\")]\n\n while not done:\n\n pos += 1\n # collect the indian file: info related to the same variables that can 
appears on multiple rows.\n line_to_explore = lines[\n pos\n ] # tell seek does not work in the line iterators...\n\n if (\"##\" in line_to_explore) or (\"$$\" in line_to_explore):\n # indian file is over\n done = True\n\n else:\n # we store the rows in the indian file all in the same string.\n indian_file += line_to_explore.replace(\"\\n\", \"\").strip() + \" \"\n\n dict_info[var_name] = indians_file_parser(indian_file, sh)\n\n elif (\"$\" in line_in) and (\"(\" not in line_in):\n # B:\n splitted_line = line_in.split(\"=\")\n var_name = var_name_clean(splitted_line[0][3:])\n indian_file = splitted_line[1]\n\n dict_info[var_name] = indians_file_parser(indian_file)\n\n elif (\"$\" not in line_in) and (\"(\" in line_in):\n # C:\n splitted_line = line_in.split(\"=\")\n var_name = var_name_clean(splitted_line[0][2:])\n\n done = False\n indian_file = splitted_line[1].strip() + \" \"\n pos = line_num\n\n while not done:\n pos += 1\n # collect the indian file: info related to the same variables that can appears on multiple rows.\n line_to_explore = lines[\n pos\n ] # tell seek does not work in the line iterators...\n if (\"##\" in line_to_explore) or (\"$$\" in line_to_explore):\n # indian file is over\n done = True\n else:\n # we store the rows in the indian file all in the same string.\n indian_file += line_to_explore.replace(\"\\n\", \"\").strip() + \" \"\n\n dict_info[var_name] = indians_file_parser(indian_file)\n\n elif (\"$\" not in line_in) and (\"(\" not in line_in):\n # D:\n splitted_line = line_in.split(\"=\")\n var_name = var_name_clean(splitted_line[0])\n indian_file = splitted_line[1].replace(\"=\", \"\").strip()\n dict_info[var_name] = indians_file_parser(indian_file)\n\n else:\n # General case: take it as a simple string.\n splitted_line = line_in.split(\"=\")\n var_name = var_name_clean(splitted_line[0])\n dict_info[var_name] = (\n splitted_line[1]\n .replace(\"(\", \"\")\n .replace(\")\", \"\")\n .replace(\"\\n\", \"\")\n .replace(\"<\", \"\")\n .replace(\">\", \"\")\n .replace(\",\", \" \")\n .strip()\n )\n\n else:\n # line does not contain any 'assignable' variable, so this information is not included in the info.\n pass\n\n return dict_info\n\n\n# --- Slope correction utils ---\n\n\ndef eliminate_consecutive_duplicates(input_list):\n \"\"\"\n Simple funcion to eliminate consecutive duplicates in a list or arrays or in a list of numbers.\n :param input_list: list with possible consecutive duplicates.\n :return: input_list with no consecutive duplicates.\n \"\"\"\n if isinstance(input_list[0], np.ndarray):\n output_list = [input_list[0]]\n for k in input_list[1:]:\n if not list(k) == list(output_list[-1]):\n output_list.append(k)\n return output_list\n else:\n output_list = [input_list[0]]\n for i in range(1, len(input_list)):\n if not input_list[i] == input_list[i - 1]:\n output_list.append(input_list[i])\n return output_list\n\n\ndef data_corrector(\n data, factors, kind=\"slope\", num_initial_dir_to_skip=None, dtype=np.float64\n):\n \"\"\"\n Slope is a float or a vector that needs to be multiplied to the data, to obtain the data as they are acquired.\n To reduce the weight of an image, each slice can be divided by a common float factor, so that at each voxel only the\n integer remaining is stored:\n\n real_value_acquired[slice_j][x] = data_integer_reminder[slice_j][x] * float_slope[slice_j][x]\n\n (where = is an almost equal, where the small loss of accuracy is justified by the huge amount of space saved)\n\n :param data: data as parsed from the data structure.\n :param 
factors: can be the slope or the offset as parsed from the data structure\n :param kind: is a string that can be 'slope' (multiplicative factor) or 'offset' additive factor.\n :param num_initial_dir_to_skip: in some cases (as some DWI) the number of slices in the image is higher than the\n provided slope/offset length. Usually it is because the initial directions have no weighted and the first element\n in the slope/offset can correct them all. If num_initial_direction_to_skip=j the slope/offset correction starts\n after j slices, and the initial j timepoint are trimmed by j.\n :param dtype: [np.float64] output datatype.\n :return: data after the slope/offset correction.\n ---\n NOTE 1: if used in sequence to correct for slope and offset, correct FIRST slope, then OFFSET.\n NOTE 2: when read 'factor' think slope or offset. The two are embeded in the same method to avoid code repetition.\n \"\"\"\n\n if len(data.shape) > 5:\n raise IOError(\n \"4d or lower dimensional images allowed. Input data has shape {} \".format(\n data.shape\n )\n )\n assert kind in (\"slope\", \"offset\")\n\n if hasattr(factors, \"__contains__\"):\n if np.inf in factors:\n warnings.warn(\n \"bruker2nifti - Vector corresponding to {} has some inf values. Can not correct it.\".format(\n kind\n ),\n UserWarning,\n )\n return data\n\n data = data.astype(dtype)\n\n if num_initial_dir_to_skip is not None:\n factors = factors[num_initial_dir_to_skip:]\n data = data[..., num_initial_dir_to_skip:]\n\n # Check compatibility slope and data and if necessarily correct for possible consecutive duplicates\n # (as in some cases, when the size of the slope is larger than any timepoint or spatial point, the problem can\n # be in the fact that there are duplicates in the slope vector. This has been seein only in PV5.1).\n if not (isinstance(factors, int) or isinstance(factors, float)):\n if factors.ndim == 1:\n if (\n not factors.size == data.shape[-1]\n and not factors.size == data.shape[-2]\n ):\n factors = np.array(\n eliminate_consecutive_duplicates(list(factors)), dtype=np.float64\n )\n if (\n not factors.size == data.shape[-1]\n and not factors.size == data.shape[-2]\n ):\n msg = \"Slope shape {0} and data shape {1} appears to be not compatible\".format(\n factors.shape, data.shape\n )\n raise IOError(msg)\n\n if isinstance(factors, int) or isinstance(factors, float):\n # scalar slope/offset times nd array data\n if kind == \"slope\":\n data *= factors\n elif kind == \"offset\":\n data += factors\n\n elif factors.size == 1:\n # scalar slope/offset embedded in a singleton times nd array data\n if kind == \"slope\":\n data *= factors[0]\n else:\n data += factors[0]\n\n elif len(data.shape) == 3 and len(factors.shape) == 1:\n # each slice of the 3d image is multiplied an element of the slope consecutively\n if data.shape[2] == factors.shape[0]:\n for t, fa in enumerate(factors):\n if kind == \"slope\":\n data[..., t] = data[..., t] * fa\n elif kind == \"offset\":\n data[..., t] = data[..., t] + fa\n else:\n raise IOError(\n \"Shape of the 2d image and slope dimensions are not consistent\"\n )\n\n elif (\n len(data.shape) == 4\n and len(factors.shape) == 1\n and factors.shape[0] == data.shape[2]\n ):\n # each slice of the 4d image, taken from the third dim, is multiplied by each element of the slope in sequence.\n if factors.size == data.shape[2]:\n for t in range(data.shape[3]):\n for k in range(factors.size):\n if kind == \"slope\":\n data[..., k, t] = data[..., k, t] * factors[k]\n elif kind == \"offset\":\n data[..., k, t] = 
data[..., k, t] + factors[k]\n        else:\n            raise IOError(\n                \"If you are here, your case cannot be converted. Further investigations required.\"\n            )\n\n    elif (\n        len(data.shape) == 5\n        and len(factors.shape) == 1\n        and factors.shape[0] == data.shape[3]\n    ):\n        # each slice of the 5d image, taken from the fourth dim, is multiplied by each element of the slope in sequence.\n        if factors.size == data.shape[3]:\n            for t in range(data.shape[4]):\n                for k in range(factors.size):\n                    if kind == \"slope\":\n                        data[..., k, t] = data[..., k, t] * factors[k]\n                    elif kind == \"offset\":\n                        data[..., k, t] = data[..., k, t] + factors[k]\n        else:\n            raise IOError(\n                \"If you are here, your case cannot be converted. Further investigations required.\"\n            )\n\n    else:\n        # each slice of the nd image, taken from the last dimension, is multiplied by each element of the slope.\n        if factors.size == data.shape[-1]:\n            for t in range(data.shape[-1]):\n                if kind == \"slope\":\n                    data[..., t] = data[..., t] * factors[t]\n                elif kind == \"offset\":\n                    data[..., t] = data[..., t] + factors[t]\n        else:\n            msg = \"Slope shape {0} and data shape {1} appear not to be compatible\".format(\n                factors.shape, data.shape\n            )\n            raise IOError(msg)\n\n    return data\n\n\n# -- nifti affine matrix utils --\n\n\ndef compute_resolution_from_visu_pars(vc_extent, vc_size, vc_frame_thickness):\n    \"\"\"\n    Resolution parameter is provided as a vector in the 'reco' parameter file. To extract the information from the\n    'visu_pars' only, as some scans can lack the reco file, some computation on its parameters needs to be performed.\n    :param vc_extent: VisuCoreExtent parameter file from 'visu_pars'.\n    :param vc_size: VisuCoreSize parameter file from 'visu_pars'.\n    :param vc_frame_thickness: VisuCoreFrameThickness parameter file from 'visu_pars'.\n    :return: the resolution of the image as a list of 3 elements.\n    \"\"\"\n\n    if len(vc_extent) == len(vc_size):\n        resolution = [e / float(s) for e, s in zip(vc_extent, vc_size)]\n    else:\n        raise IOError\n\n    if isinstance(vc_frame_thickness, np.ndarray) or isinstance(\n        vc_frame_thickness, list\n    ):\n        vc_frame_thickness = vc_frame_thickness[0]\n\n    if len(vc_extent) == 2:\n        resolution += [vc_frame_thickness]\n        return resolution\n    elif len(vc_extent) == 3:\n        return resolution\n    else:\n        raise IOError\n\n\ndef sanity_check_visu_core_subject_position(vc_subject_position):\n    \"\"\"\n    The parameter VisuCoreSubjectPosition can be 'Head_Prone' or 'Head_Supine'. 
Tertium non datur.\n    :param vc_subject_position: VisuCoreSubjectPosition from 'visu_pars'\n    :return: Raise error if VisuCoreSubjectPosition is not 'Head_Prone' or 'Head_Supine'\n    \"\"\"\n    if vc_subject_position not in [\"Head_Prone\", \"Head_Supine\"]:\n        msg = \"Known cases are 'Head_Prone' or 'Head_Supine' for the parameter 'visu_pars.VisuCoreSubjectPosition'.\"\n        raise IOError(msg)\n\n\ndef filter_orientation(visu_parse_orientation):\n    \"\"\"\n    Pre-process the parameter value VisuParseOrientation from the 'visu_pars' parameter file.\n    :param visu_parse_orientation: VisuParseOrientation from the 'visu_pars' parameter file.\n    :return: re-shaped and rounded VisuParseOrientation parameter.\n    \"\"\"\n\n    if not np.prod(visu_parse_orientation.shape) == 9:\n        # Take the first 9 elements:\n        visu_parse_orientation = visu_parse_orientation.flat[:9]\n\n    ans = np.around(visu_parse_orientation.reshape([3, 3], order=\"F\"), decimals=4)\n    return ans\n\n\ndef pivot(v):\n    \"\"\"\n    :param v: vector or list\n    :return: max in absolute value with original sign or max from origin.\n    Corresponds to the main direction for each column of an orientation matrix.\n    \"\"\"\n    return v[list(abs(v)).index(abs(v).max())]\n\n\ndef compute_affine_from_visu_pars(\n    vc_orientation,\n    vc_position,\n    vc_subject_position,\n    resolution,\n    frame_body_as_frame_head=False,\n    keep_same_det=True,\n    consider_subject_position=False,\n):\n    \"\"\"\n    How the affine is computed (to the understanding acquired so far):\n\n    0) resolution, orientation and translation are provided in separate arrays, we combine them together in a\n    standard 4x4 matrix.\n\n    1) We invert the resulting matrix - according to conventions ParaVision (scanner to image frame)\n    and DICOM/Nifti (image to scanner frame).\n\n    2) impose the signs of the first two columns (pivots) to be negative, and the third to be positive.\n    - according to the fact that the provided transformation is DICOM-like (LPS) instead of NIFTI like (RAS)\n    (Left/Right, Anterior/Posterior, Inferior/Superior).\n\n    -------- optional changes ----------\n\n    3) frame_body_as_frame_head: Switching the last 2 columns of the rotational part, no matter the value of\n    VisuCorePosition - According to the fact we are dealing with quadrupeds and not with humans,\n    we need to switch the Anterior-Posterior with the Inferior-Superior direction.\n    Set frame_body_as_frame_head=True to set the biped orientation.\n\n    4) consider_subject_position: This can be 'head_prone' or 'head_supine'.\n    Reason why sometimes this must be considered for a correct\n    orientation and must be considered dis-jointly with frame_body_as_frame_head, is that this parameter is sometimes\n    tuned to voluntarily switch from radiological to neurological coordinate systems.\n    If the subject is Prone and the technician wants to have the coordinates in neurological he/she can consciously\n    set the variable vc_subject_position to 'Head_Supine', even if the subject is not supine.\n\n    5) keep_same_det: Finally, for safety, we can impose the same determinant as the input matrix.\n\n    (If there is any b-vectors list, this is modified accordingly).\n\n    :param vc_orientation: visu core orientation parameter.\n    :param vc_position: visu core position parameter. - corresponds to the translational part of the matrix.\n    :param vc_subject_position: 'Head_Prone' or 'Head_Supine'. If head supine and if consider_subject_position is True\n    it inverts the direction of the axis anterior-posterior. 
- do not confuse subject_position with position (read this\n    last as 'translation').\n    :param resolution: resolution of the image, output of compute_resolution_from_visu_pars in the same module.\n    :param frame_body_as_frame_head: [False] to parametrise the difference between monkeys [True] and rats [False].\n    :param keep_same_det: in case you want the determinant to be the same as the input one. Consider it in particular\n    if frame_body_as_frame_head is set to False, and according to the choice of consider_subject_position.\n    :param consider_subject_position: [False] The reason why sometimes this must be considered for a correct\n    orientation and sometimes must not, is that this parameter is tuned to voluntarily switch from radiological\n    to neurological coordinate systems. If the subject is Prone and the technician wants to have the coordinates\n    in neurological he/she can consciously set the variable vc_subject_position to 'Head_Supine'.\n    :return: final affine (qform) transformation according to the nifti convention\n\n    NOTE: we are assuming that the angles parametrisation is the same for the input and the output.\n    We hope this is the case as we do not have any means to confirm that. The fslreorient2std from FSL\n    should be applied afterwards to all the images (after DWI analysis if any).\n    \"\"\"\n\n    sanity_check_visu_core_subject_position(vc_subject_position)\n    vc_orientation = filter_orientation(vc_orientation)\n\n    # 0) integrate resolution with the orientation and add the translation in the projective coordinates:\n\n    result = np.eye(4, dtype=np.float32)\n    result[0:3, 0:3] = vc_orientation\n    result[0:3, 3] = vc_position\n\n    # 1) Invert the orientation matrix, according to nifti convention and Bruker manual.\n    # Round the decimals to avoid precision problems. Check if determinant makes sense.\n    result = np.round(np.linalg.inv(result), decimals=4)\n    result_det = np.linalg.det(result)\n    if result_det == 0:\n        raise IOError(\"Orientation determinant is 0. 
Cannot grasp this dataset.\")\n\n # 2-3) impose pivot first column negative, second column negative, third column positive\n result_orientation = result[:3, :3]\n\n result_orientation = result_orientation.dot(\n np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0]])\n )\n if frame_body_as_frame_head: # from SAR to ASL\n result_orientation = result_orientation.dot(\n np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])\n )\n\n if pivot(result_orientation[:, 0]) > 0:\n result_orientation[:, 0] = -1 * result_orientation[:, 0]\n if pivot(result_orientation[:, 1]) > 0:\n result_orientation[:, 1] = -1 * result_orientation[:, 1]\n if pivot(result_orientation[:, 2]) < 0:\n result_orientation[:, 2] = -1 * result_orientation[:, 2]\n\n result_orientation = result_orientation.dot(np.diag(resolution))\n\n result[:3, :3] = result_orientation\n\n # 4) - optional\n if consider_subject_position:\n if vc_subject_position == \"Head_Prone\":\n result[1, :] = -1 * result[1, :]\n # 5) - optional\n if keep_same_det:\n if (np.linalg.det(result) < 0 < result_det) or (\n np.linalg.det(result) > 0 > result_det\n ):\n result[0, :3] = -1 * result[0, :3]\n\n return result\n\n\n# --- b-vectors utils ---\n\n\ndef obtain_b_vectors_orient_matrix(\n vc_orientation,\n vc_subject_position,\n frame_body_as_frame_head=False,\n keep_same_det=True,\n consider_subject_position=False,\n):\n\n \"\"\"\n See _utils.compute_affine_from_visu_pars help for the same input parameters.\n :param vc_orientation: VisuCoreOrientation parameter file\n :param vc_subject_position: VisuCoreSubjectPosition parameter file\n :param frame_body_as_frame_head:\n :param keep_same_det:\n :param consider_subject_position:\n :return:\n \"\"\"\n resolution = np.array([1, 1, 1])\n translation = np.array([0, 0, 0])\n\n aff = compute_affine_from_visu_pars(\n vc_orientation,\n translation,\n vc_subject_position,\n resolution,\n frame_body_as_frame_head=frame_body_as_frame_head,\n keep_same_det=keep_same_det,\n consider_subject_position=consider_subject_position,\n )\n\n return np.copy(aff[:3, :3])\n\n\ndef normalise_b_vect(b_vect, remove_nan=True):\n \"\"\"\n Normalisation of the b_vector matrix (dim : num b-vectors x 3)\n :param b_vect: the b_vector matrix (dim : num b-vectors x 3)\n :param remove_nan: remove nan if appears in the b-vector matrix, applying np.nan_to_num.\n :return: normalised b-vectors.\n \"\"\"\n\n b_vect_normalised = np.zeros_like(b_vect)\n norms = np.linalg.norm(b_vect, axis=1)\n\n for r in range(b_vect.shape[0]):\n if norms[r] < 10e-5:\n b_vect_normalised[r, :] = np.nan\n else:\n b_vect_normalised[r, :] = (1 / float(norms[r])) * b_vect[r, :]\n\n if remove_nan:\n b_vect_normalised = np.nan_to_num(b_vect_normalised)\n\n return b_vect_normalised\n\n\ndef apply_reorientation_to_b_vects(reorientation_matrix, row_b_vectors_in_rows):\n \"\"\"\n :param reorientation_matrix: a 3x3 matrix representing a reorientation in the 3D space:\n Typically with det = 1 or -1.\n a b c\n d e f\n g h i\n\n :param row_b_vectors_in_rows:\n A nx3 matrix where n row-major b-vectors (v1, v2, v3, v4, ...) 
are aligned in rows\n    v1_1 v1_2 v1_3\n    v2_1 v2_2 v2_3\n    v3_1 v3_2 v3_3\n    v4_1 v4_2 v4_3\n    ...\n\n    :return:\n    An nx3 matrix where each row is the corresponding b-vector multiplied by the same matrix reorientation_matrix:\n    (a.v1_1 + b.v1_2 + c.v1_3,  d.v1_1 + e.v1_2 + f.v1_3,  g.v1_1 + h.v1_2 + i.v1_3)\n    (a.v2_1 + b.v2_2 + c.v2_3,  d.v2_1 + e.v2_2 + f.v2_3,  g.v2_1 + h.v2_2 + i.v2_3)\n    (a.v3_1 + b.v3_2 + c.v3_3,  d.v3_1 + e.v3_2 + f.v3_3,  g.v3_1 + h.v3_2 + i.v3_3)\n    (a.v4_1 + b.v4_2 + c.v4_3,  d.v4_1 + e.v4_2 + f.v4_3,  g.v4_1 + h.v4_2 + i.v4_3)\n    ...\n\n    \"\"\"\n    b_vectors_in_column_reoriented = np.einsum(\n        \"ij, kj -> ki\", reorientation_matrix, row_b_vectors_in_rows\n    )\n    return b_vectors_in_column_reoriented\n\n\n# -- nibabel-related utils --\n\n\ndef set_new_data(image, new_data, new_dtype=None, remove_nan=True):\n    \"\"\"\n    From a nibabel image and a numpy array it creates a new image with\n    the same header of the image and the new_data as its data.\n    :param image: nibabel image\n    :param new_data: numpy array\n    :param new_dtype: [None] optional new data type for the output image.\n    :param remove_nan: [True] set nan values to 0 via np.nan_to_num.\n    :return: nibabel image\n    \"\"\"\n    if remove_nan:\n        new_data = np.nan_to_num(new_data)\n\n    # if nifti1\n    if image.header[\"sizeof_hdr\"] == 348:\n        new_image = nib.Nifti1Image(new_data, image.affine, header=image.header)\n    # if nifti2\n    elif image.header[\"sizeof_hdr\"] == 540:\n        new_image = nib.Nifti2Image(new_data, image.affine, header=image.header)\n    else:\n        raise IOError(\"Input image header problem\")\n\n    # update data type:\n    if new_dtype is None:\n        new_image.set_data_dtype(new_data.dtype)\n    else:\n        new_image.set_data_dtype(new_dtype)\n\n    return new_image\n\n\ndef path_contains_whitespace(*args):\n\n    if re.search(\"\\\\s+\", os.path.join(*args)):\n        return True\n    else:\n        return False\n" ]
[ [ "numpy.zeros_like", "numpy.array", "numpy.linalg.norm", "numpy.nan_to_num", "numpy.copy", "numpy.linalg.det", "numpy.eye", "numpy.prod", "numpy.einsum", "numpy.diag", "numpy.linalg.inv" ] ]
JinhuaSu/xview2_1st_place_solution
[ "6c653f2779d1780d70c2bb94fa41a3430e20ac3b" ]
[ "train_src/train_teacher_building.py" ]
[ "import os\nos.environ[\"MKL_NUM_THREADS\"] = \"1\" \nos.environ[\"NUMEXPR_NUM_THREADS\"] = \"1\" \nos.environ[\"OMP_NUM_THREADS\"] = \"1\" \n\nfrom os import path, makedirs, listdir\nimport sys\nimport numpy as np\nnp.random.seed(1)\nimport random\nrandom.seed(1)\n\nimport torch\nfrom torch import nn\nfrom torch.backends import cudnn\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nimport torch.optim.lr_scheduler as lr_scheduler\n\nfrom apex import amp\n\nfrom util.adamw import AdamW\nfrom util.losses import dice_round, ComboLoss\n\nimport pandas as pd\nfrom tqdm import tqdm\nimport timeit\nimport cv2\n\nfrom zoo.models import SeResNext50_Unet_Loc\n\nfrom imgaug import augmenters as iaa\n\nfrom util.utils import *\n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import accuracy_score\n\nimport gc\n\ncv2.setNumThreads(0)\ncv2.ocl.setUseOpenCL(False)\n\ntrain_dirs = ['../data/train', '../data/tier3']\n\nmodels_folder = '../weights'\n\ninput_shape = (512, 512)\n\n\nall_files = []\nfor d in train_dirs:\n for f in sorted(listdir(path.join(d, 'images'))):\n if '_pre_disaster.png' in f:\n all_files.append(path.join(d, 'images', f))\n\n\nclass TrainData(Dataset):\n def __init__(self, train_idxs):\n super().__init__()\n self.train_idxs = train_idxs\n self.elastic = iaa.ElasticTransformation(alpha=(0.25, 1.2), sigma=0.2)\n\n def __len__(self):\n return len(self.train_idxs)\n\n def __getitem__(self, idx):\n _idx = self.train_idxs[idx]\n\n fn = all_files[_idx]\n\n img = cv2.imread(fn, cv2.IMREAD_COLOR)\n\n if random.random() > 0.985:\n img = cv2.imread(fn.replace('_pre_disaster', '_post_disaster'), cv2.IMREAD_COLOR)\n\n msk0 = cv2.imread(fn.replace('/images/', '/masks/'), cv2.IMREAD_UNCHANGED)\n\n if random.random() > 0.5:\n img = img[::-1, ...]\n msk0 = msk0[::-1, ...]\n\n if random.random() > 0.05:\n rot = random.randrange(4)\n if rot > 0:\n img = np.rot90(img, k=rot)\n msk0 = np.rot90(msk0, k=rot)\n\n if random.random() > 0.9:\n shift_pnt = (random.randint(-320, 320), random.randint(-320, 320))\n img = shift_image(img, shift_pnt)\n msk0 = shift_image(msk0, shift_pnt)\n \n if random.random() > 0.9:\n rot_pnt = (img.shape[0] // 2 + random.randint(-320, 320), img.shape[1] // 2 + random.randint(-320, 320))\n scale = 0.9 + random.random() * 0.2\n angle = random.randint(0, 20) - 10\n if (angle != 0) or (scale != 1):\n img = rotate_image(img, angle, scale, rot_pnt)\n msk0 = rotate_image(msk0, angle, scale, rot_pnt)\n\n crop_size = input_shape[0]\n if random.random() > 0.3:\n crop_size = random.randint(int(input_shape[0] / 1.1), int(input_shape[0] / 0.9))\n\n bst_x0 = random.randint(0, img.shape[1] - crop_size)\n bst_y0 = random.randint(0, img.shape[0] - crop_size)\n bst_sc = -1\n try_cnt = random.randint(1, 5)\n for i in range(try_cnt):\n x0 = random.randint(0, img.shape[1] - crop_size)\n y0 = random.randint(0, img.shape[0] - crop_size)\n _sc = msk0[y0:y0+crop_size, x0:x0+crop_size].sum()\n if _sc > bst_sc:\n bst_sc = _sc\n bst_x0 = x0\n bst_y0 = y0\n x0 = bst_x0\n y0 = bst_y0\n img = img[y0:y0+crop_size, x0:x0+crop_size, :]\n msk0 = msk0[y0:y0+crop_size, x0:x0+crop_size]\n\n if crop_size != input_shape[0]:\n img = cv2.resize(img, input_shape, interpolation=cv2.INTER_LINEAR)\n msk0 = cv2.resize(msk0, input_shape, interpolation=cv2.INTER_LINEAR)\n\n if random.random() > 0.99:\n img = shift_channels(img, random.randint(-5, 5), random.randint(-5, 5), random.randint(-5, 5))\n\n if random.random() > 0.99:\n img = change_hsv(img, 
random.randint(-5, 5), random.randint(-5, 5), random.randint(-5, 5))\n\n if random.random() > 0.99:\n if random.random() > 0.99:\n img = clahe(img)\n elif random.random() > 0.99:\n img = gauss_noise(img)\n elif random.random() > 0.99:\n img = cv2.blur(img, (3, 3))\n elif random.random() > 0.99:\n if random.random() > 0.99:\n img = saturation(img, 0.9 + random.random() * 0.2)\n elif random.random() > 0.99:\n img = brightness(img, 0.9 + random.random() * 0.2)\n elif random.random() > 0.99:\n img = contrast(img, 0.9 + random.random() * 0.2)\n \n if random.random() > 0.999:\n el_det = self.elastic.to_deterministic()\n img = el_det.augment_image(img)\n\n msk = msk0[..., np.newaxis]\n\n msk = (msk > 127) * 1\n\n img = preprocess_inputs(img)\n\n img = torch.from_numpy(img.transpose((2, 0, 1))).float()\n msk = torch.from_numpy(msk.transpose((2, 0, 1))).long()\n\n sample = {'img': img, 'msk': msk, 'fn': fn}\n return sample\n\n\n \nclass ValData(Dataset):\n def __init__(self, image_idxs):\n super().__init__()\n self.image_idxs = image_idxs\n\n def __len__(self):\n return len(self.image_idxs)\n\n def __getitem__(self, idx):\n _idx = self.image_idxs[idx]\n\n fn = all_files[_idx]\n\n img = cv2.imread(fn, cv2.IMREAD_COLOR)\n\n msk0 = cv2.imread(fn.replace('/images/', '/masks/'), cv2.IMREAD_UNCHANGED)\n\n msk = msk0[..., np.newaxis]\n\n msk = (msk > 127) * 1\n\n img = preprocess_inputs(img)\n\n img = torch.from_numpy(img.transpose((2, 0, 1))).float()\n msk = torch.from_numpy(msk.transpose((2, 0, 1))).long()\n\n sample = {'img': img, 'msk': msk, 'fn': fn}\n return sample\n\n\ndef validate(net, data_loader):\n dices0 = []\n\n _thr = 0.5\n\n with torch.no_grad():\n for i, sample in enumerate(tqdm(data_loader)):\n msks = sample[\"msk\"].numpy()\n imgs = sample[\"img\"].cuda(non_blocking=True)\n \n out = model(imgs)\n\n msk_pred = torch.sigmoid(out[:, 0, ...]).cpu().numpy()\n \n for j in range(msks.shape[0]):\n dices0.append(dice(msks[j, 0], msk_pred[j] > _thr))\n\n d0 = np.mean(dices0)\n\n print(\"Val Dice: {}\".format(d0))\n return d0\n\n\ndef evaluate_val(data_val, best_score, model, snapshot_name, current_epoch):\n model = model.eval()\n d = validate(model, data_loader=data_val)\n\n if d > best_score:\n torch.save({\n 'epoch': current_epoch + 1,\n 'state_dict': model.state_dict(),\n 'best_score': d,\n }, path.join(models_folder, snapshot_name + '_best'))\n best_score = d\n\n print(\"score: {}\\tscore_best: {}\".format(d, best_score))\n return best_score\n\n\n\ndef train_epoch(current_epoch, seg_loss, model, optimizer, scheduler, train_data_loader):\n losses = AverageMeter()\n\n dices = AverageMeter()\n\n iterator = tqdm(train_data_loader)\n model.train()\n for i, sample in enumerate(iterator):\n imgs = sample[\"img\"].cuda(non_blocking=True)\n msks = sample[\"msk\"].cuda(non_blocking=True)\n \n out = model(imgs)\n\n loss = seg_loss(out, msks)\n\n with torch.no_grad():\n _probs = torch.sigmoid(out[:, 0, ...])\n dice_sc = 1 - dice_round(_probs, msks[:, 0, ...])\n\n losses.update(loss.item(), imgs.size(0))\n\n dices.update(dice_sc, imgs.size(0))\n\n iterator.set_description(\n \"epoch: {}; lr {:.7f}; Loss {loss.val:.4f} ({loss.avg:.4f}); Dice {dice.val:.4f} ({dice.avg:.4f})\".format(\n current_epoch, scheduler.get_lr()[-1], loss=losses, dice=dices))\n \n optimizer.zero_grad()\n # loss.backward()\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), 1.1)\n optimizer.step()\n\n scheduler.step(current_epoch)\n\n 
print(\"epoch: {}; lr {:.7f}; Loss {loss.avg:.4f}; Dice {dice.avg:.4f}\".format(\n current_epoch, scheduler.get_lr()[-1], loss=losses, dice=dices))\n\n\n\nif __name__ == '__main__':\n t0 = timeit.default_timer()\n\n makedirs(models_folder, exist_ok=True)\n \n seed = int(sys.argv[1]) \n vis_dev = sys.argv[2]\n\n # os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = vis_dev\n\n cudnn.benchmark = True\n\n batch_size = 15\n val_batch_size = 4\n\n snapshot_name = 'res50_loc_{}_0'.format(seed)\n\n train_idxs, val_idxs = train_test_split(np.arange(len(all_files)), test_size=0.1, random_state=seed)\n\n np.random.seed(seed+123)\n random.seed(seed+123)\n\n steps_per_epoch = len(train_idxs) // batch_size\n validation_steps = len(val_idxs) // val_batch_size\n\n print('steps_per_epoch', steps_per_epoch, 'validation_steps', validation_steps)\n\n data_train = TrainData(train_idxs)\n val_train = ValData(val_idxs)\n\n train_data_loader = DataLoader(data_train, batch_size=batch_size, num_workers=5, shuffle=True, pin_memory=False, drop_last=True)\n val_data_loader = DataLoader(val_train, batch_size=val_batch_size, num_workers=5, shuffle=False, pin_memory=False)\n\n model = SeResNext50_Unet_Loc().cuda()\n\n params = model.parameters()\n\n optimizer = AdamW(params, lr=0.00015, weight_decay=1e-6)\n \n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\")\n\n scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[15, 29, 43, 53, 65, 80, 90, 100, 110, 130, 150, 170, 180, 190], gamma=0.5)\n\n seg_loss = ComboLoss({'dice': 1.0, 'focal': 10.0}, per_image=False).cuda()\n\n best_score = 0\n _cnt = -1\n torch.cuda.empty_cache()\n for epoch in range(150):\n train_epoch(epoch, seg_loss, model, optimizer, scheduler, train_data_loader)\n if epoch % 1 == 0:\n _cnt += 1\n torch.cuda.empty_cache()\n best_score = evaluate_val(val_data_loader, best_score, model, snapshot_name, epoch)\n\n elapsed = timeit.default_timer() - t0\n print('Time: {:.3f} min'.format(elapsed / 60))" ]
[ [ "numpy.rot90", "torch.sigmoid", "numpy.random.seed", "torch.no_grad", "numpy.mean", "torch.optim.lr_scheduler.MultiStepLR", "torch.cuda.empty_cache", "torch.utils.data.DataLoader" ] ]
YXZhai97/Area-Coverage-Path-Planning
[ "3be475d703841432744c522b65f6d4d3dde647a3" ]
[ "tests/test.py" ]
[ "# import sample\n#\n#\n# agent1 = sample.AlphaAgent()\n# print(agent1.id)\n# print(sample.AlphaAgent.number_of_agent)\n# print(agent1.number_of_agent)\n#\n# agent2 = sample.AlphaAgent()\n# print(agent2.id)\n# print(sample.AlphaAgent.number_of_agent)\n# print(agent2.number_of_agent)\n#\n# # test floodFill\n# image = [\n# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n# [0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n# [0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n# [0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n# [0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n# [0, 0, 1, 0, 0, 1, 0, 0, 0, 0],\n# [0, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n# [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n# [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],\n# [0, 0, 0, 1, 0, 0, 1, 0, 1, 1],\n# [0, 0, 0, 1, 0, 0, 1, 1, 1, 1],\n# [0, 0, 0, 1, 1, 1, 1, 0, 0, 0],\n# ]\n#\n# print(sample.floodFill(image, 3, 3, 1))\nfrom math import *\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport time\n# a=np.zeros((4,6,6))\n# a[1,3,5]=2\n# print(a[1])\n# b=np.zeros((10,4))\n# b[0,:2]=[1,2]\n# print(b)\n# n=np.array(b)[0,:2]\n# print(n)\n\n\nfig1=plt.figure('Figure 1 with subplots', figsize=(10,4))\nsubfig1=fig1.add_subplot(121)\nsubfig1.plot([1,2,3,4],[5,6,7,8])\nsubfig2=fig1.add_subplot(122)\nsubfig2.scatter([5,6,7,8],[1,2,3,4])\n\nfig2=plt.figure('Figure 2 with subplots', figsize=(10,4))\nsubfig1=fig2.add_subplot(121)\nsubfig1.scatter([1,2,3,4],[5,6,7,8])\n\ngrid=np.zeros((4,4))\ngrid[:,1]=1\nprint(grid)\n\na_k=np.array([1,2])\na=np.array([1,1])\nb=np.array([1,4])\nc=[1,2,3]\nc_n=np.array(b)\nprint(c)\n\nnorm=np.linalg.norm(a-b)\nprint(norm.shape)\nprint(a_k.shape)\nprint(a_k/norm)\n\nneighbour=[]\nn1=[1,2]\nn2=[3,4]\nneighbour.append(n1)\nneighbour.append(n2)\nprint(neighbour)\nfor n in neighbour:\n n.append(0)\n print(n)\n\nneighbour=[[1,2,3,4]]\nprint(len(neighbour))\n\nnn=np.zeros(0)\nprint(nn)\n\nnnn=np.array([1,2,3])\nprint(2*nnn)\n\na=np.array([[2,2,3,4],[5,6,7,8]])\nb=[]\nfor aa in a:\n aa = np.append(aa, [1, 2])\n print(aa)\n b.append(aa)\n\nprint(a)\nprint(b)\nprint(len(b))\nprint(b[1])\n\nfig1 = plt.figure('Figure1',figsize = (6,4))\nfig1.add_subplot(221)\nplt.plot([1,2,3,4],[5,6,7,8])\nplt.xlabel(\"size\")\nplt.ylabel(\"price \")\nrobot=0.1\nt=1\nplt.title(\"the state of robot %1.1f\" %robot +\" and %i\" %t)\nfig1.add_subplot(222)\nplt.plot([1,2,3,4],[5,6,7,8])\nplt.xlabel(\"size2\")\nplt.ylabel(\"price2 \")\n\n# fix the random value generator\nn1=np.random.uniform(1,3)\nprint(n1)\nn1=np.random.uniform(1,3)\nprint(n1)\n\nnp.random.seed(1)\nn2=np.random.uniform(1,3)\nnp.random.seed(1)\nn3=np.random.uniform(1,3)\nprint(n2,n3)\nnp.random.seed(1)\nn2=np.random.uniform(1,3)\nnp.random.seed(1)\nn3=np.random.uniform(1,3)\nprint(n2,n3)\n\n# flip the matrix upside down\nm1=np.array([[1,2,3,4],[6,7,8,9]])\nprint(np.flipud(m1))\nprint(m1)\nmatrix=np.array([[1,2,3,4,4,5,7],[5,6,7,8,6,7,8],[1,2,3,4,5,6,6]])\n# test np.sum\nstart = time.time()\nprint(np.sum(matrix))\nend=time.time()\nprint(end-start)\n\nstart = time.time()\nsum=0\nfor i in range(len(matrix)):\n for j in range(len(matrix[0])):\n sum+=matrix[i,j]\nprint(sum)\nend=time.time()\nprint(end-start)\n\n\nmatrix1=np.array([[1,0,1,1,0],[1,1,0,0,1]])\nmatrix2=np.array([[1,0,0,0,0],[0,0,1,1,1]])\n\nm=1*np.logical_or(matrix2,matrix1)\nm1=np.logical_or(matrix2,matrix1)\nm1.astype(int)\nprint(m)\nprint(m1)\n\n\ndef get_angle(p1, p2):\n '''\n Args:\n p1: [x1,y1] of the first point\n p2: [x2,y2] of the second point\n Returns:\n angle of the line\n '''\n dx = p2[0] - p1[0]\n dy = p2[1] - p1[1]\n angle = atan2(dy, dx) * 180 / pi\n\n return 
[angle, p2[0], p2[1]]\nprint(get_angle([1,2],[8,3]))\na=[]\na.append(get_angle([1,2],[8,3]))\na.append(get_angle([1,2],[4,3]))\nprint(a)\nb=3.8\nif b in range(2,4):\n print(b)\n\nnums=[1,2,3,4,5,6]\nmaxn=max(nums)\nmax_index=nums.index(maxn)\nprint(maxn,max_index)\nm=[[1,2,3,4],[6,7,8,9],[3,4,5,6]]\nprint(m[1][:])\nmm=np.array([])\nprint(mm)\n\nnnn=np.array([1,1,1,1])\nnnn=np.vstack((nnn,[1,2,3,4]))\nnnn=np.vstack((nnn,[1,2,3,4]))\nprint(nnn[1:])\n\nmatrixs=[]\nmmm=np.vstack((nnn,[1,2,3,4]))\nmatrixs.append(nnn)\nmatrixs.append(mmm)\nprint(matrixs)\nfor m in matrixs:\n print(m[:,0])\n\nvalues = np.array([3,6,1,5])\nindex_min = np.argmin(values)\nprint(index_min)\n\nprint(cos(180/180*pi))\n\n\n\n\nnums=[[1,2,3,4,4],[4,3,4,3,2]]\nprint(len(nums[0]))\n\nnum_iter=10\nnn=np.array([1,3])\ncircle_scanned=np.zeros((num_iter,2))\n\n\np1=np.array([1,2])\nx,y=p1\nprint(x,y)\nprint(1%2)\nprint(4%2)\n\nend_points=np.array([[1,2],[3,4]])\nplt.scatter(end_points[:,0],end_points[:,1])\n\na=0\nif a==0:\n a=1\n print(\"inside if \")\nelif a==1:\n print(\"inside elif\")\nelse:\n print(\"inside else\")\n\n\nx=13.5\nn_x=x%2\nn=x//2\nprint(n_x,n)\n\nfollow=np.zeros(2)\n\nalist=np.array([[1,2],[3,4],[1,2],[5,6],[0,0],[0,0],[0,0]])\nblist=np.array([[1,1],[3,3],[3,4]])\nprint (list(map(list,set(map(tuple,alist)))))\nfollow=np.vstack((follow,alist))\nfollow=np.vstack((follow,blist))\nfollow=list(map(list,set(map(tuple,follow))))\nfollow.remove([0,0])\nprint(follow)\nfor it in follow:\n print(it)\n\n\n\n\ndef on_segment(p, q, r):\n if r[0] <= max(p[0], q[0]) and r[0] >= min(p[0], q[0]) and r[1] <= max(p[1], q[1]) and r[1] >= min(p[1], q[1]):\n return True\n return False\n\ndef orientation(p, q, r):\n val = ((q[1] - p[1]) * (r[0] - q[0])) - ((q[0] - p[0]) * (r[1] - q[1]))\n if val == 0:\n return 0\n return 1 if val > 0 else -1\n\ndef intersects(seg1, seg2):\n p1, q1 = seg1\n p2, q2 = seg2\n\n o1 = orientation(p1, q1, p2)\n o2 = orientation(p1, q1, q2)\n o3 = orientation(p2, q2, p1)\n o4 = orientation(p2, q2, q1)\n if o1 != o2 and o3 != o4:\n return True\n if o1 == 0 and on_segment(p1, q1, p2): return True\n if o2 == 0 and on_segment(p1, q1, q2): return True\n if o3 == 0 and on_segment(p2, q2, p1): return True\n if o4 == 0 and on_segment(p2, q2, q1): return True\n\n return False\n\nsegment_one = ((25, 15), (25, 20))\nsegment_two = ((26, 19), (28, 20))\nprint(intersects(segment_one, segment_two))\ncheck=np.zeros((2,2))\nis_all_zero = np.all((check == 0))\nif is_all_zero:\n print(\"yes\")\nelse:\n print(\"no\")\n\na=1\nfor i in range(4):\n if i==1:\n print(i)\n a+=1\n else:\n print(\"else\")\n if a==2:\n print(a)\na=1\nb=a\nprint(a)\nprint(b)\nb=-1\nprint(a)\nprint(b)\nprint(id(a))\nprint(id(b))\na=0.5\nprint(int(a//1))\n\nfollowed=[[1,2],[3,4],[0,8],[0,0]]\nfollowed.remove([0,0])\nprint(followed)\nprint(max([sub[1] for sub in followed]))" ]
[ [ "numpy.logical_or", "numpy.array", "numpy.linalg.norm", "numpy.zeros", "numpy.argmin", "numpy.random.seed", "matplotlib.pyplot.xlabel", "numpy.sum", "matplotlib.pyplot.plot", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "numpy.flipud", "numpy.random.uniform", "matplotlib.pyplot.ylabel", "numpy.append", "numpy.all", "matplotlib.pyplot.scatter", "numpy.vstack" ] ]
justinfocus12/SHORT
[ "75ade25953373bcd04f63e688393fee2df0e021a" ]
[ "oscillating_doublewell_model.py" ]
[ "# This is where the oscillating double well potential is specified\nimport numpy as np\nfrom os import mkdir\nfrom scipy.interpolate import interp1d\nfrom os.path import join,exists\nfrom numpy import save,load\nfrom model_obj import Model\nimport helper\n\nclass OscillatingDoubleWellModel(Model):\n def __init__(self,state_dim=2,tau=0.25,kappa=0.0,lam=0.5,sigma=1.0):\n dt_sim = 0.001\n self.q = {\n 'tau': tau,\n 'kappa': kappa,\n 'sigma': sigma,\n 'lam': lam, # strngth of oscillation\n }\n noise_rank = state_dim\n tpt_obs_dim = 2\n parallel_sim_limit = 50000\n nshort_per_file_limit = 100000\n super().__init__(state_dim,noise_rank,dt_sim,tpt_obs_dim,parallel_sim_limit,nshort_per_file_limit)\n # Find the orbits\n period = 1.0\n x0_list = self.approximate_fixed_points()\n print(\"x0_list = \\n{}\".format(x0_list))\n self.find_stable_orbits(x0_list,period)\n return\n def drift_fun(self,x):\n q = self.q\n Nx = len(x)\n b = np.zeros((Nx,self.state_dim))\n b[:,0] = 1/q['tau']*(x[:,0] - x[:,0]**3 + q['kappa'] + q['lam']*np.cos(2*np.pi*x[:,1]))\n b[:,1] = 1.0\n #for d in range(1,self.state_dim):\n # b[:,d] = -1/q['tau']*x[:,d]\n return b\n def drift_jacobian_fun(self,x):\n # single x\n bx = np.zeros((self.state_dim,self.state_dim))\n bx[0,0] = 1/q['tau']*(1 - 3*x[0]**2)\n bx[0,1] = 1/q['tau']*(-2*np.pi*q['lam']*np.sin(2*np.pi*x[:,1]))\n return bx\n def diffusion_fun(self,x):\n q = self.q\n Nx = len(x)\n wdot = np.random.randn(self.state_dim*Nx).reshape((self.state_dim,Nx))\n sig = q['sigma']*np.array([[1,0],[0,0]])\n sw = (sig.dot(wdot)).T\n return sw\n def diffusion_mat(self,x):\n return q['sigma']*np.array([[1,0],[0,0]])\n def tpt_observables(self,x):\n cvx = x\n cvx[:,1] = cvx[:,1] % 1.0 # Is this enough to periodize? \n if cvx.shape[1] != self.tpt_obs_dim:\n sys.exit(\"DOH! 
tpt_observables output does not match expected dimension\")\n return x\n def create_tpt_damage_functions(self):\n # A dictionary of lambda functions of interest to be integrated along reactive trajectories (or just forward-reactive or backward-reactive).\n q = self.q\n self.dam_dict = {\n 'one': {\n 'pay': lambda x: np.ones(len(x)),\n 'name_fwd': \"T+\", #r\"$\\tau^+$\",\n 'name_bwd': \"T-\", #r\"$\\tau^-$\",\n 'name_full': \"T\", #r\"$\\tau^+-\\tau^-$\",\n 'abb_fwd': 't+',\n 'abb_bwd': 't-',\n 'abb_full': 'tfull',\n 'units': 1.0,\n 'unit_symbol': \"\",\n 'logscale': True,\n },\n 'potential': {\n 'pay': lambda x: 1/q['tau']*(-x[:,0]**2/2 + x[:,0]**4/4 - (q['kappa'] + q['lam']*np.cos(2*np.pi*x[:,1]))*x[:,0]),\n 'name_fwd': \"V+\", #r\"$\\int_0^{\\tau^+}V(X(r))dr$\",\n 'name_bwd': \"V-\", #r\"$\\int_{\\tau^-}^0V(X(r))dr$\",\n 'name_full': \"V\", #r\"$\\int_{\\tau^-}^{\\tau^+}V(X(r))dr$\",\n 'abb_fwd': 'V+',\n 'abb_bwd': 'V-',\n 'abb_full': 'Vfull',\n 'units': 1.0,\n 'unit_symbol': '',\n 'logscale': False,\n },\n }\n return\n def approximate_fixed_points(self):\n xst_approx = np.zeros((2,self.state_dim))\n xst_approx[0,0] = -1.0\n xst_approx[1,0] = 1.0\n return xst_approx\n def find_stable_orbits(self,x0_list,period):\n # One starts close to (1,0) and the other to (-1,0)\n #ta,xa = rk4(drift,np.array([[-1.0,0.0]]),0,10,0.001,q)\n #tb,xb = rk4(drift,np.array([[1.0,0.0]]),0,10,0.001,q)\n self.period = period\n print(\"x0_list.shape = {}\".format(x0_list.shape))\n tmax = 100\n Nt = int(tmax/self.dt_sim) + 1\n tmax = (Nt-1)*self.dt_sim\n t = np.linspace(0,tmax,Nt)\n x = self.integrate_euler_maruyama(x0_list,t,stochastic_flag=False)\n print(\"x.shape = {}\".format(x.shape))\n # Find which orbits end close to (1,0) and (-1,0)\n a_ends = np.where(x[-1,:,0] < 0)[0]\n b_ends = np.where(x[-1,:,0] > 0)[0]\n if len(a_ends) == 0 or len(b_ends) == 0: \n sys.exit(\"PROBLEMO! Only one side found\")\n xa = x[:,a_ends[0],:]\n xb = x[:,b_ends[0],:]\n print(\"xa.shape = {}\".format(xa.shape))\n num_periods = t[-1] // self.period\n t0 = (num_periods-2)*self.period\n t1 = (num_periods-1)*self.period\n ti1 = np.argmin(np.abs(t-(t1+0.1*self.period)))\n ti0 = np.argmin(np.abs(t-(t0-0.1*self.period)))\n print(\"t0 = {}, t1 = {}\".format(t[ti0],t[ti1]))\n alpha0 = interp1d(t[ti0:ti1]-t0,xa[ti0:ti1,0]) #,axis=0)\n self.alpha = lambda t: alpha0(t % self.period)\n beta0 = interp1d(t[ti0:ti1]-t0,xb[ti0:ti1,0]) #,axis=0)\n self.beta = lambda t: beta0(t % self.period)\n return\n def adist(self,cvx):\n # A whole list of (x,t) pairs\n cva = self.alpha(cvx[:,1])\n print(\"In adist. cva: min={}, max={}\".format(np.min(cva),np.max(cva)))\n da = np.maximum(0, cvx[:,0]-cva)\n return da\n def bdist(self,cvx):\n # A whole list of (x,t) pairs\n cvb = self.beta(cvx[:,1])\n print(\"In bdist. cvb: min={}, max={}\".format(np.min(cvb),np.max(cvb)))\n db = np.maximum(0, cvb-cvx[:,0])\n return db\n def set_param_folder(self):\n self.param_foldername = (\"tau{}_kappa{}_lam{}_sigma{}_statedim{}\".format(self.q['tau'],self.q['kappa'],self.q['lam'],self.q['sigma'],self.state_dim)).replace('.','p')\n return\n def regression_features(self,cvx):\n return cvx\n\ndef default_parameters():\n q = {\n 'state_dim': 3,\n 'tau': 0.25,\n 'kappa': 0.00,\n 'sigma': 1.0,\n }\n return q\n\n\n" ]
[ [ "numpy.max", "numpy.array", "scipy.interpolate.interp1d", "numpy.sin", "numpy.zeros", "numpy.random.randn", "numpy.min", "numpy.where", "numpy.abs", "numpy.cos", "numpy.linspace", "numpy.maximum" ] ]
sages-pl/2022-01-pythonsqlalchemy-aptiv
[ "1d6d856608e9dbe25b139e8968c48b7f46753b84" ]
[ "_assignments/numpy/operations/numpy_broadcasting_a.py" ]
[ "\"\"\"\n* Assignment: Numpy Broadcasting Arithmetic\n* Complexity: easy\n* Lines of code: 4 lines\n* Time: 3 min\n\nEnglish:\n 1. Define `a: np.ndarray` with square root of each element in `A`\n 2. Define `b: np.ndarray` with square root of each element in `B`\n 3. Define `c: np.ndarray` with second power (square) of each element in `C`\n 4. Add elements from `a` to `b`\n 5. Multiply the result by `c`\n 6. Run doctests - all must succeed\n\nPolish:\n 1. Zdefiniuj `a: np.ndarray` z pierwiastkiem kwadratowym każdego elementu `A`\n 2. Zdefiniuj `b: np.ndarray` z pierwiastkiem kwadratowym każdego elementu `B`\n 3. Zdefiniu `c: np.ndarray` z drugą potęgą (kwadratem) każdego z elementu w `C`\n 4. Dodaj elementy z `a` do `b`\n 5. Przemnóż wynik przez `c`\n 6. Uruchom doctesty - wszystkie muszą się powieść\n\nTests:\n >>> import sys; sys.tracebacklimit = 0\n\n >>> assert result is not Ellipsis, \\\n 'Assign result to variable: `result`'\n >>> assert type(result) is np.ndarray, \\\n 'Variable `result` has invalid type, expected: np.ndarray'\n\n >>> result\n array([[ 1.41421356, 2.73205081],\n [45.254834 , 0. ]])\n\"\"\"\n\nimport numpy as np\n\n\nA = np.array([[0, 1], [2, 3]], float)\nB = np.array([2, 3], float)\nC = np.array([[1, 1], [4, 0]], float)\n\n# np.ndarray: square root of each element in `A` use np.pow()\na = ...\n\n# np.ndarray: square root of each element in `B` use `**` operator\nb = ...\n\n# np.ndarray: second power (square) of each element in `C` use `**` operator\nc = ...\n\n# np.ndarray: Add elements from `a` to `b` and then multiply by `c`\n# Remember about the operator precedence\nresult = ...\n\n\n" ]
[ [ "numpy.array" ] ]
Jallet/Deconvnet
[ "79f5eb67678547bcedcda2855361c23754f962c7" ]
[ "Deconvnet-keras.py" ]
[ "#!/usr/bin/env python\n#coding=utf-8\n###############################################\n# File Name: DeconvNet2D.py\n# Author: Liang Jiang\n# mail: [email protected]\n# Created Time: Sun 30 Oct 2016 09:52:15 PM CST\n# Description: Code for Deconvnet based on keras\n###############################################\n\nimport argparse\nimport numpy as np\nimport sys\nimport time\nfrom PIL import Image\nfrom keras.layers import (\n Input,\n InputLayer,\n Flatten,\n Activation,\n Dense)\nfrom keras.layers.convolutional import (\n Convolution2D,\n MaxPooling2D)\nfrom keras.activations import *\nfrom keras.models import Model, Sequential\nfrom keras.applications import vgg16, imagenet_utils\nimport keras.backend as K\n\n\nclass DConvolution2D(object):\n '''\n A class to define forward and backward operation on Convolution2D\n '''\n def __init__(self, layer):\n '''\n # Arguments\n layer: an instance of Convolution2D layer, whose configuration \n will be used to initiate DConvolution2D(input_shape, \n output_shape, weights)\n '''\n self.layer = layer\n\n weights = layer.get_weights()\n W = weights[0]\n b = weights[1]\n\n # Set up_func for DConvolution2D\n nb_up_filter = W.shape[0]\n nb_up_row = W.shape[2]\n nb_up_col = W.shape[3]\n input = Input(shape = layer.input_shape[1:])\n output = Convolution2D(\n nb_filter = nb_up_filter, \n nb_row = nb_up_row, \n nb_col = nb_up_col, \n border_mode = 'same',\n weights = [W, b]\n )(input)\n self.up_func = K.function([input, K.learning_phase()], output)\n\n # Flip W horizontally and vertically, \n # and set down_func for DConvolution2D\n W = np.transpose(W, (1, 0, 2, 3))\n W = W[:, :, ::-1, ::-1]\n nb_down_filter = W.shape[0]\n nb_down_row = W.shape[2]\n nb_down_col = W.shape[3]\n b = np.zeros(nb_down_filter)\n input = Input(shape = layer.output_shape[1:])\n output = Convolution2D(\n nb_filter = nb_down_filter, \n nb_row = nb_down_row, \n nb_col = nb_down_col, \n border_mode = 'same',\n weights = [W, b]\n )(input)\n self.down_func = K.function([input, K.learning_phase()], output)\n\n def up(self, data, learning_phase = 0):\n '''\n function to compute Convolution output in forward pass\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Convolved result\n '''\n self.up_data = self.up_func([data, learning_phase])\n return self.up_data\n\n def down(self, data, learning_phase = 0):\n '''\n function to compute Deconvolution output in backward pass\n # Arguments\n data: Data to be operated in backward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Deconvolved result\n '''\n self.down_data= self.down_func([data, learning_phase])\n return self.down_data\n \n\nclass DDense(object):\n '''\n A class to define forward and backward operation on Dense\n '''\n def __init__(self, layer):\n '''\n # Arguments\n layer: an instance of Dense layer, whose configuration \n will be used to initiate DDense(input_shape, \n output_shape, weights)\n '''\n self.layer = layer\n weights = layer.get_weights()\n W = weights[0]\n b = weights[1]\n \n #Set up_func for DDense\n input = Input(shape = layer.input_shape[1:])\n output = Dense(output_dim = layer.output_shape[1],\n weights = [W, b])(input)\n self.up_func = K.function([input, K.learning_phase()], output)\n \n #Transpose W and set down_func for DDense\n W = W.transpose()\n self.input_shape = layer.input_shape\n self.output_shape = layer.output_shape\n b = np.zeros(self.input_shape[1])\n flipped_weights = [W, b]\n input = Input(shape = 
self.output_shape[1:])\n output = Dense(\n output_dim = self.input_shape[1], \n weights = flipped_weights)(input)\n self.down_func = K.function([input, K.learning_phase()], output)\n \n\n def up(self, data, learning_phase = 0):\n '''\n function to compute dense output in forward pass\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Result of dense layer\n '''\n self.up_data = self.up_func([data, learning_phase])\n return self.up_data\n \n def down(self, data, learning_phase = 0):\n '''\n function to compute dense output in backward pass\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Result of reverse dense layer\n '''\n # data = data - self.bias\n self.down_data = self.down_func([data, learning_phase])\n return self.down_data\n\nclass DPooling(object):\n '''\n A class to define forward and backward operation on Pooling\n '''\n def __init__(self, layer):\n '''\n # Arguments\n layer: an instance of Pooling layer, whose configuration \n will be used to initiate DPooling(input_shape, \n output_shape, weights)\n '''\n self.layer = layer\n self.poolsize = layer.pool_size\n # self.poolsize = layer.poolsize\n \n def up(self, data, learning_phase = 0):\n '''\n function to compute pooling output in forward pass\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Pooled result\n '''\n [self.up_data, self.switch] = \\\n self.__max_pooling_with_switch(data, self.poolsize)\n return self.up_data\n\n def down(self, data, learning_phase = 0):\n '''\n function to compute unpooling output in backward pass\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Unpooled result\n '''\n self.down_data = self.__max_unpooling_with_switch(data, self.switch)\n return self.down_data\n \n def __max_pooling_with_switch(self, input, poolsize):\n '''\n Compute pooling output and switch in forward pass, switch stores \n location of the maximum value in each poolsize * poolsize block\n # Arguments\n input: data to be pooled\n poolsize: size of pooling operation\n # Returns\n Pooled result and Switch\n '''\n switch = np.zeros(input.shape)\n out_shape = list(input.shape)\n row_poolsize = int(poolsize[0])\n col_poolsize = int(poolsize[1])\n out_shape[2] = out_shape[2] / poolsize[0]\n out_shape[3] = out_shape[3] / poolsize[1]\n pooled = np.zeros(out_shape)\n \n for sample in range(input.shape[0]):\n for dim in range(input.shape[1]):\n for row in range(out_shape[2]):\n for col in range(out_shape[3]):\n patch = input[sample, \n dim, \n row * row_poolsize : (row + 1) * row_poolsize,\n col * col_poolsize : (col + 1) * col_poolsize]\n max_value = patch.max()\n pooled[sample, dim, row, col] = max_value\n max_col_index = patch.argmax(axis = 1)\n max_cols = patch.max(axis = 1)\n max_row = max_cols.argmax()\n max_col = max_col_index[max_row]\n switch[sample, \n dim, \n row * row_poolsize + max_row, \n col * col_poolsize + max_col] = 1\n return [pooled, switch]\n \n # Compute unpooled output using pooled data and switch\n def __max_unpooling_with_switch(self, input, switch):\n '''\n Compute unpooled output using pooled data and switch\n # Arguments\n input: data to be pooled\n poolsize: size of pooling operation\n switch: switch storing location of each elements\n # Returns\n Unpooled result\n '''\n tile = np.ones((switch.shape[2] / input.shape[2], \n 
switch.shape[3] / input.shape[3]))\n out = np.kron(input, tile)\n unpooled = out * switch\n return unpooled\n\n\nclass DActivation(object):\n '''\n A class to define forward and backward operation on Activation\n '''\n def __init__(self, layer, linear = False):\n '''\n # Arguments\n layer: an instance of Activation layer, whose configuration \n will be used to initiate DActivation(input_shape, \n output_shape, weights)\n '''\n self.layer = layer\n self.linear = linear\n self.activation = layer.activation\n input = K.placeholder(shape = layer.output_shape)\n\n output = self.activation(input)\n # According to the original paper, \n # In forward pass and backward pass, do the same activation(relu)\n self.up_func = K.function(\n [input, K.learning_phase()], output)\n self.down_func = K.function(\n [input, K.learning_phase()], output)\n\n # Compute activation in forward pass\n def up(self, data, learning_phase = 0):\n '''\n function to compute activation in forward pass\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Activation\n '''\n self.up_data = self.up_func([data, learning_phase])\n return self.up_data\n\n # Compute activation in backward pass\n def down(self, data, learning_phase = 0):\n '''\n function to compute activation in backward pass\n # Arguments\n data: Data to be operated in backward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Activation\n '''\n self.down_data = self.down_func([data, learning_phase])\n return self.down_data\n \n \nclass DFlatten(object):\n '''\n A class to define forward and backward operation on Flatten\n '''\n def __init__(self, layer):\n '''\n # Arguments\n layer: an instance of Flatten layer, whose configuration \n will be used to initiate DFlatten(input_shape, \n output_shape, weights)\n '''\n self.layer = layer\n self.shape = layer.input_shape[1:]\n self.up_func = K.function(\n [layer.input, K.learning_phase()], layer.output)\n\n # Flatten 2D input into 1D output\n def up(self, data, learning_phase = 0):\n '''\n function to flatten input in forward pass\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Flattened data\n '''\n self.up_data = self.up_func([data, learning_phase])\n return self.up_data\n\n # Reshape 1D input into 2D output\n def down(self, data, learning_phase = 0):\n '''\n function to unflatten input in backward pass\n # Arguments\n data: Data to be operated in backward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n Recovered data\n '''\n new_shape = [data.shape[0]] + list(self.shape)\n assert np.prod(self.shape) == np.prod(data.shape[1:])\n self.down_data = np.reshape(data, new_shape)\n return self.down_data\n\nclass DInput(object):\n '''\n A class to define forward and backward operation on Input\n '''\n def __init__(self, layer):\n '''\n # Arguments\n layer: an instance of Input layer, whose configuration \n will be used to initiate DInput(input_shape, \n output_shape, weights)\n '''\n self.layer = layer\n \n # input and output of Inputl layer are the same\n def up(self, data, learning_phase = 0):\n '''\n function to operate input in forward pass, the input and output\n are the same\n # Arguments\n data: Data to be operated in forward pass\n learning_phase: learning_phase of Keras, 1 or 0\n # Returns\n data\n '''\n self.up_data = data\n return self.up_data\n \n def down(self, data, learning_phase = 0):\n '''\n function to operate input in backward 
pass, the input and output\n        are the same\n        # Arguments\n            data: Data to be operated in backward pass\n            learning_phase: learning_phase of Keras, 1 or 0\n        # Returns\n            data\n        '''\n        self.down_data = data\n        return self.down_data\n    \ndef visualize(model, data, layer_name, feature_to_visualize, visualize_mode):\n    '''\n    function to visualize feature\n    # Arguments\n        model: Pre-trained model used to visualize data\n        data: image to visualize\n        layer_name: Name of layer to visualize\n        feature_to_visualize: Feature to visualize\n        visualize_mode: Visualize mode, 'all' or 'max', 'max' will only pick \n                        the greatest activation in a feature map and set others\n                        to 0s, this will indicate which part fires the neuron \n                        most; 'all' will use all values in a feature map,\n                        which will show what image the filter sees. For \n                        convolutional layers, there is a difference between \n                        'all' and 'max'; for Dense layers, they are the same\n    # Returns\n        The image reflecting feature\n    '''\n    deconv_layers = []\n    # Stack layers\n    for i in range(len(model.layers)):\n        if isinstance(model.layers[i], Convolution2D):\n            deconv_layers.append(DConvolution2D(model.layers[i]))\n            deconv_layers.append(\n                    DActivation(model.layers[i]))\n        elif isinstance(model.layers[i], MaxPooling2D):\n            deconv_layers.append(DPooling(model.layers[i]))\n        elif isinstance(model.layers[i], Dense):\n            deconv_layers.append(DDense(model.layers[i]))\n            deconv_layers.append(\n                    DActivation(model.layers[i]))\n        elif isinstance(model.layers[i], Activation):\n            deconv_layers.append(DActivation(model.layers[i]))\n        elif isinstance(model.layers[i], Flatten):\n            deconv_layers.append(DFlatten(model.layers[i]))\n        elif isinstance(model.layers[i], InputLayer):\n            deconv_layers.append(DInput(model.layers[i]))\n        else:\n            print('Cannot handle this type of layer')\n            print(model.layers[i].get_config())\n            sys.exit()\n        if layer_name == model.layers[i].name:\n            break\n\n    # Forward pass\n    deconv_layers[0].up(data)\n    for i in range(1, len(deconv_layers)):\n        deconv_layers[i].up(deconv_layers[i - 1].up_data)\n\n    output = deconv_layers[-1].up_data\n    assert output.ndim == 2 or output.ndim == 4\n    if output.ndim == 2:\n        feature_map = output[:, feature_to_visualize]\n    else:\n        feature_map = output[:, feature_to_visualize, :, :]\n    if 'max' == visualize_mode:\n        max_activation = feature_map.max()\n        temp = feature_map == max_activation\n        feature_map = feature_map * temp\n    elif 'all' != visualize_mode:\n        print('Illegal visualize mode')\n        sys.exit()\n    output = np.zeros_like(output)\n    if 2 == output.ndim:\n        output[:, feature_to_visualize] = feature_map\n    else:\n        output[:, feature_to_visualize, :, :] = feature_map\n\n    # Backward pass\n    deconv_layers[-1].down(output)\n    for i in range(len(deconv_layers) - 2, -1, -1):\n        deconv_layers[i].down(deconv_layers[i + 1].down_data)\n    deconv = deconv_layers[0].down_data\n    deconv = deconv.squeeze()\n    \n    return deconv\n\n    \ndef argparser():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('image', help = 'Path of image to visualize')\n    parser.add_argument('--layer_name', '-l', \n            action = 'store', dest = 'layer_name', \n            default = 'block5_conv3', help = 'Layer to visualize')\n    parser.add_argument('--feature', '-f', \n            action = 'store', dest = 'feature', \n            default = 0, type = int, help = 'Feature to visualize')\n    parser.add_argument('--mode', '-m', action = 'store', dest = 'mode', \n            choices = ['max', 'all'], default = 'max', \n            help = 'Visualize mode, \\'max\\' mode will pick the greatest \\\n                    activation in the feature map and set others to zero, \\\n                    \\'all\\' mode 
will use all values in the feature map')\n return parser\n\ndef main():\n parser = argparser()\n args = parser.parse_args()\n image_path = args.image\n layer_name = args.layer_name\n feature_to_visualize = args.feature\n visualize_mode = args.mode\n\n model = vgg16.VGG16(weights = 'imagenet', include_top = True)\n layer_dict = dict([(layer.name, layer) for layer in model.layers])\n if not layer_dict.has_key(layer_name):\n print('Wrong layer name')\n sys.exit()\n\n # Load data and preprocess\n img = Image.open(image_path)\n img = img.resize((224, 224))\n img_array = np.array(img)\n img_array = np.transpose(img_array, (2, 0, 1))\n img_array = img_array[np.newaxis, :]\n img_array = img_array.astype(np.float)\n img_array = imagenet_utils.preprocess_input(img_array)\n \n deconv = visualize(model, img_array, \n layer_name, feature_to_visualize, visualize_mode)\n \n # postprocess and save image\n deconv = np.transpose(deconv, (1, 2, 0))\n deconv = deconv - deconv.min()\n deconv *= 1.0 / (deconv.max() + 1e-8)\n deconv = deconv[:, :, ::-1]\n uint8_deconv = (deconv * 255).astype(np.uint8)\n img = Image.fromarray(uint8_deconv, 'RGB')\n img.save('results/{}_{}_{}.png'.format(layer_name, feature_to_visualize, visualize_mode))\n\nif \"__main__\" == __name__:\n main()\n" ]
[ [ "numpy.array", "numpy.zeros_like", "numpy.reshape", "numpy.zeros", "numpy.ones", "numpy.prod", "numpy.transpose", "numpy.kron" ] ]
ZeroCool2u/transformers
[ "0be5f4a00c0fda64110e18eaadcd9d321bfebd9d" ]
[ "examples/seq2seq/utils.py" ]
[ "import itertools\nimport json\nimport linecache\nimport math\nimport os\nimport pickle\nimport socket\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import Callable, Dict, Iterable, List, Union\n\nimport git\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom rouge_score import rouge_scorer, scoring\nfrom sacrebleu import corpus_bleu\nfrom torch import nn\nfrom torch.utils.data import Dataset, Sampler\n\nfrom transformers import BartTokenizer\nfrom transformers.file_utils import cached_property\n\n\ntry:\n from fairseq.data.data_utils import batch_by_size\n\n FAIRSEQ_AVAILABLE = True\nexcept (ImportError, ModuleNotFoundError):\n FAIRSEQ_AVAILABLE = False\n\n\ndef label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):\n \"\"\"From fairseq\"\"\"\n if target.dim() == lprobs.dim() - 1:\n target = target.unsqueeze(-1)\n nll_loss = -lprobs.gather(dim=-1, index=target)\n smooth_loss = -lprobs.sum(dim=-1, keepdim=True)\n if ignore_index is not None:\n pad_mask = target.eq(ignore_index)\n nll_loss.masked_fill_(pad_mask, 0.0)\n smooth_loss.masked_fill_(pad_mask, 0.0)\n else:\n nll_loss = nll_loss.squeeze(-1)\n smooth_loss = smooth_loss.squeeze(-1)\n\n nll_loss = nll_loss.sum() # mean()? Scared to break other math.\n smooth_loss = smooth_loss.sum()\n eps_i = epsilon / lprobs.size(-1)\n loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss\n return loss, nll_loss\n\n\ndef encode_line(tokenizer, line, max_length, pad_to_max_length=True, return_tensors=\"pt\"):\n \"\"\"Only used by LegacyDataset\"\"\"\n extra_kw = {\"add_prefix_space\": True} if isinstance(tokenizer, BartTokenizer) else {}\n return tokenizer(\n [line],\n max_length=max_length,\n padding=\"max_length\" if pad_to_max_length else None,\n truncation=True,\n return_tensors=return_tensors,\n **extra_kw,\n )\n\n\ndef lmap(f: Callable, x: Iterable) -> List:\n \"\"\"list(map(f, x))\"\"\"\n return list(map(f, x))\n\n\ndef calculate_bleu(output_lns, refs_lns, **kwargs) -> dict:\n \"\"\"Uses sacrebleu's corpus_bleu implementation.\"\"\"\n return {\"bleu\": round(corpus_bleu(output_lns, [refs_lns], **kwargs).score, 4)}\n\n\ndef trim_batch(\n input_ids,\n pad_token_id,\n attention_mask=None,\n):\n \"\"\"Remove columns that are populated exclusively by pad_token_id\"\"\"\n keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)\n if attention_mask is None:\n return input_ids[:, keep_column_mask]\n else:\n return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])\n\n\nclass AbstractSeq2SeqDataset(Dataset):\n def __init__(\n self,\n tokenizer,\n data_dir,\n max_source_length,\n max_target_length,\n type_path=\"train\",\n n_obs=None,\n src_lang=None,\n tgt_lang=None,\n prefix=\"\",\n ):\n super().__init__()\n self.src_file = Path(data_dir).joinpath(type_path + \".source\")\n self.tgt_file = Path(data_dir).joinpath(type_path + \".target\")\n self.len_file = Path(data_dir).joinpath(type_path + \".len\")\n if os.path.exists(self.len_file):\n self.src_lens = pickle_load(self.len_file)\n self.used_char_len = False\n else:\n self.src_lens = self.get_char_lens(self.src_file)\n self.used_char_len = True\n self.max_source_length = max_source_length\n self.max_target_length = max_target_length\n assert min(self.src_lens) > 0, f\"found empty line in {self.src_file}\"\n self.tokenizer = tokenizer\n self.prefix = prefix if prefix is not None else \"\"\n\n if n_obs is not None:\n self.src_lens = self.src_lens[:n_obs]\n self.pad_token_id = self.tokenizer.pad_token_id\n self.src_lang = 
src_lang\n self.tgt_lang = tgt_lang\n self.add_prefix_space = isinstance(self.tokenizer, BartTokenizer)\n\n def __len__(self):\n return len(self.src_lens)\n\n @staticmethod\n def get_char_lens(data_file):\n return [len(x) for x in Path(data_file).open().readlines()]\n\n @cached_property\n def tgt_lens(self):\n \"\"\"Length in characters of target documents\"\"\"\n return self.get_char_lens(self.tgt_file)\n\n def make_sortish_sampler(self, batch_size, distributed=False, shuffle=True, **kwargs):\n if distributed:\n return DistributedSortishSampler(self, batch_size, shuffle=shuffle, **kwargs)\n else:\n return SortishSampler(self.src_lens, batch_size, shuffle=shuffle)\n\n def make_dynamic_sampler(self, max_tokens_per_batch=1024, **kwargs):\n assert FAIRSEQ_AVAILABLE, \"Dynamic batch size requires `pip install fairseq`\"\n assert not self.used_char_len, \"You must call python make_len_file.py before calling make_dynamic_sampler\"\n sorted_indices = list(self.make_sortish_sampler(1024, shuffle=False))\n\n def num_tokens_in_example(i):\n return min(self.src_lens[i], self.max_target_length)\n\n # call fairseq cython function\n batch_sampler: List[List[int]] = batch_by_size(\n sorted_indices,\n num_tokens_fn=num_tokens_in_example,\n max_tokens=max_tokens_per_batch,\n required_batch_size_multiple=64,\n )\n shuffled_batches = [batch_sampler[i] for i in np.random.permutation(range(len(batch_sampler)))]\n # move the largest batch to the front to OOM quickly (uses an approximation for padding)\n approximate_toks_per_batch = [max(self.src_lens[i] for i in batch) * len(batch) for batch in shuffled_batches]\n largest_batch_idx = np.argmax(approximate_toks_per_batch)\n shuffled_batches[0], shuffled_batches[largest_batch_idx] = (\n shuffled_batches[largest_batch_idx],\n shuffled_batches[0],\n )\n return shuffled_batches\n\n def __getitem__(self, item):\n raise NotImplementedError(\"You must implement this\")\n\n def collate_fn(self, batch):\n raise NotImplementedError(\"You must implement this\")\n\n\nclass LegacySeq2SeqDataset(AbstractSeq2SeqDataset):\n def __getitem__(self, index) -> Dict[str, torch.Tensor]:\n \"\"\"Call tokenizer on src and tgt_lines\"\"\"\n index = index + 1 # linecache starts at 1\n source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip(\"\\n\")\n tgt_line = linecache.getline(str(self.tgt_file), index).rstrip(\"\\n\")\n assert source_line, f\"empty source line for index {index}\"\n assert tgt_line, f\"empty tgt line for index {index}\"\n source_inputs = encode_line(self.tokenizer, source_line, self.max_source_length)\n target_inputs = encode_line(self.tokenizer, tgt_line, self.max_target_length)\n\n source_ids = source_inputs[\"input_ids\"].squeeze()\n target_ids = target_inputs[\"input_ids\"].squeeze()\n src_mask = source_inputs[\"attention_mask\"].squeeze()\n return {\n \"input_ids\": source_ids,\n \"attention_mask\": src_mask,\n \"labels\": target_ids,\n }\n\n def collate_fn(self, batch) -> Dict[str, torch.Tensor]:\n input_ids = torch.stack([x[\"input_ids\"] for x in batch])\n masks = torch.stack([x[\"attention_mask\"] for x in batch])\n target_ids = torch.stack([x[\"labels\"] for x in batch])\n pad_token_id = self.pad_token_id\n y = trim_batch(target_ids, pad_token_id)\n source_ids, source_mask = trim_batch(input_ids, pad_token_id, attention_mask=masks)\n batch = {\n \"input_ids\": source_ids,\n \"attention_mask\": source_mask,\n \"labels\": y,\n }\n return batch\n\n\nclass Seq2SeqDataset(AbstractSeq2SeqDataset):\n \"\"\"A dataset that calls 
prepare_seq2seq_batch.\"\"\"\n\n def __getitem__(self, index) -> Dict[str, str]:\n index = index + 1 # linecache starts at 1\n source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip(\"\\n\")\n tgt_line = linecache.getline(str(self.tgt_file), index).rstrip(\"\\n\")\n assert source_line, f\"empty source line for index {index}\"\n assert tgt_line, f\"empty tgt line for index {index}\"\n return {\"tgt_texts\": tgt_line, \"src_texts\": source_line, \"id\": index - 1}\n\n def collate_fn(self, batch) -> Dict[str, torch.Tensor]:\n \"\"\"Call prepare_seq2seq_batch.\"\"\"\n batch_encoding: Dict[str, torch.Tensor] = self.tokenizer.prepare_seq2seq_batch(\n [x[\"src_texts\"] for x in batch],\n src_lang=self.src_lang,\n tgt_texts=[x[\"tgt_texts\"] for x in batch],\n tgt_lang=self.tgt_lang,\n max_length=self.max_source_length,\n max_target_length=self.max_target_length,\n return_tensors=\"pt\",\n add_prefix_space=self.add_prefix_space,\n ).data\n batch_encoding[\"ids\"] = torch.tensor([x[\"id\"] for x in batch])\n return batch_encoding\n\n\nclass SortishSampler(Sampler):\n \"Go through the text data by order of src length with a bit of randomness. From fastai repo.\"\n\n def __init__(self, data, batch_size, shuffle=True):\n self.data, self.bs, self.shuffle = data, batch_size, shuffle\n\n def __len__(self) -> int:\n return len(self.data)\n\n def __iter__(self):\n return iter(sortish_sampler_indices(self.data, self.bs, shuffle=self.shuffle))\n\n\ndef sortish_sampler_indices(data: List, bs: int, shuffle=True) -> np.array:\n \"Go through the text data by order of src length with a bit of randomness. From fastai repo.\"\n if not shuffle:\n return np.argsort(np.array(data) * -1)\n\n def key_fn(i):\n return data[i]\n\n idxs = np.random.permutation(len(data))\n sz = bs * 50\n ck_idx = [idxs[i : i + sz] for i in range(0, len(idxs), sz)]\n sort_idx = np.concatenate([sorted(s, key=key_fn, reverse=True) for s in ck_idx])\n sz = bs\n ck_idx = [sort_idx[i : i + sz] for i in range(0, len(sort_idx), sz)]\n max_ck = np.argmax([key_fn(ck[0]) for ck in ck_idx]) # find the chunk with the largest key,\n ck_idx[0], ck_idx[max_ck] = ck_idx[max_ck], ck_idx[0] # then make sure it goes first.\n sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([], dtype=np.int)\n sort_idx = np.concatenate((ck_idx[0], sort_idx))\n return sort_idx\n\n\nclass DistributedSortishSampler(Sampler):\n \"\"\"Copied from torch DistributedSampler\"\"\"\n\n def __init__(self, dataset, batch_size, num_replicas=None, rank=None, add_extra_examples=True, shuffle=True):\n if num_replicas is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n num_replicas = dist.get_world_size()\n if rank is None:\n if not dist.is_available():\n raise RuntimeError(\"Requires distributed package to be available\")\n rank = dist.get_rank()\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n if add_extra_examples:\n self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n else:\n self.total_size = len(dataset)\n self.num_samples = len(self.available_indices)\n self.batch_size = batch_size\n self.add_extra_examples = add_extra_examples\n self.shuffle = shuffle\n\n def __iter__(self) -> Iterable:\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n sortish_data = [self.dataset.src_lens[i] for i in self.available_indices]\n 
sortish_indices = sortish_sampler_indices(sortish_data, self.batch_size, shuffle=self.shuffle)\n indices = [self.available_indices[i] for i in sortish_indices]\n assert len(indices) == self.num_samples\n return iter(indices)\n\n @cached_property\n def available_indices(self) -> np.array:\n indices = list(range(len(self.dataset)))\n # add extra samples to make it evenly divisible\n indices += indices[: (self.total_size - len(indices))]\n assert len(indices) == self.total_size\n # subsample\n available_indices = indices[self.rank : self.total_size : self.num_replicas]\n return available_indices\n\n def __len__(self):\n return self.num_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n\n\nlogger = getLogger(__name__)\n\n\ndef use_task_specific_params(model, task):\n \"\"\"Update config with summarization specific params.\"\"\"\n task_specific_params = model.config.task_specific_params\n\n if task_specific_params is not None:\n pars = task_specific_params.get(task, {})\n logger.info(f\"using task specific params for {task}: {pars}\")\n model.config.update(pars)\n\n\ndef pickle_load(path):\n \"\"\"pickle.load(path)\"\"\"\n with open(path, \"rb\") as f:\n return pickle.load(f)\n\n\ndef pickle_save(obj, path):\n \"\"\"pickle.dump(obj, path)\"\"\"\n with open(path, \"wb\") as f:\n return pickle.dump(obj, f)\n\n\ndef flatten_list(summary_ids: List[List]):\n return [x for x in itertools.chain.from_iterable(summary_ids)]\n\n\ndef save_git_info(folder_path: str) -> None:\n \"\"\"Save git information to output_dir/git_log.json\"\"\"\n repo_infos = get_git_info()\n save_json(repo_infos, os.path.join(folder_path, \"git_log.json\"))\n\n\ndef save_json(content, path, indent=4, **json_dump_kwargs):\n with open(path, \"w\") as f:\n json.dump(content, f, indent=indent, **json_dump_kwargs)\n\n\ndef load_json(path):\n with open(path) as f:\n return json.load(f)\n\n\ndef get_git_info():\n repo = git.Repo(search_parent_directories=True)\n repo_infos = {\n \"repo_id\": str(repo),\n \"repo_sha\": str(repo.head.object.hexsha),\n \"repo_branch\": str(repo.active_branch),\n \"hostname\": str(socket.gethostname()),\n }\n return repo_infos\n\n\nROUGE_KEYS = [\"rouge1\", \"rouge2\", \"rougeL\"]\n\n\ndef calculate_rouge(output_lns: List[str], reference_lns: List[str], use_stemmer=True) -> Dict:\n scorer = rouge_scorer.RougeScorer(ROUGE_KEYS, use_stemmer=use_stemmer)\n aggregator = scoring.BootstrapAggregator()\n\n for reference_ln, output_ln in zip(reference_lns, output_lns):\n scores = scorer.score(reference_ln, output_ln)\n aggregator.add_scores(scores)\n\n result = aggregator.aggregate()\n return {k: round(v.mid.fmeasure * 100, 4) for k, v in result.items()}\n\n\n# Utilities for freezing parameters and checking whether they are frozen\n\n\ndef freeze_params(model: nn.Module):\n \"\"\"Set requires_grad=False for each of model.parameters()\"\"\"\n for par in model.parameters():\n par.requires_grad = False\n\n\ndef grad_status(model: nn.Module) -> Iterable:\n return (par.requires_grad for par in model.parameters())\n\n\ndef any_requires_grad(model: nn.Module) -> bool:\n return any(grad_status(model))\n\n\ndef assert_all_frozen(model):\n model_grads: List[bool] = list(grad_status(model))\n n_require_grad = sum(lmap(int, model_grads))\n npars = len(model_grads)\n assert not any(model_grads), f\"{n_require_grad/npars:.1%} of {npars} weights require grad\"\n\n\ndef assert_not_all_frozen(model):\n model_grads: List[bool] = list(grad_status(model))\n npars = len(model_grads)\n assert any(model_grads), f\"none of 
{npars} weights require grad\"\n\n\n# CLI Parsing utils\n\n\ndef parse_numeric_n_bool_cl_kwargs(unparsed_args: List[str]) -> Dict[str, Union[int, float, bool]]:\n \"\"\"\n Parse an argv list of unspecified command line args to a dict.\n Assumes all values are either numeric or boolean in the form of true/false.\n \"\"\"\n result = {}\n assert len(unparsed_args) % 2 == 0, f\"got odd number of unparsed args: {unparsed_args}\"\n num_pairs = len(unparsed_args) // 2\n for pair_num in range(num_pairs):\n i = 2 * pair_num\n assert unparsed_args[i].startswith(\"--\")\n if unparsed_args[i + 1].lower() == \"true\":\n value = True\n elif unparsed_args[i + 1].lower() == \"false\":\n value = False\n else:\n try:\n value = int(unparsed_args[i + 1])\n except ValueError:\n value = float(unparsed_args[i + 1]) # this can raise another informative ValueError\n\n result[unparsed_args[i][2:]] = value\n return result\n\n\ndef write_txt_file(ordered_tgt, path):\n f = Path(path).open(\"w\")\n for ln in ordered_tgt:\n f.write(ln + \"\\n\")\n f.flush()\n" ]
[ [ "numpy.concatenate", "torch.distributed.get_world_size", "numpy.array", "torch.distributed.is_available", "torch.stack", "numpy.random.permutation", "torch.Generator", "numpy.argmax", "torch.tensor", "torch.distributed.get_rank" ] ]
chunchentu/EvadeML-Zoo
[ "61a8015c096f1e6448caec4e702aef2d646c76e7" ]
[ "attacks/cleverhans_wrapper.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport click\n\nimport pdb\nimport sys, os\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom utils import load_externals\nfrom cleverhans.utils_tf import model_loss, batch_eval\n\nimport warnings\n\ndef override_params(default, update):\n for key in default:\n if key in update:\n val = update[key]\n if key == 'ord':\n if val == 'li':\n val = np.inf\n elif val == 'l2':\n val = 2\n elif val == 'l1':\n val = 1\n else:\n raise ValueError(\"Unsuporrted ord: %s\" % val)\n default[key] = val\n del update[key]\n\n if len(update) > 0:\n warnings.warn(\"Ignored arguments: %s\" % update.keys())\n return default\n\n\nfrom cleverhans.attacks import FastGradientMethod\ndef generate_fgsm_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath):\n \"\"\"\n Untargeted attack. Y is not needed.\n \"\"\"\n fgsm = FastGradientMethod(model, back='tf', sess=sess)\n fgsm_params = {'eps': 0.1, 'ord': np.inf, 'y': None, 'clip_min': 0, 'clip_max': 1}\n fgsm_params = override_params(fgsm_params, attack_params)\n\n X_adv = fgsm.generate_np(X, **fgsm_params)\n return X_adv\n\n\nfrom cleverhans.attacks import BasicIterativeMethod\ndef generate_bim_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath):\n \"\"\"\n Untargeted attack. Y is not needed.\n \"\"\"\n bim = BasicIterativeMethod(model, back='tf', sess=sess)\n bim_params = {'eps': 0.1, 'eps_iter':0.05, 'nb_iter':10, 'y':y,\n 'ord':np.inf, 'clip_min':0, 'clip_max':1 }\n bim_params = override_params(bim_params, attack_params)\n\n X_adv = bim.generate_np(X, **bim_params)\n return X_adv\n\n\nfrom cleverhans.attacks import SaliencyMapMethod\ndef generate_jsma_examples(sess, model, x, y, X, Y, attack_params, verbose, attack_log_fpath):\n \"\"\"\n Targeted attack, with target classes in Y.\n \"\"\"\n Y_target = Y\n\n nb_classes = Y.shape[1]\n\n jsma = SaliencyMapMethod(model, back='tf', sess=sess)\n jsma_params = {'theta': 1., 'gamma': 0.1,\n 'nb_classes': nb_classes, 'clip_min': 0.,\n 'clip_max': 1., 'targets': y,\n 'y_val': None}\n jsma_params = override_params(jsma_params, attack_params)\n\n adv_x_list = []\n\n with click.progressbar(range(0, len(X)), file=sys.stderr, show_pos=True, \n width=40, bar_template=' [%(bar)s] JSMA Attacking %(info)s', \n fill_char='>', empty_char='-') as bar:\n # Loop over the samples we want to perturb into adversarial examples\n for sample_ind in bar:\n sample = X[sample_ind:(sample_ind+1)]\n\n jsma_params['y_val'] = Y_target[[sample_ind],]\n adv_x = jsma.generate_np(sample, **jsma_params)\n adv_x_list.append(adv_x)\n\n return np.vstack(adv_x_list)\n\n" ]
[ [ "numpy.vstack" ] ]
franneck94/TensorCross
[ "d1a69ee8637d4c9e7ad800023c0141a9499bcbac" ]
[ "tests/test_utils.py" ]
[ "\"\"\"Test code for the random search.\n\"\"\"\nimport unittest\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorcross.utils import dataset_split\n\n\nnp.random.seed(0)\ntf.random.set_seed(0)\n\n\nclass UtilsTests(unittest.TestCase):\n def setUp(self) -> None:\n self.dataset = tf.data.Dataset.from_tensor_slices(\n ([1, 2, 3], [-1, -2, -3]) # x # y\n )\n\n def test_train_validation_split(self) -> None:\n split_fraction = 0.3\n train_dataset, val_dataset = dataset_split(\n self.dataset, split_fraction=split_fraction, fold=0\n )\n self.assertEqual(\n len(val_dataset), int(len(self.dataset) * split_fraction)\n )\n self.assertEqual(\n len(val_dataset) + len(train_dataset), len(self.dataset)\n )\n\n @staticmethod\n def _dataset_to_list(dataset: tf.data.Dataset) -> list:\n return [(it[0].numpy(), it[1].numpy()) for it in dataset]\n\n def test_cross_validation_split(self) -> None:\n split_fraction = 1 / 3\n # First cross-validation split\n first_train_dataset, first_val_dataset = dataset_split(\n dataset=self.dataset, split_fraction=split_fraction, fold=0\n )\n self.assertEqual(\n len(first_train_dataset) + len(first_val_dataset), len(self.dataset)\n )\n self.assertEqual(\n set(\n self._dataset_to_list(first_train_dataset)\n + self._dataset_to_list(first_val_dataset)\n ),\n set(self._dataset_to_list(self.dataset)),\n )\n # Second cross-validation split\n second_train_dataset, second_val_dataset = dataset_split(\n dataset=self.dataset, split_fraction=split_fraction, fold=1\n )\n self.assertEqual(\n len(second_train_dataset) + len(second_val_dataset),\n len(self.dataset),\n )\n self.assertEqual(\n set(\n self._dataset_to_list(second_train_dataset)\n + self._dataset_to_list(second_val_dataset)\n ),\n set(self._dataset_to_list(self.dataset)),\n )\n # Third cross-validation split\n third_train_dataset, third_val_dataset = dataset_split(\n dataset=self.dataset, split_fraction=split_fraction, fold=2\n )\n self.assertEqual(\n len(third_train_dataset) + len(third_val_dataset), len(self.dataset)\n )\n self.assertEqual(\n set(\n self._dataset_to_list(third_train_dataset)\n + self._dataset_to_list(third_val_dataset)\n ),\n set(self._dataset_to_list(self.dataset)),\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.random.seed", "tensorflow.random.set_seed", "tensorflow.data.Dataset.from_tensor_slices" ] ]
procommerz/keras-yolo3
[ "3559c524bfae2c7be9f56bea193a2b23ace3f72b" ]
[ "test.py" ]
[ "from __future__ import print_function\nimport threading\nimport os\nimport time\nimport sys\nimport RPi.GPIO as GPIO\nimport numpy\nimport json\nimport pdb\n\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\n\nOUT_PIN = 23\nIN_PIN = 25\nTRIG_PIN = 23\n\nIN2_PIN = 20\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(True)\n\napp = None\nmeter = None\nflaskapp = Flask(__name__)\n\nclass Meter(threading.Thread):\n def __init__(self):\n print(\"Initializing Meter\")\n GPIO.setup(IN2_PIN, GPIO.IN)\n\n threading.Thread.__init__(self)\n self.deamon = True\n\n self.chart_data = list()\n self.chart_labels = list()\n\n self.countdown = 0\n self.start()\n\n def run(self):\n print(\"Running Meter\")\n while 1 and app.main_thread.isAlive():\n # print(GPIO.input(IN2_PIN))\n self.chart_labels.append(\"%d\" % (0.01 * self.countdown * 1000))\n self.chart_data.append(GPIO.input(IN2_PIN))\n self.countdown += 1\n time.sleep(0.01)\n\n # if self.countdown > 1000:\n # pdb.set_trace()\n\nclass Emitter(threading.Thread):\n def __init__(self):\n print(\"Initializing Emitter\")\n GPIO.setup(IN2_PIN, GPIO.IN)\n GPIO.setup(TRIG_PIN, GPIO.OUT)\n\n threading.Thread.__init__(self)\n\n self.countdown = 0\n\n self.start()\n\n def distance(self):\n GPIO.output(TRIG_PIN, True)\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(TRIG_PIN, False)\n\n while GPIO.input(IN_PIN) == 0:\n start_time = time.time()\n # save time of arrival\n while GPIO.input(IN_PIN) == 1:\n stop_time = time.time()\n # time difference between start and arrival\n elapsed = start_time - stop_time\n # multiply with the sonic speed (34300 cm/s)\n # and divide by 2, because there and back\n distance = (elapsed * 34300) / 2\n return distance\n\n def emit_test(self, length):\n GPIO.output(TRIG_PIN, True)\n # set Trigger after 0.01ms to LOW\n time.sleep(length)\n GPIO.output(TRIG_PIN, False)\n\n def run(self):\n print(\"Running Emitter\")\n while 1 and app.main_thread.isAlive():\n if app.enable_emitter:\n self.emit_test(0.00001)\n time.sleep(0.00001 * 400)\n\nclass SonarApp:\n def __init__(self, main_thread):\n self.main_thread = main_thread\n self.enable_emitter = True\n self.enable_meter = True\n pass\n\n def sonar(self):\n self.emitter = Emitter()\n self.meter = Meter()\n\n # def measure(self):\n # GPIO.setup(TRIG_PIN, GPIO.OUT)\n # GPIO.setup(IN_PIN, GPIO.IN)\n #\n # try:\n # while 1:\n # res = self.distance()\n # sys.stdout.flush()\n # sys.stdout.write('Distance: %f cm\\r' % round(res, 1))\n # # print('Distance: %f cm\\r' % round(res, 1), file=sys.stderr)\n # time.sleep(0.01)\n # except KeyboardInterrupt:\n # pass\n #\n # GPIO.cleanup()\n\n\n def listen(self):\n GPIO.setup(OUT_PIN, GPIO.OUT)\n # \tGPIO.setup(IN_PIN, GPIO.IN)\n GPIO.setup(IN_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n # \tp1 = GPIO.PWM(OUT_PIN, 180)\n # \tp1.start(2)\n\n try:\n while 1:\n \t\t\t# sys.stdout.write('%f\\r' % GPIO.input(IN_PIN))\n \t\t\t# sys.stdout.flush()\n reading = GPIO.input(IN_PIN)\n if reading != 0:\n print()\n time.sleep(0.01)\n except KeyboardInterrupt:\n pass\n # \tp1.stop()\n GPIO.cleanup()\n\n def choir(self):\n GPIO.setup(OUT_PIN, GPIO.OUT)\n GPIO.setup(IN_PIN, GPIO.OUT)\n\n p1 = GPIO.PWM(OUT_PIN, 280)\n p2 = GPIO.PWM(IN_PIN, 380)\n\n p1.start(5)\n p2.start(2.5)\n time.sleep(1)\n p1.stop()\n\n time.sleep(1.2)\n\n p1.ChangeFrequency(490)\n p1.start(5)\n time.sleep(1)\n\n p1.stop()\n p2.stop()\n\n # \tp1.start(0.2)\n # \ttime.sleep(0.5)\n # \tp1.stop()\n\n # \ttime.sleep(0.5)\n\n # \tp1.ChangeFrequency(255)\n # 
\tp1.start(1)\n # \ttime.sleep(1)\n # \tp1.stop()\n\n #p1 = GPIO.PWM(OUT_PIN, 250)\n\n GPIO.cleanup()\n\n def notes(self):\n p = GPIO.PWM(OUT_PIN, 250) # channel=12 frequency=50Hz\n p.start(0)\n try:\n while 1:\n for dc in numpy.arange(0.0, 11.0, 0.1):\n p.ChangeDutyCycle(dc)\n time.sleep(0.1)\n for dc in numpy.arange(10.0, -1.0, -1.0):\n p.ChangeDutyCycle(dc)\n time.sleep(0.1)\n except KeyboardInterrupt:\n pass\n p.stop()\n GPIO.cleanup()\n\n\n\[email protected](\"/home\", methods=['GET'])\ndef dashboard():\n return render_template(\"home.html\")\n\[email protected](\"/current_values\", methods=['GET'])\ndef current_values():\n resp = dict()\n\n if request.args.get('x_from') is not None:\n x_from = int(request.form['x_from'])\n else:\n x_from = 0\n\n # app.series\n data = app.meter.chart_data\n labels = app.meter.chart_labels\n\n resp['data'] = data[-100:] #list(map(lambda i: float(i), data))\n resp['labels'] = labels[-100:]\n resp['start_at'] = int(labels[0])\n\n return str(json.dumps(resp))\n\[email protected](\"/switch\", methods=['GET'])\ndef switch():\n if request.args.get('param') is not None:\n param_name = request.form['param']\n if param_name == \"emitter\":\n app.enable_emitter = not app.enable_emitter\n\n flags = dict()\n\n flags[\"enable_emitter\"] = app.enable_emitter\n flags[\"enable_meter\"] = app.enable_meter\n\n return str(json.dumps(flags))\n\nmain_thread = threading.currentThread()\napp = SonarApp(main_thread)\napp.sonar()\n\nflaskapp.run(port=16000, host='192.168.1.135', threaded=True)\n\n# notes()\n#choir()\n# listen()\n" ]
[ [ "numpy.arange" ] ]
reinforcementdriving/TransTrack
[ "43a6316f12a6cf2e2bdc5aed089b086017302ad8" ]
[ "track_main/main_tracktrainhalf.py" ]
[ "# Modified by Peize Sun, Rufeng Zhang\r\n# ------------------------------------------------------------------------\r\n# Deformable DETR\r\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\r\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\r\n# ------------------------------------------------------------------------\r\n# Modified from DETR (https://github.com/facebookresearch/detr)\r\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\r\n# ------------------------------------------------------------------------\r\nimport argparse\r\nimport datetime\r\nimport json\r\nimport random\r\nimport time\r\nfrom pathlib import Path\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\nimport datasets\r\nimport util.misc as utils\r\nimport datasets.samplers as samplers\r\nfrom datasets import build_dataset, get_coco_api_from_dataset\r\nfrom track_engine.engine_tracktrain import evaluate, train_one_epoch\r\nfrom models import build_tracktrain_model as build_model\r\nfrom models import Tracker\r\nfrom collections import defaultdict\r\n\r\n\r\ndef get_args_parser():\r\n parser = argparse.ArgumentParser('Deformable DETR Detector', add_help=False)\r\n parser.add_argument('--lr', default=2e-4, type=float)\r\n parser.add_argument('--lr_backbone_names', default=[\"backbone.0\"], type=str, nargs='+')\r\n parser.add_argument('--lr_backbone', default=2e-5, type=float)\r\n parser.add_argument('--lr_linear_proj_names', default=['reference_points', 'sampling_offsets'], type=str, nargs='+')\r\n parser.add_argument('--lr_linear_proj_mult', default=0.1, type=float)\r\n parser.add_argument('--batch_size', default=1, type=int)\r\n parser.add_argument('--weight_decay', default=1e-4, type=float)\r\n parser.add_argument('--epochs', default=50, type=int)\r\n parser.add_argument('--lr_drop', default=40, type=int)\r\n parser.add_argument('--lr_drop_epochs', default=None, type=int, nargs='+')\r\n parser.add_argument('--clip_max_norm', default=0.1, type=float,\r\n help='gradient clipping max norm')\r\n\r\n parser.add_argument('--sgd', action='store_true')\r\n\r\n # Variants of Deformable DETR\r\n parser.add_argument('--with_box_refine', default=False, action='store_true')\r\n parser.add_argument('--two_stage', default=False, action='store_true')\r\n\r\n # Model parameters\r\n parser.add_argument('--frozen_weights', type=str, default=None,\r\n help=\"Path to the pretrained model. 
If set, only the mask head will be trained\")\r\n\r\n # * Backbone\r\n parser.add_argument('--backbone', default='resnet50', type=str,\r\n help=\"Name of the convolutional backbone to use\")\r\n parser.add_argument('--dilation', action='store_true',\r\n help=\"If true, we replace stride with dilation in the last convolutional block (DC5)\")\r\n parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),\r\n help=\"Type of positional embedding to use on top of the image features\")\r\n parser.add_argument('--position_embedding_scale', default=2 * np.pi, type=float,\r\n help=\"position / size * scale\")\r\n parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')\r\n\r\n # * Transformer\r\n parser.add_argument('--enc_layers', default=6, type=int,\r\n help=\"Number of encoding layers in the transformer\")\r\n parser.add_argument('--dec_layers', default=6, type=int,\r\n help=\"Number of decoding layers in the transformer\")\r\n parser.add_argument('--dim_feedforward', default=1024, type=int,\r\n help=\"Intermediate size of the feedforward layers in the transformer blocks\")\r\n parser.add_argument('--hidden_dim', default=256, type=int,\r\n help=\"Size of the embeddings (dimension of the transformer)\")\r\n parser.add_argument('--dropout', default=0.1, type=float,\r\n help=\"Dropout applied in the transformer\")\r\n parser.add_argument('--nheads', default=8, type=int,\r\n help=\"Number of attention heads inside the transformer's attentions\")\r\n parser.add_argument('--num_queries', default=300, type=int,\r\n help=\"Number of query slots\")\r\n parser.add_argument('--dec_n_points', default=4, type=int)\r\n parser.add_argument('--enc_n_points', default=4, type=int)\r\n\r\n # * Segmentation\r\n parser.add_argument('--masks', action='store_true',\r\n help=\"Train segmentation head if the flag is provided\")\r\n\r\n # Loss\r\n parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',\r\n help=\"Disables auxiliary decoding losses (loss at each layer)\")\r\n\r\n # * Matcher\r\n parser.add_argument('--set_cost_class', default=2, type=float,\r\n help=\"Class coefficient in the matching cost\")\r\n parser.add_argument('--set_cost_bbox', default=5, type=float,\r\n help=\"L1 box coefficient in the matching cost\")\r\n parser.add_argument('--set_cost_giou', default=2, type=float,\r\n help=\"giou box coefficient in the matching cost\")\r\n\r\n # * Loss coefficients\r\n parser.add_argument('--mask_loss_coef', default=1, type=float)\r\n parser.add_argument('--dice_loss_coef', default=1, type=float)\r\n parser.add_argument('--cls_loss_coef', default=2, type=float)\r\n parser.add_argument('--bbox_loss_coef', default=5, type=float)\r\n parser.add_argument('--giou_loss_coef', default=2, type=float)\r\n parser.add_argument('--focal_alpha', default=0.25, type=float)\r\n\r\n # dataset parameters\r\n parser.add_argument('--dataset_file', default='coco')\r\n parser.add_argument('--coco_path', default='./data/coco', type=str)\r\n parser.add_argument('--coco_panoptic_path', type=str)\r\n parser.add_argument('--remove_difficult', action='store_true')\r\n\r\n parser.add_argument('--output_dir', default='',\r\n help='path where to save, empty for no saving')\r\n parser.add_argument('--device', default='cuda',\r\n help='device to use for training / testing')\r\n parser.add_argument('--seed', default=42, type=int)\r\n parser.add_argument('--resume', default='', help='resume from checkpoint')\r\n 
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\r\n help='start epoch')\r\n parser.add_argument('--eval', action='store_true')\r\n parser.add_argument('--num_workers', default=2, type=int)\r\n parser.add_argument('--cache_mode', default=False, action='store_true', help='whether to cache images on memory')\r\n\r\n # PyTorch checkpointing for saving memory (torch.utils.checkpoint.checkpoint)\r\n parser.add_argument('--checkpoint_enc_ffn', default=False, action='store_true')\r\n parser.add_argument('--checkpoint_dec_ffn', default=False, action='store_true')\r\n\r\n # appended for track.\r\n parser.add_argument('--track_on', default=True, type=bool)\r\n parser.add_argument('--track_thresh', default=0.4, type=float) # not use.\r\n\r\n return parser\r\n\r\n\r\ndef main(args):\r\n utils.init_distributed_mode(args)\r\n print(\"git:\\n {}\\n\".format(utils.get_sha()))\r\n\r\n if args.frozen_weights is not None:\r\n assert args.masks, \"Frozen training is meant for segmentation only\"\r\n print(args)\r\n\r\n device = torch.device(args.device)\r\n\r\n # fix the seed for reproducibility\r\n seed = args.seed + utils.get_rank()\r\n torch.manual_seed(seed)\r\n np.random.seed(seed)\r\n random.seed(seed)\r\n\r\n model, criterion, postprocessors = build_model(args)\r\n model.to(device)\r\n\r\n model_without_ddp = model\r\n n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\r\n print('number of params:', n_parameters)\r\n\r\n dataset_train = build_dataset(image_set='train', args=args)\r\n dataset_val = build_dataset(image_set='val', args=args)\r\n\r\n if args.distributed:\r\n if args.cache_mode:\r\n sampler_train = samplers.NodeDistributedSampler(dataset_train)\r\n sampler_val = samplers.NodeDistributedSampler(dataset_val, shuffle=False)\r\n else:\r\n sampler_train = samplers.DistributedSampler(dataset_train)\r\n sampler_val = samplers.DistributedSampler(dataset_val, shuffle=False)\r\n else:\r\n sampler_train = torch.utils.data.RandomSampler(dataset_train)\r\n sampler_val = torch.utils.data.SequentialSampler(dataset_val)\r\n\r\n batch_sampler_train = torch.utils.data.BatchSampler(\r\n sampler_train, args.batch_size, drop_last=True)\r\n\r\n data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,\r\n collate_fn=utils.collate_fn, num_workers=args.num_workers,\r\n pin_memory=True)\r\n data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,\r\n drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers,\r\n pin_memory=True)\r\n\r\n # lr_backbone_names = [\"backbone.0\", \"backbone.neck\", \"input_proj\", \"transformer.encoder\"]\r\n def match_name_keywords(n, name_keywords):\r\n out = False\r\n for b in name_keywords:\r\n if b in n:\r\n out = True\r\n break\r\n return out\r\n\r\n for n, p in model_without_ddp.named_parameters():\r\n print(n)\r\n\r\n param_dicts = [\r\n {\r\n \"params\":\r\n [p for n, p in model_without_ddp.named_parameters()\r\n if not match_name_keywords(n, args.lr_backbone_names) and not match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],\r\n \"lr\": args.lr,\r\n },\r\n {\r\n \"params\": [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, args.lr_backbone_names) and p.requires_grad],\r\n \"lr\": args.lr_backbone,\r\n },\r\n {\r\n \"params\": [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],\r\n \"lr\": args.lr * args.lr_linear_proj_mult,\r\n }\r\n ]\r\n if 
args.sgd:\r\n optimizer = torch.optim.SGD(param_dicts, lr=args.lr, momentum=0.9,\r\n weight_decay=args.weight_decay)\r\n else:\r\n optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,\r\n weight_decay=args.weight_decay)\r\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)\r\n\r\n if args.distributed:\r\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)\r\n model_without_ddp = model.module\r\n\r\n if args.dataset_file == \"coco_panoptic\":\r\n # We also evaluate AP during panoptic training, on original coco DS\r\n coco_val = datasets.coco.build(\"val\", args)\r\n base_ds = get_coco_api_from_dataset(coco_val)\r\n else:\r\n base_ds = get_coco_api_from_dataset(dataset_val)\r\n\r\n if args.frozen_weights is not None:\r\n checkpoint = torch.load(args.frozen_weights, map_location='cpu')\r\n model_without_ddp.detr.load_state_dict(checkpoint['model'])\r\n\r\n output_dir = Path(args.output_dir)\r\n if args.resume:\r\n if args.resume.startswith('https'):\r\n checkpoint = torch.hub.load_state_dict_from_url(\r\n args.resume, map_location='cpu', check_hash=True)\r\n else:\r\n checkpoint = torch.load(args.resume, map_location='cpu')\r\n missing_keys, unexpected_keys = model_without_ddp.load_state_dict(checkpoint['model'], strict=False)\r\n unexpected_keys = [k for k in unexpected_keys if not (k.endswith('total_params') or k.endswith('total_ops'))]\r\n if len(missing_keys) > 0:\r\n print('Missing Keys: {}'.format(missing_keys))\r\n if len(unexpected_keys) > 0:\r\n print('Unexpected Keys: {}'.format(unexpected_keys))\r\n if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:\r\n import copy\r\n p_groups = copy.deepcopy(optimizer.param_groups)\r\n optimizer.load_state_dict(checkpoint['optimizer'])\r\n for pg, pg_old in zip(optimizer.param_groups, p_groups):\r\n pg['lr'] = pg_old['lr']\r\n pg['initial_lr'] = pg_old['initial_lr']\r\n print(optimizer.param_groups)\r\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\r\n # todo: this is a hack for doing experiment that resume from checkpoint and also modify lr scheduler (e.g., decrease lr in advance).\r\n args.override_resumed_lr_drop = True\r\n if args.override_resumed_lr_drop:\r\n print('Warning: (hack) args.override_resumed_lr_drop is set to True, so args.lr_drop would override lr_drop in resumed lr_scheduler.')\r\n lr_scheduler.step_size = args.lr_drop\r\n lr_scheduler.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))\r\n lr_scheduler.step(lr_scheduler.last_epoch)\r\n args.start_epoch = checkpoint['epoch'] + 1\r\n # check the resumed model\r\n if not args.eval:\r\n test_stats, coco_evaluator, _ = evaluate(\r\n model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir\r\n )\r\n \r\n if args.eval:\r\n tracker = None\r\n if args.track_on:\r\n# assert args.batch_size == 1, print(\"Now only support 1.\")\r\n tracker = Tracker(num_ins=args.num_queries, score_thresh=args.track_thresh)\r\n test_stats, coco_evaluator, res_tracks = evaluate(model, criterion, postprocessors, data_loader_val,\r\n base_ds, device, args.output_dir, tracker=tracker)\r\n if args.output_dir:\r\n utils.save_on_master(coco_evaluator.coco_eval[\"bbox\"].eval, output_dir / \"eval.pth\")\r\n\r\n return\r\n\r\n print(\"Start training\")\r\n start_time = time.time()\r\n for epoch in range(args.start_epoch, args.epochs):\r\n if args.distributed:\r\n sampler_train.set_epoch(epoch)\r\n train_stats = 
train_one_epoch(\r\n model, criterion, data_loader_train, optimizer, device, epoch, args.clip_max_norm)\r\n lr_scheduler.step()\r\n if args.output_dir:\r\n checkpoint_paths = [output_dir / 'checkpoint.pth']\r\n # extra checkpoint before LR drop and every 5 epochs\r\n if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 5 == 0:\r\n checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')\r\n for checkpoint_path in checkpoint_paths:\r\n utils.save_on_master({\r\n 'model': model_without_ddp.state_dict(),\r\n 'optimizer': optimizer.state_dict(),\r\n 'lr_scheduler': lr_scheduler.state_dict(),\r\n 'epoch': epoch,\r\n 'args': args,\r\n }, checkpoint_path)\r\n\r\n test_stats, coco_evaluator, _ = evaluate(\r\n model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir\r\n )\r\n\r\n log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},\r\n **{f'test_{k}': v for k, v in test_stats.items()},\r\n 'epoch': epoch,\r\n 'n_parameters': n_parameters}\r\n\r\n if args.output_dir and utils.is_main_process():\r\n with (output_dir / \"log.txt\").open(\"a\") as f:\r\n f.write(json.dumps(log_stats) + \"\\n\")\r\n\r\n # for evaluation logs\r\n if coco_evaluator is not None:\r\n (output_dir / 'eval').mkdir(exist_ok=True)\r\n if \"bbox\" in coco_evaluator.coco_eval:\r\n filenames = ['latest.pth']\r\n if epoch % 50 == 0:\r\n filenames.append(f'{epoch:03}.pth')\r\n for name in filenames:\r\n torch.save(coco_evaluator.coco_eval[\"bbox\"].eval,\r\n output_dir / \"eval\" / name)\r\n\r\n total_time = time.time() - start_time\r\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\r\n print('Training time {}'.format(total_time_str))\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser('Deformable DETR training and evaluation script', parents=[get_args_parser()])\r\n args = parser.parse_args()\r\n if args.output_dir:\r\n Path(args.output_dir).mkdir(parents=True, exist_ok=True)\r\n main(args)\r\n" ]
[ [ "torch.device", "torch.optim.AdamW", "torch.optim.lr_scheduler.StepLR", "torch.utils.data.RandomSampler", "numpy.random.seed", "torch.save", "torch.utils.data.SequentialSampler", "torch.optim.SGD", "torch.nn.parallel.DistributedDataParallel", "torch.manual_seed", "torch.utils.data.DataLoader", "torch.utils.data.BatchSampler", "torch.load", "torch.hub.load_state_dict_from_url" ] ]
rainleander/100daysofcode
[ "0391170af80b251e7fb3a78a60b55c3145e4551a", "0391170af80b251e7fb3a78a60b55c3145e4551a" ]
[ "day026/main.py", "day030/NATO-error-handling/main.py" ]
[ "# List Comprehension Notes\n# Create a new list from a previous list\n# new_list = [new_item for item in list]\nnumbers = [1, 2, 3]\nnew_numbers = [n + 1 for n in numbers]\nprint(new_numbers)\n# output: [2, 3, 4]\n\n# challenge: what will happen when list comprehension is applied to a string?\nname = \"Rain\"\nname_list = [letter for letter in name.lower()]\nprint(name_list)\n# output: ['r', 'a', 'i', 'n']\n\n# challenge: create a new list from a range, where the list items are double the values in a range\nsingle_range = range(1, 5)\ndouble_range = [x * 2 for x in single_range]\nprint(double_range)\n# output: [2, 4, 6, 8]\n\n# conditional list comprehension\nnames = [\"Alex\", \"Beth\", \"Caroline\", \"Dave\", \"Eleanor\", \"Freddie\"]\nshort_names = [name for name in names if len(name) < 5]\nprint(short_names)\n# output: ['Alex', 'Beth', 'Dave']\n\n# challenge: create a new list that contains the names longer than 5 characters in ALL CAPS\nlong_names = [name.upper() for name in names if len(name) > 5]\nprint(long_names)\n# output: ['CAROLINE', 'ELEANOR', 'FREDDIE']\n\n# Dictionary Comprehension Notes\n# new_dict = {new_key:new_value for item in list}\n# new_dict = {new_key:new_value for (key, value) in dict.items()}\n# new_dict = {new_key:new_value for (key, value) in dict.items() if test}\n\nnames = ['Alex', 'Beth', 'Caroline', 'Dave', 'Eleanor', 'Freddie']\n\nimport random\nstudent_scores = {x:random.randint(1, 100) for x in names}\nprint(student_scores)\n\npassed_students = {student:score for (student, score) in student_scores.items() if score >= 60}\nprint(passed_students)\n\nstudent_dict = {\n \"student\": [\"Angela\", \"James\", \"Lily\"], \n \"score\": [56, 76, 98]\n}\n\n# # Looping through dictionaries:\n# for (key, value) in student_dict.items():\n# print(value)\n\nimport pandas\nstudent_data_frame = pandas.DataFrame(student_dict)\nprint(student_data_frame)\n\n# Loop through rows of a data frame\nfor (index, row) in student_data_frame.iterrows():\n # Access index and row\n # Access row.student or row.score\n print(row.score)\n\n# Keyword Method with iterrows()\n# {new_key:new_value for (index, row) in df.iterrows()}\n", "import pandas\n\ndata = pandas.read_csv(\"nato_phonetic_alphabet.csv\")\n\ncode_dict = {}\nfor (index, row) in data.iterrows():\n code_dict[row.letter] = row.code\n\n# DONE: catch the keyerror when a user enters a character that is not in the dictionary\n# DONE: provide feedback to the user when an illegal word is entered\n# DONE: continue prompting the user to enter another word until they enter a valid word\ninvalid_word = True\n\nwhile invalid_word:\n try:\n user_input = input(\"Enter a word: \").upper()\n list_of_code = [code_dict[letter] for letter in user_input]\n print(list_of_code)\n invalid_word = False\n except KeyError:\n print(\"Sorry - only letters in the word, please.\")\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.read_csv" ] ]
BioTuring-Notebooks/CellphoneDB
[ "9ab48d8888f11259bb3030c8d149d7bc7cdaf6e1", "9ab48d8888f11259bb3030c8d149d7bc7cdaf6e1" ]
[ "cellphonedb/src/core/tests/models/test_helper_complex.py", "cellphonedb/src/tests/test_validators/test_validator_database_random_entries.py" ]
[ "from unittest import TestCase\n\nimport pandas as pd\n\nfrom cellphonedb.src.core.Cellphonedb import data_test_dir\nfrom cellphonedb.src.core.models.complex import complex_helper\nfrom cellphonedb.utils import dataframe_functions\n\n\nclass TestHelperComplex(TestCase):\n FIXTURES_SUBPATH = '{}/helper_complex'.format(data_test_dir)\n\n def test_get_involved_complex_from_protein(self):\n proteins = pd.read_csv('{}/helper_complex_protein.csv'.format(self.FIXTURES_SUBPATH), index_col=0)\n complex_composition = pd.read_csv('{}/helper_complex_complex_composition.csv'.format(self.FIXTURES_SUBPATH))\n\n result_expected = pd.read_csv('{}/helper_complex_result.csv'.format(self.FIXTURES_SUBPATH), index_col=3)\n\n result = complex_helper.get_involved_complex_composition_from_protein(proteins, complex_composition)\n\n self.assertTrue(dataframe_functions.dataframes_has_same_data(result, result_expected))\n\n def test_get_involved_complex_from_protein_empty_result(self):\n proteins = pd.read_csv('{}/helper_complex_protein.csv'.format(self.FIXTURES_SUBPATH), index_col=0)\n proteins.drop(proteins.index, inplace=True)\n complex_composition = pd.read_csv('{}/helper_complex_complex_composition.csv'.format(self.FIXTURES_SUBPATH))\n\n result = complex_helper.get_involved_complex_composition_from_protein(proteins, complex_composition)\n\n self.assertTrue(dataframe_functions.dataframes_has_same_data(result, pd.DataFrame(columns=['complex_multidata_id', 'protein_multidata_id', 'total_protein'])))\n\n def test_get_involved_complex_composition_from_protein(self):\n proteins = pd.read_csv('{}/helper_complex_protein.csv'.format(self.FIXTURES_SUBPATH), index_col=0)\n complex_composition = pd.read_csv('{}/helper_complex_complex_composition.csv'.format(self.FIXTURES_SUBPATH))\n\n result_expected = pd.read_csv('{}/helper_complex_result_drop_duplicates.csv'.format(self.FIXTURES_SUBPATH))\n\n result = complex_helper.get_involved_complex_composition_from_protein(proteins, complex_composition)\n\n self.assertTrue(dataframe_functions.dataframes_has_same_data(result, result_expected))\n", "import pandas as pd\nfrom flask_testing import TestCase\n\nfrom cellphonedb.src.app.cellphonedb_app import cellphonedb_app\nfrom cellphonedb.src.app.app_logger import app_logger\nfrom cellphonedb.src.app.flask.flask_app import create_app\n\ncomplex_entries = [\n {\n 'data': {\n \"name\": \"aVb3 complex\",\n \"transmembrane\": True,\n \"peripheral\": False,\n \"secreted\": False,\n \"secreted_desc\": None,\n \"secreted_highlight\": False,\n \"receptor\": False,\n \"receptor_desc\": None,\n \"integrin\": True,\n \"other\": False,\n \"other_desc\": None,\n \"pdb_id\": \"1jv2\",\n \"pdb_structure\": \"TRUE\",\n \"stoichiometry\": \"ITGAV;ITGB3\",\n \"comments_complex\": \"Well known integrin combination\"\n },\n 'composition': [\"P06756\", \"P05106\"]\n },\n {\n 'data': {\n \"name\": \"a2Bb3 complex\",\n \"receptor\": False,\n \"receptor_desc\": None,\n \"other\": False,\n \"other_desc\": None,\n \"secreted_highlight\": False,\n \"secreted_desc\": None,\n \"transmembrane\": True,\n \"secreted\": False,\n \"peripheral\": False,\n \"pdb_structure\": \"TRUE\",\n \"pdb_id\": \"1kup\",\n \"stoichiometry\": \"ITGA2B;ITGB3\",\n \"comments_complex\": \"Well known integrin combination\",\n \"integrin\": True\n\n },\n 'composition':\n [\"P08514\", \"P05106\"]\n },\n\n {\n 'data': {\n \"name\": \"IL2 receptor_HA\",\n \"transmembrane\": True,\n \"peripheral\": False,\n \"secreted\": False,\n \"secreted_desc\": None,\n \"secreted_highlight\": False,\n 
\"receptor\": True,\n \"receptor_desc\": \"Cytokine receptor IL2 family\",\n \"integrin\": False,\n \"other\": False,\n \"other_desc\": None,\n \"pdb_id\": \"2b5i\",\n \"pdb_structure\": \"binding\",\n \"stoichiometry\": \"IL2;IL2RA;IL2RB;IL2RG\",\n \"comments_complex\": \"A high affinity dimer, an intermediate affinity monomer (beta subunit). The high and intermediate affinity forms also associate with a gamma subunit.\"\n },\n 'composition':\n [\"P01589\", \"P14784\", \"P31785\"]\n }\n]\nprotein_entries = [\n {\n \"name\": \"P39019\",\n \"protein_name\": \"RS19_HUMAN\",\n \"transmembrane\": False,\n \"peripheral\": False,\n \"secreted\": False,\n \"secreted_desc\": None,\n \"secreted_highlight\": False,\n \"receptor\": False,\n \"receptor_desc\": None,\n \"integrin\": False,\n \"other\": False,\n \"other_desc\": None,\n \"tags\": \"To_add\",\n \"tags_reason\": None,\n \"tags_description\": None,\n\n }, {\n\n \"name\": \"P54760\",\n \"protein_name\": \"EPHB4_HUMAN\",\n \"transmembrane\": True,\n \"peripheral\": False,\n \"secreted\": False,\n \"secreted_desc\": None,\n \"secreted_highlight\": False,\n \"receptor\": True,\n \"receptor_desc\": None,\n \"integrin\": False,\n \"other\": False,\n \"other_desc\": None,\n \"tags\": \"To_comment\",\n \"tags_reason\": \"Adhesion_add\",\n \"tags_description\": None\n\n }, {\n \"name\": \"P52799\",\n \"protein_name\": \"EFNB2_HUMAN\",\n \"transmembrane\": True,\n \"peripheral\": False,\n \"secreted\": False,\n \"secreted_desc\": None,\n \"secreted_highlight\": False,\n \"receptor\": True,\n \"receptor_desc\": None,\n \"integrin\": False,\n \"other\": False,\n \"other_desc\": None,\n \"tags\": \"To_add\",\n \"tags_reason\": None,\n \"tags_description\": \"ligandIUPHAR\",\n },\n {\n \"name\": \"P39059\",\n \"protein_name\": \"COFA1_HUMAN\",\n \"transmembrane\": False,\n \"peripheral\": False,\n \"secreted\": True,\n \"secreted_desc\": None,\n \"secreted_highlight\": False,\n \"receptor\": False,\n \"receptor_desc\": None,\n \"integrin\": False,\n \"other\": True,\n \"other_desc\": \"Collagen\",\n \"tags\": \"To_add\",\n \"tags_reason\": None,\n \"tags_description\": \"Collagen\"\n },\n]\ngene_entries = [\n {\n \"ensembl\": \"ENSG00000275555\",\n \"gene_name\": \"DLL1\",\n \"name\": \"O00548\"\n },\n {\n \"ensembl\": \"ENSG00000169306\",\n \"gene_name\": \"IL1RAPL1\",\n \"name\": \"Q9NZN1\"\n },\n {\n \"ensembl\": \"ENSG00000204642\",\n \"gene_name\": \"HLA-F\",\n \"name\": \"P30511\"\n }\n]\ninteraction_entries = [\n {\n \"id_cp_interaction\": \"CPI-CS0A66DB1CA\",\n \"name_1\": \"CD8 receptor\",\n \"name_2\": \"P06239\",\n \"source\": \"1q69, PDB partially\",\n \"annotation_strategy\": \"curated\",\n },\n {\n \"id_cp_interaction\": \"CPI-SS085EE60B1\",\n \"name_1\": \"P01137\",\n \"name_2\": \"Q03167\",\n \"source\": \"uniprot\",\n \"annotation_strategy\": \"curated\",\n },\n {\n \"id_cp_interaction\": \"CPI-SS056BE1011\",\n \"name_1\": \"O00421\",\n \"name_2\": \"Q99731\",\n \"source\": None,\n \"annotation_strategy\": \"guidetopharmacology.org\",\n },\n {\n \"id_cp_interaction\": \"CPI-SS03165AD8C\",\n \"name_1\": \"O00590\",\n \"name_2\": \"P13500\",\n \"source\": \"PMID: 24218476\",\n \"annotation_strategy\": \"curated\",\n },\n {\n \"id_cp_interaction\": \"CPI-SS0F972435E\",\n \"name_1\": \"P35916\",\n \"name_2\": \"P49767\",\n \"source\": \"uniprot\",\n \"annotation_strategy\": \"curated\",\n },\n {\n \"id_cp_interaction\": \"CPI-SS027E57635\",\n \"name_1\": \"Q8NFJ6\",\n \"name_2\": \"Q9HC23\",\n \"annotation_strategy\": 
\"guidetopharmacology.org\",\n \"source\": None\n },\n {\n \"id_cp_interaction\": \"CPI-SS0DBA4D668\",\n \"name_1\": \"P27487\",\n \"name_2\": \"P48061\",\n \"annotation_strategy\": \"curated\",\n \"source\": \"PMID: 24218476\"\n }\n]\n\n\nclass TestValidatorDatabaseRandomEntries(TestCase):\n def test_interaction(self):\n\n interaction_df = cellphonedb_app.cellphonedb.database_manager.get_repository(\n 'interaction').get_all_expanded()\n\n data_not_match = False\n\n for interaction in interaction_entries:\n db_interaction = interaction_df\n non_match_properties = []\n for column_name in interaction:\n if interaction[column_name] == None:\n db_interaction = db_interaction[pd.isnull(db_interaction[column_name])]\n else:\n db_interaction = db_interaction[db_interaction[column_name] == interaction[column_name]]\n\n if len(db_interaction) < 1:\n non_match_properties.append(column_name)\n if (len(db_interaction) < 1):\n app_logger.warning('Failed cheking Interaction:')\n app_logger.warning('Expected data:')\n app_logger.warning(interaction)\n app_logger.warning('Non Match properties')\n app_logger.warning(non_match_properties)\n data_not_match = True\n\n self.assertFalse(data_not_match, 'Some Interactions doesnt match')\n\n def test_gene(self):\n\n dataframe = cellphonedb_app.cellphonedb.database_manager.get_repository(\n 'gene').get_all_expanded()\n\n data_not_match = False\n\n for gene in gene_entries:\n db_gene = dataframe\n\n for column_name in gene:\n if gene[column_name] == None:\n db_gene = db_gene[pd.isnull(db_gene[column_name])]\n else:\n db_gene = db_gene[db_gene[column_name] == gene[column_name]]\n\n if (len(db_gene) < 1):\n app_logger.warning('Failed cheking Gene:')\n app_logger.warning('Expected data:')\n app_logger.warning(gene)\n data_not_match = True\n\n self.assertFalse(data_not_match, 'Some Gene doesnt match')\n\n def test_protein(self):\n\n dataframe = cellphonedb_app.cellphonedb.database_manager.get_repository(\n 'protein').get_all_expanded()\n\n data_not_match = False\n\n for protein in protein_entries:\n db_protein = dataframe[dataframe['name'] == protein['name']]\n\n if db_protein.empty:\n print('Protein {} dindt exist'.format(protein['name']))\n data_not_match = True\n continue\n\n for column_name in protein:\n if db_protein[column_name].iloc[0] != protein[column_name]:\n app_logger.warning('Failed checking column \\'%s\\' of multidata/protein with name \\'%s\\'' % (\n column_name, protein['name']))\n app_logger.warning('Expected value: %s' % protein[column_name])\n app_logger.warning('Database value: %s' % db_protein[column_name].iloc[0])\n app_logger.warning('---')\n data_not_match = True\n\n self.assertFalse(data_not_match, 'Some proteins doesnt match or doesnt exist')\n\n def test_complex_composition_table(self):\n df_multidata = cellphonedb_app.cellphonedb.database_manager.get_repository('multidata').get_all()\n df_complex_composition = cellphonedb_app.cellphonedb.database_manager.get_repository(\n 'complex').get_all_compositions()\n\n number_compositions_not_match = False\n some_protein_didnt_exists = False\n some_protein_not_part_of_complex = False\n some_complex_not_exist = False\n\n for complex in complex_entries:\n try:\n db_complex_id = df_multidata[df_multidata['name'] == complex['data']['name']]['id_multidata'].iloc[0]\n except IndexError:\n print('Complex {} didnt exist'.format(complex['data']['name']))\n some_complex_not_exist = True\n continue\n\n if len(df_complex_composition[df_complex_composition['complex_multidata_id'] != db_complex_id]) == len(\n 
complex['composition']):\n app_logger.warning('Failed checking number of complex_composition with name \\'%s\\'' % (\n complex['data']['name']))\n app_logger.warning('Expected value: %s' % len(\n df_complex_composition[df_complex_composition['complex_multidata_id'] == db_complex_id]))\n app_logger.warning('Database value: %s' % len(complex['composition']))\n app_logger.warning('---')\n number_compositions_not_match = True\n\n for protein_name in complex['composition']:\n db_complex_composition_ids = \\\n df_complex_composition[df_complex_composition['complex_multidata_id'] == db_complex_id][\n 'protein_multidata_id'].tolist()\n\n composition_multidata_id = df_multidata[df_multidata['name'] == protein_name]['id_multidata']\n\n if not len(composition_multidata_id):\n app_logger.warning('Failed finding protein \\'%s\\' in multidata from complex name \\'%s\\'' % (\n protein_name, complex['data']['name']))\n some_protein_didnt_exists = True\n continue\n\n if composition_multidata_id.iloc[0] not in db_complex_composition_ids:\n app_logger.warning('Failed finding protein \\'%s\\' in composition from complex name \\'%s\\'' % (\n protein_name, complex['data']['name']))\n some_protein_not_part_of_complex = True\n\n self.assertFalse(number_compositions_not_match, 'Number of complex composition doesnt match')\n self.assertFalse(some_protein_didnt_exists, 'Some complex_composition proteins doesnt match')\n self.assertFalse(some_protein_not_part_of_complex, 'Complex_composition proteins doesnt match')\n self.assertFalse(some_complex_not_exist, 'Some Complex not exist')\n\n def test_complex(self):\n\n dataframe = cellphonedb_app.cellphonedb.database_manager.get_repository(\n 'complex').get_all_expanded()\n\n data_not_match = False\n\n for complex in complex_entries:\n db_complex = dataframe[dataframe['name'] == complex['data']['name']]\n\n for complex_data in complex['data']:\n if db_complex[complex_data].iloc[0] != complex['data'][complex_data]:\n app_logger.warning('Failed checking column \\'%s\\' of multidata/complex with name \\'%s\\'' % (\n complex_data, complex['data']['name']))\n app_logger.warning('Expected value: %s' % complex['data'][complex_data])\n app_logger.warning('Database value: %s' % db_complex[complex_data].iloc[0])\n app_logger.warning('---')\n data_not_match = True\n\n self.assertFalse(data_not_match, 'Some complex doesnt match')\n\n def create_app(self):\n return create_app(raise_non_defined_vars=False, verbose=False)\n\n def setUp(self):\n self.client = self.app.test_client()\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.isnull" ] ]
janssenhenning/aiida-core
[ "bb92dd56cc1bba142df2c48f1a73ca6b809568dc" ]
[ "tests/backends/aiida_sqlalchemy/test_migrations.py" ]
[ "# -*- coding: utf-8 -*-\n###########################################################################\n# Copyright (c), The AiiDA team. All rights reserved. #\n# This file is part of the AiiDA code. #\n# #\n# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #\n# For further information on the license, see the LICENSE.txt file #\n# For further information please visit http://www.aiida.net #\n###########################################################################\n# pylint: disable=too-many-lines,protected-access\n\"\"\"Tests for the migration engine (Alembic) as well as for the AiiDA migrations for SQLAlchemy.\"\"\"\n\nfrom contextlib import contextmanager\nimport os\n\nfrom alembic import command\nfrom alembic.config import Config\nfrom sqlalchemy import column\n\nfrom aiida.backends import sqlalchemy as sa\nfrom aiida.backends.general.migrations import utils\nfrom aiida.backends.sqlalchemy import manager\nfrom aiida.backends.sqlalchemy.models.base import Base\nfrom aiida.backends.sqlalchemy.utils import flag_modified\nfrom aiida.backends.testbase import AiidaTestCase\n\nfrom .test_utils import new_database\n\n\nclass TestMigrationsSQLA(AiidaTestCase):\n \"\"\"\n This class contains tests for the migration mechanism of SQLAlchemy called\n alembic. It checks if the migrations can be applied and removed correctly.\n \"\"\"\n # The path to the folder that contains the migration configuration (the\n # actual configuration - not the testing)\n migr_method_dir_path = None\n # The path of the migration configuration (the actual configuration - not\n # the testing)\n alembic_dpath = None\n\n migrate_from = None\n migrate_to = None\n\n @classmethod\n def setUpClass(cls, *args, **kwargs):\n \"\"\"\n Prepare the test class with the alembivc configuration\n \"\"\"\n super().setUpClass(*args, **kwargs)\n cls.manager = manager.SqlaBackendManager()\n\n def setUp(self):\n \"\"\"\n Go to the migrate_from revision, apply setUpBeforeMigration, then\n run the migration.\n \"\"\"\n super().setUp()\n from aiida.orm import autogroup\n\n self.current_autogroup = autogroup.CURRENT_AUTOGROUP\n autogroup.CURRENT_AUTOGROUP = None\n assert self.migrate_from and self.migrate_to, \\\n f\"TestCase '{type(self).__name__}' must define migrate_from and migrate_to properties\"\n\n try:\n self.migrate_db_down(self.migrate_from)\n self.setUpBeforeMigration()\n self._perform_actual_migration()\n except Exception:\n # Bring back the DB to the correct state if this setup part fails\n self._reset_database_and_schema()\n autogroup.CURRENT_AUTOGROUP = self.current_autogroup\n raise\n\n def _perform_actual_migration(self):\n \"\"\"Perform the actual migration (upwards, to migrate_to).\n\n Must be called after we are properly set to be in migrate_from.\n \"\"\"\n self.migrate_db_up(self.migrate_to)\n\n def migrate_db_up(self, destination):\n \"\"\"\n Perform a migration upwards (upgrade) with alembic\n\n :param destination: the name of the destination migration\n \"\"\"\n # Undo all previous real migration of the database\n with self.manager.alembic_config() as config:\n command.upgrade(config, destination)\n\n def migrate_db_down(self, destination):\n \"\"\"\n Perform a migration downwards (downgrade) with alembic\n\n :param destination: the name of the destination migration\n \"\"\"\n with self.manager.alembic_config() as config:\n command.downgrade(config, destination)\n\n def tearDown(self):\n \"\"\"\n Resets both the database content and the schema to prepare for the\n next test\n \"\"\"\n from 
aiida.orm import autogroup\n self._reset_database_and_schema()\n autogroup.CURRENT_AUTOGROUP = self.current_autogroup\n super().tearDown()\n\n def setUpBeforeMigration(self): # pylint: disable=invalid-name\n \"\"\"\n Anything to do before running the migrations.\n This is typically implemented in test subclasses.\n \"\"\"\n\n def _reset_database_and_schema(self):\n \"\"\"\n Bring back the DB to the correct state.\n\n It is important to also reset the database content to avoid hanging\n of tests.\n \"\"\"\n self.clean_db()\n self.migrate_db_up('head')\n\n @property\n def current_rev(self):\n \"\"\"\n Utility method to get the current revision string\n \"\"\"\n from alembic.migration import MigrationContext # pylint: disable=import-error\n with sa.ENGINE.begin() as connection:\n context = MigrationContext.configure(connection)\n current_rev = context.get_current_revision()\n return current_rev\n\n @staticmethod\n def get_auto_base():\n \"\"\"\n Return the automap_base class that automatically inspects the current\n database and return SQLAlchemy Models.\n\n Note that these are NOT the ones in AiiDA SQLAlchemy models, so do not\n have the special methods that we define there (like .save()).\n \"\"\"\n from alembic.migration import MigrationContext # pylint: disable=import-error\n from sqlalchemy.ext.automap import automap_base # pylint: disable=import-error,no-name-in-module\n\n with sa.ENGINE.begin() as connection:\n context = MigrationContext.configure(connection)\n bind = context.bind\n\n base = automap_base()\n # reflect the tables\n base.prepare(bind.engine, reflect=True)\n\n return base\n\n @staticmethod\n @contextmanager\n def get_session():\n \"\"\"\n Return a session that is properly closed after use.\n \"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n with sa.ENGINE.begin() as connection:\n session = Session(connection.engine)\n yield session\n session.close()\n\n def get_current_table(self, table_name):\n \"\"\"\n Return a Model instantiated at the correct migration.\n Note that this is obtained by inspecting the database and not\n by looking into the models file. So, special methods possibly defined\n in the models files/classes are not present.\n\n For instance, you can do::\n\n DbGroup = self.get_current_table('db_dbgroup')\n\n :param table_name: the name of the table.\n \"\"\"\n base = self.get_auto_base()\n return getattr(base.classes, table_name)\n\n @staticmethod\n def get_node_array(node, name):\n return utils.load_numpy_array_from_repository(node.uuid, name)\n\n @staticmethod\n def set_node_array(node, name, array):\n \"\"\"Store a new numpy array inside a node. 
Possibly overwrite the array if it already existed.\n\n Internally, it stores a name.npy file in numpy format.\n\n :param node: The node in which to store the array.\n :param name: The name of the array.\n :param array: The numpy array to store.\n \"\"\"\n utils.store_numpy_array_in_repository(node.uuid, name, array)\n attributes = node.attributes\n if attributes is None:\n attributes = {}\n attributes[f'array|{name}'] = list(array.shape)\n node.attributes = attributes\n flag_modified(node, 'attributes')\n\n\nclass TestBackwardMigrationsSQLA(TestMigrationsSQLA):\n \"\"\"\n This is the equivalent of TestMigrationsSQLA for backward migrations.\n It assumes that the migrate_from revision is higher in the hierarchy\n than the migrate_to revision.\n \"\"\"\n\n def _perform_actual_migration(self):\n \"\"\"Perform the actual migration (downwards, to migrate_to).\n\n Must be called after we are properly set to be in migrate_from.\n \"\"\"\n self.migrate_db_down(self.migrate_to)\n\n\nclass TestMigrationEngine(TestMigrationsSQLA):\n \"\"\"\n Just a simple test to verify that the TestMigrationsSQLA class indeed\n works and moves between the expected migration revisions\n \"\"\"\n migrate_from = 'b8b23ddefad4' # b8b23ddefad4_dbgroup_name_to_label_type_to_type_string.py\n migrate_to = 'e72ad251bcdb' # e72ad251bcdb_dbgroup_class_change_type_string_values.py\n\n def setUpBeforeMigration(self):\n \"\"\"\n Cache the start revision\n \"\"\"\n self.start_revision = self.current_rev\n\n def test_revision_numbers(self):\n \"\"\"\n Check that we went to the correct version\n \"\"\"\n self.assertEqual(self.start_revision, self.migrate_from)\n self.assertEqual(self.current_rev, self.migrate_to)\n\n\nclass TestMigrationSchemaVsModelsSchema(AiidaTestCase):\n \"\"\"\n This class checks that the schema that results from a migration is the\n same as the one generated by the models. This is important since migrations are\n frequently written by hand or extended manually and we have to ensure\n that the final result is what is conceived in the SQLA models.\n \"\"\"\n # The path to the folder that contains the migration configuration (the\n # actual configuration - not the testing)\n migr_method_dir_path = None\n # The path of the migration configuration (the actual configuration - not\n # the testing)\n alembic_dpath = None\n # The alembic configuration needed for the migrations is stored here\n alembic_cfg_left = None\n\n # The URL of the databases\n db_url_left = None\n db_url_right = None\n\n def setUp(self):\n from sqlalchemydiff.util import get_temporary_uri\n\n from aiida.backends.sqlalchemy.migrations import versions\n\n self.migr_method_dir_path = os.path.dirname(os.path.realpath(manager.__file__))\n # Set the alembic script directory location\n self.alembic_dpath = os.path.join(self.migr_method_dir_path, manager.ALEMBIC_REL_PATH) # pylint: disable=no-member\n\n # Constructing the versions directory\n versions_dpath = os.path.join(os.path.dirname(versions.__file__))\n\n # Setting dynamically the path to the alembic configuration\n # (this is where the env.py file can be found)\n self.alembic_cfg_left = Config()\n self.alembic_cfg_left.set_main_option('script_location', self.alembic_dpath)\n # Setting dynamically the versions directory. These are the\n # migration scripts used to move from one revision to the other.\n self.alembic_cfg_left.set_main_option('version_locations', versions_dpath)\n\n # The URL of the SQLA database of the current\n # AiiDA connection\n curr_db_url = sa.ENGINE.url\n\n # Create new urls for the two new databases\n self.db_url_left = get_temporary_uri(str(curr_db_url))\n self.db_url_right = get_temporary_uri(str(curr_db_url))\n
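\n # The 'left' database will receive its schema by applying all the alembic migrations,\n # while the 'right' one is created directly from the SQLAlchemy models; the test below\n # then compares the two schemas.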
\n # Put the correct database url to the database used by alembic\n self.alembic_cfg_left.set_main_option('sqlalchemy.url', self.db_url_left)\n\n # Database creation\n new_database(self.db_url_left)\n new_database(self.db_url_right)\n\n def tearDown(self):\n from sqlalchemydiff.util import destroy_database\n destroy_database(self.db_url_left)\n destroy_database(self.db_url_right)\n\n def test_model_and_migration_schemas_are_the_same(self): # pylint: disable=invalid-name\n \"\"\"Compare two databases.\n\n Compares the database obtained with all migrations against the\n one we get out of the models. It produces a text file with the\n results to help debug differences.\n \"\"\"\n from sqlalchemy.engine import create_engine # pylint: disable=import-error,no-name-in-module\n from sqlalchemydiff import compare\n\n with create_engine(self.db_url_left).begin() as connection:\n self.alembic_cfg_left.attributes['connection'] = connection # pylint: disable=unsupported-assignment-operation\n command.upgrade(self.alembic_cfg_left, 'head')\n\n engine_right = create_engine(self.db_url_right)\n Base.metadata.create_all(engine_right)\n engine_right.dispose()\n\n result = compare(self.db_url_left, self.db_url_right, set(['alembic_version']))\n\n self.assertTrue(\n result.is_match,\n \"The migration database doesn't match the one \"\n 'created by the models.\\nDifferences: ' + result._dump_data(result.errors) # pylint: disable=protected-access\n )\n\n\nclass TestProvenanceRedesignMigration(TestMigrationsSQLA):\n \"\"\"Test the data migration part of the provenance redesign migration.\"\"\"\n\n migrate_from = '140c971ae0a3' # 140c971ae0a3_migrate_builtin_calculations\n migrate_to = '239cea6d2452' # 239cea6d2452_provenance_redesign\n\n def setUpBeforeMigration(self):\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n
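\n # The following nodes use legacy type strings from before the provenance redesign;\n # test_verify_migration below checks that each is mapped onto its modern counterpart,\n # e.g. 'calculation.job.*' -> 'node.process.calculation.calcjob.CalcJobNode.' and\n # 'calculation.inline.*' -> 'node.process.calculation.calcfunction.CalcFunctionNode.'.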
\n node_calc_job_known = DbNode(\n type='calculation.job.arithmetic.add.ArithmeticAddCalculation.', user_id=user.id\n )\n node_calc_job_unknown = DbNode(type='calculation.job.unknown.PluginJobCalculation.', user_id=user.id)\n node_process = DbNode(type='calculation.process.ProcessCalculation.', user_id=user.id)\n node_work_chain = DbNode(type='calculation.work.WorkCalculation.', user_id=user.id)\n node_work_function = DbNode(\n type='calculation.work.WorkCalculation.', attributes={'function_name': 'test'}, user_id=user.id\n )\n node_inline = DbNode(type='calculation.inline.InlineCalculation.', user_id=user.id)\n node_function = DbNode(type='calculation.function.FunctionCalculation.', user_id=user.id)\n\n session.add(node_calc_job_known)\n session.add(node_calc_job_unknown)\n session.add(node_process)\n session.add(node_work_chain)\n session.add(node_work_function)\n session.add(node_inline)\n session.add(node_function)\n session.commit()\n\n self.node_calc_job_known_id = node_calc_job_known.id\n self.node_calc_job_unknown_id = node_calc_job_unknown.id\n self.node_process_id = node_process.id\n self.node_work_chain_id = node_work_chain.id\n self.node_work_function_id = node_work_function.id\n self.node_inline_id = node_inline.id\n self.node_function_id = node_function.id\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_verify_migration(self):\n \"\"\"Verify that the type strings of the process nodes were successfully adapted.\"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n # Migration of calculation job with known plugin class\n node_calc_job_known = session.query(DbNode).filter(DbNode.id == self.node_calc_job_known_id).one()\n self.assertEqual(node_calc_job_known.type, 'node.process.calculation.calcjob.CalcJobNode.')\n # The test below had to be changed when the `core.` prefix was added to the `arithmetic.add` entry\n # point. This indicates that the migration of the process type for these test processes would no\n # longer work, but that only applies to databases that are still at v0.x and this change will go\n # into v2.0, so it is fine to accept that at this point.\n self.assertEqual(node_calc_job_known.process_type, 'arithmetic.add.ArithmeticAddCalculation')\n\n # Migration of calculation job with unknown plugin class\n node_calc_job_unknown = session.query(DbNode).filter(DbNode.id == self.node_calc_job_unknown_id).one()\n self.assertEqual(node_calc_job_unknown.type, 'node.process.calculation.calcjob.CalcJobNode.')\n self.assertEqual(node_calc_job_unknown.process_type, 'unknown.PluginJobCalculation')\n\n # Migration of very old `ProcessNode` class\n node_process = session.query(DbNode).filter(DbNode.id == self.node_process_id).one()\n self.assertEqual(node_process.type, 'node.process.workflow.workchain.WorkChainNode.')\n\n # Migration of old `WorkCalculation` class\n node_work_chain = session.query(DbNode).filter(DbNode.id == self.node_work_chain_id).one()\n self.assertEqual(node_work_chain.type, 'node.process.workflow.workchain.WorkChainNode.')\n\n # Migration of old `WorkCalculation` class used for work function\n node_work_function = session.query(DbNode).filter(DbNode.id == self.node_work_function_id).one()\n self.assertEqual(node_work_function.type, 'node.process.workflow.workfunction.WorkFunctionNode.')\n\n # Migration of old `InlineCalculation` class\n node_inline = session.query(DbNode).filter(DbNode.id == self.node_inline_id).one()\n self.assertEqual(node_inline.type, 'node.process.calculation.calcfunction.CalcFunctionNode.')\n\n # Migration of old `FunctionCalculation` class\n node_function = session.query(DbNode).filter(DbNode.id == self.node_function_id).one()\n self.assertEqual(node_function.type, 'node.process.workflow.workfunction.WorkFunctionNode.')\n\n finally:\n session.close()\n\n\nclass TestGroupRenamingMigration(TestMigrationsSQLA):\n \"\"\"\n Test the migration that renames the DbGroup type strings\n \"\"\"\n\n migrate_from = 'b8b23ddefad4' # b8b23ddefad4_dbgroup_name_to_label_type_to_type_string.py\n migrate_to = 'e72ad251bcdb' # e72ad251bcdb_dbgroup_class_change_type_string_values.py\n\n def setUpBeforeMigration(self):\n \"\"\"\n Create the DbGroups with the old type strings\n \"\"\"\n # Create group\n
 DbGroup = self.get_current_table('db_dbgroup') # pylint: disable=invalid-name\n DbUser = self.get_current_table('db_dbuser') # pylint: disable=invalid-name\n\n with self.get_session() as session:\n try:\n default_user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(default_user)\n session.commit()\n\n # test user group type_string: '' -> 'user'\n group_user = DbGroup(label='test_user_group', user_id=default_user.id, type_string='')\n session.add(group_user)\n # test upf group type_string: 'data.upf.family' -> 'data.upf'\n group_data_upf = DbGroup(\n label='test_data_upf_group', user_id=default_user.id, type_string='data.upf.family'\n )\n session.add(group_data_upf)\n # test auto.import group type_string: 'aiida.import' -> 'auto.import'\n group_autoimport = DbGroup(\n label='test_import_group', user_id=default_user.id, type_string='aiida.import'\n )\n session.add(group_autoimport)\n # test auto.run group type_string: 'autogroup.run' -> 'auto.run'\n group_autorun = DbGroup(\n label='test_autorun_group', user_id=default_user.id, type_string='autogroup.run'\n )\n session.add(group_autorun)\n\n session.commit()\n\n # Store values for later tests\n self.group_user_pk = group_user.id\n self.group_data_upf_pk = group_data_upf.id\n self.group_autoimport_pk = group_autoimport.id\n self.group_autorun_pk = group_autorun.id\n\n finally:\n session.close()\n\n def test_group_string_update(self):\n \"\"\"\n Test that the type strings are properly migrated\n \"\"\"\n DbGroup = self.get_current_table('db_dbgroup') # pylint: disable=invalid-name\n\n with self.get_session() as session:\n try:\n # test user group type_string: '' -> 'user'\n group_user = session.query(DbGroup).filter(DbGroup.id == self.group_user_pk).one()\n self.assertEqual(group_user.type_string, 'user')\n\n # test upf group type_string: 'data.upf.family' -> 'data.upf'\n group_data_upf = session.query(DbGroup).filter(DbGroup.id == self.group_data_upf_pk).one()\n self.assertEqual(group_data_upf.type_string, 'data.upf')\n\n # test auto.import group type_string: 'aiida.import' -> 'auto.import'\n group_autoimport = session.query(DbGroup).filter(DbGroup.id == self.group_autoimport_pk).one()\n self.assertEqual(group_autoimport.type_string, 'auto.import')\n\n # test auto.run group type_string: 'autogroup.run' -> 'auto.run'\n group_autorun = session.query(DbGroup).filter(DbGroup.id == self.group_autorun_pk).one()\n self.assertEqual(group_autorun.type_string, 'auto.run')\n finally:\n session.close()\n\n\nclass TestCalcAttributeKeysMigration(TestMigrationsSQLA):\n \"\"\"Test the migration of the keys of certain attributes for ProcessNodes and CalcJobNodes.\"\"\"\n\n migrate_from = 'e72ad251bcdb' # e72ad251bcdb_dbgroup_class_change_type_string_values\n migrate_to = '7ca08c391c49' # 7ca08c391c49_calc_job_option_attribute_keys\n\n KEY_RESOURCES_OLD = 'jobresource_params'\n KEY_RESOURCES_NEW = 'resources'\n KEY_PARSER_NAME_OLD = 'parser'\n KEY_PARSER_NAME_NEW = 'parser_name'\n KEY_PROCESS_LABEL_OLD = '_process_label'\n KEY_PROCESS_LABEL_NEW = 'process_label'\n KEY_ENVIRONMENT_VARIABLES_OLD = 'custom_environment_variables'\n KEY_ENVIRONMENT_VARIABLES_NEW = 'environment_variables'\n\n def setUpBeforeMigration(self):\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n
 user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n self.resources = {'number_machines': 1}\n self.parser_name = 'aiida.parsers:parser'\n self.process_label = 'TestLabel'\n self.environment_variables = {}\n\n attributes = {\n self.KEY_RESOURCES_OLD: self.resources,\n self.KEY_PARSER_NAME_OLD: self.parser_name,\n self.KEY_PROCESS_LABEL_OLD: self.process_label,\n self.KEY_ENVIRONMENT_VARIABLES_OLD: self.environment_variables,\n }\n node_work = DbNode(type='node.process.workflow.WorkflowNode.', attributes=attributes, user_id=user.id)\n node_calc = DbNode(\n type='node.process.calculation.calcjob.CalcJobNode.', attributes=attributes, user_id=user.id\n )\n # Create a node of a different type to ensure that its attributes are not updated\n node_other = DbNode(type='node.othernode.', attributes=attributes, user_id=user.id)\n\n session.add(node_work)\n session.add(node_calc)\n session.add(node_other)\n session.commit()\n\n self.node_work_id = node_work.id\n self.node_calc_id = node_calc.id\n self.node_other_id = node_other.id\n finally:\n session.close()\n\n def test_attribute_key_changes(self):\n \"\"\"Verify that the keys of the affected attributes are successfully changed.\"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n\n # Sentinel that cannot occur as an actual attribute value, used to detect missing keys\n not_found = tuple([0])\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n node_work = session.query(DbNode).filter(DbNode.id == self.node_work_id).one()\n self.assertEqual(node_work.attributes.get(self.KEY_PROCESS_LABEL_NEW), self.process_label)\n self.assertEqual(node_work.attributes.get(self.KEY_PROCESS_LABEL_OLD, not_found), not_found)\n\n node_calc = session.query(DbNode).filter(DbNode.id == self.node_calc_id).one()\n self.assertEqual(node_calc.attributes.get(self.KEY_PROCESS_LABEL_NEW), self.process_label)\n self.assertEqual(node_calc.attributes.get(self.KEY_PARSER_NAME_NEW), self.parser_name)\n self.assertEqual(node_calc.attributes.get(self.KEY_RESOURCES_NEW), self.resources)\n self.assertEqual(\n node_calc.attributes.get(self.KEY_ENVIRONMENT_VARIABLES_NEW), self.environment_variables\n )\n self.assertEqual(node_calc.attributes.get(self.KEY_PROCESS_LABEL_OLD, not_found), not_found)\n self.assertEqual(node_calc.attributes.get(self.KEY_PARSER_NAME_OLD, not_found), not_found)\n self.assertEqual(node_calc.attributes.get(self.KEY_RESOURCES_OLD, not_found), not_found)\n self.assertEqual(node_calc.attributes.get(self.KEY_ENVIRONMENT_VARIABLES_OLD, not_found), not_found)\n\n # The following node should not be migrated even if its attributes have the matching keys because\n # the node is not a ProcessNode\n node_other = session.query(DbNode).filter(DbNode.id == self.node_other_id).one()\n self.assertEqual(node_other.attributes.get(self.KEY_PROCESS_LABEL_OLD), self.process_label)\n self.assertEqual(node_other.attributes.get(self.KEY_PARSER_NAME_OLD), self.parser_name)\n self.assertEqual(node_other.attributes.get(self.KEY_RESOURCES_OLD), self.resources)\n self.assertEqual(\n node_other.attributes.get(self.KEY_ENVIRONMENT_VARIABLES_OLD), self.environment_variables\n )\n self.assertEqual(node_other.attributes.get(self.KEY_PROCESS_LABEL_NEW, not_found), not_found)\n self.assertEqual(node_other.attributes.get(self.KEY_PARSER_NAME_NEW, not_found), not_found)\n self.assertEqual(node_other.attributes.get(self.KEY_RESOURCES_NEW, not_found), not_found)\n
 self.assertEqual(node_other.attributes.get(self.KEY_ENVIRONMENT_VARIABLES_NEW, not_found), not_found)\n finally:\n session.close()\n\n\nclass TestDbLogMigrationRecordCleaning(TestMigrationsSQLA):\n \"\"\"Test the migration that exports and then cleans up legacy records from the DbLog table.\"\"\"\n\n migrate_from = '7ca08c391c49' # 7ca08c391c49_calc_job_option_attribute_keys\n migrate_to = '041a79fc615f' # 041a79fc615f_dblog_cleaning\n\n def tearDown(self):\n \"\"\"Need to manually delete all the workflows created for the test because the model does not exist any more.\n\n Because the model does not exist anymore, they are no longer being cleaned in the database reset of the test\n base class. To prevent foreign keys from other tables still referencing these tables, we have to make sure to\n clean them here manually, before we call the parent, which will call the standard reset database methods.\n \"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbWorkflow = self.get_auto_base().classes.db_dbworkflow # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n session.query(DbWorkflow).delete()\n session.commit()\n finally:\n session.close()\n\n super().tearDown()\n\n def setUpBeforeMigration(self):\n # pylint: disable=too-many-locals,too-many-statements\n import importlib\n\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n from aiida.backends.general.migrations.utils import dumps_json\n\n log_migration = importlib.import_module(\n 'aiida.backends.sqlalchemy.migrations.versions.041a79fc615f_dblog_cleaning'\n )\n\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbWorkflow = self.get_auto_base().classes.db_dbworkflow # pylint: disable=invalid-name\n DbLog = self.get_auto_base().classes.db_dblog # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n calc_1 = DbNode(type='node.process.calculation.CalculationNode.', user_id=user.id)\n param = DbNode(type='data.core.dict.Dict.', user_id=user.id)\n leg_workf = DbWorkflow(label='Legacy WorkflowNode', user_id=user.id)\n calc_2 = DbNode(type='node.process.calculation.CalculationNode.', user_id=user.id)\n\n session.add(calc_1)\n session.add(param)\n session.add(leg_workf)\n session.add(calc_2)\n session.commit()\n
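\n # Of the log records created below, only those attached to existing calculation nodes\n # (log_1 and log_4) should survive the migration; the legacy-workflow, unknown-entity\n # and node-less records are serialized for export and then removed.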
\n log_1 = DbLog(\n loggername='CalculationNode logger',\n objpk=calc_1.id,\n objname='node.calculation.job.quantumespresso.pw.',\n message='calculation node 1',\n metadata={\n 'msecs': 719.0849781036377,\n 'objpk': calc_1.id,\n 'lineno': 350,\n 'thread': 140011612940032,\n 'asctime': '10/21/2018 12:39:51 PM',\n 'created': 1540118391.719085,\n 'levelno': 23,\n 'message': 'calculation node 1',\n 'objname': 'node.calculation.job.quantumespresso.pw.',\n }\n )\n log_2 = DbLog(\n loggername='something.else logger',\n objpk=param.id,\n objname='something.else.',\n message='parameter data with log message'\n )\n log_3 = DbLog(\n loggername='TopologicalWorkflow logger',\n objpk=leg_workf.id,\n objname='aiida.workflows.user.topologicalworkflows.topo.TopologicalWorkflow',\n message='parameter data with log message'\n )\n log_4 = DbLog(\n loggername='CalculationNode logger',\n objpk=calc_2.id,\n objname='node.calculation.job.quantumespresso.pw.',\n message='calculation node 2',\n metadata={\n 'msecs': 719.0849781036377,\n 'objpk': calc_2.id,\n 'lineno': 360,\n 'levelno': 23,\n 'message': 'calculation node 1',\n 'objname': 'node.calculation.job.quantumespresso.pw.',\n }\n )\n # Creating two more log records that don't correspond to a node\n log_5 = DbLog(\n loggername='CalculationNode logger',\n objpk=(calc_2.id + 1000),\n objname='node.calculation.job.quantumespresso.pw.',\n message='calculation node 1000',\n metadata={\n 'msecs': 718,\n 'objpk': (calc_2.id + 1000),\n 'lineno': 361,\n 'levelno': 25,\n 'message': 'calculation node 1000',\n 'objname': 'node.calculation.job.quantumespresso.pw.',\n }\n )\n log_6 = DbLog(\n loggername='CalculationNode logger',\n objpk=(calc_2.id + 1001),\n objname='node.calculation.job.quantumespresso.pw.',\n message='calculation node 10001',\n metadata={\n 'msecs': 722,\n 'objpk': (calc_2.id + 1001),\n 'lineno': 362,\n 'levelno': 24,\n 'message': 'calculation node 1001',\n 'objname': 'node.calculation.job.quantumespresso.pw.',\n }\n )\n\n session.add(log_1)\n session.add(log_2)\n session.add(log_3)\n session.add(log_4)\n session.add(log_5)\n session.add(log_6)\n\n session.commit()\n\n # Storing temporarily information needed for the check at the test\n self.to_check = {}\n\n # Keeping calculation & calculation log ids\n self.to_check['CalculationNode'] = (\n calc_1.id,\n log_1.id,\n calc_2.id,\n log_4.id,\n )\n\n # The columns to project\n cols_to_project = []\n for val in log_migration.values_to_export:\n cols_to_project.append(getattr(DbLog, val))\n\n # Getting the serialized Dict logs\n param_data = session.query(DbLog).filter(DbLog.objpk == param.id\n ).filter(DbLog.objname == 'something.else.'\n ).with_entities(*cols_to_project).one()\n serialized_param_data = dumps_json([param_data._asdict()])\n # Getting the serialized logs for the unknown entity logs (as the export migration function\n # provides them) - this should coincide with the above\n serialized_unknown_exp_logs = log_migration.get_serialized_unknown_entity_logs(connection)\n # Getting their number\n unknown_exp_logs_number = log_migration.get_unknown_entity_log_number(connection)\n self.to_check['Dict'] = (serialized_param_data, serialized_unknown_exp_logs, unknown_exp_logs_number)\n\n # Getting the serialized legacy workflow logs\n # yapf: disable\n leg_wf = session.query(DbLog).filter(DbLog.objpk == leg_workf.id).filter(\n DbLog.objname == 'aiida.workflows.user.topologicalworkflows.topo.TopologicalWorkflow'\n ).with_entities(*cols_to_project).one()\n serialized_leg_wf_logs = dumps_json([leg_wf._asdict()])\n # Getting the serialized logs for the legacy workflow logs (as the export migration function\n # provides them) - this should coincide with the above\n serialized_leg_wf_exp_logs = log_migration.get_serialized_legacy_workflow_logs(connection)\n leg_wf_exp_logs_number = log_migration.get_legacy_workflow_log_number(connection)\n self.to_check['WorkflowNode'] = (serialized_leg_wf_logs, serialized_leg_wf_exp_logs,\n leg_wf_exp_logs_number)\n\n # Getting the serialized logs that don't correspond to a DbNode record\n logs_no_node = session.query(DbLog).filter(\n DbLog.id.in_([log_5.id, log_6.id])).with_entities(*cols_to_project)\n logs_no_node_list = [log_no_node._asdict() for log_no_node in logs_no_node]\n serialized_logs_no_node = dumps_json(logs_no_node_list)\n\n # Getting the serialized logs that don't correspond to a node (as the export migration function\n # provides them) - this should coincide with the above\n
 serialized_logs_exp_no_node = log_migration.get_serialized_logs_with_no_nodes(connection)\n logs_no_node_number = log_migration.get_logs_with_no_nodes_number(connection)\n self.to_check['NoNode'] = (serialized_logs_no_node, serialized_logs_exp_no_node, logs_no_node_number)\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_dblog_calculation_node(self):\n \"\"\"\n Verify that after the migration only two log records are left and that they correspond to\n the CalculationNodes.\n \"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbLog = self.get_auto_base().classes.db_dblog # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n # Check that only two log records exist\n self.assertEqual(session.query(DbLog).count(), 2, 'There should be two log records left')\n\n # Get the node id of the log record referencing the node and verify that it is the correct one\n dbnode_id_1 = session.query(DbLog).filter(\n DbLog.id == self.to_check['CalculationNode'][1]).with_entities(column('dbnode_id')).one()[0]\n self.assertEqual(dbnode_id_1, self.to_check['CalculationNode'][0], 'The referenced node is not '\n 'the expected one')\n dbnode_id_2 = session.query(DbLog).filter(\n DbLog.id == self.to_check['CalculationNode'][3]).with_entities(column('dbnode_id')).one()[0]\n self.assertEqual(dbnode_id_2, self.to_check['CalculationNode'][2], 'The referenced node is not '\n 'the expected one')\n finally:\n session.close()\n\n def test_dblog_correct_export_of_logs(self):\n \"\"\"\n Verify that export log methods for legacy workflows, unknown entities and log records that\n don't correspond to nodes, work as expected\n \"\"\"\n import json\n\n self.assertEqual(self.to_check['Dict'][0], self.to_check['Dict'][1])\n self.assertEqual(self.to_check['Dict'][2], 1)\n\n self.assertEqual(self.to_check['WorkflowNode'][0], self.to_check['WorkflowNode'][1])\n self.assertEqual(self.to_check['WorkflowNode'][2], 1)\n\n self.assertEqual(sorted(list(json.loads(self.to_check['NoNode'][0])), key=lambda k: k['id']),\n sorted(list(json.loads(self.to_check['NoNode'][1])), key=lambda k: k['id']))\n self.assertEqual(self.to_check['NoNode'][2], 2)\n\n def test_metadata_correctness(self):\n \"\"\"\n Verify that the metadata of the remaining records no longer contain `objpk` and `objname` values.\n \"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbLog = self.get_auto_base().classes.db_dblog # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n metadata = list(session.query(DbLog).with_entities(getattr(DbLog, 'metadata')).all())\n # Verify that the objpk and objname are no longer part of the metadata\n for (m_res,) in metadata:\n self.assertNotIn('objpk', m_res.keys(), 'objpk should not exist any more in metadata')\n self.assertNotIn('objname', m_res.keys(), 'objname should not exist any more in metadata')\n\n finally:\n session.close()\n\n\nclass TestDbLogMigrationBackward(TestBackwardMigrationsSQLA):\n \"\"\"\n Check that backward migrations work also for the DbLog migration(s).\n \"\"\"\n migrate_from = '041a79fc615f' # 041a79fc615f_dblog_cleaning\n migrate_to = '7ca08c391c49' # 7ca08c391c49_calc_job_option_attribute_keys\n
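\n # The downgrade has to restore the `objpk` and `objname` columns (and the corresponding\n # metadata entries) from the `dbnode_id` and the type of the linked node, which is what\n # test_objpk_objname below verifies.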
\n def setUpBeforeMigration(self):\n # pylint: disable=too-many-locals,too-many-statements\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbLog = self.get_auto_base().classes.db_dblog # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n calc_1 = DbNode(type='node.process.calculation.CalculationNode.1', user_id=user.id)\n calc_2 = DbNode(type='node.process.calculation.CalculationNode.2', user_id=user.id)\n\n session.add(calc_1)\n session.add(calc_2)\n session.commit()\n\n log_1 = DbLog(\n loggername='CalculationNode logger',\n dbnode_id=calc_1.id,\n message='calculation node 1',\n metadata={\n 'msecs': 719.0849781036377,\n 'lineno': 350,\n 'thread': 140011612940032,\n 'asctime': '10/21/2018 12:39:51 PM',\n 'created': 1540118391.719085,\n 'levelno': 23,\n 'message': 'calculation node 1',\n })\n log_2 = DbLog(\n loggername='CalculationNode logger',\n dbnode_id=calc_2.id,\n message='calculation node 2',\n metadata={\n 'msecs': 719.0849781036377,\n 'lineno': 360,\n 'levelno': 23,\n 'message': 'calculation node 1',\n })\n\n session.add(log_1)\n session.add(log_2)\n\n session.commit()\n\n # Keeping what is needed to be verified at the test\n self.to_check = {}\n self.to_check[log_1.id] = (log_1.dbnode_id, calc_1.type)\n self.to_check[log_2.id] = (log_2.dbnode_id, calc_2.type)\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_objpk_objname(self):\n \"\"\"\n This test verifies that the objpk and objname have the right values\n after a forward and a backward migration.\n \"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbLog = self.get_auto_base().classes.db_dblog # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n for log_pk, to_check_value in self.to_check.items():\n log_entry = session.query(DbLog).filter(DbLog.id == log_pk).one()\n log_dbnode_id, node_type = to_check_value\n\n self.assertEqual(\n log_dbnode_id, log_entry.objpk,\n 'The dbnode_id ({}) of the 0024 schema version should be identical to the objpk ({}) of '\n 'the 0023 schema version.'.format(log_dbnode_id, log_entry.objpk))\n self.assertEqual(\n node_type, log_entry.objname,\n 'The type ({}) of the linked node of the 0024 schema version should be identical to the '\n 'objname ({}) of the 0023 schema version.'.format(node_type, log_entry.objname))\n self.assertEqual(\n log_dbnode_id, log_entry.metadata['objpk'],\n 'The dbnode_id ({}) of the 0024 schema version should be identical to the objpk ({}) of '\n 'the 0023 schema version stored in the metadata.'.format(log_dbnode_id,\n log_entry.metadata['objpk']))\n self.assertEqual(\n node_type, log_entry.metadata['objname'],\n 'The type ({}) of the linked node of the 0024 schema version should be identical to the '\n 'objname ({}) of the 0023 schema version stored in the metadata.'.format(\n node_type, log_entry.metadata['objname']))\n finally:\n session.close()\n\n\nclass TestDbLogUUIDAddition(TestMigrationsSQLA):\n \"\"\"\n Test that the UUID column is correctly added to the DbLog table and that the uniqueness\n constraint is added without problems (if the migration reaches 375c2db70663, the\n constraint was added properly).\n \"\"\"\n\n migrate_from = '041a79fc615f' # 041a79fc615f_dblog_cleaning\n
 migrate_to = '375c2db70663' # 375c2db70663_dblog_uuid_uniqueness_constraint\n\n def setUpBeforeMigration(self):\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbLog = self.get_auto_base().classes.db_dblog # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n calc_1 = DbNode(type='node.process.calculation.CalculationNode.', user_id=user.id)\n calc_2 = DbNode(type='node.process.calculation.CalculationNode.', user_id=user.id)\n\n session.add(calc_1)\n session.add(calc_2)\n session.commit()\n\n log_1 = DbLog(loggername='CalculationNode logger', dbnode_id=calc_1.id, message='calculation node 1')\n log_2 = DbLog(loggername='CalculationNode logger', dbnode_id=calc_2.id, message='calculation node 2')\n\n session.add(log_1)\n session.add(log_2)\n\n session.commit()\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_dblog_unique_uuids(self):\n \"\"\"\n Verify that the UUIDs of the log records are unique\n \"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbLog = self.get_auto_base().classes.db_dblog # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n l_uuids = list(session.query(DbLog).with_entities(getattr(DbLog, 'uuid')).all())\n s_uuids = set(l_uuids)\n self.assertEqual(len(l_uuids), len(s_uuids), 'The UUIDs are not all unique.')\n finally:\n session.close()\n\n\nclass TestDataMoveWithinNodeMigration(TestMigrationsSQLA):\n \"\"\"Test the migration of Data nodes after the data module was moved within the node module.\"\"\"\n\n migrate_from = '041a79fc615f' # 041a79fc615f_dblog_cleaning\n migrate_to = '6a5c2ea1439d' # 6a5c2ea1439d_move_data_within_node_module\n\n def setUpBeforeMigration(self):\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n node_calc = DbNode(type='node.process.calculation.calcjob.CalcJobNode.', user_id=user.id)\n node_data = DbNode(type='data.core.int.Int.', user_id=user.id)\n\n session.add(node_data)\n session.add(node_calc)\n session.commit()\n\n self.node_calc_id = node_calc.id\n self.node_data_id = node_data.id\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_data_node_type_string(self):\n \"\"\"Verify that type string of the Data node was successfully adapted.\"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n # The data node should have been touched and migrated\n node_data = session.query(DbNode).filter(DbNode.id == self.node_data_id).one()\n self.assertEqual(node_data.type, 'node.data.core.int.Int.')\n\n
 # The calc node by contrast should not have been changed\n node_calc = session.query(DbNode).filter(DbNode.id == self.node_calc_id).one()\n self.assertEqual(node_calc.type, 'node.process.calculation.calcjob.CalcJobNode.')\n finally:\n session.close()\n\n\nclass TestTrajectoryDataMigration(TestMigrationsSQLA):\n \"\"\"Test the migration of the symbols from numpy array to attribute for TrajectoryData nodes.\"\"\"\n import numpy\n\n migrate_from = '37f3d4882837' # 37f3d4882837_make_all_uuid_columns_unique\n migrate_to = 'ce56d84bcc35' # ce56d84bcc35_delete_trajectory_symbols_array\n\n stepids = numpy.array([60, 70])\n times = stepids * 0.01\n positions = numpy.array(\n [[[0., 0., 0.], [0.5, 0.5, 0.5], [1.5, 1.5, 1.5]], [[0., 0., 0.], [0.5, 0.5, 0.5], [1.5, 1.5, 1.5]]])\n velocities = numpy.array(\n [[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [-0.5, -0.5, -0.5]]])\n cells = numpy.array([[[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]], [[3., 0., 0.], [0., 3., 0.], [0., 0., 3.]]])\n\n def setUpBeforeMigration(self):\n import numpy\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n node = DbNode(type='node.data.array.trajectory.TrajectoryData.', user_id=user.id)\n session.add(node)\n session.commit()\n\n symbols = numpy.array(['H', 'O', 'C'])\n\n # The 'symbols' array should be moved into the attributes by the migration, while\n # the other arrays must remain untouched (see test_trajectory_symbols below)\n self.set_node_array(node, 'steps', self.stepids)\n self.set_node_array(node, 'cells', self.cells)\n self.set_node_array(node, 'symbols', symbols)\n self.set_node_array(node, 'positions', self.positions)\n self.set_node_array(node, 'times', self.times)\n self.set_node_array(node, 'velocities', self.velocities)\n session.commit()\n\n self.node_uuid = node.uuid\n finally:\n session.close()\n\n def test_trajectory_symbols(self):\n \"\"\"Verify that migration of symbols from repository array to attribute works properly.\"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n node = session.query(DbNode).filter(DbNode.uuid == self.node_uuid).one()\n\n self.assertSequenceEqual(node.attributes['symbols'], ['H', 'O', 'C'])\n self.assertSequenceEqual(self.get_node_array(node, 'velocities').tolist(), self.velocities.tolist())\n self.assertSequenceEqual(self.get_node_array(node, 'positions').tolist(), self.positions.tolist())\n with self.assertRaises(IOError):\n self.get_node_array(node, 'symbols')\n\n finally:\n session.close()\n\n\nclass TestNodePrefixRemovalMigration(TestMigrationsSQLA):\n \"\"\"Test the migration that removes the `node.` prefix from the node type string.\"\"\"\n\n migrate_from = 'ce56d84bcc35' # ce56d84bcc35_delete_trajectory_symbols_array\n migrate_to = '61fc0913fae9' # 61fc0913fae9_remove_node_prefix\n
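\n # Both the data and the process node below start with the 'node.' prefix; the migration\n # strips it, e.g. 'node.data.int.Int.' -> 'data.int.Int.' (see test_data_node_type_string).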
\n def setUpBeforeMigration(self):\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n node_calc = DbNode(type='node.process.calculation.calcjob.CalcJobNode.', user_id=user.id)\n node_data = DbNode(type='node.data.int.Int.', user_id=user.id)\n\n session.add(node_data)\n session.add(node_calc)\n session.commit()\n\n self.node_calc_id = node_calc.id\n self.node_data_id = node_data.id\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_data_node_type_string(self):\n \"\"\"Verify that the `node.` prefix was removed from the type string of both nodes.\"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n # Verify that the `node.` prefix has been dropped from both the data as well as the process node\n node_data = session.query(DbNode).filter(DbNode.id == self.node_data_id).one()\n self.assertEqual(node_data.type, 'data.int.Int.')\n\n node_calc = session.query(DbNode).filter(DbNode.id == self.node_calc_id).one()\n self.assertEqual(node_calc.type, 'process.calculation.calcjob.CalcJobNode.')\n finally:\n session.close()\n\n\nclass TestParameterDataToDictMigration(TestMigrationsSQLA):\n \"\"\"Test the data migration after `ParameterData` was renamed to `Dict`.\"\"\"\n\n migrate_from = '61fc0913fae9' # 61fc0913fae9_remove_node_prefix\n migrate_to = 'd254fdfed416' # d254fdfed416_rename_parameter_data_to_dict\n\n def setUpBeforeMigration(self):\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n node = DbNode(type='data.parameter.ParameterData.', user_id=user.id)\n\n session.add(node)\n session.commit()\n\n self.node_id = node.id\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_type_string(self):\n \"\"\"Verify that type string of the Data node was successfully adapted.\"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n node = session.query(DbNode).filter(DbNode.id == self.node_id).one()\n self.assertEqual(node.type, 'data.dict.Dict.')\n finally:\n session.close()\n\n\nclass TestLegacyJobCalcStateDataMigration(TestMigrationsSQLA):\n \"\"\"Test the migration that performs a data migration of legacy `JobCalcState`.\"\"\"\n\n migrate_from = '07fac78e6209'\n migrate_to = '26d561acd560'\n\n def setUpBeforeMigration(self):\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n from aiida.backends.general.migrations.calc_state import STATE_MAPPING\n\n self.state_mapping = STATE_MAPPING\n self.nodes = {}\n
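\n # STATE_MAPPING maps each legacy `state` attribute value onto the corresponding\n # (process_state, process_status, exit_status) triple of modern process nodes,\n # as asserted in test_data_migrated below.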
\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n for state in self.state_mapping:\n node = DbNode(\n node_type='process.calculation.calcjob.CalcJobNode.',\n user_id=user.id,\n attributes={'state': state}\n )\n session.add(node)\n session.commit()\n\n self.nodes[state] = node.id\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_data_migrated(self):\n \"\"\"Verify that the `process_state`, `process_status` and `exit_status` are set correctly.\"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n for state, pk in self.nodes.items():\n node = session.query(DbNode).filter(DbNode.id == pk).one()\n attrs = node.attributes\n self.assertEqual(attrs.get('process_state', None), self.state_mapping[state].process_state)\n self.assertEqual(attrs.get('process_status', None), self.state_mapping[state].process_status)\n self.assertEqual(attrs.get('exit_status', None), self.state_mapping[state].exit_status)\n self.assertEqual(attrs.get('process_label'), 'Legacy JobCalculation')\n self.assertIsNone(attrs.get('state', None)) # The old state should have been removed\n\n exit_status = attrs.get('exit_status', None)\n if exit_status is not None:\n self.assertIsInstance(exit_status, int)\n finally:\n session.close()\n\n\nclass TestResetHash(TestMigrationsSQLA):\n \"\"\"Test the migration that resets the node hash.\"\"\"\n\n migrate_from = '26d561acd560'\n migrate_to = 'e797afa09270'\n\n def setUpBeforeMigration(self):\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n node = DbNode(\n node_type='process.calculation.calcjob.CalcJobNode.',\n user_id=user.id,\n extras={'something': 123, '_aiida_hash': 'abcd'}\n )\n session.add(node)\n session.commit()\n\n self.node_id = node.id\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_data_migrated(self):\n \"\"\"Verify that only the _aiida_hash extra has been removed.\"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n node = session.query(DbNode).filter(DbNode.id == self.node_id).one()\n extras = node.extras\n self.assertEqual(extras.get('something'), 123) # Other extras should be untouched\n self.assertNotIn('_aiida_hash', extras) # The hash extra should have been removed\n finally:\n session.close()\n\n\nclass TestLegacyProcessAttributeMigration(TestMigrationsSQLA):\n \"\"\"Test the migration that performs a data migration of legacy process attributes.\"\"\"\n\n migrate_from = 'e797afa09270'\n migrate_to = 'e734dd5e50d7'\n
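\n # Attributes like `_sealed` and `_finished` predate the modern `process_state`/`sealed`\n # attributes; the migration renames `_sealed` to `sealed` and drops the other legacy flags\n # for process nodes, while leaving data nodes untouched (see test_data_migrated below).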
\n def setUpBeforeMigration(self):\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n node_process = DbNode(\n node_type='process.calculation.calcjob.CalcJobNode.',\n user_id=user.id,\n attributes={\n '_sealed': True,\n '_finished': True,\n '_failed': False,\n '_aborted': False,\n '_do_abort': False,\n })\n\n # This is an \"active\" modern process, due to its `process_state` and should *not* receive the\n # `sealed` attribute\n node_process_active = DbNode(\n node_type='process.calculation.calcjob.CalcJobNode.',\n user_id=user.id,\n attributes={\n 'process_state': 'created',\n '_finished': True,\n '_failed': False,\n '_aborted': False,\n '_do_abort': False,\n })\n\n # Note that `Data` nodes should not have these attributes in real databases but the migration explicitly\n # excludes data nodes, which is what this test is verifying, by checking they are not deleted\n node_data = DbNode(\n node_type='data.core.dict.Dict.',\n user_id=user.id,\n attributes={\n '_sealed': True,\n '_finished': True,\n '_failed': False,\n '_aborted': False,\n '_do_abort': False,\n })\n\n session.add(node_process)\n session.add(node_process_active)\n session.add(node_data)\n session.commit()\n\n self.node_process_id = node_process.id\n self.node_process_active_id = node_process_active.id\n self.node_data_id = node_data.id\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_data_migrated(self):\n \"\"\"Verify that the attributes for process node have been deleted and `_sealed` has been changed to `sealed`.\"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n deleted_keys = ['_sealed', '_finished', '_failed', '_aborted', '_do_abort']\n\n node_process = session.query(DbNode).filter(DbNode.id == self.node_process_id).one()\n self.assertEqual(node_process.attributes['sealed'], True)\n for key in deleted_keys:\n self.assertNotIn(key, node_process.attributes)\n\n node_process_active = session.query(DbNode).filter(DbNode.id == self.node_process_active_id).one()\n self.assertNotIn('sealed', node_process_active.attributes)\n for key in deleted_keys:\n self.assertNotIn(key, node_process_active.attributes)\n\n node_data = session.query(DbNode).filter(DbNode.id == self.node_data_id).one()\n self.assertEqual(node_data.attributes.get('sealed', None), None)\n for key in deleted_keys:\n self.assertIn(key, node_data.attributes)\n\n finally:\n session.close()\n\n\nclass TestSealUnsealedProcessesMigration(TestMigrationsSQLA):\n \"\"\"Test the migration that sets the `sealed` attribute on process nodes that were never sealed.\"\"\"\n\n migrate_from = 'e734dd5e50d7'\n migrate_to = '7b38a9e783e7'\n\n def setUpBeforeMigration(self):\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n
 user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n node_process = DbNode(\n node_type='process.calculation.calcjob.CalcJobNode.',\n user_id=user.id,\n attributes={\n 'process_state': 'finished',\n 'sealed': True,\n })\n\n # This is an \"active\" modern process, due to its `process_state` and should *not* receive the\n # `sealed` attribute\n node_process_active = DbNode(\n node_type='process.calculation.calcjob.CalcJobNode.',\n user_id=user.id,\n attributes={\n 'process_state': 'created',\n })\n\n # This is a legacy process that does not even have a `process_state`\n node_process_legacy = DbNode(\n node_type='process.calculation.calcfunction.CalcFunctionNode.',\n user_id=user.id,\n attributes={}\n )\n\n # Data nodes should never receive the `sealed` attribute; the migration has to skip them\n node_data = DbNode(\n node_type='data.core.dict.Dict.',\n user_id=user.id,\n attributes={}\n )\n\n session.add(node_process)\n session.add(node_process_active)\n session.add(node_process_legacy)\n session.add(node_data)\n session.commit()\n\n self.node_process_id = node_process.id\n self.node_process_active_id = node_process_active.id\n self.node_process_legacy_id = node_process_legacy.id\n self.node_data_id = node_data.id\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_data_migrated(self):\n \"\"\"Verify that the `sealed` attribute was set for terminated and legacy process nodes only.\"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n node_process = session.query(DbNode).filter(DbNode.id == self.node_process_id).one()\n self.assertEqual(node_process.attributes['sealed'], True)\n\n node_process_active = session.query(DbNode).filter(DbNode.id == self.node_process_active_id).one()\n self.assertNotIn('sealed', node_process_active.attributes)\n\n node_process_legacy = session.query(DbNode).filter(DbNode.id == self.node_process_legacy_id).one()\n self.assertEqual(node_process_legacy.attributes['sealed'], True)\n\n node_data = session.query(DbNode).filter(DbNode.id == self.node_data_id).one()\n self.assertNotIn('sealed', node_data.attributes)\n\n finally:\n session.close()\n\n\nclass TestDefaultLinkLabelMigration(TestMigrationsSQLA):\n \"\"\"Test the migration that performs a data migration of legacy default link labels.\"\"\"\n\n migrate_from = '91b573400be5'\n migrate_to = '118349c10896'\n\n def setUpBeforeMigration(self):\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbLink = self.get_auto_base().classes.db_dblink # pylint: disable=invalid-name\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n node_process = DbNode(node_type='process.calculation.calcjob.CalcJobNode.', user_id=user.id)\n node_data = DbNode(node_type='data.core.dict.Dict.', user_id=user.id)\n\n session.add(node_process)\n session.add(node_data)\n session.commit()\n\n # Create the link only after the nodes have been committed, so that their ids are populated\n link = DbLink(input_id=node_data.id, output_id=node_process.id, type='input', label='_return')\n
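 # '_return' was the legacy default link label; the migration should rename it to\n # 'result' (see test_data_migrated below).\n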
 session.add(link)\n session.commit()\n\n self.node_process_id = node_process.id\n self.node_data_id = node_data.id\n self.link_id = link.id\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_data_migrated(self):\n \"\"\"Verify that the legacy default link label `_return` was renamed to `result`.\"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbLink = self.get_auto_base().classes.db_dblink # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n link = session.query(DbLink).filter(DbLink.id == self.link_id).one()\n self.assertEqual(link.label, 'result')\n\n finally:\n session.close()\n\n\nclass TestGroupTypeStringMigration(TestMigrationsSQLA):\n \"\"\"Test the migration that renames the DbGroup type strings.\"\"\"\n\n migrate_from = '118349c10896' # 118349c10896_default_link_label.py\n migrate_to = 'bf591f31dd12' # bf591f31dd12_dbgroup_type_string.py\n\n def setUpBeforeMigration(self):\n \"\"\"Create the DbGroups with the old type strings.\"\"\"\n DbGroup = self.get_current_table('db_dbgroup') # pylint: disable=invalid-name\n DbUser = self.get_current_table('db_dbuser') # pylint: disable=invalid-name\n\n with self.get_session() as session:\n try:\n default_user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(default_user)\n session.commit()\n\n # test user group type_string: 'user' -> 'core'\n group_user = DbGroup(label='01', user_id=default_user.id, type_string='user')\n session.add(group_user)\n # test upf group type_string: 'data.upf' -> 'core.upf'\n group_data_upf = DbGroup(label='02', user_id=default_user.id, type_string='data.upf')\n session.add(group_data_upf)\n # test auto.import group type_string: 'auto.import' -> 'core.import'\n group_autoimport = DbGroup(label='03', user_id=default_user.id, type_string='auto.import')\n session.add(group_autoimport)\n # test auto.run group type_string: 'auto.run' -> 'core.auto'\n group_autorun = DbGroup(label='04', user_id=default_user.id, type_string='auto.run')\n session.add(group_autorun)\n\n session.commit()\n\n # Store values for later tests\n self.group_user_pk = group_user.id\n self.group_data_upf_pk = group_data_upf.id\n self.group_autoimport_pk = group_autoimport.id\n self.group_autorun_pk = group_autorun.id\n\n finally:\n session.close()\n\n def test_group_string_update(self):\n \"\"\"Test that the type strings are properly migrated.\"\"\"\n DbGroup = self.get_current_table('db_dbgroup') # pylint: disable=invalid-name\n\n with self.get_session() as session:\n try:\n # test user group type_string: 'user' -> 'core'\n group_user = session.query(DbGroup).filter(DbGroup.id == self.group_user_pk).one()\n self.assertEqual(group_user.type_string, 'core')\n\n # test upf group type_string: 'data.upf' -> 'core.upf'\n group_data_upf = session.query(DbGroup).filter(DbGroup.id == self.group_data_upf_pk).one()\n self.assertEqual(group_data_upf.type_string, 'core.upf')\n\n # test auto.import group type_string: 'auto.import' -> 'core.import'\n group_autoimport = session.query(DbGroup).filter(DbGroup.id == self.group_autoimport_pk).one()\n self.assertEqual(group_autoimport.type_string, 'core.import')\n\n # test auto.run group type_string: 'auto.run' -> 'core.auto'\n group_autorun = session.query(DbGroup).filter(DbGroup.id == self.group_autorun_pk).one()\n self.assertEqual(group_autorun.type_string, 'core.auto')\n
 finally:\n session.close()\n\n\nclass TestGroupExtrasMigration(TestMigrationsSQLA):\n \"\"\"Test migration to add the `extras` JSONB column to the `DbGroup` model.\"\"\"\n\n migrate_from = 'bf591f31dd12' # bf591f31dd12_dbgroup_type_string.py\n migrate_to = '0edcdd5a30f0' # 0edcdd5a30f0_dbgroup_extras.py\n\n def setUpBeforeMigration(self):\n \"\"\"Create a DbGroup.\"\"\"\n DbGroup = self.get_current_table('db_dbgroup') # pylint: disable=invalid-name\n DbUser = self.get_current_table('db_dbuser') # pylint: disable=invalid-name\n\n with self.get_session() as session:\n try:\n default_user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(default_user)\n session.commit()\n\n group = DbGroup(label='01', user_id=default_user.id, type_string='user')\n session.add(group)\n session.commit()\n\n # Store values for later tests\n self.group_pk = group.id\n\n finally:\n session.close()\n\n def test_group_extras(self):\n \"\"\"Test that the model now has an extras column with empty dictionary as default.\"\"\"\n DbGroup = self.get_current_table('db_dbgroup') # pylint: disable=invalid-name\n\n with self.get_session() as session:\n try:\n group = session.query(DbGroup).filter(DbGroup.id == self.group_pk).one()\n self.assertEqual(group.extras, {})\n finally:\n session.close()\n\n\nclass TestNodeRepositoryMetadataMigration(TestMigrationsSQLA):\n \"\"\"Test migration adding the `repository_metadata` column to the `Node` model.\"\"\"\n\n migrate_from = '0edcdd5a30f0' # 0edcdd5a30f0_dbgroup_extras.py\n migrate_to = '7536a82b2cc4' # 7536a82b2cc4_add_node_repository_metadata.py\n\n def setUpBeforeMigration(self):\n \"\"\"Create a single node before migration.\"\"\"\n DbNode = self.get_current_table('db_dbnode') # pylint: disable=invalid-name\n DbUser = self.get_current_table('db_dbuser') # pylint: disable=invalid-name\n\n with self.get_session() as session:\n try:\n default_user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(default_user)\n session.commit()\n\n node = DbNode(user_id=default_user.id)\n session.add(node)\n session.commit()\n\n self.node_id = node.id\n\n finally:\n session.close()\n\n def test_add_node_repository_metadata(self):\n \"\"\"Test that the column is added and empty by default.\"\"\"\n DbNode = self.get_current_table('db_dbnode') # pylint: disable=invalid-name\n\n with self.get_session() as session:\n try:\n node = session.query(DbNode).filter(DbNode.id == self.node_id).one()\n assert hasattr(node, 'repository_metadata')\n assert node.repository_metadata == {}\n finally:\n session.close()\n\n\nclass TestRepositoryMigration(TestMigrationsSQLA):\n \"\"\"Test migration of the old file repository to the disk object store.\"\"\"\n\n migrate_from = '7536a82b2cc4'\n migrate_to = '1feaea71bd5a'\n\n def setUpBeforeMigration(self):\n from aiida.common.utils import get_new_uuid\n\n DbNode = self.get_current_table('db_dbnode') # pylint: disable=invalid-name\n DbUser = self.get_current_table('db_dbuser') # pylint: disable=invalid-name\n\n with self.get_session() as session:\n try:\n default_user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(default_user)\n session.commit()\n\n # For some reason, the UUIDs do not get created automatically through the column's default in the\n # migrations so we set it manually using the same method.\n node_01 = DbNode(user_id=default_user.id, uuid=get_new_uuid())\n node_02 = DbNode(user_id=default_user.id, uuid=get_new_uuid())\n node_03 = DbNode(user_id=default_user.id, uuid=get_new_uuid())\n node_04 = DbNode(user_id=default_user.id, 
uuid=get_new_uuid())\n node_05 = DbNode(user_id=default_user.id, uuid=get_new_uuid())\n\n session.add(node_01)\n session.add(node_02)\n session.add(node_03) # Empty repository folder\n session.add(node_04) # Both `path` and `raw_input` subfolder\n session.add(node_05) # Both `path` and `raw_input` subfolder & `.gitignore` in `path`\n session.commit()\n\n assert node_01.uuid is not None\n assert node_02.uuid is not None\n assert node_03.uuid is not None\n assert node_04.uuid is not None\n assert node_05.uuid is not None\n\n self.node_01_pk = node_01.id\n self.node_02_pk = node_02.id\n self.node_03_pk = node_03.id\n self.node_04_pk = node_04.id\n self.node_05_pk = node_05.id\n\n utils.put_object_from_string(node_01.uuid, 'sub/path/file_b.txt', 'b')\n utils.put_object_from_string(node_01.uuid, 'sub/file_a.txt', 'a')\n utils.put_object_from_string(node_02.uuid, 'output.txt', 'output')\n\n os.makedirs(utils.get_node_repository_sub_folder(node_04.uuid, 'path'), exist_ok=True)\n os.makedirs(utils.get_node_repository_sub_folder(node_04.uuid, 'raw_input'), exist_ok=True)\n os.makedirs(utils.get_node_repository_sub_folder(node_05.uuid, 'path'), exist_ok=True)\n os.makedirs(utils.get_node_repository_sub_folder(node_05.uuid, 'raw_input'), exist_ok=True)\n\n utils.put_object_from_string(node_05.uuid, '.gitignore', 'test')\n with open(\n os.path.join(\n utils.get_node_repository_sub_folder(node_05.uuid, 'raw_input'), 'input.txt'),\n 'w',\n encoding='utf-8',\n ) as handle:\n handle.write('input')\n\n # Add a repository folder for a node that no longer exists - i.e. it may have been deleted.\n utils.put_object_from_string(get_new_uuid(), 'file_of_deleted_node', 'output')\n\n finally:\n session.close()\n\n def test_migration(self):\n \"\"\"Test that the files are correctly migrated.\"\"\"\n import hashlib\n DbNode = self.get_current_table('db_dbnode') # pylint: disable=invalid-name\n DbSetting = self.get_current_table('db_dbsetting') # pylint: disable=invalid-name\n\n repository_uuid_key = 'repository|uuid'\n\n with self.get_session() as session:\n try:\n node_01 = session.query(DbNode).filter(DbNode.id == self.node_01_pk).one()\n node_02 = session.query(DbNode).filter(DbNode.id == self.node_02_pk).one()\n node_03 = session.query(DbNode).filter(DbNode.id == self.node_03_pk).one()\n node_05 = session.query(DbNode).filter(DbNode.id == self.node_05_pk).one()\n\n assert node_01.repository_metadata == {\n 'o': {\n 'sub': {\n 'o': {\n 'path': {\n 'o': {\n 'file_b.txt': {\n 'k': hashlib.sha256('b'.encode('utf-8')).hexdigest()\n }\n }\n },\n 'file_a.txt': {\n 'k': hashlib.sha256('a'.encode('utf-8')).hexdigest()\n }\n }\n }\n }\n }\n assert node_02.repository_metadata == {\n 'o': {\n 'output.txt': {\n 'k': hashlib.sha256('output'.encode('utf-8')).hexdigest()\n }\n }\n }\n assert node_03.repository_metadata == {}\n assert node_05.repository_metadata == {\n 'o': {\n 'input.txt': {\n 'k': hashlib.sha256('input'.encode('utf-8')).hexdigest()\n }\n }\n }\n\n for hashkey, content in (\n (node_01.repository_metadata['o']['sub']['o']['path']['o']['file_b.txt']['k'], b'b'),\n (node_01.repository_metadata['o']['sub']['o']['file_a.txt']['k'], b'a'),\n (node_02.repository_metadata['o']['output.txt']['k'], b'output'),\n (node_05.repository_metadata['o']['input.txt']['k'], b'input'),\n ):\n assert utils.get_repository_object(hashkey) == content\n\n repository_uuid = session.query(DbSetting).filter(DbSetting.key == repository_uuid_key).one()\n assert repository_uuid is not None\n assert isinstance(repository_uuid.val, str)\n 
finally:\n session.close()\n\n\nclass TestComputerNameToLabelMigration(TestMigrationsSQLA):\n \"\"\"Test the renaming of `name` to `label` for `DbComputer`.\"\"\"\n\n migrate_from = '1feaea71bd5a' # 1feaea71bd5a_migrate_repository\n migrate_to = '535039300e4a' # 535039300e4a_computer_name_to_label.py\n\n def setUpBeforeMigration(self):\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbComputer = self.get_auto_base().classes.db_dbcomputer # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n computer = DbComputer(name='testing')\n\n session.add(computer)\n session.commit()\n\n self.computer_id = computer.id\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_migration(self):\n \"\"\"Verify that the column was successfully renamed.\"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbComputer = self.get_auto_base().classes.db_dbcomputer # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n computer = session.query(DbComputer).filter(DbComputer.id == self.computer_id).one()\n self.assertEqual(computer.label, 'testing')\n finally:\n session.close()\n\n\nclass TestEntryPointCorePrefixMigration(TestMigrationsSQLA):\n \"\"\"Test migration that updates node types after `core.` prefix was added to entry point names.\"\"\"\n\n migrate_from = '535039300e4a' # 535039300e4a_computer_name_to_label.py\n migrate_to = '34a831f4286d' # 34a831f4286d_entry_point_core_prefix\n\n def setUpBeforeMigration(self):\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbComputer = self.get_auto_base().classes.db_dbcomputer # pylint: disable=invalid-name\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n DbUser = self.get_auto_base().classes.db_dbuser # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n user = DbUser(email=f'{self.id()}@aiida.net')\n session.add(user)\n session.commit()\n\n computer = DbComputer(label='testing', scheduler_type='direct', transport_type='local')\n session.add(computer)\n session.commit()\n self.computer_id = computer.id\n\n calcjob = DbNode(\n user_id=user.id,\n process_type='aiida.calculations:core.arithmetic.add',\n attributes={'parser_name': 'core.arithmetic.add'}\n )\n session.add(calcjob)\n session.commit()\n self.calcjob_id = calcjob.id\n\n workflow = DbNode(user_id=user.id, process_type='aiida.workflows:arithmetic.add_multiply')\n session.add(workflow)\n session.commit()\n self.workflow_id = workflow.id\n\n except Exception:\n session.rollback()\n raise\n finally:\n session.close()\n\n def test_migration(self):\n \"\"\"Verify that the entry point strings have been updated with the `core.` prefix.\"\"\"\n from sqlalchemy.orm import Session # pylint: disable=import-error,no-name-in-module\n\n DbComputer = self.get_auto_base().classes.db_dbcomputer # pylint: disable=invalid-name\n DbNode = self.get_auto_base().classes.db_dbnode # pylint: disable=invalid-name\n\n with sa.ENGINE.begin() as connection:\n try:\n session = Session(connection.engine)\n\n computer = session.query(DbComputer).filter(DbComputer.id == self.computer_id).one()\n assert computer.scheduler_type == 'core.direct'\n assert computer.transport_type == 'core.local'\n\n calcjob = session.query(DbNode).filter(DbNode.id == self.calcjob_id).one()\n 
assert calcjob.process_type == 'aiida.calculations:core.arithmetic.add'\n assert calcjob.attributes['parser_name'] == 'core.arithmetic.add'\n\n workflow = session.query(DbNode).filter(DbNode.id == self.workflow_id).one()\n assert workflow.process_type == 'aiida.workflows:core.arithmetic.add_multiply'\n\n finally:\n session.close()\n" ]
[ [ "numpy.array" ] ]
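The migration tests above all follow the same two-phase pattern: populate rows at the old schema revision in setUpBeforeMigration, let the alembic upgrade run, then assert on the rows as seen through the new schema. A minimal self-contained sketch of that pattern, using an in-memory sqlite database and a raw ALTER statement as a stand-in for the alembic revision (here `sa` is the plain SQLAlchemy package, not the aiida backend module the tests use):

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
with engine.begin() as conn:
    # Phase 1: old schema plus a row, as in setUpBeforeMigration.
    conn.execute(sa.text('CREATE TABLE db_dbcomputer (id INTEGER PRIMARY KEY, name TEXT)'))
    conn.execute(sa.text("INSERT INTO db_dbcomputer (name) VALUES ('testing')"))
    # Stand-in for the migration step (RENAME COLUMN needs sqlite >= 3.25).
    conn.execute(sa.text('ALTER TABLE db_dbcomputer RENAME COLUMN name TO label'))
    # Phase 2: assert on the data through the new schema.
    assert conn.execute(sa.text('SELECT label FROM db_dbcomputer')).scalar_one() == 'testing'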
soupstandstop/test
[ "ad3f185b8041d3fa611f387291ee8106439353b6" ]
[ "seq2seq/decoders/attention.py" ]
[ "# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Implementations of attention layers.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport abc\nimport six\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import function # pylint: disable=E0611\n\nfrom seq2seq.graph_module import GraphModule\nfrom seq2seq.configurable import Configurable\n\n\[email protected](\n tf.float32,\n tf.float32,\n tf.float32,\n func_name=\"att_sum_bahdanau\",\n noinline=True)\ndef att_sum_bahdanau(v_att, keys, query):\n \"\"\"Calculates a batch- and timewise dot product with a variable\"\"\"\n return tf.reduce_sum(v_att * tf.tanh(keys + tf.expand_dims(query, 1)), [2])\n\n\[email protected](tf.float32, tf.float32, func_name=\"att_sum_dot\", noinline=True)\ndef att_sum_dot(keys, query):\n \"\"\"Calculates a batch- and timewise dot product\"\"\"\n return tf.reduce_sum(keys * tf.expand_dims(query, 1), [2])\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass AttentionLayer(GraphModule, Configurable):\n \"\"\"\n Attention layer according to https://arxiv.org/abs/1409.0473.\n\n Params:\n num_units: Number of units used in the attention layer\n \"\"\"\n\n def __init__(self, params, mode, name=\"attention\"):\n GraphModule.__init__(self, name)\n Configurable.__init__(self, params, mode)\n\n @staticmethod\n def default_params():\n return {\"num_units\": 128}\n\n @abc.abstractmethod\n def score_fn(self, keys, query):\n \"\"\"Computes the attention score\"\"\"\n raise NotImplementedError\n\n def _build(self, query, keys, values, values_length):\n \"\"\"Computes attention scores and outputs.\n\n Args:\n query: The query used to calculate attention scores.\n In seq2seq this is typically the current state of the decoder.\n A tensor of shape `[B, ...]`\n keys: The keys used to calculate attention scores. In seq2seq, these\n are typically the outputs of the encoder and equivalent to `values`.\n A tensor of shape `[B, T, ...]` where each element in the `T`\n dimension corresponds to the key for that value.\n values: The elements to compute attention over. 
In seq2seq, this is\n typically the sequence of encoder outputs.\n A tensor of shape `[B, T, input_dim]`.\n values_length: An int32 tensor of shape `[B]` defining the sequence\n length of the attention values.\n\n Returns:\n A tuple `(scores, context)`.\n `scores` is a vector of length `T` where each element is the\n normalized \"score\" of the corresponding `inputs` element.\n `context` is the final attention layer output corresponding to\n the weighted inputs.\n A tensor of shape `[B, input_dim]`.\n \"\"\"\n values_depth = values.get_shape().as_list()[-1]\n\n # Fully connected layers to transform both keys and query\n # into a tensor with `num_units` units\n att_keys = tf.contrib.layers.fully_connected(\n inputs=keys,\n num_outputs=self.params[\"num_units\"],\n activation_fn=None,\n scope=\"att_keys\")\n att_query = tf.contrib.layers.fully_connected(\n inputs=query,\n num_outputs=self.params[\"num_units\"],\n activation_fn=None,\n scope=\"att_query\")\n\n scores = self.score_fn(att_keys, att_query)\n\n # Replace all scores for padded inputs with tf.float32.min\n num_scores = tf.shape(scores)[1]\n scores_mask = tf.sequence_mask(\n lengths=tf.to_int32(values_length),\n maxlen=tf.to_int32(num_scores),\n dtype=tf.float32)\n scores = scores * scores_mask + ((1.0 - scores_mask) * tf.float32.min)\n\n # Normalize the scores\n scores_normalized = tf.nn.softmax(scores, name=\"scores_normalized\")\n\n # Calculate the weighted average of the attention inputs\n # according to the scores\n context = tf.expand_dims(scores_normalized, 2) * values\n context = tf.reduce_sum(context, 1, name=\"context\")\n context.set_shape([None, values_depth])\n\n return (scores_normalized, context)\n\n\nclass AttentionLayerDot(AttentionLayer):\n \"\"\"An attention layer that calculates attention scores using\n a dot product.\n \"\"\"\n\n def score_fn(self, keys, query):\n return att_sum_dot(keys, query)\n\n\nclass AttentionLayerBahdanau(AttentionLayer):\n \"\"\"An attention layer that calculates attention scores using\n a parameterized multiplication.\"\"\"\n\n def score_fn(self, keys, query):\n v_att = tf.get_variable(\n \"v_att\", shape=[self.params[\"num_units\"]], dtype=tf.float32)\n return att_sum_bahdanau(v_att, keys, query)\n" ]
[ [ "tensorflow.shape", "tensorflow.expand_dims", "tensorflow.contrib.layers.fully_connected", "tensorflow.python.framework.function.Defun", "tensorflow.get_variable", "tensorflow.reduce_sum", "tensorflow.nn.softmax", "tensorflow.to_int32" ] ]
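For reference, the masked softmax attention computed by `_build` above reduces to a few array operations; a NumPy sketch of the same arithmetic (the fully connected projections are omitted and the shapes are chosen arbitrarily):

import numpy as np

B, T, D = 2, 4, 8                                      # batch, time, depth
rng = np.random.default_rng(0)
keys = rng.standard_normal((B, T, D)).astype(np.float32)
query = rng.standard_normal((B, D)).astype(np.float32)
values = rng.standard_normal((B, T, D)).astype(np.float32)
lengths = np.array([4, 2])                             # second sequence padded after step 2

scores = np.sum(keys * query[:, None, :], axis=2)      # [B, T], same reduction as att_sum_dot
mask = np.arange(T)[None, :] < lengths[:, None]        # True on valid timesteps
scores = np.where(mask, scores, np.finfo(np.float32).min)  # mask padded steps
scores = np.exp(scores - scores.max(axis=1, keepdims=True))
scores /= scores.sum(axis=1, keepdims=True)            # normalized attention weights
context = np.sum(scores[:, :, None] * values, axis=1)  # [B, D] weighted average of values
assert context.shape == (B, D)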
bermanmaxim/AOWS
[ "2b9efefd426eebfcefb4b0c09f2683d3a0700951" ]
[ "latency.py" ]
[ "import argparse\nimport json\nimport logging\nimport os\nimport os.path as osp\nimport random\nimport sys\nimport time\nfrom collections import Counter\nfrom collections import defaultdict\nfrom collections import namedtuple\n\nimport cvxpy as cp\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom scipy.sparse import lil_matrix\n# lazy: from torch2trt import torch2trt\n\nfrom misc import DelayedKeyboardInterrupt\nfrom misc import tuplify\nfrom model import SlimMobilenet\nfrom model import LayerType\nfrom viterbi import complete\nfrom viterbi import maxsum\n\n\nlogger = logging.getLogger(__name__)\nVartype = namedtuple(\"Vartype\", LayerType._fields + ('in_channels', 'out_channels'))\ntorch.backends.cudnn.benchmark = True\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Generate samples and fit a latency model.\")\n\n subparsers = parser.add_subparsers(dest='mode')\n subparsers.required = True\n\n parser_bench = subparsers.add_parser('benchmark', \n help=\"Benchmark a single channel configuration\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser_bench.add_argument(\"configuration\",\n help=\"configuration to test (comma-separated channels or MOBILENET)\")\n\n parser_gen = subparsers.add_parser('generate', \n help=\"Generate latency samples\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n for subparser in (parser_bench, parser_gen):\n subparser.add_argument(\"-D\", \"--device\", choices=[\"cpu\", \"gpu\", \"trt\"], \n default=\"gpu\", help=\"Use GPU, CPU or TensorRT latency\")\n subparser.add_argument(\"--dtype\", choices=[\"fp32\", \"fp16\"], \n default=\"fp16\", help=\"Datatype for network\")\n subparser.add_argument(\"-B\", \"--batch-size\", type=int, default=64, \n help=\"Batch size used for profiling\")\n subparser.add_argument(\"-I\", \"--iterations\", type=int, default=60, \n help=\"Profiling iterations\")\n subparser.add_argument(\"-W\", \"--warmup\", type=int, default=10, \n help=\"Warmup iterations\")\n subparser.add_argument(\"--reduction\", choices=['mean', 'min'], default='mean',\n help=\"Reduce timings by their mean or by their minimum (minimum can reduce variance)\")\n parser_gen.add_argument(\"--biased\", action=\"store_true\", \n help=\"Bias sampling towards missing configurations\")\n parser_gen.add_argument(\"-N\", \"--count\", type=int, default=8000,\n help=\"Minimum number of samples to generate\")\n parser_gen.add_argument(\"-R\", \"--repetitions\", type=int, default=0, \n help=\"Minimum number of samples per choice\")\n parser_gen.add_argument(\"--save-every\", type=int, default=1,\n help=\"Number of inferences before saving intermediate output\")\n parser_gen.add_argument(\"samples_file\", help=\"Output samples file\")\n\n parser_fit = subparsers.add_parser('fit', help=\"Fit a latency model\")\n parser_fit.add_argument(\"-K\", \"--regularize\", type=float, default=0.0,\n help=\"Amount of monotonicity regularization (Equation 7)\")\n parser_fit.add_argument(\"samples_file\", help=\"Training samples\")\n parser_fit.add_argument(\"model_file\", help=\"Output model file\")\n\n parser_val = subparsers.add_parser('validate', help=\"Validate a latency model\")\n parser_val.add_argument(\"samples_file\", help=\"Validation samples\")\n parser_val.add_argument(\"model_file\", help=\"Model file\")\n parser_val.add_argument(\"plot_file\", help=\"Plot file\")\n\n args = parser.parse_args()\n\n if 'configuration' in args:\n defaults = {'MOBILENET': 
\"32,64,128,128,256,256,512,512,512,512,512,512,1024,1024\"}\n if args.configuration in defaults:\n args.configuration = defaults[args.configuration]\n args.configuration = [int(''.join(ci for ci in c if ci.isdigit())) for c in args.configuration.split(',')]\n \n return args\n\n\ndef get_model(min_width=0.2, max_width=1.5, levels=14):\n return SlimMobilenet(min_width=min_width, max_width=max_width, levels=levels)\n\n\ndef benchmark(device, dtype, batch_size, iterations, warmup, reduction, configuration, silent=False):\n if device == 'cpu':\n dev = torch.device('cpu')\n elif device in ['gpu', 'trt']:\n dev = torch.device('cuda')\n fp = dict(fp16=torch.float16, fp32=torch.float32).get(dtype)\n net = SlimMobilenet.reduce(configuration).to(dev).type(fp).eval()\n x = torch.ones((batch_size, 3, 224, 224)).to(dev).type(fp)\n if device == 'trt':\n from torch2trt import torch2trt\n net = torch2trt(net, [x], fp16_mode=(dtype == 'fp16'), max_batch_size=batch_size)\n\n for i in range(warmup):\n outputs = net(x)\n torch.cuda.current_stream().synchronize()\n\n timings = []\n t0 = time.time()\n for i in range(iterations):\n outputs = net(x)\n torch.cuda.current_stream().synchronize()\n t1 = time.time()\n timings.append(t1 - t0)\n t0 = t1\n\n ms = 1000.0 * getattr(np, reduction)(timings) / batch_size\n if not silent:\n print(f\"{configuration}: {ms}ms\")\n\n return ms\n\n\ndef gen_configuration_biased(net, repetitions):\n M = min(repetitions.values())\n unary = []\n pairwise = []\n for i, L in enumerate(net.components):\n input_choices = [net.in_channels] if i == 0 else net.configurations[i - 1]\n output_choices = ([net.out_channels] if i == len(net.components) - 1\n else net.configurations[i])\n U = np.zeros(len(input_choices))\n P = np.zeros((len(input_choices), len(output_choices)))\n for i1, I in enumerate(input_choices):\n for i2, O in enumerate(output_choices):\n var = Vartype(**L._asdict(), in_channels=I, out_channels=O)\n P[i1, i2] = float(repetitions[var] == M)\n unary.append(U)\n pairwise.append(P)\n unary.append(np.zeros(len(output_choices)))\n un, pair, states = complete(unary, pairwise)\n iconfig = maxsum(un, pair, states)[1]\n configuration = [C[i] for (C, i) in zip(net.configurations, iconfig[1:-1])]\n return configuration\n\n\ndef gen_configuration(net, repetitions, biased=False):\n if biased:\n return gen_configuration_biased(net, repetitions)\n return [random.choice(conf) for conf in net.configurations]\n\n\ndef collect_repetitions(net, configuration=None):\n if configuration is None:\n configuration = net.configurations\n if isinstance(configuration[0], (int, np.integer)): # single configuration\n configuration = [[c] for c in configuration]\n layertypes = Counter()\n for i, L in enumerate(net.components):\n input_choices = [net.in_channels] if i == 0 else configuration[i - 1]\n output_choices = ([net.out_channels] if i == len(net.components) - 1 \n else configuration[i])\n for I in input_choices:\n for O in output_choices:\n var = Vartype(**L._asdict(), in_channels=I, out_channels=O)\n layertypes[var] += 1\n return layertypes\n\n\ndef sample_file_iterator(samples_file):\n with open(samples_file, 'r') as f:\n for line in f:\n yield tuplify(json.loads(line))\n\n\ndef generate(device, dtype, batch_size, iterations, warmup, reduction, biased,\n count, repetitions, samples_file=os.devnull, save_every=10):\n os.makedirs(osp.dirname(samples_file), exist_ok=True)\n\n net = get_model()\n combinations = collect_repetitions(net)\n logger.info(f\"{len(net.configurations)} modules\")\n 
logger.debug(f\"search space: {net.configurations}\")\n logger.debug(f\"components: {net.components}\")\n logger.info(f\"Latency model has {len(combinations)} parameters\")\n\n repeats = Counter()\n for c in combinations:\n repeats[c] = 0\n\n samples = []\n if osp.isfile(samples_file):\n for sample in sample_file_iterator(samples_file):\n samples.append(sample)\n repeats.update(collect_repetitions(net, sample[0]))\n logger.info(f\"Loaded {samples_file}, \"\n f\"min_repetition={min(repeats.values())} \"\n f\"count={len(samples)} \")\n logger.info(f\"Writing new samples to {samples_file}\")\n new_samples = []\n while (len(samples) + len(new_samples) < count \n or min(repeats.values()) < repetitions):\n configuration = gen_configuration(net, repeats, biased=biased)\n ms = benchmark(device, dtype, batch_size, iterations, warmup, reduction, configuration, silent=True)\n repeats.update(collect_repetitions(net, configuration))\n logger.info(f\"{configuration}: {ms:.04f}ms, \"\n f\"min_repetition={min(repeats.values())} \"\n f\"count={len(samples) + len(new_samples)} \")\n new_samples.append([[int(d) for d in configuration], ms])\n if (len(new_samples) % save_every) == 0:\n with open(samples_file, 'a') as f:\n for sample in new_samples:\n dump = json.dumps(sample) + '\\n'\n with DelayedKeyboardInterrupt():\n f.write(dump)\n samples.extend(new_samples)\n new_samples = []\n\n samples.extend(new_samples)\n return samples\n\n\ndef build_equation(samples):\n \"\"\"\n Samples can be an iterator\n \"\"\"\n net = get_model()\n variables = {}\n ivariables = {}\n Mcoord = []\n y = []\n for (i, sample) in enumerate(samples):\n y.append(sample[1])\n local_repeats = collect_repetitions(net, sample[0])\n for (L, r) in local_repeats.items():\n if L not in variables:\n j = len(variables)\n variables[L] = j\n ivariables[j] = L\n Mcoord.append((i, variables[L], r))\n y = np.array(y)\n M = lil_matrix((len(y), len(variables)))\n for (i, j, r) in Mcoord:\n M[i, j] = r\n return M, y, variables, ivariables\n\n\ndef solve_lsq(M, y, regularize=0.0, K=None):\n n = M.shape[1]\n x = cp.Variable(n)\n t = cp.Variable(K.shape[0])\n M_cp = cp.Constant(M)\n obj = cp.sum_squares(M_cp @ x - y)\n constraints = [x >= 0]\n if regularize:\n K_cp = cp.Constant(K)\n obj += regularize * cp.sum_squares(t)\n constraints += [t >= 0, K_cp @ x <= t]\n objective = cp.Minimize(obj)\n prob = cp.Problem(objective, constraints)\n prob.solve(cp.SCS, verbose=True)\n return x.value\n\n\ndef get_inequalities(variables):\n def other(L, *args):\n props = L._asdict()\n for k in args:\n del props[k]\n return tuple(props.values())\n buckets = defaultdict(list)\n for order in ['in_channels', 'out_channels', 'in_size']:\n for V in variables:\n buckets[other(V, order)].append(V)\n inequalities = []\n for bucket in buckets.values():\n bucket = sorted(bucket)\n for i in range(len(bucket) - 1):\n inequalities.append((bucket[i], bucket[i + 1]))\n K = lil_matrix((len(inequalities), len(variables)))\n for i, (C1, C2) in enumerate(inequalities):\n K[i, variables[C1]] = 1\n K[i, variables[C2]] = -1\n return K\n\n\ndef fit_model(samples, regularize=0.0):\n M, y, variables, ivariables = build_equation(samples)\n K = get_inequalities(variables)\n x = solve_lsq(M, y, regularize, K)\n model = []\n for i, ms in enumerate(x):\n model.append((ivariables[i], ms))\n return model\n\n\ndef dump_model(model, model_file):\n with open(model_file, 'w') as f:\n for m in model:\n var, ms = m\n dump = json.dumps([var._asdict(), ms]) + '\\n'\n f.write(dump)\n\n\ndef 
load_model(model_file):\n with open(model_file, 'r') as f:\n for line in f:\n var, ms = tuplify(json.loads(line))\n var = Vartype(**var)\n yield (var, ms)\n\n\ndef fit(samples_file, model_file, regularize=0.0):\n os.makedirs(osp.dirname(model_file), exist_ok=True)\n samples = sample_file_iterator(samples_file)\n model = fit_model(samples, regularize)\n dump_model(model, model_file)\n return model\n\n\ndef validate(samples_file, model_file, plot_file):\n os.makedirs(osp.dirname(plot_file), exist_ok=True)\n model = load_model(model_file)\n model_dict = dict(model)\n samples = sample_file_iterator(samples_file)\n M, y, variables, ivariables = build_equation(samples)\n x = [model_dict[ivariables[i]] for i in range(len(variables))]\n yhat = M @ x\n rmse = np.sqrt(((y - yhat) ** 2).mean())\n title = f\"RMSE {rmse:.04f}, NRMSE {100 * rmse / y.mean():.02f}%\"\n print(title)\n plt.plot(y, yhat, 'o')\n plt.xlabel(\"ground truth (ms)\")\n plt.ylabel(\"predicted (ms)\")\n plt.title(title)\n plt.savefig(plot_file)\n\n\nif __name__ == \"__main__\":\n logger = logging.getLogger(__file__)\n logging.basicConfig(stream=sys.stderr, level=logging.DEBUG,\n format='%(name)s: %(message)s')\n args = parse_args().__dict__\n\n globals()[args.pop('mode')](**args)\n" ]
[ [ "torch.device", "numpy.array", "torch.cuda.current_stream", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "torch.ones", "matplotlib.pyplot.ylabel" ] ]
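The model fit by solve_lsq above is linear: each measured latency is the sum of per-layer-type costs weighted by how often that layer type occurs in the sampled network, with the costs constrained nonnegative. A toy version of the unregularized fit, using scipy instead of cvxpy (all numbers here are invented for illustration):

import numpy as np
from scipy.optimize import nnls

rng = np.random.default_rng(0)
x_true = rng.uniform(0.1, 1.0, size=5)               # per-layer-type cost in ms
M = rng.integers(0, 4, size=(50, 5)).astype(float)   # layer-type counts per sampled network
y = M @ x_true + rng.normal(0.0, 0.01, size=50)      # noisy latency measurements
x_hat, _ = nnls(M, y)                                # nonnegative least squares
print(np.abs(x_hat - x_true).max())                  # small recovery error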
collector-m/UniTrack
[ "e8e56e164f2dd40ba590a19ed7a4a75d8da7e2eb" ]
[ "core/association/matching.py" ]
[ "import pdb\r\nimport cv2\r\nimport torch\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nimport scipy\r\nimport scipy.sparse\r\nfrom scipy.spatial.distance import cdist\r\nimport lap\r\n\r\nfrom cython_bbox import bbox_overlaps as bbox_ious\r\nfrom core.motion import kalman_filter\r\nimport time\r\n\r\ndef merge_matches(m1, m2, shape):\r\n O,P,Q = shape\r\n m1 = np.asarray(m1)\r\n m2 = np.asarray(m2)\r\n\r\n M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))\r\n M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))\r\n\r\n mask = M1*M2\r\n match = mask.nonzero()\r\n match = list(zip(match[0], match[1]))\r\n unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))\r\n unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))\r\n\r\n return match, unmatched_O, unmatched_Q\r\n\r\n\r\ndef linear_assignment(cost_matrix, thresh):\r\n if cost_matrix.size == 0:\r\n return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))\r\n matches, unmatched_a, unmatched_b = [], [], []\r\n cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)\r\n for ix, mx in enumerate(x):\r\n if mx >= 0:\r\n matches.append([ix, mx])\r\n unmatched_a = np.where(x < 0)[0]\r\n unmatched_b = np.where(y < 0)[0]\r\n matches = np.asarray(matches)\r\n return matches, unmatched_a, unmatched_b\r\n \r\n\r\ndef ious(atlbrs, btlbrs):\r\n \"\"\"\r\n Compute cost based on IoU\r\n :type atlbrs: list[tlbr] | np.ndarray\r\n :type btlbrs: list[tlbr] | np.ndarray\r\n\r\n :rtype ious np.ndarray\r\n \"\"\"\r\n ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)\r\n if ious.size == 0:\r\n return ious\r\n\r\n ious = bbox_ious(\r\n np.ascontiguousarray(atlbrs, dtype=np.float),\r\n np.ascontiguousarray(btlbrs, dtype=np.float)\r\n )\r\n\r\n return ious\r\n\r\n\r\ndef iou_distance(atracks, btracks):\r\n \"\"\"\r\n Compute cost based on IoU\r\n :type atracks: list[STrack]\r\n :type btracks: list[STrack]\r\n\r\n :rtype cost_matrix np.ndarray\r\n \"\"\"\r\n\r\n if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):\r\n atlbrs = atracks\r\n btlbrs = btracks\r\n else:\r\n atlbrs = [track.tlbr for track in atracks]\r\n btlbrs = [track.tlbr for track in btracks]\r\n _ious = ious(atlbrs, btlbrs)\r\n cost_matrix = 1 - _ious\r\n\r\n return cost_matrix\r\n\r\ndef embedding_distance(tracks, detections, metric='cosine'):\r\n \"\"\"\r\n :param tracks: list[STrack]\r\n :param detections: list[BaseTrack]\r\n :param metric:\r\n :return: cost_matrix np.ndarray\r\n \"\"\"\r\n\r\n cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)\r\n if cost_matrix.size == 0:\r\n return cost_matrix\r\n det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float)\r\n track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float)\r\n cost_matrix = np.maximum(0.0, cdist(track_features, det_features)) # Normalized features\r\n return cost_matrix\r\n\r\n\r\ndef fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98, gate=True):\r\n if cost_matrix.size == 0:\r\n return cost_matrix\r\n gating_dim = 2 if only_position else 4\r\n gating_threshold = kalman_filter.chi2inv95[gating_dim]\r\n measurements = np.asarray([det.to_xyah() for det in detections])\r\n for row, track in enumerate(tracks):\r\n gating_distance = kf.gating_distance(\r\n track.mean, track.covariance, measurements, only_position, 
metric='maha')\r\n if gate:\r\n cost_matrix[row, gating_distance > gating_threshold] = np.inf\r\n cost_matrix[row] = lambda_ * cost_matrix[row] + (1-lambda_)* gating_distance\r\n return cost_matrix\r\n\r\n\r\ndef center_emb_distance(tracks, detections, metric='cosine'):\r\n \"\"\"\r\n :param tracks: list[STrack]\r\n :param detections: list[BaseTrack]\r\n :param metric:\r\n :return: cost_matrix np.ndarray\r\n \"\"\"\r\n\r\n cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)\r\n if cost_matrix.size == 0:\r\n return cost_matrix\r\n det_features = torch.stack([track.curr_feat.squeeze() for track in detections])\r\n track_features = torch.stack([track.smooth_feat.squeeze() for track in tracks])\r\n normed_det = F.normalize(det_features)\r\n normed_track = F.normalize(track_features)\r\n cost_matrix = torch.mm(normed_track, normed_det.T)\r\n cost_matrix = 1 - cost_matrix.detach().cpu().numpy()\r\n return cost_matrix\r\n\r\ndef recons_distance(tracks, detections, tmp=100):\r\n \"\"\"\r\n :param tracks: list[STrack]\r\n :param detections: list[BaseTrack]\r\n :param metric:\r\n :return: cost_matrix np.ndarray\r\n \"\"\"\r\n\r\n cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)\r\n if cost_matrix.size == 0:\r\n return cost_matrix\r\n det_features_ = torch.stack([track.curr_feat.squeeze() for track in detections])\r\n track_features_ = torch.stack([track.smooth_feat for track in tracks])\r\n det_features = F.normalize(det_features_, dim=1)\r\n track_features = F.normalize(track_features_, dim=1)\r\n\r\n ndet, ndim, nw, nh = det_features.shape\r\n ntrk, _, _, _ = track_features.shape\r\n fdet = det_features.permute(0,2,3,1).reshape(-1, ndim).cuda() # ndet*nw*nh, ndim\r\n ftrk = track_features.permute(0,2,3,1).reshape(-1, ndim).cuda() # ntrk*nw*nh, ndim\r\n\r\n aff = torch.mm(ftrk, fdet.transpose(0,1)) # ntrk*nw*nh, ndet*nw*nh\r\n aff_td = F.softmax(tmp*aff, dim=1)\r\n aff_dt = F.softmax(tmp*aff, dim=0).transpose(0,1)\r\n\r\n recons_ftrk = torch.einsum('tds,dsm->tdm', aff_td.view(ntrk*nw*nh, ndet, nw*nh), \r\n fdet.view(ndet, nw*nh, ndim)) # ntrk*nw*nh, ndet, ndim\r\n recons_fdet = torch.einsum('dts,tsm->dtm', aff_dt.view(ndet*nw*nh, ntrk, nw*nh),\r\n ftrk.view(ntrk, nw*nh, ndim)) # ndet*nw*nh, ntrk, ndim\r\n \r\n res_ftrk = (recons_ftrk.permute(0,2,1) - ftrk.unsqueeze(-1)).view(ntrk, nw*nh*ndim, ndet)\r\n res_fdet = (recons_fdet.permute(0,2,1) - fdet.unsqueeze(-1)).view(ndet, nw*nh*ndim, ntrk)\r\n\r\n cost_matrix = (torch.abs(res_ftrk).mean(1) + torch.abs(res_fdet).mean(1).transpose(0,1)) * 0.5\r\n cost_matrix = cost_matrix / cost_matrix.max(1)[0].unsqueeze(-1) \r\n #pdb.set_trace()\r\n cost_matrix = cost_matrix.cpu().numpy()\r\n return cost_matrix\r\n\r\n\r\ndef get_track_feat(tracks, feat_flag='curr'):\r\n if feat_flag == 'curr':\r\n feat_list = [track.curr_feat.squeeze(0) for track in tracks]\r\n elif feat_flag == 'smooth':\r\n feat_list = [track.smooth_feat.squeeze(0) for track in tracks]\r\n else:\r\n raise NotImplementedError\r\n \r\n n = len(tracks)\r\n fdim = feat_list[0].shape[0]\r\n fdim_num = len(feat_list[0].shape)\r\n if fdim_num > 2:\r\n feat_list = [f.view(fdim,-1) for f in feat_list]\r\n numels = [f.shape[1] for f in feat_list]\r\n \r\n ret = torch.zeros(n, fdim, np.max(numels)).to(feat_list[0].device)\r\n for i, f in enumerate(feat_list):\r\n ret[i, :, :numels[i]] = f\r\n return ret \r\n\r\ndef reconsdot_distance(tracks, detections, tmp=100):\r\n \"\"\"\r\n :param tracks: list[STrack]\r\n :param detections: list[BaseTrack]\r\n :param 
metric:\r\n :return: cost_matrix np.ndarray\r\n \"\"\"\r\n cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)\r\n if cost_matrix.size == 0:\r\n return cost_matrix, None\r\n det_features_ = get_track_feat(detections)\r\n track_features_ = get_track_feat(tracks, feat_flag='curr')\r\n\r\n det_features = F.normalize(det_features_, dim=1)\r\n track_features = F.normalize(track_features_, dim=1)\r\n\r\n ndet, ndim, nsd = det_features.shape\r\n ntrk, _, nst = track_features.shape\r\n\r\n fdet = det_features.permute(0, 2, 1).reshape(-1, ndim).cuda()\r\n ftrk = track_features.permute(0, 2, 1).reshape(-1, ndim).cuda()\r\n\r\n aff = torch.mm(ftrk, fdet.transpose(0, 1))\r\n aff_td = F.softmax(tmp*aff, dim=1)\r\n aff_dt = F.softmax(tmp*aff, dim=0).transpose(0, 1)\r\n\r\n recons_ftrk = torch.einsum('tds,dsm->tdm', aff_td.view(ntrk*nst, ndet, nsd),\r\n fdet.view(ndet, nsd, ndim))\r\n recons_fdet = torch.einsum('dts,tsm->dtm', aff_dt.view(ndet*nsd, ntrk, nst),\r\n ftrk.view(ntrk, nst, ndim))\r\n\r\n recons_ftrk = recons_ftrk.permute(0, 2, 1).view(ntrk, nst*ndim, ndet)\r\n recons_ftrk_norm = F.normalize(recons_ftrk, dim=1)\r\n recons_fdet = recons_fdet.permute(0, 2, 1).view(ndet, nsd*ndim, ntrk)\r\n recons_fdet_norm = F.normalize(recons_fdet, dim=1)\r\n\r\n dot_td = torch.einsum('tad,ta->td', recons_ftrk_norm,\r\n F.normalize(ftrk.reshape(ntrk, nst*ndim), dim=1))\r\n dot_dt = torch.einsum('dat,da->dt', recons_fdet_norm,\r\n F.normalize(fdet.reshape(ndet, nsd*ndim), dim=1))\r\n\r\n cost_matrix = 1 - 0.5 * (dot_td + dot_dt.transpose(0, 1))\r\n cost_matrix = cost_matrix.detach().cpu().numpy()\r\n\r\n return cost_matrix, None\r\n\r\n\r\ndef category_gate(cost_matrix, tracks, detections):\r\n \"\"\"\r\n :param tracks: list[STrack]\r\n :param detections: list[BaseTrack]\r\n :param metric:\r\n :return: cost_matrix np.ndarray\r\n \"\"\"\r\n if cost_matrix.size == 0:\r\n return cost_matrix\r\n\r\n det_categories = np.array([d.category for d in detections])\r\n trk_categories = np.array([t.category for t in tracks])\r\n\r\n cost_matrix = cost_matrix + np.abs(\r\n det_categories[None, :] - trk_categories[:, None])\r\n return cost_matrix\r\n\r\n\r\n" ]
[ [ "numpy.max", "torch.nn.functional.normalize", "numpy.array", "torch.stack", "numpy.asarray", "numpy.empty", "numpy.ascontiguousarray", "torch.mm", "torch.abs", "numpy.where", "numpy.abs", "torch.nn.functional.softmax", "scipy.spatial.distance.cdist" ] ]
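linear_assignment above relies on lap.lapjv with a cost_limit; for illustration, an approximately equivalent gated assignment can be written with scipy. Note this is an approximation: lapjv folds the limit into the optimization itself, while this sketch solves first and rejects over-threshold pairs afterwards.

import numpy as np
from scipy.optimize import linear_sum_assignment

def gated_assignment(cost, thresh):
    rows, cols = linear_sum_assignment(cost)   # minimize total assignment cost
    keep = cost[rows, cols] <= thresh          # reject pairs that are too expensive
    matches = np.stack([rows[keep], cols[keep]], axis=1)
    unmatched_a = sorted(set(range(cost.shape[0])) - set(matches[:, 0]))
    unmatched_b = sorted(set(range(cost.shape[1])) - set(matches[:, 1]))
    return matches, unmatched_a, unmatched_b

cost = np.array([[0.1, 0.9], [0.8, 0.2], [0.7, 0.6]])
print(gated_assignment(cost, thresh=0.5))      # rows 0 and 1 matched, row 2 left unmatched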
codorkh/AcousticPE
[ "a787496a7800d56743fc606a43acbe9e32de35e0" ]
[ "topo_input_files/alps/alp_profile.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 8 14:36:32 2016\n\n@author: dgreen\n\"\"\"\n\n# alp_profile.py\n\n# Plotting the alpine profile, chosen to give a relatively 'up and over' profile\n# from the coordinates 44.27N 10.60E\n\n# Load in the data\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef read_profile(filename):\n data = pd.io.parsers.read_csv(filename, sep=r'\\s+',names=['lon','lat','dist','alt'])\n return data\n \ndef read_2D_profile(filename):\n data = pd.io.parsers.read_csv(filename, sep=r'\\s+',names=['dist','alt'])\n return data\n \npara = {'axes.labelsize': 18, 'font.size': 18, 'legend.fontsize': 13, 'xtick.labelsize': 16,'ytick.labelsize': 16, 'figure.subplot.left': 0.12, 'figure.subplot.right': 0.98, 'figure.subplot.bottom': 0.11, 'figure.subplot.top': 0.97}\nplt.rcParams.update(para)\n\ndirpath = '/Users/dgreen/Documents/Work/4codor/infratopo/topo_input_files/alps/' \n\nprofiledata = read_profile(dirpath+'alp.xydz')\n\nfig = plt.figure(figsize=(10,5))\nax1 = fig.add_axes([0.15,0.15,0.8,0.75])\nax1.plot(profiledata['dist'],profiledata['alt'],'k-')\nax1.set_xlabel('Distance (km)')\nax1.set_ylabel('Elevation (m)')\n\nfig.savefig(dirpath+'alpine_profile.png',bbox_inches='tight')\n\nrzfile = 'alp_2d.dat'\n\nfL1 = open(dirpath+rzfile,'w')\nfor x in range(len(profiledata['dist'])):\n fL1.write('{:8.1f} {:7.3f}\\n'.format(profiledata.iloc[x]['dist']*1000.,profiledata.iloc[x]['alt']))\nfL1.close()\n\n\n\nfig = plt.figure(figsize=(10,5))\nax1 = fig.add_axes([0.15,0.15,0.8,0.75])\nax1.plot(profiledata['dist'],profiledata['alt'],'k-',label='Alps')\nax1.set_xlabel('Distance (km)')\nax1.set_ylabel('Elevation (m)')\n\nprofilegaussdata = read_2D_profile('/Users/dgreen/Documents/Work/4codor/infratopo/topo_input_files/synthetics/gauss_3000m_hill_long.dat')\n\nax1.plot(profilegaussdata['dist']/1000.,profilegaussdata['alt'],'r-',label='Gaussian Synthetic')\nax1.legend(loc=1)\nfig.savefig(dirpath+'alpine_profile_compare.png',bbox_inches='tight')" ]
[ [ "matplotlib.pyplot.rcParams.update", "pandas.io.parsers.read_csv", "matplotlib.pyplot.figure" ] ]
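The read helpers in the script above just parse whitespace-delimited profile columns; the same parse on inline data, for reference (the lon/lat/dist/alt values below are made up):

import io
import pandas as pd

raw = io.StringIO('10.60 44.27 0.0 350.0\n10.65 44.30 5.2 1210.0\n')
profile = pd.read_csv(raw, sep=r'\s+', names=['lon', 'lat', 'dist', 'alt'])
print(profile['alt'].max())  # 1210.0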
muupan/chainer
[ "038c0d1195c9479335d4223f42dec8bc5830327a" ]
[ "chainer/links/loss/negative_sampling.py" ]
[ "import numpy\n\nfrom chainer import cuda\nfrom chainer.functions.loss import negative_sampling\nfrom chainer import link\nfrom chainer.utils import walker_alias\n\n\nclass NegativeSampling(link.Link):\n\n \"\"\"Negative sampling loss layer.\n\n This link wraps the :func:`~chainer.functions.negative_sampling` function.\n It holds the weight matrix as a parameter. It also builds a sampler\n internally given a list of word counts.\n\n Args:\n in_size (int): Dimension of input vectors.\n counts (int list): Number of occurrences of each identifier.\n sample_size (int): Number of negative samples.\n power (float): Power factor :math:`\\\\alpha`.\n\n .. seealso:: :func:`~chainer.functions.negative_sampling` for more detail.\n\n Attributes:\n W (~chainer.Variable): Weight parameter matrix.\n\n \"\"\"\n def __init__(self, in_size, counts, sample_size, power=0.75):\n vocab_size = len(counts)\n super(NegativeSampling, self).__init__(W=(vocab_size, in_size))\n self.W.data.fill(0)\n\n self.sample_size = sample_size\n power = numpy.float32(power)\n p = numpy.array(counts, power.dtype)\n numpy.power(p, power, p)\n self.sampler = walker_alias.WalkerAlias(p)\n\n def to_cpu(self):\n super(NegativeSampling, self).to_cpu()\n self.sampler.to_cpu()\n\n def to_gpu(self, device=None):\n with cuda.get_device(device):\n super(NegativeSampling, self).to_gpu()\n self.sampler.to_gpu()\n\n def __call__(self, x, t):\n \"\"\"Computes the loss value for given input and groundtruth labels.\n\n Args:\n x (~chainer.Variable): Input of the weight matrix multiplication.\n t (~chainer.Variable): Batch of groundtruth labels.\n\n Returns:\n ~chainer.Variable: Loss value.\n\n \"\"\"\n return negative_sampling.negative_sampling(\n x, t, self.W, self.sampler.sample, self.sample_size)\n" ]
[ [ "numpy.array", "numpy.float32", "numpy.power" ] ]
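Hypothetical usage of the link above (the counts, sizes and inputs here are invented for illustration): the link samples negatives internally and returns a scalar loss.

import numpy as np
import chainer
from chainer import links as L

counts = [5, 3, 2, 2, 1]                                         # word frequencies for the sampler
loss_layer = L.NegativeSampling(in_size=10, counts=counts, sample_size=2)
x = chainer.Variable(np.random.randn(4, 10).astype(np.float32))  # batch of context vectors
t = chainer.Variable(np.array([0, 1, 2, 3], dtype=np.int32))     # target word ids
loss = loss_layer(x, t)
print(loss.data)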
DanielBerns/furry-journey
[ "778546ad072cc23520325f042de77cacfd37ecf3" ]
[ "06/bravo.py" ]
[ "import tensorflow as tf\n\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D\nfrom tensorflow.keras import Model\n\nmnist = tf.keras.datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n# Add a channels dimension\nx_train = x_train[..., tf.newaxis].astype(\"float32\")\nx_test = x_test[..., tf.newaxis].astype(\"float32\")\n\ntrain_ds = tf.data.Dataset.from_tensor_slices(\n (x_train, y_train)).shuffle(10000).batch(32)\n\ntest_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)\n\nclass MyModel(Model):\n def __init__(self):\n super(MyModel, self).__init__()\n self.conv1 = Conv2D(32, 3, activation='relu')\n self.flatten = Flatten()\n self.d1 = Dense(128, activation='relu')\n self.d2 = Dense(10)\n\n def call(self, x):\n x = self.conv1(x)\n x = self.flatten(x)\n x = self.d1(x)\n return self.d2(x)\n\n# Create an instance of the model\nmodel = MyModel()\n\nloss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\noptimizer = tf.keras.optimizers.Adam()\n\ntrain_loss = tf.keras.metrics.Mean(name='train_loss')\ntrain_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n\ntest_loss = tf.keras.metrics.Mean(name='test_loss')\ntest_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')\n\[email protected]\ndef train_step(images, labels):\n with tf.GradientTape() as tape:\n # training=True is only needed if there are layers with different\n # behavior during training versus inference (e.g. Dropout).\n predictions = model(images, training=True)\n loss = loss_object(labels, predictions)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n train_loss(loss)\n train_accuracy(labels, predictions)\n \[email protected]\ndef test_step(images, labels):\n # training=False is only needed if there are layers with different\n # behavior during training versus inference (e.g. Dropout).\n predictions = model(images, training=False)\n t_loss = loss_object(labels, predictions)\n\n test_loss(t_loss)\n test_accuracy(labels, predictions)\n\nEPOCHS = 5\n\nfor epoch in range(EPOCHS):\n # Reset the metrics at the start of the next epoch\n train_loss.reset_states()\n train_accuracy.reset_states()\n test_loss.reset_states()\n test_accuracy.reset_states()\n\n for images, labels in train_ds:\n train_step(images, labels)\n\n for test_images, test_labels in test_ds:\n test_step(test_images, test_labels)\n\n template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'\n print(template.format(epoch + 1,\n train_loss.result(),\n train_accuracy.result() * 100,\n test_loss.result(),\n test_accuracy.result() * 100))\n\n\n" ]
[ [ "tensorflow.keras.metrics.Mean", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.GradientTape", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.keras.optimizers.Adam" ] ]
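After the training loop above completes, a quick sanity check on one held-out batch; this is a sketch that reuses the model and test_ds defined in the script above rather than standing alone:

import tensorflow as tf

for images, labels in test_ds.take(1):
    logits = model(images, training=False)
    predicted = tf.argmax(logits, axis=1)
    correct = tf.cast(predicted == tf.cast(labels, predicted.dtype), tf.float32)
    print('batch accuracy:', float(tf.reduce_mean(correct)))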
JakobGM/robotarm-optimization
[ "aa64b5472e78f3d9ac7f42048ebbb905d366d9bc" ]
[ "problem.py" ]
[ "\"\"\"\nFunctions which will be used in optimization methods.\nThe 'thetas' argument is conceptually an n times s 2-d numpy array;\nthe generated functions receive it flattened to a vector of shape (n*s,)\nand reshape it internally with thetas.reshape((n, s), order='F').\n\"\"\"\nimport numpy as np\n\nfrom constraints import (\n generate_constraints_function,\n generate_constraint_gradients_function,\n)\n\ndef generate_objective_function(robot_arm):\n n = robot_arm.n\n s = robot_arm.s\n\n def objective(thetas):\n if not thetas.shape == (n*s,):\n raise ValueError('Thetas not given as a single 1D-vector, but as: ' + str(thetas.shape))\n\n rotated = np.roll(thetas.copy().reshape((n, s), order='F'), shift=-1, axis=1)\n deltas = rotated - thetas.reshape((n, s), order='F')\n return 0.5 * np.sum(deltas ** 2)\n\n return objective\n\ndef generate_objective_gradient_function(robot_arm):\n n = robot_arm.n\n s = robot_arm.s\n\n def objective_gradient(thetas):\n if not thetas.shape == (n*s,):\n raise ValueError('Thetas not given as a single 1D-vector, but as: ' + str(thetas.shape))\n else:\n thetas = thetas.reshape((n, s), order='F')\n\n def roll(x, y): return np.roll(x.copy(), shift=y, axis=1)\n\n return (2*thetas - roll(thetas, -1) - roll(thetas, 1)).reshape((n*s,), order='F')\n\n return objective_gradient\n" ]
[ [ "numpy.sum" ] ]
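A quick finite-difference check of the analytic gradient above, assuming the module is importable; StubArm is a hypothetical stand-in exposing only the n and s attributes the generators read:

import numpy as np

class StubArm:
    n, s = 3, 4  # joints, configurations

arm = StubArm()
f = generate_objective_function(arm)
g = generate_objective_gradient_function(arm)
theta = np.random.randn(arm.n * arm.s)
i, eps = 5, 1e-6
e = np.zeros(arm.n * arm.s)
e[i] = eps
numeric = (f(theta + e) - f(theta - e)) / (2 * eps)  # central difference
print(abs(numeric - g(theta)[i]))                    # should be tiny, around 1e-9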
yyht/Funnel_Transformer
[ "4b35a794d5e122a8054471863a52d4eac1c39dcd" ]
[ "tensorflow/squad_utils_v2.py" ]
[ "\"\"\"Official evaluation script for SQuAD version 2.0.\n\nIn addition to basic functionality, we also compute additional statistics and\nplot precision-recall curves if an additional na_prob.json file is provided.\nThis file is expected to map question ID's to the model's predicted probability\nthat a question is unanswerable.\n\"\"\"\nimport argparse\nimport collections\nimport json\nimport numpy as np\nimport os\nimport re\nimport string\nimport sys\n\nOPTS = None\n\ndef parse_args():\n parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')\n parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')\n parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')\n parser.add_argument('--out-file', '-o', metavar='eval.json',\n help='Write accuracy metrics to file (default is stdout).')\n parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json',\n help='Model estimates of probability of no answer.')\n parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0,\n help='Predict \"\" if no-answer probability exceeds this (default = 1.0).')\n parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None,\n help='Save precision-recall curves to directory.')\n parser.add_argument('--verbose', '-v', action='store_true')\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()\n\ndef make_qid_to_has_ans(dataset):\n qid_to_has_ans = {}\n for article in dataset:\n for p in article['paragraphs']:\n for qa in p['qas']:\n qid_to_has_ans[qa['id']] = bool(qa['answers'])\n return qid_to_has_ans\n\ndef normalize_answer(s):\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\n def remove_articles(text):\n regex = re.compile(r'\\b(a|an|the)\\b', re.UNICODE)\n return re.sub(regex, ' ', text)\n def white_space_fix(text):\n return ' '.join(text.split())\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n def lower(text):\n return text.lower()\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\ndef get_tokens(s):\n if not s: return []\n return normalize_answer(s).split()\n\ndef compute_exact(a_gold, a_pred):\n return int(normalize_answer(a_gold) == normalize_answer(a_pred))\n\ndef compute_f1(a_gold, a_pred):\n gold_toks = get_tokens(a_gold)\n pred_toks = get_tokens(a_pred)\n common = collections.Counter(gold_toks) & collections.Counter(pred_toks)\n num_same = sum(common.values())\n if len(gold_toks) == 0 or len(pred_toks) == 0:\n # If either is no-answer, then F1 is 1 if they agree, 0 otherwise\n return int(gold_toks == pred_toks)\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(pred_toks)\n recall = 1.0 * num_same / len(gold_toks)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1\n\ndef get_raw_scores(dataset, preds):\n exact_scores = {}\n f1_scores = {}\n for article in dataset:\n for p in article['paragraphs']:\n for qa in p['qas']:\n qid = qa['id']\n gold_answers = [a['text'] for a in qa['answers']\n if normalize_answer(a['text'])]\n if not gold_answers:\n # For unanswerable questions, only correct answer is empty string\n gold_answers = ['']\n if qid not in preds:\n print('Missing prediction for %s' % qid)\n continue\n a_pred = preds[qid]\n # Take max over all gold answers\n exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)\n f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)\n 
return exact_scores, f1_scores\n\ndef apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):\n new_scores = {}\n for qid, s in scores.items():\n pred_na = na_probs[qid] > na_prob_thresh\n if pred_na:\n new_scores[qid] = float(not qid_to_has_ans[qid])\n else:\n new_scores[qid] = s\n return new_scores\n\ndef make_eval_dict(exact_scores, f1_scores, qid_list=None):\n if not qid_list:\n total = len(exact_scores)\n return collections.OrderedDict([\n ('exact', 100.0 * sum(exact_scores.values()) / total),\n ('f1', 100.0 * sum(f1_scores.values()) / total),\n ('total', total),\n ])\n else:\n total = len(qid_list)\n return collections.OrderedDict([\n ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),\n ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),\n ('total', total),\n ])\n\ndef merge_eval(main_eval, new_eval, prefix):\n for k in new_eval:\n main_eval['%s_%s' % (prefix, k)] = new_eval[k]\n\ndef plot_pr_curve(precisions, recalls, out_image, title):\n plt.step(recalls, precisions, color='b', alpha=0.2, where='post')\n plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.xlim([0.0, 1.05])\n plt.ylim([0.0, 1.05])\n plt.title(title)\n plt.savefig(out_image)\n plt.clf()\n\ndef make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans,\n out_image=None, title=None):\n qid_list = sorted(na_probs, key=lambda k: na_probs[k])\n true_pos = 0.0\n cur_p = 1.0\n cur_r = 0.0\n precisions = [1.0]\n recalls = [0.0]\n avg_prec = 0.0\n for i, qid in enumerate(qid_list):\n if qid_to_has_ans[qid]:\n true_pos += scores[qid]\n cur_p = true_pos / float(i+1)\n cur_r = true_pos / float(num_true_pos)\n if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]:\n # i.e., if we can put a threshold after this point\n avg_prec += cur_p * (cur_r - recalls[-1])\n precisions.append(cur_p)\n recalls.append(cur_r)\n if out_image:\n plot_pr_curve(precisions, recalls, out_image, title)\n return {'ap': 100.0 * avg_prec}\n\ndef run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, \n qid_to_has_ans, out_image_dir):\n if out_image_dir and not os.path.exists(out_image_dir):\n os.makedirs(out_image_dir)\n num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)\n if num_true_pos == 0:\n return\n pr_exact = make_precision_recall_eval(\n exact_raw, na_probs, num_true_pos, qid_to_has_ans,\n out_image=os.path.join(out_image_dir, 'pr_exact.png'),\n title='Precision-Recall curve for Exact Match score')\n pr_f1 = make_precision_recall_eval(\n f1_raw, na_probs, num_true_pos, qid_to_has_ans,\n out_image=os.path.join(out_image_dir, 'pr_f1.png'),\n title='Precision-Recall curve for F1 score')\n oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}\n pr_oracle = make_precision_recall_eval(\n oracle_scores, na_probs, num_true_pos, qid_to_has_ans,\n out_image=os.path.join(out_image_dir, 'pr_oracle.png'),\n title='Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)')\n merge_eval(main_eval, pr_exact, 'pr_exact')\n merge_eval(main_eval, pr_f1, 'pr_f1')\n merge_eval(main_eval, pr_oracle, 'pr_oracle')\n\ndef histogram_na_prob(na_probs, qid_list, image_dir, name):\n if not qid_list:\n return\n x = [na_probs[k] for k in qid_list]\n weights = np.ones_like(x) / float(len(x))\n plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))\n plt.xlabel('Model probability of no-answer')\n plt.ylabel('Proportion of dataset')\n plt.title('Histogram of no-answer probability: %s' % name)\n plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name))\n plt.clf()\n\ndef find_best_thresh(preds, scores, na_probs, qid_to_has_ans):\n num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])\n cur_score = num_no_ans\n best_score = cur_score\n best_thresh = 0.0\n qid_list = sorted(na_probs, key=lambda k: na_probs[k])\n for i, qid in enumerate(qid_list):\n if qid not in scores: continue\n if qid_to_has_ans[qid]:\n diff = scores[qid]\n else:\n if preds[qid]:\n diff = -1\n else:\n diff = 0\n cur_score += diff\n if cur_score > best_score:\n best_score = cur_score\n best_thresh = na_probs[qid]\n return 100.0 * best_score / len(scores), best_thresh\n\ndef find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):\n num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])\n cur_score = num_no_ans\n best_score = cur_score\n best_thresh = 0.0\n qid_list = sorted(na_probs, key=lambda k: na_probs[k])\n for i, qid in enumerate(qid_list):\n if qid not in scores: continue\n if qid_to_has_ans[qid]:\n diff = scores[qid]\n else:\n if preds[qid]:\n diff = -1\n else:\n diff = 0\n cur_score += diff\n if cur_score > best_score:\n best_score = cur_score\n best_thresh = na_probs[qid]\n\n has_ans_score, has_ans_cnt = 0, 0\n for qid in qid_list:\n if not qid_to_has_ans[qid]: continue\n has_ans_cnt += 1\n\n if qid not in scores: continue\n has_ans_score += scores[qid]\n\n return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt\n\ndef find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):\n best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)\n best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)\n main_eval['best_exact'] = best_exact\n main_eval['best_exact_thresh'] = exact_thresh\n main_eval['best_f1'] = best_f1\n main_eval['best_f1_thresh'] = f1_thresh\n\ndef find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):\n best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)\n best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)\n main_eval['best_exact'] = best_exact\n main_eval['best_exact_thresh'] = exact_thresh\n main_eval['best_f1'] = best_f1\n main_eval['best_f1_thresh'] = f1_thresh\n main_eval['has_ans_exact'] = has_ans_exact\n main_eval['has_ans_f1'] = has_ans_f1\n\ndef main():\n with open(OPTS.data_file) as f:\n dataset_json = json.load(f)\n dataset = dataset_json['data']\n with open(OPTS.pred_file) as f:\n preds = json.load(f)\n if OPTS.na_prob_file:\n with open(OPTS.na_prob_file) as f:\n na_probs = json.load(f)\n else:\n na_probs = {k: 0.0 for k in preds}\n qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False\n has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]\n no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]\n exact_raw, f1_raw = get_raw_scores(dataset, preds)\n exact_thresh 
= apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,\n OPTS.na_prob_thresh)\n f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,\n OPTS.na_prob_thresh)\n out_eval = make_eval_dict(exact_thresh, f1_thresh)\n if has_ans_qids:\n has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)\n merge_eval(out_eval, has_ans_eval, 'HasAns')\n if no_ans_qids:\n no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)\n merge_eval(out_eval, no_ans_eval, 'NoAns')\n if OPTS.na_prob_file:\n find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)\n if OPTS.na_prob_file and OPTS.out_image_dir:\n run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, \n qid_to_has_ans, OPTS.out_image_dir)\n histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')\n histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')\n if OPTS.out_file:\n with open(OPTS.out_file, 'w') as f:\n json.dump(out_eval, f)\n else:\n print(json.dumps(out_eval, indent=2))\n\nif __name__ == '__main__':\n OPTS = parse_args()\n if OPTS.out_image_dir:\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt \n main()\n" ]
[ [ "matplotlib.use", "numpy.ones_like", "matplotlib.pyplot.xlim", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.step", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "matplotlib.pyplot.hist", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf" ] ]
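The scoring primitives above can be exercised directly; two toy calls showing the effect of normalize_answer (lowercasing, article and punctuation removal) on the scores:

print(compute_exact('The Eiffel Tower', 'the eiffel tower!'))  # 1: both normalize to 'eiffel tower'
print(round(compute_f1('in the 1990s', 'the 1990s'), 3))       # 0.667: one shared token, two gold tokens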
renshj/High-Cadence-Processing
[ "5d5a2df741858f6e1466d7c4b008e9245d4b780a" ]
[ "Photometry.py" ]
[ "#This file was created by Julian Harrison\r\nimport numpy as np\r\nimport SlicePlot\r\n#from MockDataObject import DataObject #Testing\r\n#from MockSlice import Slice #Testing \r\nimport DataObject\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.figure import Figure\r\nimport Slice\r\nfrom Photometry_GUI import *\r\nfrom GUIHandler import GUIHandler\r\n\r\ndef photometryPlot(dataObject):\r\n #Extract image from dataObject\r\n image = dataObject.getImageData()\r\n \r\n #Convert image into figure\r\n figure = plt.figure()\r\n plt.imshow(image)\r\n \r\n #Start GUI process\r\n handler = GUIHandler.getInstance()\r\n handler.setWindow(\"PhotometryGUI\")\r\n handler.setPhotometryGUIData(figure, dataObject)\r\n\r\ndef performPhotometry(dataObject):\r\n slicePlots = dataObject.getSliceList()\r\n calibrationFactorList = []\r\n \r\n #Fill list with data on slice edges\r\n for slicePlot in slicePlots:\r\n #Get values for plotting\r\n tempX = slicePlot.getX()\r\n tempY = slicePlot.getY()\r\n tempWidth = slicePlot.getWidth()\r\n \r\n #Get slice edges\r\n slicePlotData = SlicePlot.performSlicePlot(dataObject, tempX, tempY, tempWidth)\r\n \r\n #Get slice brightness difference\r\n brightnessDifference, peak = SlicePlot.intensityDiff(dataObject, slicePlotData)\r\n \r\n #Get calibration factor\r\n calibrationFactor = SlicePlot.findCalibrationFactor(brightnessDifference, slicePlot.getActualBrightness())\r\n \r\n #Update slice with new data\r\n yl = slicePlotData[1]\r\n yh = slicePlotData[2]\r\n slicePlot.setYl(yl)\r\n slicePlot.setYh(yh)\r\n slicePlot.setBrightnessDiff(brightnessDifference)\r\n slicePlot.setPeak(peak)\r\n calibrationFactorList.append(calibrationFactor)\r\n \r\n meanCalibrationFactor = np.mean(calibrationFactorList)\r\n\r\n dataObject.setMeanCalibrationFactor(meanCalibrationFactor)\r\n\r\n return dataObject" ]
[ [ "matplotlib.pyplot.imshow", "numpy.mean", "matplotlib.pyplot.figure" ] ]
factualaudio/factualaudio
[ "8b9b824e98710470d9d09931688277f667b85a6e" ]
[ "python/factualaudio/plots/clipped_sine_spectrum.py" ]
[ "from factualaudio.plots.clipped_sine_wave import clipped_sine_wave\nfrom factualaudio.plot_format import format_spectrum, add_ellipse_annotation\nfrom factualaudio.plot import rms_amplitude_spectrum\nimport numpy as np\n\ndef populate_figure(figure):\n wave, sample_rate = clipped_sine_wave(num_periods=3)\n\n axes = figure.add_subplot(1, 1, 1)\n rms_amplitude_spectrum(axes, wave, Fs=sample_rate)\n format_spectrum(figure)\n axes.set_ylim(-60, 0)\n axes.set_xticks(np.array([1, 3, 5, 7, 9])*1000)\n axes.set_xticklabels(['1k', '3k', '5k', '7k', '9k'])\n axes.set_xticks([], minor=True)\n add_ellipse_annotation(figure, xy=(0.8, 0.3), width=0.4, height=0.8, transform=axes.transAxes)\n" ]
[ [ "numpy.array" ] ]
willdunklin/Danesfield
[ "691e48c9491aed9ebd1ca1fb85c4bbf896cad077" ]
[ "tools/kwsemantic_segment.py" ]
[ "#!/usr/bin/env python\n\n###############################################################################\n# Copyright Kitware Inc. and Contributors\n# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)\n# See accompanying Copyright.txt and LICENSE files for details\n###############################################################################\n\n\nimport logging\nimport os\nimport sys\nimport numpy as np\nfrom osgeo import gdal\nimport argparse\nimport json\n\nfrom danesfield.segmentation.semantic.utils.utils import update_config\nfrom danesfield.segmentation.semantic.tasks.seval import Evaluator\nfrom danesfield.segmentation.semantic.utils.config import Config\n\n# Need to append to sys.path here as the pretrained model includes an\n# import statement for \"models\" rather than\n# \"danesfield.segmentation.semantic.models\"\nsys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"../danesfield/segmentation/semantic\"))\n\n\ndef predict(rgbpath, dsmpath, dtmpath, msipath, outdir, outfname, config):\n img_data = np.transpose(gdal.Open(rgbpath).ReadAsArray(), (1, 2, 0))\n\n dsm_data = gdal.Open(dsmpath).ReadAsArray()\n dtm_data = gdal.Open(dtmpath).ReadAsArray()\n ndsm_data = dsm_data - dtm_data\n ndsm_data[ndsm_data < 0] = 0\n ndsm_data[ndsm_data > 40] = 40\n ndsm_data = ndsm_data/40*255\n\n msi_image = np.transpose(gdal.Open(msipath).ReadAsArray(), (1, 2, 0))\n red_map = msi_image[:, :, 4].astype(np.float)\n nir_map = msi_image[:, :, 6].astype(np.float)\n\n ndvi = (nir_map - red_map)/(nir_map + red_map + 1e-7)\n ndvi[ndvi < 0] = 0\n ndvi[ndvi > 1] = 1\n ndvi_data = ndvi*255.0\n\n input_data = np.moveaxis(np.dstack([img_data, ndsm_data, ndvi_data])/255, -1, 0)\n input_data = input_data.astype(np.float32)\n input_data = (input_data - 0.5)*2\n\n keval = Evaluator(config)\n keval.onepredict(input_data, dsmpath, outdir, outfname)\n\n\ndef main(args):\n parser = argparse.ArgumentParser(description='configuration for semantic segmentation task.')\n parser.add_argument('config_path', help='configuration file path.')\n parser.add_argument('pretrain_model_path', help='pretrained model file path.')\n parser.add_argument('rgbpath', help='3-band 8-bit RGB image path')\n parser.add_argument('dsmpath', help='1-band float DSM file path')\n parser.add_argument('dtmpath', help='1-band float DTM file path')\n parser.add_argument('msipath', help='8-band float MSI file path')\n parser.add_argument('outdir', help='directory in which to write output files')\n parser.add_argument('outfname', help='out filename for prediction probability and class mask')\n args = parser.parse_args(args)\n\n with open(args.config_path, 'r') as f:\n cfg = json.load(f)\n pretrain_model_path = args.pretrain_model_path\n rgbpath = args.rgbpath\n dsmpath = args.dsmpath\n dtmpath = args.dtmpath\n msipath = args.msipath\n outfname = args.outfname\n cfg['pretrain_model_path'] = pretrain_model_path\n cfg['out_fname'] = outfname\n\n config = Config(**cfg)\n config = update_config(config, img_rows=2048, img_cols=2048, target_rows=2048,\n target_cols=2048, num_channels=5)\n predict(rgbpath, dsmpath, dtmpath, msipath, args.outdir, outfname, config)\n\n\nif __name__ == \"__main__\":\n try:\n main(sys.argv[1:])\n except Exception as e:\n logging.exception(e)\n sys.exit(1)\n" ]
[ [ "numpy.dstack" ] ]
slachapelle/anon_disentanglement_via_mechanism_sparsity
[ "677f7e160f3532e1357a3c7f35f9f8f8529b389a" ]
[ "model/gumbel_masks.py" ]
[ "import math\nimport torch\n\n\nclass GumbelSigmoid(torch.nn.Module):\n def __init__(self, shape, freeze=False, drawhard=True, tau=1):\n super(GumbelSigmoid, self).__init__()\n self.shape = shape\n self.freeze=freeze\n self.drawhard = drawhard\n self.log_alpha = torch.nn.Parameter(torch.zeros(self.shape))\n self.tau = tau\n # useful to make sure these parameters will be pushed to the GPU\n self.uniform = torch.distributions.uniform.Uniform(0, 1)\n self.register_buffer(\"fixed_mask\", torch.ones(shape))\n self.reset_parameters()\n\n def forward(self, bs):\n if self.freeze:\n y = self.fixed_mask.unsqueeze(0).expand((bs,) + self.shape)\n return y\n else:\n shape = tuple([bs] + list(self.shape))\n logistic_noise = self.sample_logistic(shape).type(self.log_alpha.type()).to(self.log_alpha.device)\n y_soft = torch.sigmoid((self.log_alpha + logistic_noise) / self.tau)\n\n if self.drawhard:\n y_hard = (y_soft > 0.5).type(y_soft.type())\n\n # This weird line does two things:\n # 1) at forward, we get a hard sample.\n # 2) at backward, we differentiate the gumbel sigmoid\n y = y_hard.detach() - y_soft.detach() + y_soft\n\n else:\n y = y_soft\n\n return y\n\n def get_proba(self):\n \"\"\"Returns probability of getting one\"\"\"\n if self.freeze:\n return self.fixed_mask\n else:\n return torch.sigmoid(self.log_alpha)\n\n def reset_parameters(self):\n torch.nn.init.constant_(self.log_alpha, 5) # 5) # will yield a probability ~0.99. Inspired by DCDI\n\n def sample_logistic(self, shape):\n u = self.uniform.sample(shape)\n return torch.log(u) - torch.log(1 - u)\n\n def threshold(self):\n proba = self.get_proba()\n self.fixed_mask.copy_((proba > 0.5).type(proba.type()))\n self.freeze = True\n\n" ]
[ [ "torch.zeros", "torch.sigmoid", "torch.nn.init.constant_", "torch.ones", "torch.log", "torch.distributions.uniform.Uniform" ] ]
Magnusnolsoe/xlnet-proteins
[ "2f3fad3139d58e0c0d9a9438ab08e79cd9bcea91" ]
[ "tpu_worker.py" ]
[ "import os\nimport json\nimport tensorflow as tf\n\nfrom sigopt import Connection\nfrom absl import flags, app\nfrom os_utils import get_logdir\nfrom data_utils import VOCAB_SIZE\n\n# SigOpt parameters\nflags.DEFINE_string(\"api_token\", default=\"\",\n help=\"SigOpt api token\")\nflags.DEFINE_string(\"experiment_id\", default=\"\",\n help=\"SigOpt experiment ID\")\n\n# Google Cloud Platform parameters\nflags.DEFINE_string(\"gcp_project\", default=\"\",\n help=\"Name of gpc project\")\nflags.DEFINE_string(\"bucket_name\", default=\"\",\n help=\"Name of gcp bucket\")\n\n# TPU parameters\nflags.DEFINE_string(\"tpu_name\", default=\"\",\n help=\"TPU name\")\n\n# Internal Configurations\nNUM_HOSTS = 1\nNUM_CORES = 8\nEPOCHS = 50\nFAIL_THRESHOLD = 3\nITERATIONS = 10000\n\nTPU_ZONES = {\n 'instance-1': \"us-central1-a\",\n 'instance-2': \"us-central1-a\",\n 'instance-3': \"us-central1-a\",\n 'instance-4': \"us-central1-a\",\n 'instance-5': \"us-central1-a\",\n 'v2-1': \"us-central1-f\",\n 'v2-2': \"us-central1-f\",\n 'v2-3': \"us-central1-f\",\n 'v2-4': \"us-central1-f\",\n 'v2-5': \"us-central1-f\",\n 'preempt-1': \"us-central1-f\",\n 'preempt-2': \"us-central1-f\",\n 'preempt-3': \"us-central1-f\",\n 'preempt-4': \"us-central1-f\",\n 'preempt-5': \"us-central1-f\",\n 'preempt-6': \"us-central1-f\",\n 'preempt-7': \"us-central1-f\",\n 'preempt-8': \"us-central1-f\",\n 'preempt-9': \"us-central1-f\",\n 'preempt-10': \"us-central1-f\"\n}\n\n\ndef generate_model_dir(dirname):\n model_dir_basename = os.path.join(\"models\", dirname)\n _dir = os.path.join(FLAGS.bucket_name, model_dir_basename)\n if tf.gfile.Exists(_dir):\n tf.gfile.DeleteRecursively(_dir)\n tf.gfile.MakeDirs(_dir)\n\n return model_dir_basename\n\ndef get_record_info_dir(seq_len, reuse_len, n_pred, bsz):\n\n basename = \"seq_len{}-reuse_len{}-n_pred{}-bsz{}\".format(seq_len, reuse_len, n_pred, bsz)\n\n return os.path.join(\"proc_data\", basename)\n\ndef generate_param_config(dirname, suggestion_id, params):\n\n log_info = {\"id\": suggestion_id}\n\n # Suggestions from SigOpt\n seq_len = int(params['seq_len'])\n reuse_len = seq_len // 2\n mem_len = params['mem_len']*8\n perm_size = reuse_len\n n_layer = 16\n d_model = 1024\n d_embed = 1024\n n_head = 16\n d_head = 64\n d_inner = 4096\n batch_size = 64\n lr_rate = params['learning_rate']\n d_method = params['decay_method']\n dropout = params['dropout']/10\n dropatt = params['dropatt']/10\n warmup_steps = params['warmup_steps']*1000\n weight_decay = 0\n\n if seq_len == 512:\n n_pred = 85\n else:\n n_pred = int(round(0.15*seq_len))\n record_info_dir = get_record_info_dir(seq_len, reuse_len, n_pred, batch_size)\n tpu_zone = TPU_ZONES[FLAGS.tpu_name]\n\n configs = {\"master\": None, \"tpu\": FLAGS.tpu_name, \"gcp_project\": FLAGS.gcp_project,\n \"tpu_zone\": tpu_zone, \"use_tpu\": True, \"num_hosts\": NUM_HOSTS,\n \"num_core_per_host\": NUM_CORES, \"track_mean\": True,\n \"run_id\": suggestion_id, \"num_passes\": None, \"record_info_dir\": record_info_dir, \"model_dir\": dirname,\n \"init_checkpoint\": None, \"logDir\": 'logging', \"learning_rate\": lr_rate, \"clip\": None,\n \"min_lr_ratio\": None, \"warmup_steps\": warmup_steps, \"adam_epsilon\": None,\n \"decay_method\": d_method, \"weight_decay\": weight_decay, \"batch_size\": batch_size,\n \"train_steps\": None, \"iterations\": ITERATIONS, \"save_steps\": None, \"max_save\": None,\n \"seq_len\": seq_len, \"reuse_len\": reuse_len, \"perm_size\": perm_size, \n \"bi_data\": False, \"mask_alpha\": 6, \"mask_beta\": 1, \"num_predict\": 
n_pred, \"n_token\": VOCAB_SIZE,\n \"mem_len\": mem_len, \"same_length\": None, \"clamp_len\": None, \"n_layer\": n_layer,\n \"d_model\": d_model, \"d_embed\": d_embed, \"n_head\": n_head, \"d_head\": d_head, \"d_inner\": d_inner,\n \"dropout\": dropout, \"dropatt\": dropatt, \"untie_r\": None, \"summary_type\": 'last', \n \"ff_activation\": 'relu', \"use_bfloat16\": True, \"init\": 'normal', \"init_std\": None,\n \"init_range\": None, \"bucket_uri\": FLAGS.bucket_name, \"epochs\": EPOCHS, \"python\": \"python3\"}\n\n path = os.path.join(FLAGS.bucket_name, \"param_configs\", \"{}.json\".format(suggestion_id))\n with tf.gfile.Open(path, 'w') as fp:\n json.dump(configs, fp)\n \n return path\n\n\ndef start_tpu(config_path):\n\n with tf.gfile.Open(config_path, 'r') as config_file:\n params = json.load(config_file)\n \n # train_batch_size should be equal to the bsz_per_host used in preprocessing\n param_keys = [\"master\", \"tpu\", \"gcp_project\", \"tpu_zone\", \"use_tpu\",\n \"num_hosts\", \"num_core_per_host\", \"track_mean\",\n \"run_id\", \"num_passes\", \"record_info_dir\", \"model_dir\",\n \"init_checkpoint\", \"logDir\", \"learning_rate\", \"clip\",\n \"min_lr_ratio\", \"warmup_steps\", \"adam_epsilon\",\n \"decay_method\", \"weight_decay\", \"batch_size\",\n \"train_steps\", \"iterations\", \"save_steps\", \"max_save\",\n \"seq_len\", \"reuse_len\", \"perm_size\", \"bi_data\",\n \"mask_alpha\", \"mask_beta\", \"num_predict\", \"n_token\",\n \"mem_len\", \"same_length\", \"clamp_len\", \"n_layer\",\n \"d_model\", \"d_embed\", \"n_head\", \"d_head\", \"d_inner\",\n \"dropout\", \"dropatt\", \"untie_r\", \"summary_type\", \n \"ff_activation\", \"use_bfloat16\", \"init\", \"init_std\",\n \"init_range\", \"bucket_uri\", \"epochs\"]\n \n args = \"\"\n for key in param_keys:\n if params[key] is not None:\n args += \"--{}={} \".format(key, params[key])\n\n python = params['python']\n # returns 0 if failed, and 1 if succeeded\n return os.system(python + \" train_tpu.py \" + args)\n\n\ndef run_worker(unused_args):\n del unused_args\n\n conn = Connection(client_token=FLAGS.api_token)\n\n experiment = conn.experiments(FLAGS.experiment_id).fetch()\n\n worker_dir = os.path.join(FLAGS.bucket_name, \"workers\", str(experiment.id), FLAGS.tpu_name)\n tf.gfile.MakeDirs(worker_dir)\n worker_state_path = os.path.join(worker_dir, \"status.json\")\n\n\n fail_count = 0\n while experiment.progress.observation_count < experiment.observation_budget:\n\n suggestion = conn.experiments(experiment.id).suggestions().create(\n metadata=dict(\n host_name=FLAGS.gcp_project,\n tpu_name=FLAGS.tpu_name,\n )\n )\n\n # create model_dir and param config file\n model_dir_basename = generate_model_dir(suggestion.id)\n model_dir_total_path = os.path.join(FLAGS.bucket_name, model_dir_basename)\n config_path = generate_param_config(\n model_dir_basename,\n suggestion.id,\n suggestion.assignments\n )\n\n\n if start_tpu(config_path): # Only enters if failed\n observation = conn.experiments(experiment.id).observations().create(\n failed = True,\n suggestion=suggestion.id,\n metadata=dict(\n host_name = FLAGS.gcp_project,\n tpu_name = FLAGS.tpu_name\n )\n )\n fail_count += 1\n if fail_count >= FAIL_THRESHOLD: # Stop worker if failed FAIL_THRESHOLD or more times\n with tf.gfile.Open(worker_state_path, 'w') as f:\n json.dump({\"state\": 'FAILED'}, f)\n break\n continue\n\n result_path = os.path.join(FLAGS.bucket_name, 'results', \"{}.json\".format(suggestion.id))\n assert tf.gfile.Exists(result_path)\n with tf.gfile.Open(result_path, 
'r') as result_file:\n results = json.load(result_file) # read results from suggestion generated by train_tpu.py\n\n # Report an Observation\n observation = conn.experiments(experiment.id).observations().create(\n suggestion=suggestion.id,\n value=float(results['pplx']),\n metadata=dict(\n avg_train_time=results['avg_train_time'], \n avg_eval_time=results['avg_eval_time'], \n stopped_early=results['stopped_early'], \n last_errors=results['last_errors'], \n slope=results['slope'], \n epoch=results['epoch'],\n host_name = FLAGS.gcp_project,\n tpu_name = FLAGS.tpu_name,\n )\n )\n\n # Update the experiment object\n experiment = conn.experiments(experiment.id).fetch()\n\n tf.gfile.DeleteRecursively(model_dir_total_path)\n fail_count = 0\n \n if fail_count < 3:\n with tf.gfile.Open(worker_state_path, 'w') as f:\n json.dump({\"state\": 'DONE'}, f)\n\nif __name__ == \"__main__\":\n FLAGS = flags.FLAGS\n app.run(run_worker)" ]
[ [ "tensorflow.gfile.MakeDirs", "tensorflow.gfile.Exists", "tensorflow.gfile.DeleteRecursively", "tensorflow.gfile.Open" ] ]
fffunction/jam-image-filter
[ "89ae6e83ebe8351da12c7c085435172c53232483" ]
[ "jam_image_filter/util.py" ]
[ "import math\nimport colorsys\nimport scipy\nimport scipy.cluster\nimport scipy.misc\nimport operator\nimport math\nfrom PIL import Image\nimport numpy as np\nimport random\n\nWIDTH = 1700\nHEIGHT = 540\n\ndef rgb_to_gray(r, g, b, a = None):\n return 0.299 * r + 0.587 * g + 0.114 * b\n\ndef get_avg_gray(pix, x, y, radius, sample_size = 0.1):\n nsamples = math.pow(radius, 2) * sample_size\n avg = 0\n for y in xrange(y - radius, y + radius, int(1 / sample_size)):\n for x in xrange(x - radius, x + radius, int(1 / sample_size)):\n try:\n if len(pix[x, y]) >= 3:\n avg += rgb_to_gray(*pix[x,y])\n else:\n avg += pix[x, y]\n except IndexError:\n pass\n return 1 - avg / nsamples / 255.0\n\ndef get_dominant_colours(im, n):\n small_width = 50\n small_height = 50\n orig_width, orig_height = im.size\n im = im.resize((small_width, small_height))\n array = scipy.misc.fromimage(im)\n width, height, ncolours = array.shape\n array = array.reshape(width * height, ncolours)\n arr = np.array(array)\n farr = arr.astype(float)\n codes, dist = scipy.cluster.vq.kmeans(farr, float(n))\n codes = np.array([map(int, colour) for colour in codes])\n\n codes = pad_colours(codes, n)\n\n #vec, dist = scipy.cluster.vq.vq(array, codes)\n #vec = vec.reshape(width, height)\n #scale = (orig_width / float(small_width), orig_height / float(small_height))\n return codes#, vec, scale\n\ndef pad_colours(rgbs, n):\n new_rgbs = [None] * n\n for i in xrange(n):\n j = int(i / (n / float(len(rgbs))))\n new_rgbs[i] = rgbs[j]\n return new_rgbs\n\ndef order_colours_by_brightness(colours):\n colours_value = []\n for colour in colours:\n c = colour / 255.0\n value = colorsys.rgb_to_hls(*c)[1]\n colours_value.append((value, colour))\n colours_value.sort(key=operator.itemgetter(0), reverse=True)\n return map(operator.itemgetter(1), colours_value)\n\ndef order_colours_by_hue(colours):\n colours_value = []\n for colour in colours:\n c = colour / 255.0\n value = colorsys.rgb_to_hls(*c)[0]\n colours_value.append((value, colour))\n colours_value.sort(key=operator.itemgetter(0), reverse=True)\n return map(operator.itemgetter(1), colours_value)\n\ndef order_colours_by_saturation(colours):\n colours_value = []\n for colour in colours:\n c = colour / 255.0\n value = colorsys.rgb_to_hsv(*c)[1]\n colours_value.append((value, colour))\n colours_value.sort(key=operator.itemgetter(0), reverse=True)\n return map(operator.itemgetter(1), colours_value)\n\ndef resize_jam_background(im, target_width=WIDTH, target_height=HEIGHT,\n max_resize=16.0, pixelated=False):\n width, height = im.size\n scale, crop_left, crop_right, crop_top, crop_bottom = \\\n get_resize_params(im, target_width, target_height, max_resize)\n\n if pixelated:\n im = im.resize((int(width * scale), int(height * scale)))\n else:\n im = im.resize((int(width * scale), int(height * scale)), Image.BILINEAR)\n im = im.crop((crop_left, crop_top, crop_right, crop_bottom))\n\n return im\n\ndef centre_crop(im, width, height):\n im_width, im_height = im.size\n width = min(width, im_width)\n height = min(height, im_height)\n left = max(0, (im_width - width) / 2)\n top = max(0, (im_height - height) / 2)\n\n return im.crop((left, top, left + width, top + height))\n\ndef random_crop(im, width, height):\n im_width, im_height = im.size\n width = min(width, im_width)\n height = min(height, im_height)\n\n if im_width > width:\n left = random.randint(0, im_width - width - 1)\n else:\n left = 0\n if im_height > height:\n top = random.randint(0, im_height - height - 1)\n else:\n top = 0\n\n return im.crop((left, 
top, left + width, top + height))\n\ndef get_resize_params(im, target_width=WIDTH, target_height=HEIGHT, max_resize=4.0):\n width, height = im.size\n\n scale = min(max_resize, max(target_width / float(width),\n target_height / float(height)))\n width = int(width * scale)\n height = int(height * scale)\n\n crop_left = int(max(0, (width - target_width) / 2))\n crop_right = int(width - crop_left)\n crop_top = int(max(0, (height - target_height) / 2))\n crop_bottom = int(height - crop_top)\n\n return scale, crop_left, crop_right, crop_top, crop_bottom\n\ndef max_size(im, target_width, target_height):\n width, height = im.size\n scale = max(width / target_width, height / target_height)\n if scale > 1:\n return im.resize((width / scale, height / scale))\n else:\n return im\n\ndef interpolate_colour(colours, x):\n i = (len(colours) - 1) * x\n start_i = int(math.floor(i))\n end_i = start_i + 1\n delta = i - start_i\n\n start_rgb = map(int, colours[start_i])\n end_rgb = map(int, colours[end_i])\n\n rgb = []\n for i in range(3):\n rgb.append(int(start_rgb[i] + (end_rgb[i] - start_rgb[i]) * delta))\n\n return tuple(rgb)\n\ndef create_gradient(size, colours):\n vertical = True # hard coded for now\n width, height = size\n im = Image.new('RGB', (1, height))\n pix = im.load()\n\n for i in xrange(height):\n pix[0, i] = interpolate_colour(colours, i / float(height))\n\n im = im.resize((width, height), Image.BILINEAR)\n return im\n\n# rgb = (r, g, b), where 0 <= r, g, b <= 255\n# returns (h, l, s), where 0 <= h, l, s <= 255\ndef rgb_to_hls(rgb):\n rgb = map(lambda x: x / 255.0, rgb)\n h, l, s = colorsys.rgb_to_hls(*rgb)\n return tuple(map(lambda x: x * 255.0, [h, l, s]))\n\ndef hls_to_rgb(hls):\n hls = map(lambda x: x / 255.0, hls)\n r, g, b = colorsys.hls_to_rgb(*hls)\n return tuple(map(lambda x: int(x * 255.0), [r, g, b]))\n\ndef modify_hls(rgb, h=None, l=None, s=None):\n hls = list(rgb_to_hls(rgb))\n mods = [h, l, s]\n for i, mod in enumerate(mods):\n if mod:\n old = hls[i]\n hls[i] = max(min(mod(hls[i]), 255), 0)\n return hls_to_rgb(hls)\n\ndef rgb_to_hsv(rgb):\n rgb = map(lambda x: x / 255.0, rgb)\n h, s, v = colorsys.rgb_to_hsv(*rgb)\n return tuple(map(lambda x: x * 255.0, [h, s, v]))\n\ndef hsv_to_rgb(hsv):\n hsv = map(lambda x: x / 255.0, hsv)\n r, g, b = colorsys.hsv_to_rgb(*hsv)\n return tuple(map(lambda x: int(x * 255.0), [r, g, b]))\n\ndef modify_hsv(rgb, h=None, s=None, v=None):\n hsv = list(rgb_to_hsv(rgb))\n mods = [h, s, v]\n for i, mod in enumerate(mods):\n if mod:\n old = hsv[i]\n hsv[i] = max(min(mod(hsv[i]), 255), 0)\n return hsv_to_rgb(hsv)\n\ndef spread_colours_by_lightness(rgbs, min_lightness=50, max_lightness=220):\n lightnesses = map(lambda rgb: rgb_to_hls(rgb)[1], rgbs)\n bottom, top = min(lightnesses), max(lightnesses)\n scale = float((max_lightness - min_lightness)) / (top - bottom)\n\n def modify_lightness(l):\n l = l - bottom\n l = l * scale\n l = l + min_lightness\n return l\n\n for i, rgb in enumerate(rgbs):\n rgbs[i] = modify_hls(rgb, l=modify_lightness)\n\n return rgbs\n" ]
[ [ "scipy.misc.fromimage", "numpy.array" ] ]
aouedions11/SSFL-Benchmarking-Semi-supervised-Federated-Learning
[ "78aec81919bf95ed4677d0e0a4ebbbe3be455742" ]
[ "dataset/randaugment.py" ]
[ "import logging\nimport random\n\nimport numpy as np\nimport PIL\nimport PIL.ImageOps\nimport PIL.ImageEnhance\nimport PIL.ImageDraw\nfrom PIL import Image\n\nlogger = logging.getLogger(__name__)\n\nPARAMETER_MAX = 10\n\n\ndef AutoContrast(img, **kwarg):\n return PIL.ImageOps.autocontrast(img)\n\n\ndef Brightness(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n return PIL.ImageEnhance.Brightness(img).enhance(v)\n\n\ndef Color(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n return PIL.ImageEnhance.Color(img).enhance(v)\n\n\ndef Contrast(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n return PIL.ImageEnhance.Contrast(img).enhance(v)\n\n\ndef Cutout(img, v, max_v, bias=0):\n if v == 0:\n return img\n v = _float_parameter(v, max_v) + bias\n v = int(v * min(img.size))\n return CutoutAbs(img, v)\n\n\ndef CutoutAbs(img, v, **kwarg):\n w, h = img.size\n x0 = np.random.uniform(0, w)\n y0 = np.random.uniform(0, h)\n x0 = int(max(0, x0 - v / 2.))\n y0 = int(max(0, y0 - v / 2.))\n x1 = int(min(w, x0 + v))\n y1 = int(min(h, y0 + v))\n xy = (x0, y0, x1, y1)\n\n color = (127, 127, 127)\n img = img.copy()\n\n if w == 32:\n PIL.ImageDraw.Draw(img).rectangle(xy, color)\n else:\n color = (np.max(img)//2,)\n PIL.ImageDraw.Draw(img).rectangle(xy, color)\n\n return img\n\n\ndef Equalize(img, **kwarg):\n return PIL.ImageOps.equalize(img)\n\n\ndef Identity(img, **kwarg):\n return img\n\n\ndef Invert(img, **kwarg):\n return PIL.ImageOps.invert(img)\n\n\ndef Posterize(img, v, max_v, bias=0):\n v = _int_parameter(v, max_v) + bias\n return PIL.ImageOps.posterize(img, v)\n\n\ndef Rotate(img, v, max_v, bias=0):\n v = _int_parameter(v, max_v) + bias\n if random.random() < 0.5:\n v = -v\n return img.rotate(v)\n\n\ndef Sharpness(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n return PIL.ImageEnhance.Sharpness(img).enhance(v)\n\n\ndef ShearX(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n if random.random() < 0.5:\n v = -v\n return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))\n\n\ndef ShearY(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n if random.random() < 0.5:\n v = -v\n return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))\n\n\ndef Solarize(img, v, max_v, bias=0):\n v = _int_parameter(v, max_v) + bias\n return PIL.ImageOps.solarize(img, 256 - v)\n\n\ndef SolarizeAdd(img, v, max_v, bias=0, threshold=128):\n v = _int_parameter(v, max_v) + bias\n if random.random() < 0.5:\n v = -v\n img_np = np.array(img).astype(np.int)\n img_np = img_np + v\n img_np = np.clip(img_np, 0, 255)\n img_np = img_np.astype(np.uint8)\n img = Image.fromarray(img_np)\n return PIL.ImageOps.solarize(img, threshold)\n\n\ndef TranslateX(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n if random.random() < 0.5:\n v = -v\n v = int(v * img.size[0])\n return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))\n\n\ndef TranslateY(img, v, max_v, bias=0):\n v = _float_parameter(v, max_v) + bias\n if random.random() < 0.5:\n v = -v\n v = int(v * img.size[1])\n return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))\n\n\ndef _float_parameter(v, max_v):\n return float(v) * max_v / PARAMETER_MAX\n\n\ndef _int_parameter(v, max_v):\n return int(v * max_v / PARAMETER_MAX)\n\n\ndef fixmatch_augment_pool():\n # FixMatch paper\n augs = [(AutoContrast, None, None),\n (Brightness, 0.9, 0.05),\n (Color, 0.9, 0.05),\n (Contrast, 0.9, 0.05),\n (Equalize, None, None),\n (Identity, None, None),\n 
(Posterize, 4, 4),\n (Rotate, 30, 0),\n (Sharpness, 0.9, 0.05),\n (ShearX, 0.3, 0),\n (ShearY, 0.3, 0),\n (Solarize, 256, 0),\n (TranslateX, 0.3, 0),\n (TranslateY, 0.3, 0)]\n return augs\n\n\ndef my_augment_pool():\n # Test\n augs = [(AutoContrast, None, None),\n (Brightness, 1.8, 0.1),\n (Color, 1.8, 0.1),\n (Contrast, 1.8, 0.1),\n (Cutout, 0.2, 0),\n (Equalize, None, None),\n (Invert, None, None),\n (Posterize, 4, 4),\n (Rotate, 30, 0),\n (Sharpness, 1.8, 0.1),\n (ShearX, 0.3, 0),\n (ShearY, 0.3, 0),\n (Solarize, 256, 0),\n (SolarizeAdd, 110, 0),\n (TranslateX, 0.45, 0),\n (TranslateY, 0.45, 0)]\n return augs\n\n\nclass RandAugmentPC(object):\n def __init__(self, n, m):\n assert n >= 1\n assert 1 <= m <= 10\n self.n = n\n self.m = m\n self.augment_pool = my_augment_pool()\n\n def __call__(self, img):\n ops = random.choices(self.augment_pool, k=self.n)\n for op, max_v, bias in ops:\n prob = np.random.uniform(0.2, 0.8)\n if random.random() + prob >= 1:\n img = op(img, v=self.m, max_v=max_v, bias=bias)\n w, h = img.size\n if w == 32:\n img = CutoutAbs(img, 16)\n else:\n img = CutoutAbs(img, 14)\n return img\n\n\nclass RandAugmentMC(object):\n def __init__(self, n, m):\n assert n >= 1\n assert 1 <= m <= 10\n self.n = n\n self.m = m\n self.augment_pool = fixmatch_augment_pool()\n\n def __call__(self, img):\n ops = random.choices(self.augment_pool, k=self.n)\n for op, max_v, bias in ops:\n v = np.random.randint(1, self.m)\n if random.random() < 0.5:\n img = op(img, v=v, max_v=max_v, bias=bias)\n w, h = img.size\n if w == 32:\n img = CutoutAbs(img, 16)\n else:\n img = CutoutAbs(img, 7)\n return img\n" ]
[ [ "numpy.max", "numpy.array", "numpy.random.uniform", "numpy.random.randint", "numpy.clip" ] ]
w121211/agents
[ "2b23bd25ea9dccd759e754962621bad63b57a351" ]
[ "tf_agents/bandits/policies/policy_utilities_test.py" ]
[ "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.bandits.policies.policy_utilities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\n\nfrom tf_agents.bandits.policies import policy_utilities\nfrom tf_agents.utils import test_utils\nfrom tensorflow.python.framework import test_util # pylint:disable=g-direct-tensorflow-import # TF internal\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass PolicyUtilitiesTest(test_utils.TestCase, parameterized.TestCase):\n\n @parameterized.parameters(\n dict(\n input_tensor=[[4, 8, 2, -3], [0, 5, -234, 64]],\n mask=[[1, 0, 0, 1], [0, 1, 1, 1]],\n expected=[0, 3]),\n dict(\n input_tensor=[[3, 0.2, -3.3], [987, -2.5, 64], [0, 0, 0], [4, 3, 8]],\n mask=[[1, 0, 0], [1, 0, 1], [1, 1, 1], [0, 1, 1]],\n expected=[0, 0, 0, 2]),\n dict(input_tensor=[[1, 2]], mask=[[1, 0]], expected=[0]))\n def testMaskedArgmax(self, input_tensor, mask, expected):\n actual = policy_utilities.masked_argmax(\n tf.constant(input_tensor, dtype=tf.float32), tf.constant(mask))\n self.assertAllEqual(actual, expected)\n\n def testBadMask(self):\n input_tensor = tf.reshape(tf.range(12, dtype=tf.float32), shape=[3, 4])\n mask = [[1, 0, 0, 1], [0, 0, 0, 0], [1, 0, 1, 1]]\n with self.assertRaises(tf.errors.InvalidArgumentError):\n self.evaluate(\n policy_utilities.masked_argmax(input_tensor, tf.constant(mask)))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.constant", "tensorflow.range", "tensorflow.test.main" ] ]
mdanilevicz/maize_early_yield_prediction
[ "e1090e6555a544a13bec19c974d628efccbcbeca" ]
[ "multimodal_model.py" ]
[ "# +\n# Import the libraries\nfrom fastai.vision.all import *\nimport fastai\nfrom fastai.tabular.all import *\nfrom fastai.data.load import _FakeLoader, _loaders\nfrom glob import glob\nimport torch\nimport pandas as pd\nimport numpy as np\nimport os\n\n# Custom functions\nfrom msi_utils import *\nfrom fold_utils import * \nfrom multimodal_utisl import *\n# -\n\n\nglobal glb_tab_logits\ndef get_tab_logits(self, inp, out):\n global glb_tab_logits\n glb_tab_logits = inp\n\nglobal glb_vis_logits\ndef get_vis_logits(self, inp, out):\n global glb_vis_logits\n glb_vis_logits = inp\n\n\nclass TabVis(nn.Module):\n # Modify the architecture here if you want more or less layers at the fusion module\n def __init__(self, tab_model, vis_model, num_classes=1): \n super(TabVis, self).__init__()\n self.tab_model = tab_model\n self.vis_model = vis_model\n \n # Add the fusion module\n self.mixed_reg = nn.Sequential(nn.Linear(612,612),\n nn.ReLU(inplace=True),\n nn.Linear(612, num_classes))\n \n # receive the weights from tab and spectral modules\n self.tab_reg = nn.Linear(100, num_classes)\n self.vis_reg = nn.Linear(512, num_classes)\n \n # register hook that will grab the module's weights\n self.tab_handle = self.tab_model.layers[2][0].register_forward_hook(get_tab_logits)\n self.vis_handle = self.vis_model[11].register_forward_hook(get_vis_logits)\n \n def remove_my_hooks(self):\n self.tab_handle.remove()\n self.vis_handle.remove()\n return None \n \n def forward(self, x_cat, x_cont, x_im):\n # Tabular Regressor\n tab_pred = self.tab_model(x_cat, x_cont) \n # Spectral Regressor\n vis_pred = self.vis_model(x_im)\n # Logits\n tab_logits = glb_tab_logits[0] # Only grabbling weights, not bias'\n vis_logits = glb_vis_logits[0] # Only grabbling weights, not bias'\n mixed = torch.cat((tab_logits, vis_logits), dim=1)\n # Mixed classifier block\n mixed_pred = self.mixed_reg(mixed) \n return (tab_pred, vis_pred, mixed_pred)\n\nclass GradientBlending(nn.Module):\n def __init__(self, tab_weight=0.0, visual_weight=0.0, tab_vis_weight=1.0, loss_scale=1.0):\n \"Expects weights for each model, the combined model, and an overall scale\"\n super(myGradientBlending, self).__init__()\n self.tab_weight = tab_weight\n self.visual_weight = visual_weight\n self.tab_vis_weight = tab_vis_weight\n self.scale = loss_scale\n \n def remove_my_hooks(self):\n self.tab_handle.remove()\n self.vis_handle.remove()\n #self.print_handle.remove()\n return None\n \n def forward(self, xb, yb):\n tab_out, visual_out, tv_out = xb\n targ = yb\n \n # Add some hook here to log the modules losses in a csv\n \"Gathers `self.loss` for each model, weighs, then sums\"\n t_loss = root_mean_squared_error(tab_out, targ) * self.scale\n v_loss = root_mean_squared_error(visual_out, targ) * self.scale\n tv_loss = root_mean_squared_error(tv_out, targ) * self.scale\n\n weighted_t_loss = t_loss * self.tab_weight\n weighted_v_loss = v_loss * self.visual_weight\n weighted_tv_loss = tv_loss * self.tab_vis_weight\n \n loss = weighted_t_loss + weighted_v_loss + weighted_tv_loss\n return loss\n\n# Metrics\ndef t_rmse(inp, targ):\n \"Compute rmse with `targ` and `pred`\"\n pred = inp[0].flatten()\n return root_mean_squared_error(*flatten_check(pred,targ))\n\ndef v_rmse(inp, targ):\n \"Compute rmse with `targ` and `pred`\"\n pred = inp[1].flatten()\n return root_mean_squared_error(*flatten_check(pred,targ))\n\ndef tv_rmse(inp, targ):\n \"Compute rmse with `targ` and `pred`\"\n pred = inp[2].flatten()\n return root_mean_squared_error(*flatten_check(pred,targ))\n\ndef 
weighted_RMSEp(inp, targ, w_t=0.333, w_v=0.333, w_tv=0.333):\n    # RMSE of the fused prediction as a percentage, normalised by the target range (max - min)\n    # NOTE: relies on a module-level DataFrame `df` with a 'Yield' column being defined elsewhere\n    delta = df['Yield'].max() - df['Yield'].min()\n    tv_inp = (inp[2].flatten())    \n    rmsep = root_mean_squared_error(*flatten_check(tv_inp,targ)) / delta  \n    return rmsep * 100\n" ]
[ [ "torch.cat" ] ]
pltrdy/encoder-agnostic-adaptation
[ "e45d157f84804696e109e5952957570fd781e9b7" ]
[ "onmt/modules/embeddings.py" ]
[ "\"\"\" Embeddings module \"\"\"\nimport math\nimport warnings\n\nimport torch\nimport torch.nn as nn\n\nfrom onmt.modules.util_class import Elementwise\n#from onmt.encoders.transformer import TransformerEncoder\nimport onmt.encoders\n#from onmt.decoders.transformer import TransformerDecoder\n\n\nclass PositionalEncoding(nn.Module):\n \"\"\"Sinusoidal positional encoding for non-recurrent neural networks.\n\n Implementation based on \"Attention Is All You Need\"\n :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`\n\n Args:\n dropout (float): dropout parameter\n dim (int): embedding size\n \"\"\"\n\n def __init__(self, dropout, dim, max_len=5000):\n if dim % 2 != 0:\n raise ValueError(\"Cannot use sin/cos positional encoding with \"\n \"odd dim (got dim={:d})\".format(dim))\n pe = torch.zeros(max_len, dim)\n position = torch.arange(0, max_len).unsqueeze(1)\n div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) *\n -(math.log(10000.0) / dim)))\n pe[:, 0::2] = torch.sin(position.float() * div_term)\n pe[:, 1::2] = torch.cos(position.float() * div_term)\n pe = pe.unsqueeze(1) # [max_len, 1, dim]\n super(PositionalEncoding, self).__init__()\n self.register_buffer('pe', pe)\n \n self.dropout = nn.Dropout(p=dropout)\n self.dim = dim\n\n def forward(self, emb, step=None, offset=None):\n \"\"\"Embed inputs.\n\n Args:\n emb (FloatTensor): Sequence of word vectors\n ``(seq_len, batch_size, self.dim)``\n step (int or NoneType): If stepwise (``seq_len = 1``), use\n the encoding for this position.\n \"\"\"\n\n if offset is not None:\n raise AssertionError\n\n emb = emb * math.sqrt(self.dim)\n\n if step is None:\n emb = emb + self.pe[:emb.size(0)]\n else:\n emb = emb + self.pe[step]\n emb = self.dropout(emb)\n return emb\n\nclass LearnedPositionalEncoding(nn.Module):\n def __init__(self, context_size, embedding_dim, dropout=0):\n super(LearnedPositionalEncoding, self).__init__()\n self.pe = nn.Embedding(context_size, embedding_dim)\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, emb, step=None, offset=None):\n \"\"\"Embed inputs.\n\n Args:\n emb (FloatTensor): Sequence of word vectors\n ``(seq_len, batch_size, self.dim)``\n step (int or NoneType): If stepwise (``seq_len = 1``), use\n the encoding for this position.\n \"\"\"\n if step is None:\n position_ids = torch.arange(0, emb.shape[0], dtype=torch.long, device=emb.device)\n else:\n position_ids = torch.arange(step, step+1, dtype=torch.long, device=emb.device)\n position_ids = position_ids.unsqueeze(1).repeat(1, emb.shape[1]) # [seq_len, batch_size]\n\n if offset is not None:\n offset = offset.unsqueeze(0) # [1, batch_size]\n position_ids += offset\n\n pe_vals = self.pe(position_ids) # [seq_len, batch_size, self.dim]\n emb = emb + pe_vals\n emb = self.dropout(emb)\n return emb\n\n\nclass Embeddings(nn.Module):\n \"\"\"Words embeddings for encoder/decoder.\n\n Additionally includes ability to add sparse input features\n based on \"Linguistic Input Features Improve Neural Machine Translation\"\n :cite:`sennrich2016linguistic`.\n\n\n .. 
mermaid::\n\n graph LR\n A[Input]\n C[Feature 1 Lookup]\n A-->B[Word Lookup]\n A-->C\n A-->D[Feature N Lookup]\n B-->E[MLP/Concat]\n C-->E\n D-->E\n E-->F[Output]\n\n Args:\n word_vec_size (int): size of the dictionary of embeddings.\n word_padding_idx (int): padding index for words in the embeddings.\n feat_padding_idx (List[int]): padding index for a list of features\n in the embeddings.\n word_vocab_size (int): size of dictionary of embeddings for words.\n feat_vocab_sizes (List[int], optional): list of size of dictionary\n of embeddings for each feature.\n position_encoding (bool): see :class:`~onmt.modules.PositionalEncoding`\n feat_merge (string): merge action for the features embeddings:\n concat, sum or mlp.\n feat_vec_exponent (float): when using `-feat_merge concat`, feature\n embedding size is N^feat_dim_exponent, where N is the\n number of values the feature takes.\n feat_vec_size (int): embedding dimension for features when using\n `-feat_merge mlp`\n dropout (float): dropout probability.\n \"\"\"\n\n def __init__(self, word_vec_size,\n word_vocab_size,\n word_padding_idx,\n position_encoding=False,\n position_encoding_learned=False,\n position_encoding_ctxsize=1024,\n feat_merge=\"concat\",\n feat_vec_exponent=0.7,\n feat_vec_size=-1,\n feat_padding_idx=[],\n feat_vocab_sizes=[],\n dropout=0,\n sparse=False,\n fix_word_vecs=False,\n GPT_representation_mode='none',\n GPT_representation_tgt=False):\n self._validate_args(feat_merge, feat_vocab_sizes, feat_vec_exponent,\n feat_vec_size, feat_padding_idx)\n\n if feat_padding_idx is None:\n feat_padding_idx = []\n self.word_padding_idx = word_padding_idx\n\n self.word_vec_size = word_vec_size\n\n # Dimensions and padding for constructing the word embedding matrix\n vocab_sizes = [word_vocab_size]\n emb_dims = [word_vec_size]\n pad_indices = [word_padding_idx]\n\n # Dimensions and padding for feature embedding matrices\n # (these have no effect if feat_vocab_sizes is empty)\n if feat_merge == 'sum':\n feat_dims = [word_vec_size] * len(feat_vocab_sizes)\n elif feat_vec_size > 0:\n feat_dims = [feat_vec_size] * len(feat_vocab_sizes)\n else:\n feat_dims = [int(vocab ** feat_vec_exponent)\n for vocab in feat_vocab_sizes]\n vocab_sizes.extend(feat_vocab_sizes)\n emb_dims.extend(feat_dims)\n pad_indices.extend(feat_padding_idx)\n\n # The embedding matrix look-up tables. The first look-up table\n # is for words. Subsequent ones are for features, if any exist.\n emb_params = zip(vocab_sizes, emb_dims, pad_indices)\n embeddings = [nn.Embedding(vocab, dim, padding_idx=pad, sparse=sparse)\n for vocab, dim, pad in emb_params]\n\n emb_luts = Elementwise(feat_merge, embeddings)\n\n # The final output size of word + feature vectors. This can vary\n # from the word vector size if and only if features are defined.\n # This is the attribute you should access if you need to know\n # how big your embeddings are going to be.\n self.embedding_size = (sum(emb_dims) if feat_merge == 'concat'\n else word_vec_size)\n\n # The sequence of operations that converts the input sequence\n # into a sequence of embeddings. At minimum this consists of\n # looking up the embeddings for each word and feature in the\n # input. 
Model parameters may require the sequence to contain\n # additional operations as well.\n super(Embeddings, self).__init__()\n self.make_embedding = nn.Sequential()\n self.make_embedding.add_module('emb_luts', emb_luts)\n\n if feat_merge == 'mlp' and len(feat_vocab_sizes) > 0:\n in_dim = sum(emb_dims)\n mlp = nn.Sequential(nn.Linear(in_dim, word_vec_size), nn.ReLU())\n self.make_embedding.add_module('mlp', mlp)\n\n self.position_encoding = position_encoding\n\n if self.position_encoding:\n if position_encoding_learned:\n pe = LearnedPositionalEncoding(position_encoding_ctxsize, self.embedding_size, dropout=dropout)\n if fix_word_vecs:\n pe.pe.weight.requires_grad = False\n else:\n pe = PositionalEncoding(dropout, self.embedding_size)\n self.make_embedding.add_module('pe', pe)\n \n if fix_word_vecs:\n self.word_lut.weight.requires_grad = False\n\n self.GPT_representation_mode = GPT_representation_mode\n self.GPT_representation_tgt = GPT_representation_tgt\n if self.GPT_representation_mode != 'none':\n gpt_dropout = 0 if self.GPT_representation_mode == 'elmo' else dropout\n if self.GPT_representation_tgt:\n self.gpt_model = onmt.decoders.TransformerDecoder(12, 768, 12, 3072, False, 'scaled-dot', gpt_dropout, gpt_dropout, None, 0, False, True, False, False)\n else:\n self.gpt_model = onmt.encoders.TransformerEncoder(12, 768, 12, 3072, gpt_dropout, gpt_dropout, None, 0, True)\n if self.GPT_representation_mode == 'elmo':\n for p in self.gpt_model.parameters():\n p.requires_grad = False\n self.elmo_scale_params = nn.Parameter(torch.ones(13))\n self.elmo_gamma_param = nn.Parameter(torch.full((1,), 1.0))\n\n\n def _validate_args(self, feat_merge, feat_vocab_sizes, feat_vec_exponent,\n feat_vec_size, feat_padding_idx):\n if feat_merge == \"sum\":\n # features must use word_vec_size\n if feat_vec_exponent != 0.7:\n warnings.warn(\"Merging with sum, but got non-default \"\n \"feat_vec_exponent. It will be unused.\")\n if feat_vec_size != -1:\n warnings.warn(\"Merging with sum, but got non-default \"\n \"feat_vec_size. It will be unused.\")\n elif feat_vec_size > 0:\n # features will use feat_vec_size\n if feat_vec_exponent != -1:\n warnings.warn(\"Not merging with sum and positive \"\n \"feat_vec_size, but got non-default \"\n \"feat_vec_exponent. 
It will be unused.\")\n else:\n if feat_vec_exponent <= 0:\n raise ValueError(\"Using feat_vec_exponent to determine \"\n \"feature vec size, but got feat_vec_exponent \"\n \"less than or equal to 0.\")\n n_feats = len(feat_vocab_sizes)\n if n_feats != len(feat_padding_idx):\n raise ValueError(\"Got unequal number of feat_vocab_sizes and \"\n \"feat_padding_idx ({:d} != {:d})\".format(\n n_feats, len(feat_padding_idx)))\n\n @property\n def word_lut(self):\n \"\"\"Word look-up table.\"\"\"\n return self.make_embedding[0][0]\n\n @property\n def emb_luts(self):\n \"\"\"Embedding look-up table.\"\"\"\n return self.make_embedding[0]\n\n def load_pretrained_vectors(self, emb_file):\n \"\"\"Load in pretrained embeddings.\n\n Args:\n emb_file (str) : path to torch serialized embeddings\n \"\"\"\n\n if emb_file:\n pretrained = torch.load(emb_file)\n pretrained_vec_size = pretrained.size(1)\n if self.word_vec_size > pretrained_vec_size:\n self.word_lut.weight.data[:, :pretrained_vec_size] = pretrained\n elif self.word_vec_size < pretrained_vec_size:\n self.word_lut.weight.data \\\n .copy_(pretrained[:, :self.word_vec_size])\n else:\n self.word_lut.weight.data.copy_(pretrained)\n\n def forward(self, source, step=None, offset=None):\n \"\"\"Computes the embeddings for words and features.\n\n Args:\n source (LongTensor): index tensor ``(len, batch, nfeat)``\n\n Returns:\n FloatTensor: Word embeddings ``(len, batch, embedding_size)``\n \"\"\"\n\n emb = source\n if self.position_encoding:\n for i, module in enumerate(self.make_embedding._modules.values()):\n if i == len(self.make_embedding._modules.values()) - 1:\n emb = module(emb, step=step, offset=offset)\n else:\n emb = module(emb)\n else:\n emb = self.make_embedding(emb)\n\n if self.GPT_representation_mode != 'none':\n if self.GPT_representation_tgt and step == 0:\n # Need to initialize cache for self attn layers\n self.gpt_model._init_cache(torch.zeros((source.shape[0], source.shape[1], 1), dtype=emb.dtype, device=emb.device))\n self.gpt_model.state['src'] = None\n\n words = source[:, :, 0].transpose(0, 1)\n w_batch, w_len = words.size()\n mask = words.data.eq(self.word_padding_idx).unsqueeze(1) # [B, 1, T]\n\n if self.GPT_representation_mode == 'elmo':\n layer_weights = nn.functional.softmax(self.elmo_scale_params, dim=0)\n elmo_representation = layer_weights[0]*emb.transpose(0, 1).contiguous()\n\n # Run the forward pass of every layer of the tranformer.\n out = emb.transpose(0, 1).contiguous()\n for layer_num, layer in enumerate(self.gpt_model.transformer_layers):\n if self.GPT_representation_tgt:\n layer_cache = self.gpt_model.state[\"cache\"][\"layer_{}\".format(layer_num)] \\\n if step is not None else None\n out, _ = layer(out, None, None, mask, layer_cache=layer_cache, step=step)\n else:\n out = layer(out, mask)\n\n if self.GPT_representation_mode == 'elmo':\n elmo_representation += layer_weights[layer_num+1]*out\n\n if self.GPT_representation_mode == 'elmo':\n emb = self.elmo_gamma_param*elmo_representation.transpose(0, 1).contiguous()\n else:\n emb = out.transpose(0, 1).contiguous()\n\n return emb\n" ]
[ [ "torch.zeros", "torch.nn.Linear", "torch.nn.Dropout", "torch.arange", "torch.nn.Sequential", "torch.ones", "torch.nn.ReLU", "torch.full", "torch.load", "torch.nn.functional.softmax", "torch.nn.Embedding" ] ]
rlvc/tensorpack
[ "5573087f5f2b38a6cb430d1de8763705f3ff73f8" ]
[ "examples/FasterRCNN/config.py" ]
[ "# -*- coding: utf-8 -*-\n# File: config.py\n\nimport numpy as np\nimport os\nimport pprint\nimport six\n\nfrom tensorpack.utils import logger\nfrom tensorpack.utils.gpu import get_num_gpu\n\n__all__ = ['config', 'finalize_configs']\n\n\nclass AttrDict():\n\n _freezed = False\n \"\"\" Avoid accidental creation of new hierarchies. \"\"\"\n\n def __getattr__(self, name):\n if self._freezed:\n raise AttributeError(name)\n if name.startswith('_'):\n # Do not mess with internals. Otherwise copy/pickle will fail\n raise AttributeError(name)\n ret = AttrDict()\n setattr(self, name, ret)\n return ret\n\n def __setattr__(self, name, value):\n if self._freezed and name not in self.__dict__:\n raise AttributeError(\n \"Config was freezed! Unknown config: {}\".format(name))\n super().__setattr__(name, value)\n\n def __str__(self):\n return pprint.pformat(self.to_dict(), indent=1, width=100, compact=True)\n\n __repr__ = __str__\n\n def to_dict(self):\n \"\"\"Convert to a nested dict. \"\"\"\n return {k: v.to_dict() if isinstance(v, AttrDict) else v\n for k, v in self.__dict__.items() if not k.startswith('_')}\n\n def from_dict(self, d):\n self.freeze(False)\n for k, v in d.items():\n self_v = getattr(self, k)\n if isinstance(self_v, AttrDict):\n self_v.from_dict(v)\n else:\n setattr(self, k, v)\n\n def update_args(self, args):\n \"\"\"Update from command line args. \"\"\"\n for cfg in args:\n keys, v = cfg.split('=', maxsplit=1)\n keylist = keys.split('.')\n\n dic = self\n for k in keylist[:-1]:\n assert k in dir(dic), \"Unknown config key: {}\".format(keys)\n dic = getattr(dic, k)\n key = keylist[-1]\n\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)\n\n def freeze(self, freezed=True):\n self._freezed = freezed\n for v in self.__dict__.values():\n if isinstance(v, AttrDict):\n v.freeze(freezed)\n\n # avoid silent bugs\n def __eq__(self, _):\n raise NotImplementedError()\n\n def __ne__(self, _):\n raise NotImplementedError()\n\n\nconfig = AttrDict()\n_C = config # short alias to avoid coding\n\n# mode flags ---------------------\n_C.TRAINER = 'replicated' # options: 'horovod', 'replicated'\n_C.MODE_MASK = True # Faster R-CNN or Mask R-CNN\n_C.MODE_FPN = True\n\n# dataset -----------------------\n_C.DATA.BASEDIR = '/path/to/your/DATA/DIR'\n# All available dataset names are defined in `dataset/coco.py:register_coco`.\n# All TRAIN dataset will be concatenated for training.\n_C.DATA.TRAIN = ('coco_train2017',) # i.e. 
trainval35k\n# Each VAL dataset will be evaluated separately (instead of concatenated)\n_C.DATA.VAL = ('coco_val2017',) # AKA minival2014\n\n# These two configs will be populated later inside `finalize_configs`.\n_C.DATA.NUM_CATEGORY = -1 # without the background class (e.g., 80 for COCO)\n_C.DATA.CLASS_NAMES = [] # NUM_CLASS (NUM_CATEGORY+1) strings, the first is \"BG\".\n\n# whether the coordinates in your registered dataset are\n# absolute pixel values in range [0, W or H] or relative values in [0, 1]\n_C.DATA.ABSOLUTE_COORD = True\n# Filter Negative Samples from dataset\n_C.DATA.FILTER_EMPTY_ANNOTATIONS = True\n# Number of data loading workers.\n# In case of horovod training, this is the number of workers per-GPU (so you may want to use a smaller number).\n# Set to 0 to disable parallel data loading\n_C.DATA.NUM_WORKERS = 10\n\n# backbone ----------------------\n_C.BACKBONE.WEIGHTS = ''\n# To train from scratch, set it to empty, and set FREEZE_AT to 0\n# To train from ImageNet pre-trained models, use the one that matches your\n# architecture from http://models.tensorpack.com under the 'FasterRCNN' section.\n# To train from an existing COCO model, use the path to that file, and change\n# the other configurations according to that model.\n\n_C.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 23, 3] # for resnet50\n# RESNET_NUM_BLOCKS = [3, 4, 23, 3] # for resnet101\n_C.BACKBONE.FREEZE_AFFINE = False # do not train affine parameters inside norm layers\n_C.BACKBONE.NORM = 'FreezeBN' # options: FreezeBN, SyncBN, GN, None\n_C.BACKBONE.FREEZE_AT = 2 # options: 0, 1, 2. How many stages in backbone to freeze (not training)\n\n# Use a base model with TF-preferred padding mode,\n# which may pad more pixels on right/bottom than top/left.\n# See https://github.com/tensorflow/tensorflow/issues/18213\n# In tensorpack model zoo, ResNet models with TF_PAD_MODE=False are marked with \"-AlignPadding\".\n# All other models under `ResNet/` in the model zoo are using TF_PAD_MODE=True.\n# Using either one should probably give the same performance.\n# We use the \"AlignPadding\" one just to be consistent with caffe2.\n_C.BACKBONE.TF_PAD_MODE = False\n_C.BACKBONE.STRIDE_1X1 = False # True for MSRA models\n\n# schedule -----------------------\n_C.TRAIN.NUM_GPUS = None # by default, will be set from code\n_C.TRAIN.WEIGHT_DECAY = 1e-4\n_C.TRAIN.BASE_LR = 1e-2 # defined for total batch size=8. Otherwise it will be adjusted automatically\n_C.TRAIN.WARMUP = 1000 # in terms of iterations. This is not affected by #GPUs\n_C.TRAIN.WARMUP_INIT_LR = 1e-5 # defined for total batch size=8. 
Otherwise it will be adjusted automatically\n_C.TRAIN.STEPS_PER_EPOCH = 500\n_C.TRAIN.STARTING_EPOCH = 1 # the first epoch to start with, useful to continue a training\n\n# LR_SCHEDULE means equivalent steps when the total batch size is 8.\n# It can be either a string like \"3x\" that refers to standard convention, or a list of int.\n# LR_SCHEDULE=3x is the same as LR_SCHEDULE=[420000, 500000, 540000], which\n# means to decrease LR at steps 420k and 500k and stop training at 540k.\n# When the total bs!=8, the actual iterations to decrease learning rate, and\n# the base learning rate are computed from BASE_LR and LR_SCHEDULE.\n# Therefore, there is *no need* to modify the config if you only change the number of GPUs.\n_C.TRAIN.LR_SCHEDULE = \"3x\" # \"1x\" schedule in detectron\n_C.TRAIN.EVAL_PERIOD = 50 # period (epochs) to run evaluation\n_C.TRAIN.CHECKPOINT_PERIOD = 20 # period (epochs) to save model\n\n# preprocessing --------------------\n# Alternative old (worse & faster) setting: 600\n_C.PREPROC.TRAIN_SHORT_EDGE_SIZE = [640, 800] # [min, max] to sample from\n_C.PREPROC.TEST_SHORT_EDGE_SIZE = 1600\n_C.PREPROC.MAX_SIZE = 1600\n# mean and std in RGB order.\n# Un-scaled version: [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n_C.PREPROC.PIXEL_MEAN = [123.675, 116.28, 103.53]\n_C.PREPROC.PIXEL_STD = [58.395, 57.12, 57.375]\n\n# anchors -------------------------\n_C.RPN.ANCHOR_STRIDE = 16\n_C.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512) # sqrtarea of the anchor box\n_C.RPN.ANCHOR_RATIOS = (0.5, 1., 2.)\n_C.RPN.POSITIVE_ANCHOR_THRESH = 0.7\n_C.RPN.NEGATIVE_ANCHOR_THRESH = 0.3\n\n# rpn training -------------------------\n_C.RPN.FG_RATIO = 0.5 # fg ratio among selected RPN anchors\n_C.RPN.BATCH_PER_IM = 256 # total (across FPN levels) number of anchors that are marked valid\n_C.RPN.MIN_SIZE = 0\n_C.RPN.PROPOSAL_NMS_THRESH = 0.7\n# Anchors which overlap with a crowd box (IOA larger than threshold) will be ignored.\n# Setting this to a value larger than 1.0 will disable the feature.\n# It is disabled by default because Detectron does not do this.\n_C.RPN.CROWD_OVERLAP_THRESH = 9.99\n_C.RPN.HEAD_DIM = 1024 # used in C4 only\n\n# RPN proposal selection -------------------------------\n# for C4\n_C.RPN.TRAIN_PRE_NMS_TOPK = 12000\n_C.RPN.TRAIN_POST_NMS_TOPK = 2000\n_C.RPN.TEST_PRE_NMS_TOPK = 6000\n_C.RPN.TEST_POST_NMS_TOPK = 1000 # if you encounter OOM in inference, set this to a smaller number\n# for FPN, #proposals per-level and #proposals after merging are (for now) the same\n# if FPN.PROPOSAL_MODE = 'Joint', these options have no effect\n_C.RPN.TRAIN_PER_LEVEL_NMS_TOPK = 2000\n_C.RPN.TEST_PER_LEVEL_NMS_TOPK = 1000\n\n# fastrcnn training ---------------------\n_C.FRCNN.BATCH_PER_IM = 512\n_C.FRCNN.BBOX_REG_WEIGHTS = [10., 10., 5., 5.] # Slightly better setting: 20, 20, 10, 10\n_C.FRCNN.FG_THRESH = 0.5\n_C.FRCNN.FG_RATIO = 0.25 # fg ratio in a ROI batch\n\n# FPN -------------------------\n_C.FPN.ANCHOR_STRIDES = (4, 8, 16, 32, 64) # strides for each FPN level. Must be the same length as ANCHOR_SIZES\n_C.FPN.PROPOSAL_MODE = 'Level' # 'Level', 'Joint'\n_C.FPN.NUM_CHANNEL = 256\n_C.FPN.NORM = 'None' # 'None', 'GN'\n# The head option is only used in FPN. 
For C4 models, the head is C5\n_C.FPN.FRCNN_HEAD_FUNC = 'fastrcnn_2fc_head'\n# choices: fastrcnn_2fc_head, fastrcnn_4conv1fc_{,gn_}head\n_C.FPN.FRCNN_CONV_HEAD_DIM = 256\n_C.FPN.FRCNN_FC_HEAD_DIM = 1024\n_C.FPN.MRCNN_HEAD_FUNC = 'maskrcnn_up4conv_head' # choices: maskrcnn_up4conv_{,gn_}head\n\n# Mask R-CNN\n_C.MRCNN.HEAD_DIM = 256\n_C.MRCNN.ACCURATE_PASTE = True # slightly more aligned results, but very slow on numpy\n\n# Cascade R-CNN, only available in FPN mode\n_C.FPN.CASCADE = True\n_C.CASCADE.IOUS = [0.5, 0.6, 0.7]\n_C.CASCADE.BBOX_REG_WEIGHTS = [[10., 10., 5., 5.], [20., 20., 10., 10.], [30., 30., 15., 15.]]\n\n# testing -----------------------\n_C.TEST.FRCNN_NMS_THRESH = 0.5\n\n# Smaller threshold value gives significantly better mAP. But we use 0.05 for consistency with Detectron.\n# mAP with 1e-4 threshold can be found at https://github.com/tensorpack/tensorpack/commit/26321ae58120af2568bdbf2269f32aa708d425a8#diff-61085c48abee915b584027e1085e1043 # noqa\n_C.TEST.RESULT_SCORE_THRESH = 1e-4\n_C.TEST.RESULT_SCORE_THRESH_VIS = 0.5 # only visualize confident results\n_C.TEST.RESULTS_PER_IM = 100\n\n_C.freeze() # avoid typo / wrong config keys\n\n\ndef finalize_configs(is_training):\n \"\"\"\n Run some sanity checks, and populate some configs from others\n \"\"\"\n _C.freeze(False) # populate new keys now\n if isinstance(_C.DATA.VAL, six.string_types): # support single string (the typical case) as well\n _C.DATA.VAL = (_C.DATA.VAL, )\n if isinstance(_C.DATA.TRAIN, six.string_types): # support single string\n _C.DATA.TRAIN = (_C.DATA.TRAIN, )\n\n # finalize dataset definitions ...\n from dataset import DatasetRegistry\n datasets = list(_C.DATA.TRAIN) + list(_C.DATA.VAL)\n _C.DATA.CLASS_NAMES = DatasetRegistry.get_metadata(datasets[0], \"class_names\")\n _C.DATA.NUM_CATEGORY = len(_C.DATA.CLASS_NAMES) - 1\n\n assert _C.BACKBONE.NORM in ['FreezeBN', 'SyncBN', 'GN', 'None'], _C.BACKBONE.NORM\n if _C.BACKBONE.NORM != 'FreezeBN':\n assert not _C.BACKBONE.FREEZE_AFFINE\n assert _C.BACKBONE.FREEZE_AT in [0, 1, 2]\n\n _C.RPN.NUM_ANCHOR = len(_C.RPN.ANCHOR_SIZES) * len(_C.RPN.ANCHOR_RATIOS)\n assert len(_C.FPN.ANCHOR_STRIDES) == len(_C.RPN.ANCHOR_SIZES)\n # image size into the backbone has to be multiple of this number\n _C.FPN.RESOLUTION_REQUIREMENT = _C.FPN.ANCHOR_STRIDES[3] # [3] because we build FPN with features r2,r3,r4,r5\n\n if _C.MODE_FPN:\n size_mult = _C.FPN.RESOLUTION_REQUIREMENT * 1.\n _C.PREPROC.MAX_SIZE = np.ceil(_C.PREPROC.MAX_SIZE / size_mult) * size_mult\n assert _C.FPN.PROPOSAL_MODE in ['Level', 'Joint']\n assert _C.FPN.FRCNN_HEAD_FUNC.endswith('_head')\n assert _C.FPN.MRCNN_HEAD_FUNC.endswith('_head')\n assert _C.FPN.NORM in ['None', 'GN']\n\n if _C.FPN.CASCADE:\n # the first threshold is the proposal sampling threshold\n assert _C.CASCADE.IOUS[0] == _C.FRCNN.FG_THRESH\n assert len(_C.CASCADE.BBOX_REG_WEIGHTS) == len(_C.CASCADE.IOUS)\n\n if is_training:\n train_scales = _C.PREPROC.TRAIN_SHORT_EDGE_SIZE\n if isinstance(train_scales, (list, tuple)) and train_scales[1] - train_scales[0] > 100:\n # don't autotune if augmentation is on\n os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'\n os.environ['TF_AUTOTUNE_THRESHOLD'] = '1'\n assert _C.TRAINER in ['horovod', 'replicated'], _C.TRAINER\n\n lr = _C.TRAIN.LR_SCHEDULE\n if isinstance(lr, six.string_types):\n if lr.endswith(\"x\"):\n LR_SCHEDULE_KITER = {\n \"{}x\".format(k):\n [180 * k - 120, 180 * k - 40, 180 * k]\n for k in range(2, 10)}\n LR_SCHEDULE_KITER[\"1x\"] = [120, 160, 180]\n _C.TRAIN.LR_SCHEDULE = [x * 1000 for x in 
LR_SCHEDULE_KITER[lr]]\n else:\n _C.TRAIN.LR_SCHEDULE = eval(lr)\n\n # setup NUM_GPUS\n if _C.TRAINER == 'horovod':\n import horovod.tensorflow as hvd\n ngpu = hvd.size()\n logger.info(\"Horovod Rank={}, Size={}, LocalRank={}\".format(\n hvd.rank(), hvd.size(), hvd.local_rank()))\n else:\n assert 'OMPI_COMM_WORLD_SIZE' not in os.environ\n ngpu = get_num_gpu()\n assert ngpu > 0, \"Has to train with GPU!\"\n assert ngpu % 8 == 0 or 8 % ngpu == 0, \"Can only train with 1,2,4 or >=8 GPUs, but found {} GPUs\".format(ngpu)\n else:\n # autotune is too slow for inference\n os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'\n ngpu = get_num_gpu()\n\n if _C.TRAIN.NUM_GPUS is None:\n _C.TRAIN.NUM_GPUS = ngpu\n else:\n if _C.TRAINER == 'horovod':\n assert _C.TRAIN.NUM_GPUS == ngpu\n else:\n assert _C.TRAIN.NUM_GPUS <= ngpu\n\n _C.freeze()\n logger.info(\"Config: ------------------------------------------\\n\" + str(_C))\n" ]
[ [ "numpy.ceil" ] ]
junkunyuan/CSAC
[ "70d918ed2fe65a0a503b56d66136032031cd67e4" ]
[ "mains/exam_align.py" ]
[ "import sys\nsys.path.append('.')\nfrom models.script.kernels import GaussianKernel\nfrom models.script.multikernel import MultipleKernelMaximumMeanDiscrepancy\nimport copy\nimport torch\nimport os\nimport os.path as osp\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nimport numpy as np\nimport datasets.dg as DGData\nimport utils.transformers as DGTransformer\nimport models.backbone as Backbone\nfrom models.fullnet import FLDGFullNet\nfrom utils.utils import load_yaml\nfrom utils.loss import KLLoss\nfrom utils.parse_params import args_parser, merge_config\nfrom utils.script import Trainer, Logger, ShareAccRecorder, ForeverDataIterator, AlignPrivateTrainer\n\n\nclass FLDG(object):\n def __init__(self, config, logger):\n self.config = config\n self.logger = logger\n self.pre_model_dir = logger.pre_model_dir\n self.outdir = logger.outdir\n self.device = config['args']['device']\n self.logger.save_config()\n self.weightprint = open(osp.join(self.outdir, 'weightprint.txt'), 'w')\n\n all_num = 0\n for cli in config['args']['cli_datas']:\n all_num += config['dataset']['nums'][cli]\n\n self.ratios = {}\n for cli in config['args']['cli_datas']:\n self.ratios[cli] = round(\n config['dataset']['nums'][cli] / all_num, 4)\n\n print(self.ratios)\n\n \"\"\"init model(clients,public)\"\"\"\n self.models = {}\n self.accrecorders = {}\n for name in self.config['args']['cli_datas']+['public']:\n backbone = Backbone.__dict__[\n config['args']['net']](pretrained=True)\n self.models[name] = FLDGFullNet(\n backbone, config['dataset']['class_num'], config['model']['bottleneck_dim']).to(self.device)\n self.accrecorders[name] = self.__init_accrecorder__(name)\n\n \"\"\"prepare dataset and dataloader\"\"\"\n self.prepare_loader(config)\n\n \"\"\"init loss function\"\"\"\n self.criterion_ce = nn.CrossEntropyLoss()\n self.criterion_kl = KLLoss()\n self.mkmmd_loss = MultipleKernelMaximumMeanDiscrepancy(\n kernels=[GaussianKernel(alpha=3**k) for k in range(-3, 2)],\n linear=False, quadratic_program=False\n )\n\n def clienttrainer(self, cli):\n if self.config['args']['fix'] == 'private':\n fix_model = self.models[cli]\n train_model = copy.deepcopy(self.models['public'])\n elif self.config['args']['fix'] == 'public':\n fix_model = self.models['public']\n train_model = self.models[cli]\n trainer = AlignPrivateTrainer(self.mkmmd_loss,\n fix_model,\n train_model,\n self.client_train_loaders[cli],\n self.config['client']['epoch'],\n self.config['client']['length'],\n self.config['optim'],\n self.criterion_ce,\n self.device,\n self.test_loaders,\n self.config['args']['mode'],\n self.config['args']['addition'],\n self.config['args']['net'])\n return trainer\n\n def train(self):\n max_test = 0\n temp_models = {}\n for epoch in range(self.config['args']['round']):\n \"\"\"train in one epoch\"\"\"\n self.epoch = epoch\n\n \"\"\"init public model\"\"\"\n weights, ratios = [], []\n for cli in self.config['args']['cli_datas']:\n if self.config['args']['fix'] == 'private':\n if epoch == 0:\n weights.append(copy.deepcopy(\n self.models[cli].state_dict()))\n else:\n weights.append(copy.deepcopy(\n temp_models[cli].state_dict()))\n elif self.config['args']['fix'] == 'public':\n weights.append(copy.deepcopy(\n self.models[cli].state_dict()))\n ratios.append(self.ratios[cli])\n\n if self.config['args']['fusion'] == 'fedavg':\n public_weight = self._fedavg_(weights, ratios)\n elif self.config['args']['fusion'] == 'ouravg':\n public_weight = self._ourfedavg_(weights)\n elif 
self.config['args']['fusion'] in ['l1', 'l2', 'cosine']: # must match the metrics handled in self.L()\n public_weight = self._weightavg_(weights)\n\n self.models['public'].load_state_dict(copy.deepcopy(public_weight))\n\n for cli in self.config['args']['cli_datas']:\n self.logger.append(\n \"{} client:{} {}\".format('*' * 50, cli, '*' * 50))\n trainer = self.clienttrainer(cli)\n trainer.train(self.logger,\n accrecorder=self.accrecorders[cli],\n outer_epoch=epoch,\n trace_acc=self.config['args']['trace_acc'],\n print_step=self.config['args']['print_step'], lam=self.config['args']['lambda'])\n if self.config['args']['fix'] == 'private':\n temp_models[cli] = trainer.model\n\n self.logger.append(\"{} public test {}\".format('*'*50, '*'*50))\n\n self.models['public'].eval()\n acc = self.test(\n self.models['public'], *list(self.config['dataset']['domains'].keys()))\n self.models['public'].train()\n max_test = max(max_test, acc[self.config['args']['test']])\n\n self.logger.append(\n 'epoch/Epoch: {:>4d}/{:>4d} acc:{}'.format(epoch, self.config['args']['round'], str(acc)))\n if self.config['args']['trace_acc']:\n self.accrecorders['public'].updata(self.epoch, acc)\n\n \"\"\"save accrecorder\"\"\"\n for k, recorder in self.accrecorders.items():\n if recorder:\n recorder.save(osp.join(self.outdir, '{}.json'.format(k)))\n\n self.logger.append(\"public max acc:{}\".format(max_test))\n\n def test(self, model, *domains):\n accs = {}\n for domain in domains:\n total = 0\n with torch.no_grad():\n correct = 0\n for it, (data, label) in enumerate(self.test_loaders[domain]):\n data, label = data.to(self.device), label.to(self.device)\n class_logit = model(data)\n _, cls_pred = class_logit[0].max(dim=1)\n correct += torch.sum(cls_pred == label.data)\n total += data.size(0)\n accs[domain] = round((float(correct)/total)*100, 4)\n return accs\n\n def pre_train(self):\n trainers = {}\n for cli in self.config['args']['cli_datas']:\n trainers[cli] = Trainer(self.models[cli],\n self.client_train_loaders[cli],\n self.config['args']['epoch'],\n self.config['args']['length'],\n self.config['optim'],\n self.criterion_ce, self.device, self.test_loaders)\n self.logger.append(\n \"{} train client {} {}\".format('*' * 50, cli, '*' * 50))\n trainers[cli].train(self.logger,\n accrecorder=self.accrecorders[cli],\n trace_acc=self.config['args']['trace_acc'],\n print_step=self.config['args']['print_step'])\n\n print(self.models[cli] == trainers[cli].model)\n print(self.models[cli].parameters())\n print(trainers[cli].model.parameters())\n torch.save(self.models[cli].state_dict(), osp.join(\n self.pre_model_dir, '{}.pt'.format(cli)))\n\n \"\"\"save accrecorder\"\"\"\n for cli in self.config['args']['cli_datas']:\n if self.accrecorders[cli]:\n self.accrecorders[cli].save(\n osp.join(self.pre_model_dir, '{}.json'.format(cli)))\n self.accrecorders[cli].reset()\n\n def run(self):\n print(\"pre-train\")\n if self.config['args']['pre_train'] or not osp.exists(osp.join(self.pre_model_dir, '{}.pt'.format(self.config['args']['cli_datas'][0]))):\n self.logger.append(\"{} pre-train {}\".format('*' * 50, '*' * 50))\n self.pre_train()\n\n self.logger.append(\"{} train {}\".format('*' * 50, '*' * 50))\n for cli in self.config['args']['cli_datas']:\n self.models[cli].load_state_dict(torch.load(\n osp.join(self.pre_model_dir, '{}.pt'.format(cli))))\n\n self.train()\n torch.save(self.models['public'].state_dict(),\n osp.join(self.outdir, 'public.pt'))\n\n def prepare_loader(self, config):\n self.test_loaders = {}\n\n tf_ge = DGTransformer.__dict__[\n 
config['args']['dataset']](config['process'])\n test_tf, train_tf = tf_ge.test_transformer(), tf_ge.train_transformer()\n data_ge = DGData.__dict__[config['args']\n ['dataset']](config, train_tf, test_tf)\n\n test_dataset = data_ge.test_dataset(config['args']['test'])\n test_loader = DataLoader(\n test_dataset, batch_size=config['args']['bs'], shuffle=False, num_workers=4, pin_memory=True, drop_last=True)\n self.test_loaders[config['args']['test']] = test_loader\n\n client_train_loaders = {}\n for cdata in config['args']['cli_datas']:\n train_dset, test_dset = data_ge.train_dataset(cdata, True)\n cli_train_loader = DataLoader(\n train_dset, batch_size=config['client']['bs'], shuffle=True, num_workers=4, pin_memory=True, drop_last=True)\n cli_test_loader = DataLoader(\n test_dset, batch_size=config['client']['bs'], shuffle=False, num_workers=4, pin_memory=True, drop_last=False)\n\n client_train_loaders[cdata] = ForeverDataIterator(cli_train_loader)\n self.test_loaders[cdata] = cli_test_loader\n\n self.client_train_loaders = client_train_loaders\n\n def __init_accrecorder__(self, name):\n if self.config['args']['trace_acc']:\n accrecorder = ShareAccRecorder(\n name, *list(self.config['dataset']['domains'].keys()))\n else:\n accrecorder = None\n return accrecorder\n\n def _ourfedavg_(self, w):\n w_avg = copy.deepcopy(w[0])\n for k in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[k] += w[i][k]\n w_avg[k] = torch.true_divide(w_avg[k], len(w))\n return w_avg\n\n def _fedavg_(self, w, ratios):\n w_avg = copy.deepcopy(w[0])\n for k in w_avg.keys():\n w_avg[k].fill_(0)\n for i in range(0, len(w)):\n if not w[i][k].size():\n w_avg[k] = w[i][k]\n else:\n w_avg[k] += (w[i][k]*ratios[i])\n\n return w_avg\n\n def _weightavg_(self, w):\n w_avg = self._ourfedavg_(w)\n\n for k in w_avg.keys():\n distance = torch.Tensor([1] * len(w))\n for i in range(len(w)):\n distance[i] = self.L(w_avg[k], w[i][k])\n if torch.sum(distance) == 0:\n coeff = distance\n coeff.fill_(0).long()\n else:\n coeff = distance/sum(distance)\n self.weightprint.write(str(k) + \" \" + str(coeff) + '\\n')\n self.weightprint.flush()\n w_avg[k].fill_(0)\n for i in range(len(w)):\n if coeff[i] == 0:\n w_avg[k] = w[i][k]\n else:\n w_avg[k] += (coeff[i]*w[i][k])\n self.weightprint.write('\\n')\n self.weightprint.flush()\n return w_avg\n\n def L(self, w1, w2):\n if self.config['args']['fusion'] == 'l2':\n return torch.sqrt(torch.sum((w1 - w2) ** 2)).data\n elif self.config['args']['fusion'] == 'l1':\n return torch.sum(torch.abs(w1-w2)).data\n elif self.config['args']['fusion'] == 'cosine':\n return torch.cosine_similarity(w1.view(1, -1), w2.view(1, -1))\n\n\ndef office8(config):\n config['args']['test'] = 'R'\n clients = ['a', 'C', 'P', 'w', \"A\", 'c']\n\n for i in range(1, len(clients) + 1):\n config['args']['cli_datas'] = clients[:i]\n logger = Logger(config)\n fldg = FLDG(config, logger)\n fldg.run()\n\n\ndef main():\n args = args_parser()\n dataset_config = load_yaml(\n osp.join('configs/datasets', args.exp_type, args.dataset + '.yaml'))\n trainer_config = load_yaml(\n osp.join('configs/trainers', args.exp_type, args.dataset + '.yaml'))\n\n config = merge_config(args, dataset_config, trainer_config)\n all_domains = list(config['dataset']['domains'].keys())\n config['args']['dataset'] = config['args']['dataset'].replace('-', '_')\n\n if config['args']['dataset'] == 'office-8':\n office8(config)\n return None\n\n # config['args']['main_name'] = 'new lab'\n # config['args']['outroot'] = 'log1'\n # config['args']['lambda'] = 0.6\n # 
config['args']['fusion'] = 'l2'\n # config['args']['addition'] = 'bone'\n # config['args']['mode'] = 'alpha'\n\n if config['args']['test'] == '*':\n for test in all_domains:\n config['args']['test'] = test\n config['args']['cli_datas'] = [\n item for item in all_domains if item != test]\n logger = Logger(config)\n\n fldg = FLDG(config, logger)\n fldg.run()\n else:\n test = config['args']['test']\n config['args']['cli_datas'] = [\n item for item in all_domains if item != test]\n logger = Logger(config)\n\n fldg = FLDG(config, logger)\n fldg.run()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.no_grad", "torch.abs", "torch.utils.data.DataLoader", "torch.nn.CrossEntropyLoss", "torch.sum" ] ]
dahouda2pro/deep-learned-embedding
[ "a4428cf99eae86691286ec18a0656e632fbc4600" ]
[ "MLP_model.py" ]
[ "\"\"\"\r\nRegression is a statistical process for estimating the relationship among variables,\r\noften to make predictions about some outcome\r\n\"\"\"\r\nimport time\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import metrics\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.metrics import precision_score, recall_score, f1_score\r\nfrom sklearn import model_selection\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.linear_model import LogisticRegression\r\nimport models\r\nfrom sklearn.neural_network import MLPClassifier\r\nimport joblib\r\nimport numpy as np\r\nimport pandas as pd\r\nimport warnings\r\n# \"error\", \"ignore\", \"always\", \"default\", \"module\" or \"once\"\r\nwarnings.filterwarnings('always')\r\n\r\n\r\n# How long takes The program to run\r\nstart_time = time.time()\r\nwarnings.filterwarnings('ignore', category=FutureWarning)\r\nwarnings.filterwarnings('ignore', category=DeprecationWarning)\r\n\r\ntr_features = models.all_X_train2\r\ntr_labels = models.y_train\r\nte_features = models.all_X_test2\r\nte_labels = models.y_test\r\n\r\nprint(\"--------- Check Data Shapes--------\")\r\nprint(tr_features.shape, tr_labels.shape, te_features.shape, te_labels.shape)\r\n# print(tr_features.head())\r\n# print(tr_labels.head())\r\n\r\n# Logistic Regression with Hyperparameters tuning\r\n\r\n\r\ndef print_results(results):\r\n print('BEST PARAMETERS: {}\\n'.format(results.best_params_))\r\n means = results.cv_results_['mean_test_score']\r\n stds = results.cv_results_['std_test_score']\r\n for mean, std, params in zip(means, stds, results.cv_results_['params']):\r\n print('{} (+/-{}) for {}'.format(round(mean, 2), round(std * 2, 2), params))\r\n\r\n\r\nprint(\"------MULTI-LAYER PERCEPTRON-------------\")\r\n\r\nmlp = MLPClassifier()\r\nparameters = {\r\n 'hidden_layer_sizes': [(10,), (50,), (100,)],\r\n 'activation': ['relu', 'tanh', 'logistic'],\r\n 'learning_rate': ['constant', 'invscaling', 'adaptive']\r\n}\r\n\r\ncv3 = GridSearchCV(mlp, parameters, cv=5)\r\ncv3.fit(tr_features, tr_labels.values.ravel())\r\nprint(print_results(cv3))\r\n\r\n# Use the trained model to make predictions\r\nprint(\"Use the trained model to make predictions\")\r\ny_pred3 = cv3.predict(te_features)\r\nprint(y_pred3)\r\n\r\n# Evaluate the predictions of the model on the holdout test set\r\nprint(\"Evaluation : Multi-Layer Perceptron\")\r\nprint(\"F1 score:\", metrics.f1_score(te_labels, y_pred3,\r\n average='weighted', labels=np.unique(y_pred3)))\r\n\r\n\r\nprint(\"AUC: \", roc_auc_score(te_labels, y_pred3))\r\nprint(\"MSE: \", mean_squared_error(te_labels, y_pred3))\r\n\r\nprint(\" \")\r\nprint(\" \")\r\n\r\n# Check the best parameter\r\n\r\nprint(\"Check the best parameter and Save the Model as a Picked File\")\r\nprint(cv3.best_estimator_)\r\n\r\n\r\n# Write out the picked model\r\njoblib.dump(cv3.best_estimator_, 'MLP_model.pkl')\r\n\r\n\r\nprint(\"---------------------------------------------------------\")\r\nprint(\"-- Running Time : %s seconds \" % (time.time() - start_time))\r\nprint(\"---------------------------------------------------------\")\r\n" ]
[ [ "sklearn.metrics.mean_squared_error", "sklearn.neural_network.MLPClassifier", "sklearn.model_selection.GridSearchCV", "sklearn.metrics.roc_auc_score", "numpy.unique" ] ]
onecalfman/opencast-downloader
[ "ebf3ae55ef70fc4c95de02d0288f8a1069d51631" ]
[ "opencast-dl.py" ]
[ "#!/usr/bin/python3\n\nimport datetime\nimport json\nimport requests\nimport urllib\nimport os\nimport pandas\nfrom sys import exit\nfrom tqdm import tqdm\n\nlink='' # link to episodes.json filename of your OpenCast website. Probably something like\n # https://opencast-present.<your universities website>.de/search/episode.json\njson_file = '' # location for the downloaded json filename\nfilename = '' # location of the created csv (should end with .csv).\ntmp_file = '' # Temporary filename because i didn't manage to merge two filenames in python and did it with unix shell\nauto_download_file = ''\n\"\"\"\nauto download will has to be a plain text filename with the following syntax\n\nseries_id,full_path\n\nexample:\n\nb5ddb1cc-3e8e-435b-aaed-1712b5b14aed,/home/jonas/tu/fem/\n665a4876-1f24-4c59-b69e-170698d6ce34,/home/jonas/tu/messtechnik/video\n\"\"\"\n\n\nclass DownloadProgressBar(tqdm):\n def update_to(self, b=1, bsize=1, tsize=None):\n if tsize is not None:\n self.total = tsize\n self.update(b * bsize - self.n)\n\n\ndef download_url(url, output_path):\n with DownloadProgressBar(unit='B', unit_scale=True,\n miniters=1, desc=url.split('/')[-1]) as t:\n urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)\n\n#download_url(link, json_file)\n\ncounter = 1\nerrors = 0\nloop = 0\ntable = []\ntable_out = []\n\njson_data = json.load(open(json_file))\n\nfor result in json_data['search-results']['result']:\n table.append([])\n table[loop].append(str(result['mediapackage']['seriestitle']))\n table[loop].append(str(result['mediapackage']['title']))\n try:\n table[loop].append(result['dcCreator'])\n except:\n table[loop].append('')\n try:\n table[loop].append(result['dcCreated'][:10])\n except:\n table[loop].append('')\n\n table[loop].append(str(datetime.timedelta(seconds=int(str(result['mediapackage']['duration'])[:-3]))))\n table[loop].append(result['mediapackage']['series'])\n\n for q in result['mediapackage']['media']['track']:\n table[loop].append(q['tags']['tag'][0])\n\n for u in result['mediapackage']['media']['track']:\n table[loop].append(u['url'])\n\n loop += 1\n\nheader = ['Serientite', 'Titel', 'Creator', 'Datum', 'Dauer', 'Serie', '360p', '720p', '360p 2', '720p 2']\n\n# The following loop sorts the videos in the already generated matrix by quality\nfor line in table:\n order = []\n\n for cell in line:\n if '360p-quality' in cell:\n order.append(0)\n if '720p-quality' in cell:\n order.append(1)\n try:\n if len(order) == 2:\n if order[0]:\n line[6] = line.pop()\n line[7] = line.pop()\n else:\n line[7] = line.pop()\n line[6] = line.pop()\n elif order[0] and order[1]:\n line[8] = line.pop()\n line[6] = line.pop()\n line[9] = line.pop()\n line[7] = line.pop()\n elif order[0] and order[2]:\n line[8] = line.pop()\n line[9] = line.pop()\n line[6] = line.pop()\n line[7] = line.pop()\n elif order[0] and order[3]:\n line[9] = line.pop()\n line[8] = line.pop()\n line[6] = line.pop()\n line[7] = line.pop()\n elif order[1] and order[2]:\n line[8] = line.pop()\n line[9] = line.pop()\n line[7] = line.pop()\n line[6] = line.pop()\n elif order[1] and order[3]:\n line[9] = line.pop()\n line[8] = line.pop()\n line[7] = line.pop()\n line[6] = line.pop()\n elif order[2] and order[3]:\n line[9] = line.pop()\n line[7] = line.pop()\n line[8] = line.pop()\n line[6] = line.pop()\n except:\n print('Line ' + str(counter) + ' parsing problem')\n print('skipping')\n errors += 1\n counter += 1\n\nif errors:\n print('\\n' + str(errors) + ' errors')\n\ndf = pandas.DataFrame(table)\n\n# This 
part merges the newly created csv with existing csv if there is a previously downloaded csv filename\ntry:\n old = sum(1 for line in open(filename))\n df.to_csv(tmp_file, index=False, header=header)\n \n os.system('cat ' + tmp_file + ' ' + filename + ' | grep -v \\'Serientitel, Titel, Creator, Datum, Dauer, Serie, 360p, 720p, 360p 2, 720p 2\\' | sort -u > /tmp/opencast_merged.csv')\n os.system('mv /tmp/opencast_merged.csv ' + filename)\n os.system('sed -i -e \\'s/Ã?/Ü/g\\' -e \\'s/ü/ü/g\\' -e \\'s/ä/ä/g\\' -e \\'1 i\\Serientitel,Titel,Creator,Datum,Dauer,Serie,360p,720p,360p 2,720p 2\\' ' + filename)\n df.append(pandas.read_csv(filename))\n df = pandas.read_csv(filename)\nexcept:\n old = 0\n df.to_csv(filename, index=False, header=header) # you could output to other formats like xlsx, refere to pandas docs\n\nnew = len(df)\nprint(str(new - old + 1) + ' new entries')\nprint(str(new) + ' entries total')\n\n# If an auto_download filename was specified the selected series will be downloaded\ntry:\n auto_download = pandas.read_csv(auto_download_file, header=None).values.tolist()\n print(\"auto download file found\")\nexcept:\n exit(\"No auto download file found or specified\")\n\nfor line in auto_download:\n id = line[0]\n selection = df.loc[df['Serie'] == id].values.tolist()\n for row in selection:\n path = line[1] + '/' + row[1] + '.mp4'\n if not os.path.exists(path):\n try:\n print('Downloading ' + row[0] + ' ' + row[1])\n download_url(row[7], line[1] + '/' + row[1] + '.mp4')\n except:\n print('Failed ' + row[0] + ' ' + row[1]);\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
jhunkeler/pyregion
[ "7ee5fb3f4949445c484bac4d579d0d49d6fdc5e0" ]
[ "tests/test_region.py" ]
[ "import pyregion\nfrom os.path import join\n\ntry:\n from astropy.io import fits as pyfits\nexcept ImportError:\n from astropy.io import fits as pyfits\n\nimport numpy as np\n\nfrom pyregion.wcs_helper import fix_lon\n\n# At some point, pyfits.Card.fromstring has changed from unbound\n# method to bounded method.\n\nif pyfits.Card.fromstring.__self__: # \n def pyfits_card_fromstring(l):\n return pyfits.Card.fromstring(l)\nelse:\n def pyfits_card_fromstring(l):\n c = pyfits.Card()\n return c.fromstring(l)\n\nrootdir = \"examples\"\n\ndef demo_header():\n cards = pyfits.CardList()\n for l in open(join(rootdir, \"sample_fits01.header\")):\n card = pyfits_card_fromstring(l.strip())\n cards.append(card)\n h = pyfits.Header(cards)\n return h\n\ndef test_region():\n\n ref_region_name = \"test01_img.reg\"\n\n region_list = [\"test01_fk5_sexagecimal.reg\",\n \"test01_gal.reg\",\n \"test01_ds9_physical.reg\",\n \"test01_fk5_degree.reg\",\n \"test01_mixed.reg\",\n \"test01_ciao.reg\",\n \"test01_ciao_physical.reg\",\n ]\n\n header = demo_header()\n\n ref_region = pyregion.open(join(rootdir,ref_region_name)).as_imagecoord(header)\n\n for reg_name in region_list:\n r = pyregion.open(join(rootdir,reg_name)).as_imagecoord(header)\n for reg0, reg in zip(ref_region, r):\n if reg.name == \"rotbox\":\n reg.name = \"box\"\n\n assert reg0.name == reg.name\n if reg0.name in [\"ellipse\", \"box\"]:\n assert np.allclose(reg0.coord_list[:-1], reg.coord_list[:-1],\n atol=0.01)\n a0 = reg0.coord_list[-1]\n a1 = fix_lon(reg.coord_list[-1], 0)\n assert np.allclose([a0], [a1], atol=0.02)\n else:\n assert np.allclose(reg0.coord_list, reg.coord_list,\n atol=0.01)\n assert reg0.exclude == reg.exclude\n \n" ]
[ [ "numpy.allclose" ] ]
VectorInstitute/NeuralKernelBandits
[ "6ddbfa3a410b50b84d9fb72c75803347da61b4cb" ]
[ "neural_kernel_experiment.py" ]
[ "\"\"\"Copyright 2021 Michal Lisicki\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\"\"\"\n\nimport os\nimport pickle as pkl\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom absl import app, flags\n\nfrom bandits.algorithms.linear_full_posterior_sampling import (\n LinearFullPosteriorSampling)\nfrom bandits.algorithms.nk_sampling import NKBandit\nfrom bandits.algorithms.uniform_sampling import UniformSampling\nfrom bandits.core.contextual_bandit import run_contextual_bandit\nfrom bandits.data.data_sampler import (sample_adult_data, sample_census_data,\n sample_covertype_data,\n sample_jester_data, sample_mushroom_data,\n sample_statlog_data, sample_stock_data)\nfrom bandits.data.synthetic_data_sampler import sample_linear_data\n\n# Set up your file routes to the data files.\nBASE_ROUTE = os.getcwd()\nDATA_ROUTE = 'contextual_bandits/datasets'\n\n# experiment output directory\nOUTDIR = \"./outputs/\"\n\nFLAGS = flags.FLAGS\nFLAGS.set_default('alsologtostderr', True)\n\n# Hyperparameters\nflags.DEFINE_integer('seed', None, 'Random seed')\nflags.DEFINE_list(\n 'methods', ['nk-ts'], 'Methods list. Choose between: uniform '\n '/ linear / ntk-ts / ntk-ucb. You can specify multiple '\n 'methods in a list. Warning: Running multiple NKs will '\n 'result in a heavy computational load.')\nflags.DEFINE_boolean('joint', False, 'Use a joint or disjoint model')\nflags.DEFINE_boolean('normalizey', False,\n 'Normalize the targets before passing them to GP')\nflags.DEFINE_string('nkmode', 'rand_prior', 'NK GP posterior type')\nflags.DEFINE_float('nkreg', 0.2, 'NK regularizer')\nflags.DEFINE_integer('nlayers', 2, 'Number of layers in neural models')\nflags.DEFINE_float('eta', 0.1, 'Bandit exploration parameter')\nflags.DEFINE_integer('steps', 5000, 'Number of MAB steps')\nflags.DEFINE_integer('trainfreq', 1, 'Training frequency of NK bandits')\n\nflags.DEFINE_string('logdir', '/tmp/bandits/', 'Base directory to save output')\n\nflags.DEFINE_string('mushroom_data',\n os.path.join(BASE_ROUTE, DATA_ROUTE, 'mushroom.data'),\n 'Directory where Mushroom data is stored.')\nflags.DEFINE_string('financial_data',\n os.path.join(BASE_ROUTE, DATA_ROUTE, 'raw_stock_contexts'),\n 'Directory where Financial data is stored.')\nflags.DEFINE_string(\n 'jester_data',\n os.path.join(BASE_ROUTE, DATA_ROUTE, 'jester_data_40jokes_19181users.npy'),\n 'Directory where Jester data is stored.')\nflags.DEFINE_string('statlog_data',\n os.path.join(BASE_ROUTE, DATA_ROUTE, 'shuttle.trn'),\n 'Directory where Statlog data is stored.')\nflags.DEFINE_string('adult_data',\n os.path.join(BASE_ROUTE, DATA_ROUTE, 'adult.full'),\n 'Directory where Adult data is stored.')\nflags.DEFINE_string('covertype_data',\n os.path.join(BASE_ROUTE, DATA_ROUTE, 'covtype.data'),\n 'Directory where Covertype data is stored.')\nflags.DEFINE_string(\n 'census_data', os.path.join(BASE_ROUTE, DATA_ROUTE,\n 'USCensus1990.data.txt'),\n 'Directory where Census data is stored.')\n\nflags.DEFINE_integer(\"task_id\", None, \"ID of task\")\n\n\nclass HParams(dict):\n\n def 
__init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__dict__ = self\n\n\ndef sample_data(data_type, num_contexts=None):\n \"\"\"Sample data from given 'data_type'.\n\n Args:\n data_type: Dataset from which to sample.\n num_contexts: Number of contexts to sample.\n\n Returns:\n dataset: Sampled matrix with rows: (context, reward_1, ..., reward_num_act).\n opt_rewards: Vector of expected optimal reward for each context.\n opt_actions: Vector of optimal action for each context.\n num_actions: Number of available actions.\n context_dim: Dimension of each context.\n \"\"\"\n if data_type == 'linear':\n # Create linear dataset\n num_actions = 8\n context_dim = 10\n noise_stds = [0.01 * (i + 1) for i in range(num_actions)]\n dataset, _, opt_linear = sample_linear_data(num_contexts,\n context_dim,\n num_actions,\n sigma=noise_stds)\n opt_rewards, opt_actions = opt_linear\n return dataset, opt_rewards, opt_actions, num_actions, context_dim, None\n\n elif data_type == 'mushroom':\n # Create mushroom dataset\n num_actions = 2\n context_dim = 117\n file_name = FLAGS.mushroom_data\n dataset, opt_mushroom = sample_mushroom_data(file_name, num_contexts)\n opt_rewards, opt_actions = opt_mushroom\n return dataset, opt_rewards, opt_actions, num_actions, context_dim, None\n\n elif data_type == 'financial':\n num_actions = 8\n context_dim = 21\n num_contexts = min(3713, num_contexts)\n noise_stds = [0.01 * (i + 1) for i in range(num_actions)]\n file_name = FLAGS.financial_data\n dataset, opt_financial = sample_stock_data(file_name,\n context_dim,\n num_actions,\n num_contexts,\n noise_stds,\n shuffle_rows=True)\n opt_rewards, opt_actions = opt_financial\n return dataset, opt_rewards, opt_actions, num_actions, context_dim, None\n\n elif data_type == 'jester':\n num_actions = 8\n context_dim = 32\n num_contexts = min(19181, num_contexts)\n file_name = FLAGS.jester_data\n dataset, opt_jester = sample_jester_data(file_name,\n context_dim,\n num_actions,\n num_contexts,\n shuffle_rows=True,\n shuffle_cols=True)\n opt_rewards, opt_actions = opt_jester\n return dataset, opt_rewards, opt_actions, num_actions, context_dim, None\n\n elif data_type == 'statlog':\n file_name = FLAGS.statlog_data\n num_actions = 7\n num_contexts = min(43500, num_contexts)\n sampled_vals = sample_statlog_data(file_name,\n num_contexts,\n shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions) = sampled_vals\n dataset = np.hstack((contexts, rewards))\n context_dim = contexts.shape[1]\n return dataset, opt_rewards, opt_actions, num_actions, context_dim, None\n\n elif data_type == 'adult':\n file_name = FLAGS.adult_data\n num_actions = 2\n num_contexts = min(45222, num_contexts)\n sampled_vals = sample_adult_data(file_name, num_contexts, shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions) = sampled_vals\n dataset = np.hstack((contexts, rewards))\n context_dim = contexts.shape[1]\n return dataset, opt_rewards, opt_actions, num_actions, context_dim, None\n\n elif data_type == 'covertype':\n file_name = FLAGS.covertype_data\n num_actions = 7\n num_contexts = min(150000, num_contexts)\n sampled_vals = sample_covertype_data(file_name,\n num_contexts,\n shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions) = sampled_vals\n dataset = np.hstack((contexts, rewards))\n context_dim = contexts.shape[1] # 54\n return dataset, opt_rewards, opt_actions, num_actions, context_dim, None\n\n elif data_type == 'census':\n file_name = FLAGS.census_data\n num_actions = 9\n num_contexts = min(150000, 
num_contexts)\n sampled_vals = sample_census_data(file_name,\n num_contexts,\n shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions) = sampled_vals\n dataset = np.hstack((contexts, rewards))\n context_dim = contexts.shape[1]\n return dataset, opt_rewards, opt_actions, num_actions, context_dim, None\n\n\ndef display_final_results(algos, opt_rewards, opt_actions, res, name):\n \"\"\"Displays summary statistics of the performance of each algorithm.\"\"\"\n\n print('---------------------------------------------------')\n print('---------------------------------------------------')\n print('{} bandit completed.'.format(name))\n print('---------------------------------------------------')\n\n performance_triples = []\n for j, a in enumerate(algos):\n performance_triples.append((a.name, np.mean(res[j]), np.std(res[j])))\n\n performance_pairs = sorted(performance_triples,\n key=lambda elt: elt[1],\n reverse=True)\n\n for i, (name, mean_reward, std_reward) in enumerate(performance_pairs):\n print('{:3}) {:20}| \\t \\t total reward = {:10} +- {:10}.'.format(\n i, name, mean_reward, std_reward))\n\n print('---------------------------------------------------')\n print('Optimal total reward = {}.'.format(np.sum(opt_rewards)))\n print('Frequency of optimal actions (action, frequency):')\n print([[elt, list(opt_actions).count(elt)] for elt in set(opt_actions)])\n print('---------------------------------------------------')\n print('---------------------------------------------------')\n\n\ndef get_algorithm(method, num_actions, context_dim):\n if method == 'linear':\n hparams = HParams(num_actions=num_actions,\n context_dim=context_dim,\n a0=6,\n b0=6,\n lambda_prior=0.25,\n initial_pulls=3)\n algo = LinearFullPosteriorSampling('LinearTS / LinFullPost', hparams)\n\n elif method == 'uniform':\n # Uniform and Fixed\n hparams = HParams(num_actions=num_actions)\n algo = UniformSampling('Uniform Sampling', hparams)\n\n elif method == 'nk-ts':\n hparams = HParams(\n alg=\"ts\",\n joint=FLAGS.joint,\n mode=FLAGS.nkmode,\n num_actions=num_actions,\n context_dim=context_dim,\n num_layers=FLAGS.nlayers,\n gamma=FLAGS.nkreg, # diag reg\n eta=FLAGS.eta, # Exploration parameter\n training_freq=FLAGS.trainfreq)\n algo = NKBandit('NK-TS', hparams) #\n\n elif method == 'nk-ucb':\n hparams = HParams(\n alg=\"ucb\",\n joint=FLAGS.joint,\n mode=FLAGS.nkmode,\n num_actions=num_actions,\n context_dim=context_dim,\n num_layers=FLAGS.nlayers,\n gamma=FLAGS.nkreg, # diag reg\n eta=FLAGS.eta, # Exploration parameter\n training_freq=FLAGS.trainfreq)\n algo = NKBandit('NK-UCB', hparams) #\n\n else:\n raise ValueError(f\"Method name {method} is not found\")\n\n return algo\n\n\ndef experiment(methods, dataset, token):\n # Problem parameters\n num_contexts = FLAGS.steps\n data_type = dataset\n Nruns = 1\n\n # Create dataset\n sampled_vals = sample_data(data_type, num_contexts)\n dataset, opt_rewards, opt_actions, num_actions, context_dim, vocab_processor = sampled_vals\n\n os.makedirs(OUTDIR, exist_ok=True)\n\n res = np.zeros((len(methods), len(dataset)))\n totalreward = [0] * len(methods)\n rewards = [[]] * len(methods)\n\n for i_run in range(Nruns):\n\n algos = [\n get_algorithm(method, num_actions, context_dim) for method in methods\n ]\n\n results = run_contextual_bandit(context_dim, num_actions, dataset, algos)\n\n h_actions, h_rewards, optimal_actions, optimal_rewards, times = results\n\n for j, a in enumerate(algos):\n print(np.sum(h_rewards[:, j]))\n totalreward[j] += ((np.sum(h_rewards[:, j])) / Nruns)\n 
rewards[j].append((np.sum(h_rewards[:, j])))\n\n actions = [[] for i in range(len(h_actions[0]))]\n for aa in h_actions:\n for i, a in enumerate(aa):\n actions[i].append(a)\n\n for i_alg in range(len(algos)):\n res[i_alg, :] += 1 * ((actions[i_alg] != opt_actions))\n\n pkl_path = os.path.join(\n OUTDIR, \"neural_kernel_experiment_{}_{}_run{}_{}.pkl\".format(\n num_contexts, str(token), str(i_run), data_type))\n\n with open(pkl_path, \"wb\") as fp:\n # Collect experiment statistics\n pkl.dump(\n {\n 'desc': 'NK bandits experiment',\n 'seed': FLAGS.seed,\n 'times': times,\n 'models': [alg.name for alg in algos],\n 'dataset': data_type,\n 'hparams': [dict(alg.hparams) for alg in algos],\n 'flags': FLAGS.flag_values_dict(),\n 'actions': h_actions,\n 'rewards': h_rewards,\n 'opt_actions': optimal_actions,\n 'opt_rewards': optimal_rewards,\n 'opt_actions_data': opt_actions,\n 'opt_rewards_data': opt_rewards\n }, fp)\n\n print('Run number {}'.format(i_run + 1))\n display_final_results(algos, opt_rewards, opt_actions, rewards, data_type)\n\n display_final_results(algos, opt_rewards, opt_actions, rewards, data_type)\n\n\ndef main(argv):\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n token = timestr + \"_\" + str(np.random.randint(9999))\n print(token)\n\n if FLAGS.seed is not None:\n np.random.seed(FLAGS.seed)\n tf.random.set_seed(FLAGS.seed)\n\n methods = FLAGS.methods\n datasets = [\n 'financial', 'jester', 'statlog', 'adult', 'covertype', 'census',\n 'mushroom'\n ]\n\n for dataset in datasets:\n print(\"================\")\n print(dataset)\n print(\"================\")\n experiment(methods, dataset, token)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n" ]
[ [ "numpy.random.seed", "numpy.sum", "tensorflow.random.set_seed", "numpy.mean", "numpy.std", "numpy.random.randint", "numpy.hstack" ] ]
higumachan/chainer
[ "c9209a1099c9a2a5ecab2b28e1b008b19effa724" ]
[ "chainer/functions/array/select_item.py" ]
[ "import numpy\nimport six\n\nimport chainer\nfrom chainer import backend\nfrom chainer.backends import cuda\nfrom chainer import function_node\nfrom chainer.utils import type_check\n\n\nclass SelectItem(function_node.FunctionNode):\n\n \"\"\"Select elements stored in given indices.\"\"\"\n\n def check_type_forward(self, in_types):\n type_check._argname(in_types, ('x', 't'))\n\n x_type, t_type = in_types\n type_check.expect(\n t_type.dtype.kind == 'i',\n x_type.ndim == 2,\n t_type.ndim == 1,\n x_type.shape[0] == t_type.shape[0],\n )\n\n def forward(self, inputs):\n self.retain_inputs((1,))\n x, t = inputs\n self._in_shape = x.shape\n self._in_dtype = x.dtype\n if chainer.is_debug():\n if not ((0 <= t).all() and\n (t < x.shape[1]).all()):\n msg = 'Each label `t` need to satisfty `0 <= t < x.shape[1]`'\n raise ValueError(msg)\n\n xp = backend.get_array_module(x)\n if xp is numpy:\n # This code is equivalent to `t.choose(x.T)`, but `numpy.choose`\n # does not work when `x.shape[1] > 32`.\n return x[six.moves.range(t.size), t],\n else:\n y = cuda.elementwise(\n 'S t, raw T x',\n 'T y',\n 'int ind[] = {i, t}; y = x[ind];',\n 'getitem_fwd'\n )(t, x)\n return y,\n\n def backward(self, indexes, gy):\n t = self.get_retained_inputs()[0]\n ret = []\n if 0 in indexes:\n gx = Assign(self._in_shape, self._in_dtype, t).apply(gy)[0]\n ret.append(gx)\n if 1 in indexes:\n ret.append(None)\n return ret\n\n\nclass Assign(function_node.FunctionNode):\n\n def __init__(self, shape, dtype, t):\n self.shape = shape\n self.dtype = dtype\n self.t = t.data\n\n def forward_cpu(self, inputs):\n t = backend.from_chainerx(self.t) # Workaround for ChainerX.\n\n gx = numpy.zeros(self.shape, self.dtype)\n gx[six.moves.range(self.t.size), t] = inputs[0]\n return gx,\n\n def forward_gpu(self, inputs):\n t = backend.from_chainerx(self.t) # Workaround for ChainerX.\n\n gx = cuda.cupy.zeros(self.shape, self.dtype)\n gx = cuda.elementwise(\n 'S t, T gloss',\n 'raw T gx',\n 'int ind[] = {i, t}; gx[ind] = gloss;',\n 'getitem_bwd'\n )(t, inputs[0], gx)\n return gx,\n\n def backward(self, indexes, gy):\n return SelectItem().apply((gy[0], self.t))\n\n\ndef select_item(x, t):\n \"\"\"Select elements stored in given indices.\n\n This function returns ``t.choose(x.T)``, that means\n ``y[i] == x[i, t[i]]`` for all ``i``.\n\n Args:\n x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \\\n :class:`cupy.ndarray`):\n Variable storing arrays. A two-dimensional float array.\n t (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \\\n :class:`cupy.ndarray`):\n Variable storing index numbers. A one-dimensional int array.\n Length of the ``t`` should be equal to ``x.shape[0]``.\n\n Returns:\n ~chainer.Variable: Variable that holds ``t``-th element of ``x``.\n\n .. admonition:: Example\n\n >>> x = np.array([[0, 1, 2], [3, 4, 5]], np.float32)\n >>> t = np.array([0, 2], np.int32)\n >>> y = F.select_item(x, t)\n >>> y.shape\n (2,)\n >>> y.array\n array([0., 5.], dtype=float32)\n\n \"\"\"\n return SelectItem().apply((x, t))[0]\n" ]
[ [ "numpy.zeros" ] ]
dpatel257/Smart-City-Sample
[ "933c9220c37fbe4b67ec749a8e0315c803fcec66" ]
[ "analytics/entrance/custom_transforms/people_counting.py" ]
[ "import gstgva # pylint: disable=import-error\nimport numpy as np\nimport math\nimport copy\nimport json\nfrom munkres import Munkres\nimport os\n\nclass PeopleCounting:\n\n def __init__(self):\n # Array of Gallery Objects - {embeddings(numpy array), timestamp}\n self.identities = []\n self.reid_threshold = 0.7\n self.matcher = Munkres()\n self.timestamp = 0\n\n def process_frame(self, frame):\n messages = list(frame.messages())\n if len(messages) > 0:\n json_msg = json.loads(messages[0])\n json_msg[\"count\"] = {\"people\": len(self.identities)}\n self.timestamp = int(json_msg[\"timestamp\"]) / 1000000000\n frame.remove_message(messages[0])\n frame.add_message(json.dumps(json_msg))\n\n self.get_ids_by_embeddings(frame)\n return True\n\n @staticmethod\n def compute_reid_distance(test_embedding, reference_embedding):\n xx = np.dot(test_embedding, test_embedding)\n yy = np.dot(reference_embedding, reference_embedding)\n xy = np.dot(test_embedding, reference_embedding)\n norm = math.sqrt(xx * yy) + 1e-6\n return np.float32(1.0) - xy / norm\n\n def get_ids_by_embeddings(self, frame):\n detected_tensors = []\n detection_ids = []\n detections = [x for x in frame.regions()]\n for i, detection in enumerate(detections):\n if detection.label() == \"person\":\n for j, tensor in enumerate(detection.tensors()):\n if tensor.name() == \"face_feature\" and tensor.format() == \"cosine_distance\":\n detected_tensors.append(tensor.data())\n detection_ids.append(i)\n\n if len(detected_tensors) == 0:\n return\n if len(self.identities) == 0:\n for i in range(len(detected_tensors)):\n self.identities.append({\"embedding\": copy.deepcopy(\n detected_tensors[i]), \"timestamp\": self.timestamp})\n return\n distances = np.empty(\n [len(detected_tensors), len(self.identities)], dtype=np.float32)\n\n for i in range(len(detected_tensors)):\n for j in range(len(self.identities)):\n distances[i][j] = PeopleCounting.compute_reid_distance(\n detected_tensors[i], self.identities[j][\"embedding\"])\n\n matched_indexes = self.matcher.compute(distances.tolist())\n matched_detections = set()\n\n for match in matched_indexes:\n if distances[match[0]][match[1]] <= self.reid_threshold:\n self.identities[match[1]][\"timestamp\"] = self.timestamp\n matched_detections.add(match[0])\n\n for i in range(len(detected_tensors)):\n if i not in matched_detections:\n self.identities.append({\"embedding\": copy.deepcopy(\n detected_tensors[i]), \"timestamp\": self.timestamp})\n\n n = len(self.identities)\n i = n - 1\n while i >= 0:\n # overdue if pass the last 5 seconds\n if int(self.timestamp - int(self.identities[i][\"timestamp\"])) > 5:\n self.identities[i] = self.identities[n - 1]\n self.identities.pop(n - 1)\n n -= 1\n i -= 1\n" ]
[ [ "numpy.float32", "numpy.dot" ] ]
Amanda-Barbara/PaddleSeg
[ "a7de36a5fae96011f5b188987670274101b8ede1" ]
[ "contrib/PP-HumanSeg/deploy/infer.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport codecs\nimport os\nimport time\n\nimport yaml\nimport numpy as np\nimport cv2\nimport paddle\nimport paddleseg.transforms as T\nfrom paddle.inference import create_predictor, PrecisionType\nfrom paddle.inference import Config as PredictConfig\nfrom paddleseg.core.infer import reverse_transform\nfrom paddleseg.cvlibs import manager\nfrom paddleseg.utils import TimeAverager\n\nfrom scripts.optic_flow_process import optic_flow_process\n\n\nclass DeployConfig:\n def __init__(self, path):\n with codecs.open(path, 'r', 'utf-8') as file:\n self.dic = yaml.load(file, Loader=yaml.FullLoader)\n\n self._transforms = self._load_transforms(self.dic['Deploy'][\n 'transforms'])\n self._dir = os.path.dirname(path)\n\n @property\n def transforms(self):\n return self._transforms\n\n @property\n def model(self):\n return os.path.join(self._dir, self.dic['Deploy']['model'])\n\n @property\n def params(self):\n return os.path.join(self._dir, self.dic['Deploy']['params'])\n\n def _load_transforms(self, t_list):\n com = manager.TRANSFORMS\n transforms = []\n for t in t_list:\n ctype = t.pop('type')\n transforms.append(com[ctype](**t))\n\n return transforms\n\n\nclass Predictor:\n def __init__(self, args):\n self.cfg = DeployConfig(args.cfg)\n self.args = args\n self.compose = T.Compose(self.cfg.transforms)\n resize_h, resize_w = args.input_shape\n\n self.disflow = cv2.DISOpticalFlow_create(\n cv2.DISOPTICAL_FLOW_PRESET_ULTRAFAST)\n self.prev_gray = np.zeros((resize_h, resize_w), np.uint8)\n self.prev_cfd = np.zeros((resize_h, resize_w), np.float32)\n self.is_init = True\n\n pred_cfg = PredictConfig(self.cfg.model, self.cfg.params)\n pred_cfg.disable_glog_info()\n if self.args.use_gpu:\n pred_cfg.enable_use_gpu(100, 0)\n\n self.predictor = create_predictor(pred_cfg)\n if self.args.test_speed:\n self.cost_averager = TimeAverager()\n\n def preprocess(self, img):\n ori_shapes = []\n processed_imgs = []\n processed_img = self.compose(img)[0]\n processed_imgs.append(processed_img)\n ori_shapes.append(img.shape)\n return processed_imgs, ori_shapes\n\n def run(self, img, bg):\n input_names = self.predictor.get_input_names()\n input_handle = self.predictor.get_input_handle(input_names[0])\n\n processed_imgs, ori_shapes = self.preprocess(img)\n\n data = np.array(processed_imgs)\n input_handle.reshape(data.shape)\n input_handle.copy_from_cpu(data)\n if self.args.test_speed:\n start = time.time()\n\n self.predictor.run()\n\n if self.args.test_speed:\n self.cost_averager.record(time.time() - start)\n output_names = self.predictor.get_output_names()\n output_handle = self.predictor.get_output_handle(output_names[0])\n output = output_handle.copy_to_cpu()\n return self.postprocess(output, img, ori_shapes[0], bg)\n\n def postprocess(self, pred, img, ori_shape, bg):\n if not os.path.exists(self.args.save_dir):\n os.makedirs(self.args.save_dir)\n resize_w = pred.shape[-1]\n resize_h = pred.shape[-2]\n if 
self.args.soft_predict:\n if self.args.use_optic_flow:\n score_map = pred[:, 1, :, :].squeeze(0)\n score_map = 255 * score_map\n cur_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n cur_gray = cv2.resize(cur_gray, (resize_w, resize_h))\n optflow_map = optic_flow_process(cur_gray, score_map, self.prev_gray, self.prev_cfd, \\\n self.disflow, self.is_init)\n self.prev_gray = cur_gray.copy()\n self.prev_cfd = optflow_map.copy()\n self.is_init = False\n\n score_map = np.repeat(optflow_map[:, :, np.newaxis], 3, axis=2)\n score_map = np.transpose(score_map, [2, 0, 1])[np.newaxis, ...]\n score_map = reverse_transform(\n paddle.to_tensor(score_map),\n ori_shape,\n self.cfg.transforms,\n mode='bilinear')\n alpha = np.transpose(score_map.numpy().squeeze(0),\n [1, 2, 0]) / 255\n else:\n score_map = pred[:, 1, :, :]\n score_map = score_map[np.newaxis, ...]\n score_map = reverse_transform(\n paddle.to_tensor(score_map),\n ori_shape,\n self.cfg.transforms,\n mode='bilinear')\n alpha = np.transpose(score_map.numpy().squeeze(0), [1, 2, 0])\n\n else:\n if pred.ndim == 3:\n pred = pred[:, np.newaxis, ...]\n result = reverse_transform(\n paddle.to_tensor(\n pred, dtype='float32'),\n ori_shape,\n self.cfg.transforms,\n mode='bilinear')\n\n result = np.array(result)\n if self.args.add_argmax:\n result = np.argmax(result, axis=1)\n else:\n result = result.squeeze(1)\n alpha = np.transpose(result, [1, 2, 0])\n\n # background replace\n h, w, _ = img.shape\n bg = cv2.resize(bg, (w, h))\n if bg.ndim == 2:\n bg = bg[..., np.newaxis]\n\n comb = (alpha * img + (1 - alpha) * bg).astype(np.uint8)\n return comb\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.transpose", "numpy.argmax", "numpy.repeat" ] ]
jonashaag/kartothek
[ "906d6831f3d9d246c3231850831c632b51f12b05" ]
[ "tests/io/dask/dataframe/test_update.py" ]
[ "# -*- coding: utf-8 -*-\n# pylint: disable=E1101\n\nimport pickle\n\nimport dask\nimport dask.dataframe as dd\nimport numpy as np\nimport pandas as pd\nimport pandas.util.testing as pdt\nimport pytest\n\nfrom kartothek.core.factory import DatasetFactory\nfrom kartothek.io.dask._update import _KTK_HASH_BUCKET, _hash_bucket\nfrom kartothek.io.dask.dataframe import update_dataset_from_ddf\nfrom kartothek.io.iter import read_dataset_as_dataframes__iterator\nfrom kartothek.io.testing.update import * # noqa\n\n\[email protected](\"col\", [\"range\", \"range_duplicated\", \"random\"])\ndef test_hash_bucket(col, num_buckets=5):\n df = pd.DataFrame(\n {\n \"range\": np.arange(10),\n \"range_duplicated\": np.repeat(np.arange(2), 5),\n \"random\": np.random.randint(0, 100, 10),\n }\n )\n hashed = _hash_bucket(df, [col], num_buckets)\n assert (hashed.groupby(col).agg({_KTK_HASH_BUCKET: \"nunique\"}) == 1).all().all()\n\n # Check that hashing is consistent for small dataframe sizes (where df.col.nunique() < num_buckets)\n df_sample = df.iloc[[0, 7]]\n hashed_sample = _hash_bucket(df_sample, [col], num_buckets)\n expected = hashed.loc[df_sample.index]\n pdt.assert_frame_equal(expected, hashed_sample)\n\n\ndef test_hashing_determinism():\n \"\"\"Make sure that the hashing algorithm used by pandas is independent of any context variables\"\"\"\n df = pd.DataFrame({\"range\": np.arange(10)})\n hashed = _hash_bucket(df, [\"range\"], 5)\n expected = pd.DataFrame(\n {\n \"range\": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n _KTK_HASH_BUCKET: np.uint8([0, 0, 1, 2, 0, 3, 2, 0, 1, 4]),\n }\n )\n pdt.assert_frame_equal(hashed, expected)\n\n\[email protected]\ndef bound_update_dataset():\n return _update_dataset\n\n\ndef _unwrap_partition(part):\n return next(iter(dict(part[\"data\"]).values()))\n\n\ndef _update_dataset(partitions, *args, **kwargs):\n if any(partitions):\n table_name = next(iter(dict(partitions[0][\"data\"]).keys()))\n delayed_partitions = [\n dask.delayed(_unwrap_partition)(part) for part in partitions\n ]\n partitions = dd.from_delayed(delayed_partitions)\n else:\n table_name = \"core\"\n partitions = None\n ddf = update_dataset_from_ddf(partitions, *args, table=table_name, **kwargs)\n\n s = pickle.dumps(ddf, pickle.HIGHEST_PROTOCOL)\n ddf = pickle.loads(s)\n\n return ddf.compute()\n\n\ndef _return_none():\n return None\n\n\[email protected](\"unique_primaries\", [1, 4])\[email protected](\"unique_secondaries\", [1, 3])\[email protected](\"num_buckets\", [1, 5])\[email protected](\"repartition\", [1, 2])\[email protected](\"npartitions\", [5, 10])\[email protected](\"bucket_by\", [None, \"sorted_column\"])\ndef test_update_shuffle_buckets(\n store_factory,\n metadata_version,\n unique_primaries,\n unique_secondaries,\n num_buckets,\n repartition,\n npartitions,\n bucket_by,\n):\n \"\"\"\n Assert that certain properties are always given for the output dataset\n no matter how the input data distribution looks like\n\n Properties to assert:\n * All partitions have a unique value for its correspondent primary key\n * number of partitions is at least one per unique partition value, at\n most ``num_buckets`` per primary partition value.\n * If we demand a column to be sorted it is per partition monotonic\n \"\"\"\n primaries = np.arange(unique_primaries)\n secondary = np.arange(unique_secondaries)\n num_rows = 100\n primaries = np.repeat(primaries, np.ceil(num_rows / unique_primaries))[:num_rows]\n secondary = np.repeat(secondary, np.ceil(num_rows / unique_secondaries))[:num_rows]\n # ensure that there is an 
unsorted column uncorrelated\n # to the primary and secondary columns which can be sorted later on per partition\n unsorted_column = np.repeat(np.arange(100 / 10), 10)\n np.random.shuffle(unsorted_column)\n np.random.shuffle(primaries)\n np.random.shuffle(secondary)\n\n df = pd.DataFrame(\n {\"primary\": primaries, \"secondary\": secondary, \"sorted_column\": unsorted_column}\n )\n secondary_indices = [\"secondary\"]\n expected_num_indices = 2 # One primary\n\n # used for tests later on to\n if bucket_by:\n secondary_indices.append(bucket_by)\n expected_num_indices = 3\n\n # shuffle all rows. properties of result should be reproducible\n df = df.sample(frac=1).reset_index(drop=True)\n ddf = dd.from_pandas(df, npartitions=npartitions)\n\n dataset_comp = update_dataset_from_ddf(\n ddf,\n store_factory,\n dataset_uuid=\"output_dataset_uuid\",\n table=\"core\",\n secondary_indices=secondary_indices,\n shuffle=True,\n bucket_by=bucket_by,\n repartition_ratio=repartition,\n num_buckets=num_buckets,\n sort_partitions_by=\"sorted_column\",\n default_metadata_version=metadata_version,\n partition_on=[\"primary\"],\n )\n\n s = pickle.dumps(dataset_comp, pickle.HIGHEST_PROTOCOL)\n dataset_comp = pickle.loads(s)\n\n dataset = dataset_comp.compute()\n dataset = dataset.load_all_indices(store_factory())\n\n assert len(dataset.partitions) <= num_buckets * unique_primaries\n assert len(dataset.partitions) >= unique_primaries\n\n assert len(dataset.indices) == expected_num_indices\n\n assert set(dataset.indices[\"primary\"].index_dct.keys()) == set(\n range(unique_primaries)\n )\n assert (\n list(map(lambda x: len(x), dataset.indices[\"primary\"].index_dct.values()))\n <= [num_buckets] * unique_primaries\n )\n\n assert set(dataset.indices[\"secondary\"].index_dct.keys()) == set(\n range(unique_secondaries)\n )\n\n factory = DatasetFactory(\"output_dataset_uuid\", store_factory)\n factory.load_all_indices()\n\n if bucket_by:\n ind_df = factory.get_indices_as_dataframe([\"primary\", bucket_by])\n\n assert not ind_df.duplicated().any()\n\n for data_dct in read_dataset_as_dataframes__iterator(\n dataset_uuid=dataset.uuid, store=store_factory\n ):\n df = data_dct[\"core\"]\n assert len(df.primary.unique()) == 1\n assert df.sorted_column.is_monotonic\n\n # update the dataset\n # do not use partition_on since it should be interfered from the existing dataset\n tasks = update_dataset_from_ddf(\n ddf,\n store_factory,\n dataset_uuid=\"output_dataset_uuid\",\n table=\"core\",\n shuffle=True,\n repartition_ratio=repartition,\n num_buckets=num_buckets,\n sort_partitions_by=\"sorted_column\",\n default_metadata_version=metadata_version,\n bucket_by=bucket_by,\n )\n\n s = pickle.dumps(tasks, pickle.HIGHEST_PROTOCOL)\n tasks = pickle.loads(s)\n\n updated_dataset = tasks.compute()\n\n assert len(updated_dataset.partitions) == 2 * len(dataset.partitions)\n\n # Not allowed to use different partition_on\n with pytest.raises(\n ValueError, match=\"Incompatible set of partition keys encountered.\"\n ):\n update_dataset_from_ddf(\n ddf,\n store_factory,\n dataset_uuid=\"output_dataset_uuid\",\n table=\"core\",\n shuffle=True,\n repartition_ratio=repartition,\n partition_on=[\"sorted_column\"],\n num_buckets=num_buckets,\n sort_partitions_by=\"sorted_column\",\n default_metadata_version=metadata_version,\n )\n\n # Not allowed to update with indices which do not yet exist in dataset\n with pytest.raises(ValueError, match=\"indices\"):\n update_dataset_from_ddf(\n ddf,\n store_factory,\n dataset_uuid=\"output_dataset_uuid\",\n 
table=\"core\",\n shuffle=True,\n partition_on=[\"primary\"],\n repartition_ratio=repartition,\n secondary_indices=[\"sorted_column\"],\n num_buckets=num_buckets,\n sort_partitions_by=\"sorted_column\",\n default_metadata_version=metadata_version,\n )\n\n # Check that delayed objects are allowed as delete scope.\n tasks = update_dataset_from_ddf(\n None,\n store_factory,\n dataset_uuid=\"output_dataset_uuid\",\n table=\"core\",\n shuffle=True,\n repartition_ratio=repartition,\n num_buckets=num_buckets,\n sort_partitions_by=\"sorted_column\",\n default_metadata_version=metadata_version,\n delete_scope=dask.delayed(_return_none)(),\n bucket_by=bucket_by,\n )\n\n s = pickle.dumps(tasks, pickle.HIGHEST_PROTOCOL)\n tasks = pickle.loads(s)\n\n tasks.compute()\n\n\[email protected](\"shuffle\", [True, False])\ndef test_update_dataset_from_ddf_empty(store_factory, shuffle):\n with pytest.raises(ValueError, match=\"Cannot store empty datasets\"):\n update_dataset_from_ddf(\n dask.dataframe.from_delayed([], meta=((\"a\", int),)),\n store_factory,\n dataset_uuid=\"output_dataset_uuid\",\n table=\"core\",\n shuffle=shuffle,\n partition_on=[\"a\"],\n ).compute()\n" ]
[ [ "numpy.uint8", "numpy.ceil", "pandas.util.testing.assert_frame_equal", "pandas.DataFrame", "numpy.random.shuffle", "numpy.random.randint", "numpy.arange" ] ]