repo_name | hexsha | file_path | code | apis
---|---|---|---|---
kakaobrain/bassl
|
[
"551fe94343debf60a64c787be6752284153a0f7a"
] |
[
"bassl/pretrain/utils/metric.py"
] |
[
"\"\"\"\n- kNN Precision\n\"\"\"\n\nfrom collections import defaultdict\n\nimport torch\nimport torchmetrics\n\n\nclass KnnPrecisionMetric(torchmetrics.Metric):\n def __init__(self, top_k_list):\n super().__init__(compute_on_step=False, dist_sync_on_step=True)\n self.add_state(\"feat_data\", default=[], dist_reduce_fx=None)\n self.add_state(\"vids_data\", default=[], dist_reduce_fx=None)\n self.add_state(\"scene_data\", default=[], dist_reduce_fx=None)\n self.top_k_list = set(top_k_list)\n self.max_k = max(self.top_k_list)\n\n def update(self, vid, invideo_scene_id, feat):\n assert isinstance(invideo_scene_id, torch.Tensor)\n assert isinstance(vid, torch.Tensor)\n assert isinstance(feat, torch.Tensor)\n self.feat_data.append(feat)\n self.vids_data.append(vid)\n self.scene_data.append(invideo_scene_id)\n\n def compute(self) -> torch.Tensor:\n score = defaultdict(dict)\n pool_feats = defaultdict(list)\n pool_invideo_scene_id = defaultdict(list)\n pool_gts = defaultdict(dict)\n\n num_data = 0\n for vid, invideo_scene_id, gathered_feat in zip(\n self.vids_data, self.scene_data, self.feat_data\n ):\n vid = vid.item()\n invideo_scene_id = invideo_scene_id.item()\n if invideo_scene_id not in pool_gts[vid]:\n pool_gts[vid][invideo_scene_id] = set()\n pool_gts[vid][invideo_scene_id].add(len(pool_feats[vid]))\n pool_invideo_scene_id[vid].append(invideo_scene_id)\n pool_feats[vid].append(gathered_feat)\n num_data += 1\n\n for top_k in self.top_k_list:\n score[top_k] = {\"correct\": 0, \"total\": 0}\n\n for vid, gt in pool_gts.items():\n X = torch.stack(pool_feats[vid])\n sim = torch.matmul(X, X.t())\n sim = sim - 999 * torch.eye(sim.shape[0]).type_as(sim) # exclude self\n indices = torch.argsort(sim, descending=True)\n assert indices.shape[1] >= self.max_k, f\"{indices.shape[1]} >= {self.max_k}\"\n indices = indices[:, : self.max_k]\n\n for j in range(indices.shape[0]):\n _cache = {\"correct\": 0, \"total\": 0}\n _query_scene_id = pool_invideo_scene_id[vid][j]\n for k in range(self.max_k):\n if _query_scene_id in gt:\n if indices[j][k].item() in gt[_query_scene_id]:\n _cache[\"correct\"] += 1\n _cache[\"total\"] += 1\n if k + 1 in self.top_k_list and len(gt[_query_scene_id]) > k:\n score[k + 1][\"correct\"] += _cache[\"correct\"]\n score[k + 1][\"total\"] += _cache[\"total\"]\n\n for top_k in self.top_k_list:\n assert score[top_k][\"total\"] > 0\n score[top_k][\"precision\"] = (\n 100.0 * score[top_k][\"correct\"] / score[top_k][\"total\"]\n )\n del X, sim, indices, pool_feats, pool_invideo_scene_id, pool_gts\n torch.cuda.empty_cache()\n return score\n"
] |
[
[
"torch.stack",
"torch.argsort",
"torch.eye",
"torch.cuda.empty_cache"
]
] |
PolymerGuy/AXITOM
|
[
"7682be5b21fa933b9bea4082fe9a830076431feb"
] |
[
"axitom/phantoms.py"
] |
[
"import numpy as np\n\n\"\"\" Phantoms\n\nThis module contains the phantoms that can be used for forward projection and virtual experiments\n\n\"\"\"\n\ndef barrel(domain_size=128, outer_rad_fraction=0.7,center_val=None):\n \"\"\" Barrel shaped phantom with a linear density gradient\n The domain size is cubic with dimension \"domain_size\" along each axis\n\n Parameters\n ----------\n domain_size : int\n The length of the sides of the domain\n outer_rad_fraction : float\n The diameter of the barrel given as a the fraction of the side length\n center_val : float\n The density value in the center of the barrel\n\n Returns\n -------\n ndarray\n The phantom\n \"\"\"\n\n center = domain_size / 2.\n domain = np.zeros((domain_size, domain_size, domain_size), dtype=np.float64)\n xs, ys = np.meshgrid(np.arange(domain_size), np.arange(domain_size))\n xs = xs - center\n ys = ys - center\n r = np.sqrt(xs ** 2. + ys ** 2.)\n domain[r < outer_rad_fraction * center, :] = 1.\n if center_val is not None:\n domain = domain * (center_val + (r / (outer_rad_fraction * center)) ** 2. * 0.5)[:, :, np.newaxis]\n return domain\n\n\n\n"
] |
[
[
"numpy.arange",
"numpy.zeros",
"numpy.sqrt"
]
] |
seanliu96/R-Net
|
[
"8462330451079a2ff67cd431fe30a57a6ca3d802"
] |
[
"util.py"
] |
[
"import tensorflow as tf\nimport numpy as np\nimport re\nfrom collections import Counter\nimport string\n\n\ndef get_record_parser(config, is_test=False):\n def parse(example):\n para_limit = config.test_para_limit if is_test else config.para_limit\n ques_limit = config.test_ques_limit if is_test else config.ques_limit\n char_limit = config.char_limit\n features = tf.parse_single_example(example,\n features={\n \"context_idxs\": tf.FixedLenFeature([], tf.string),\n \"ques_idxs\": tf.FixedLenFeature([], tf.string),\n \"context_char_idxs\": tf.FixedLenFeature([], tf.string),\n \"ques_char_idxs\": tf.FixedLenFeature([], tf.string),\n \"y1\": tf.FixedLenFeature([], tf.string),\n \"y2\": tf.FixedLenFeature([], tf.string),\n \"id\": tf.FixedLenFeature([], tf.int64)\n })\n context_idxs = tf.reshape(tf.decode_raw(\n features[\"context_idxs\"], tf.int32), [para_limit])\n ques_idxs = tf.reshape(tf.decode_raw(\n features[\"ques_idxs\"], tf.int32), [ques_limit])\n context_char_idxs = tf.reshape(tf.decode_raw(\n features[\"context_char_idxs\"], tf.int32), [para_limit, char_limit])\n ques_char_idxs = tf.reshape(tf.decode_raw(\n features[\"ques_char_idxs\"], tf.int32), [ques_limit, char_limit])\n y1 = tf.reshape(tf.decode_raw(\n features[\"y1\"], tf.float32), [para_limit])\n y2 = tf.reshape(tf.decode_raw(\n features[\"y2\"], tf.float32), [para_limit])\n qa_id = features[\"id\"]\n return context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id\n return parse\n\n\ndef get_batch_dataset(record_file, parser, config):\n \"\"\"\n Read a file and construct batches\n \"\"\"\n num_threads = tf.constant(config.num_threads, dtype=tf.int32)\n dataset = tf.data.TFRecordDataset(record_file).map(\n parser, num_parallel_calls=num_threads).shuffle(config.capacity).repeat()\n if config.is_bucket:\n buckets = [tf.constant(num) for num in range(*config.bucket_range)]\n\n def key_func(context_idxs, ques_idxs, context_char_idxs, ques_char_idxs, y1, y2, qa_id):\n c_len = tf.reduce_sum(\n tf.cast(tf.cast(context_idxs, tf.bool), tf.int32))\n buckets_min = [np.iinfo(np.int32).min] + buckets\n buckets_max = buckets + [np.iinfo(np.int32).max]\n conditions_c = tf.logical_and(\n tf.less(buckets_min, c_len), tf.less_equal(c_len, buckets_max))\n bucket_id = tf.reduce_min(tf.where(conditions_c))\n return bucket_id\n\n def reduce_func(key, elements):\n return elements.batch(config.batch_size)\n\n dataset = dataset.apply(tf.contrib.data.group_by_window(\n key_func, reduce_func, window_size=5 * config.batch_size)).shuffle(len(buckets) * 25)\n else:\n dataset = dataset.batch(config.batch_size)\n return dataset\n\n\ndef get_dataset(record_file, parser, config):\n num_threads = tf.constant(config.num_threads, dtype=tf.int32)\n dataset = tf.data.TFRecordDataset(record_file).map(\n parser, num_parallel_calls=num_threads).repeat().batch(config.batch_size)\n return dataset\n\n\ndef convert_tokens(eval_file, qa_id, pp1, pp2):\n answer_dict = {}\n remapped_dict = {}\n for qid, p1, p2 in zip(qa_id, pp1, pp2):\n context = eval_file[str(qid)][\"context\"]\n spans = eval_file[str(qid)][\"spans\"]\n uuid = eval_file[str(qid)][\"uuid\"]\n start_idx = spans[p1][0]\n end_idx = spans[p2][1]\n answer_dict[str(qid)] = context[start_idx: end_idx]\n remapped_dict[uuid] = context[start_idx: end_idx]\n return answer_dict, remapped_dict\n\n\ndef evaluate(eval_file, answer_dict):\n f1 = exact_match = total = 0\n for key, value in answer_dict.items():\n total += 1\n ground_truths = eval_file[key][\"answers\"]\n prediction = value\n exact_match += 
metric_max_over_ground_truths(\n exact_match_score, prediction, ground_truths)\n f1 += metric_max_over_ground_truths(f1_score,\n prediction, ground_truths)\n exact_match = 100.0 * exact_match / total\n f1 = 100.0 * f1 / total\n return {'exact_match': exact_match, 'f1': f1}\n\n\ndef normalize_answer(s):\n\n def remove_articles(text):\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\n\n def white_space_fix(text):\n return ' '.join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return ''.join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))\n\n\ndef f1_score(prediction, ground_truth):\n prediction_tokens = normalize_answer(prediction).split()\n ground_truth_tokens = normalize_answer(ground_truth).split()\n common = Counter(prediction_tokens) & Counter(ground_truth_tokens)\n num_same = sum(common.values())\n if num_same == 0:\n return 0\n precision = 1.0 * num_same / len(prediction_tokens)\n recall = 1.0 * num_same / len(ground_truth_tokens)\n f1 = (2 * precision * recall) / (precision + recall)\n return f1\n\n\ndef exact_match_score(prediction, ground_truth):\n return (normalize_answer(prediction) == normalize_answer(ground_truth))\n\n\ndef metric_max_over_ground_truths(metric_fn, prediction, ground_truths):\n scores_for_ground_truths = []\n for ground_truth in ground_truths:\n score = metric_fn(prediction, ground_truth)\n scores_for_ground_truths.append(score)\n return max(scores_for_ground_truths)\n"
] |
[
[
"tensorflow.constant",
"tensorflow.FixedLenFeature",
"tensorflow.less",
"tensorflow.data.TFRecordDataset",
"tensorflow.decode_raw",
"tensorflow.less_equal",
"tensorflow.cast",
"numpy.iinfo",
"tensorflow.where",
"tensorflow.contrib.data.group_by_window"
]
] |
Praneethvvs/CircleCi_FastApi
|
[
"0aec14fcffcfe7053cf7db688728347feea26f70"
] |
[
"selenium_pipeline/hyatt_hotels_fetch_addresses.py"
] |
[
"import time\n\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nimport traceback\nimport itertools\n\nfrom selenium.webdriver.common.keys import Keys\n\nDRIVER_PATH = r\"C:\\Program Files (x86)\\chromedriver.exe\"\n\n\nclass Address_Scraping():\n\n def __init__(self):\n self.chrome_driver = webdriver.Chrome(DRIVER_PATH)\n\n def get_hyperlinks(self):\n\n self.chrome_driver.get(\"https://www.hyatt.com/explore-hotels\")\n try:\n # The WebDriverWait method waits until it locates the presence of the element\"\n WebDriverWait(self.chrome_driver, 20).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"countries.b-ph0\")))\n us_add = self.chrome_driver.find_element_by_xpath(\n \"//ul[@class='countries b-ph0']//li[@data-js-country='United States']\")\n links = us_add.find_elements_by_tag_name('a')\n hyperlinks = [link_field.get_attribute(\"href\") for link_field in links]\n return hyperlinks\n\n\n except:\n print(\"error\")\n traceback.print_exc()\n time.sleep(2)\n # chrome_driver.quit()\n\n def fetch_addresses_to_df(self):\n links_list = self.get_hyperlinks()\n # assert links_list != []\n results_list = []\n error_links_list = []\n\n for index, link in enumerate(links_list, start=1):\n if index == 5:\n break\n try:\n print(\"passing through link ------------>\", link)\n self.chrome_driver.get(link)\n address_div = self.chrome_driver.find_elements_by_xpath(\n \"//div[@class='site-info-container b-mt2 b-mb2 b-mt0@sm b-mb0@sm']//a[@class='site-info-address b-d-inline-block b-d-flex@lg b-d-inline-block@xl b-mb2@sm b-mb1@md b-mr2']//span[@class='b-d-inline-block']\")\n\n phone_num_div = self.chrome_driver.find_element_by_xpath(\n \"//div[@class='site-info-container b-mt2 b-mb2 b-mt0@sm b-mb0@sm']//a[@class='site-info-phone b-d-inline-block b-d-block@lg b-mb1@sm b-mr2']//span[@class='hover-border b-d-none b-d-inline@lg']\")\n\n address = \"\".join(map(lambda x: x.text, address_div))\n phone_number = \", \" + phone_num_div.text\n # self.chrome_driver.find_element_by_partial_link_text(\"Hoover, Alabama, United States, 35244\").click()\n # time.sleep(3)\n # self.chrome_driver.close()\n # get_url = self.chrome_driver.current_url\n # print(get_url)\n # exit()\n combined_output = \"\".join([address, phone_number])\n results_list.append(combined_output.split(\",\"))\n\n except:\n traceback.print_exc()\n error_links_list.append(link)\n\n final_df = pd.DataFrame(results_list, columns=[\"street\", \"city\", \"state\", \"country\", \"zip\", \"phone_number\"],\n index=None)\n final_df.to_excel(\"hyatt_hotels.xlsx\", index=False)\n\n\nif __name__ == \"__main__\":\n Address_Scraping().fetch_addresses_to_df()\n"
] |
[
[
"pandas.DataFrame"
]
] |
joshuabuildsthings/GamestonkTerminal
|
[
"385d12803ae1725a22b0a440c3b88bffa974edcd"
] |
[
"openbb_terminal/stocks/discovery/fidelity_view.py"
] |
[
"\"\"\" Fidelity View \"\"\"\n__docformat__ = \"numpy\"\n\nimport logging\nimport os\nimport re\n\nimport pandas as pd\n\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.helper_funcs import export_data, print_rich_table\nfrom openbb_terminal.stocks.discovery import fidelity_model\nfrom openbb_terminal import rich_config\n\nlogger = logging.getLogger(__name__)\n\n\ndef lambda_buy_sell_ratio_color_red_green(val: str) -> str:\n \"\"\"Add color tags to the Buys/Sells ratio cell\n\n Parameters\n ----------\n val : str\n Buys/Sells ratio cell\n\n Returns\n -------\n str\n Buys/Sells ratio cell with color tags\n \"\"\"\n\n buy_sell_match = re.match(r\"(\\d+)% Buys, (\\d+)% Sells\", val, re.M | re.I)\n\n if not buy_sell_match:\n return val\n\n buys = int(buy_sell_match.group(1))\n sells = int(buy_sell_match.group(2))\n\n if buys >= sells:\n return f\"[green]{buys}%[/green] Buys, {sells}% Sells\"\n\n return f\"{buys}% Buys, [red]{sells}%[/red] Sells\"\n\n\ndef lambda_price_change_color_red_green(val: str) -> str:\n \"\"\"Add color tags to the price change cell\n\n Parameters\n ----------\n val : str\n Price change cell\n\n Returns\n -------\n str\n Price change cell with color tags\n \"\"\"\n\n val_float = float(val.split(\" \")[0])\n if val_float > 0:\n return f\"[green]{val}[/green]\"\n return f\"[red]{val}[/red]\"\n\n\n@log_start_end(log=logger)\ndef orders_view(num: int, export: str):\n \"\"\"Prints last N orders by Fidelity customers. [Source: Fidelity]\n\n Parameters\n ----------\n num: int\n Number of stocks to display\n export : str\n Export dataframe data to csv,json,xlsx file\n \"\"\"\n order_header, df_orders = fidelity_model.get_orders()\n\n pd.set_option(\"display.max_colwidth\", None)\n\n if rich_config.USE_COLOR:\n df_orders[\"Buy / Sell Ratio\"] = df_orders[\"Buy / Sell Ratio\"].apply(\n lambda_buy_sell_ratio_color_red_green\n )\n df_orders[\"Price Change\"] = df_orders[\"Price Change\"].apply(\n lambda_price_change_color_red_green\n )\n\n df_orders = df_orders.head(n=num).iloc[:, :-1]\n\n print_rich_table(\n df_orders,\n headers=[x.title() for x in df_orders.columns],\n show_index=False,\n title=f\"{order_header}:\",\n )\n\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n \"ford\",\n df_orders,\n )\n"
] |
[
[
"pandas.set_option"
]
] |
weaselers/candy_cane_contest
|
[
"1d619529cd8640c20b534ec9a3f6d5f786bb78aa"
] |
[
"pull_vegas_slot_machine_v9.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport random, os, datetime, math\nfrom random import shuffle\nfrom collections import OrderedDict\nfrom collections import defaultdict\n\n\ntotal_reward = 0\nbandit_dict = {}\n\n\ndef set_seed(my_seed=42):\n os.environ[\"PYTHONHASHSEED\"] = str(my_seed)\n random.seed(my_seed)\n np.random.seed(my_seed)\n\ndef get_next_bandit(exception=None):\n '''\n Choose best next bandit\n '''\n\n # init best bandit number and expectation score\n best_bandit = 0\n best_bandit_expected = 0\n\n # shuffle bandit_dict to not explore bandits in order \n b = list(bandit_dict.items())\n shuffle(b)\n a = OrderedDict(b)\n for bnd in dict(a):\n expect = (\n (\n bandit_dict[bnd][\"win\"] # from nb of win\n - bandit_dict[bnd][\"loss\"] # remove nb of loss\n + (bandit_dict[bnd][\"loss\"] > 0)\n + bandit_dict[bnd][\"opp\"] # add nb of opponant pull\n - (bandit_dict[bnd][\"opp\"] > 0) * 1.5 # minus a bonus if opponant did pulled\n + bandit_dict[bnd][\"op_continue\"] # add nb of times opponant continued to pull\n )\n / ( # divided by\n bandit_dict[bnd][\"win\"] # nb of win\n + bandit_dict[bnd][\"loss\"] # plus number of loss\n + bandit_dict[bnd][\"opp\"] # nb of times opponant used it \n ) # times\n * math.pow(\n 0.97, # decay to the power of\n bandit_dict[bnd][\"win\"] \n + bandit_dict[bnd][\"loss\"]\n + bandit_dict[bnd][\"opp\"], # total number of pull on this bandit\n )\n )\n if expect > best_bandit_expected:\n if bnd != exception:\n best_bandit_expected = expect\n best_bandit = bnd\n return best_bandit\n\ndef get_a_virgin_bandit():\n '''\n return a bandit never explored by me or opponant\n '''\n l = list(bandit_dict.items())\n random.shuffle(l)\n d = dict(l)\n for bnd in d:\n if (d[bnd][\"win\"] == 1) and (\n d[bnd][\"loss\"] == 0) and (\n d[bnd][\"opp\"] == 0):\n return bnd\n\ndef is_still_virgin_bandit_present():\n '''\n return a bandit never explored by me or opponant\n '''\n count_virgin_bandit = 0\n for bnd in bandit_dict:\n if (bandit_dict[bnd][\"win\"] == 1) and (\n bandit_dict[bnd][\"loss\"] == 0) and (\n bandit_dict[bnd][\"opp\"] == 0):\n count_virgin_bandit += 1\n if count_virgin_bandit > 0:\n return 1\n else:\n return 0\n \n\n\nmy_action_list = []\nop_action_list = []\n\nop_continue_cnt_dict = defaultdict(int)\n\n\ndef multi_armed_probabilities(observation, configuration):\n global total_reward, bandit_dict\n\n # initialise randomly\n my_pull = random.randrange(configuration[\"banditCount\"])\n\n # first step: initialise bandit_dict with default values\n if 0 == observation[\"step\"]:\n set_seed()\n total_reward = 0\n bandit_dict = {}\n for i in range(configuration[\"banditCount\"]):\n bandit_dict[i] = {\n \"win\": 1,\n \"loss\": 0,\n \"opp\": 0,\n \"my_continue\": 0,\n \"op_continue\": 0,\n }\n \n else:\n # update total reward (starting at 0)\n last_reward = observation[\"reward\"] - total_reward\n total_reward = observation[\"reward\"]\n\n # update (last) action lists\n my_idx = observation[\"agentIndex\"]\n my_last_action = observation[\"lastActions\"][my_idx]\n op_last_action = observation[\"lastActions\"][1 - my_idx]\n my_action_list.append(my_last_action)\n op_action_list.append(op_last_action)\n\n # update bandit dict\n if 0 < last_reward:\n # update nb of wining if won on last used bandit\n bandit_dict[my_last_action][\"win\"] = bandit_dict[my_last_action][\"win\"] + 1\n else:\n # update nb of loss if lost on last used bandit\n bandit_dict[my_last_action][\"loss\"] = (\n bandit_dict[my_last_action][\"loss\"] + 1\n )\n # update opponant action count on 
bandit\n bandit_dict[op_last_action][\"opp\"] = bandit_dict[op_last_action][\"opp\"] + 1\n\n # if we played for more than 3 times since started\n if observation[\"step\"] >= 3:\n if my_action_list[-1] == my_action_list[-2]:\n # update 'my_continue' since I played the same bandit two times in a row\n bandit_dict[my_last_action][\"my_continue\"] += 1\n else:\n bandit_dict[my_last_action][\"my_continue\"] = 0\n if op_action_list[-1] == op_action_list[-2]:\n # update 'op_continue' since opponant played the same bandit two times in a row\n bandit_dict[op_last_action][\"op_continue\"] += 1\n else:\n bandit_dict[op_last_action][\"op_continue\"] = 0\n\n # if we played less than 4 times since started\n if observation[\"step\"] < 4:\n return get_a_virgin_bandit()\n\n if (observation[\"step\"] < 100) and (op_action_list[-1] != op_action_list[-2]):\n if is_still_virgin_bandit_present() == 1:\n return get_a_virgin_bandit()\n\n # if opponant stays on same bandit 2 times in a row\n if (op_action_list[-1] == op_action_list[-2]):\n # if I wasn't on his bandit \n if my_action_list[-1] != op_action_list[-1]:\n # I go there\n my_pull = op_action_list[-1]\n # else if I was there\n elif my_action_list[-1] == op_action_list[-1]:\n # if I just won\n if last_reward > 0:\n my_pull = my_last_action\n else:\n my_pull = get_next_bandit()\n\n # else if I won\n elif last_reward > 0:\n my_pull = get_next_bandit(my_action_list[-1])\n \n else:\n # if I was winning 3 times in a row but I lost last time \n if (my_action_list[-1] == my_action_list[-2]) and (\n my_action_list[-1] == my_action_list[-3]\n ):\n # then I choose 50/50 if I continue\n if random.random() < 0.5:\n # random tell me to stay on the same bandit\n my_pull = my_action_list[-1]\n else:\n # I choose another one\n my_pull = get_next_bandit()\n # As I wasn't on the same bandit 3 times in a row, I move\n else:\n my_pull = get_next_bandit()\n\n return my_pull\n"
] |
[
[
"numpy.random.seed"
]
] |
nytbliang/siamattnat
|
[
"880643ee09e7e4fa6a0af9631a9a8b32dd06c94d"
] |
[
"tools/test.py"
] |
[
"# Copyright (c) SenseTime. All Rights Reserved.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport os\n\nimport cv2\nimport torch\nimport numpy as np\n\nfrom pysot.core.config import cfg\nfrom pysot.models.model_builder import ModelBuilder\nfrom pysot.tracker.tracker_builder import build_tracker\nfrom pysot.utils.bbox import get_axis_aligned_bbox\nfrom pysot.utils.model_load import load_pretrain\nfrom toolkit.datasets import DatasetFactory\nfrom toolkit.utils.region import vot_overlap, vot_float2str\n\n\nparser = argparse.ArgumentParser(description='siamrpn tracking')\nparser.add_argument('--dataset', type=str,\n help='datasets')\nparser.add_argument('--config', default='', type=str,\n help='config file')\nparser.add_argument('--snapshot', default='', type=str,\n help='snapshot of models to eval')\nparser.add_argument('--video', default='', type=str,\n help='eval one special video')\nparser.add_argument('--vis', action='store_true',\n help='whether visualzie result')\nargs = parser.parse_args()\n\ntorch.set_num_threads(1)\n\ndef main():\n # load config\n cfg.merge_from_file(args.config)\n\n cur_dir = os.path.dirname(os.path.realpath(__file__))\n dataset_root = os.path.join(cur_dir, '../testing_dataset', args.dataset)\n\n # create model\n model = ModelBuilder()\n\n # load model\n model = load_pretrain(model, args.snapshot).cuda().eval()\n\n # build tracker\n tracker = build_tracker(model)\n\n # create dataset\n dataset = DatasetFactory.create_dataset(name=args.dataset,\n dataset_root=dataset_root,\n load_img=False)\n\n model_name = args.snapshot.split('/')[-1].split('.')[0]\n total_lost = 0\n if args.dataset in ['VOT2016', 'VOT2018', 'VOT2019']:\n # restart tracking\n for v_idx, video in enumerate(dataset):\n if args.video != '':\n # test one special video\n if video.name != args.video:\n continue\n frame_counter = 0\n lost_number = 0\n toc = 0\n pred_bboxes = []\n for idx, (img, gt_bbox) in enumerate(video):\n if len(gt_bbox) == 4:\n gt_bbox = [gt_bbox[0], gt_bbox[1],\n gt_bbox[0], gt_bbox[1]+gt_bbox[3]-1,\n gt_bbox[0]+gt_bbox[2]-1, gt_bbox[1]+gt_bbox[3]-1,\n gt_bbox[0]+gt_bbox[2]-1, gt_bbox[1]]\n tic = cv2.getTickCount()\n if idx == frame_counter:\n cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))\n gt_bbox_ = [cx-(w-1)/2, cy-(h-1)/2, w, h]\n tracker.init(img, gt_bbox_)\n pred_bbox = gt_bbox_\n pred_bboxes.append(1)\n elif idx > frame_counter:\n outputs = tracker.track(img)\n pred_bbox = outputs['bbox']\n if cfg.MASK.MASK:\n pred_bbox = outputs['polygon']\n overlap = vot_overlap(pred_bbox, gt_bbox, (img.shape[1], img.shape[0]))\n if overlap > 0:\n # not lost\n pred_bboxes.append(pred_bbox)\n else:\n # lost object\n pred_bboxes.append(2)\n frame_counter = idx + 5 # skip 5 frames\n lost_number += 1\n else:\n pred_bboxes.append(0)\n toc += cv2.getTickCount() - tic\n if idx == 0:\n cv2.destroyAllWindows()\n if args.vis and idx > frame_counter:\n cv2.polylines(img, [np.array(gt_bbox, np.int).reshape((-1, 1, 2))],\n True, (0, 255, 0), 3)\n if cfg.MASK.MASK:\n cv2.polylines(img, [np.array(pred_bbox, np.int).reshape((-1, 1, 2))],\n True, (0, 255, 255), 3)\n else:\n bbox = list(map(int, pred_bbox))\n cv2.rectangle(img, (bbox[0], bbox[1]),\n (bbox[0]+bbox[2], bbox[1]+bbox[3]), (0, 255, 255), 3)\n cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)\n cv2.putText(img, str(lost_number), (40, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 
255), 2)\n cv2.imshow(video.name, img)\n cv2.waitKey(1)\n toc /= cv2.getTickFrequency()\n # save results\n video_path = os.path.join('results', args.dataset, model_name,\n 'baseline', video.name)\n if not os.path.isdir(video_path):\n os.makedirs(video_path)\n result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))\n with open(result_path, 'w') as f:\n for x in pred_bboxes:\n if isinstance(x, int):\n f.write(\"{:d}\\n\".format(x))\n else:\n f.write(','.join([vot_float2str(\"%.4f\", i) for i in x])+'\\n')\n print('({:3d}) Video: {:12s} Time: {:4.1f}s Speed: {:3.1f}fps Lost: {:d}'.format(\n v_idx+1, video.name, toc, idx / toc, lost_number))\n total_lost += lost_number\n print(\"{:s} total lost: {:d}\".format(model_name, total_lost))\n else:\n # OPE tracking\n for v_idx, video in enumerate(dataset):\n if args.video != '':\n # test one special video\n if video.name != args.video:\n continue\n toc = 0\n pred_bboxes = []\n scores = []\n track_times = []\n for idx, (img, gt_bbox) in enumerate(video):\n tic = cv2.getTickCount()\n if idx == 0:\n cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))\n gt_bbox_ = [cx-(w-1)/2, cy-(h-1)/2, w, h]\n tracker.init(img, gt_bbox_)\n pred_bbox = gt_bbox_\n scores.append(None)\n if 'VOT2018-LT' == args.dataset:\n pred_bboxes.append([1])\n else:\n pred_bboxes.append(pred_bbox)\n else:\n outputs = tracker.track(img)\n pred_bbox = outputs['bbox']\n pred_bboxes.append(pred_bbox)\n scores.append(outputs['best_score'])\n # print(outputs['best_score'])\n toc += cv2.getTickCount() - tic\n track_times.append((cv2.getTickCount() - tic)/cv2.getTickFrequency())\n if idx == 0:\n cv2.destroyAllWindows()\n if args.vis and idx > 0:\n gt_bbox = list(map(int, gt_bbox))\n pred_bbox = list(map(int, pred_bbox))\n cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),\n (gt_bbox[0]+gt_bbox[2], gt_bbox[1]+gt_bbox[3]), (0, 255, 0), 3)\n cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),\n (pred_bbox[0]+pred_bbox[2], pred_bbox[1]+pred_bbox[3]), (0, 255, 255), 3)\n cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)\n cv2.imshow(video.name, img)\n cv2.waitKey(1)\n toc /= cv2.getTickFrequency()\n # save results\n if 'VOT2018-LT' == args.dataset:\n video_path = os.path.join('results', args.dataset, model_name,\n 'longterm', video.name)\n if not os.path.isdir(video_path):\n os.makedirs(video_path)\n result_path = os.path.join(video_path,\n '{}_001.txt'.format(video.name))\n with open(result_path, 'w') as f:\n for x in pred_bboxes:\n f.write(','.join([str(i) for i in x])+'\\n')\n result_path = os.path.join(video_path,\n '{}_001_confidence.value'.format(video.name))\n with open(result_path, 'w') as f:\n for x in scores:\n f.write('\\n') if x is None else f.write(\"{:.6f}\\n\".format(x))\n result_path = os.path.join(video_path,\n '{}_time.txt'.format(video.name))\n with open(result_path, 'w') as f:\n for x in track_times:\n f.write(\"{:.6f}\\n\".format(x))\n elif 'GOT-10k' == args.dataset:\n video_path = os.path.join('results', args.dataset, model_name, video.name)\n if not os.path.isdir(video_path):\n os.makedirs(video_path)\n result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))\n with open(result_path, 'w') as f:\n for x in pred_bboxes:\n f.write(','.join([str(i) for i in x])+'\\n')\n result_path = os.path.join(video_path,\n '{}_time.txt'.format(video.name))\n with open(result_path, 'w') as f:\n for x in track_times:\n f.write(\"{:.6f}\\n\".format(x))\n else:\n model_path = os.path.join('results', args.dataset, model_name)\n if not 
os.path.isdir(model_path):\n os.makedirs(model_path)\n result_path = os.path.join(model_path, '{}.txt'.format(video.name))\n with open(result_path, 'w') as f:\n for x in pred_bboxes:\n f.write(','.join([str(i) for i in x])+'\\n')\n print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(\n v_idx+1, video.name, toc, idx / toc))\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.array",
"torch.set_num_threads"
]
] |
CADWRDeltaModeling/vtools3
|
[
"226bd2920c73f36dfc2f4eaedda8adccdfd1dfc3"
] |
[
"vtools/datastore/station_info.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport pandas as pd\nimport argparse\nfrom vtools.datastore import station_config\n\ndef station_info(search):\n station_lookup = station_config.config_file(\"station_dbase\")\n if search == \"config\":\n print(station_config.configuration())\n return\n #vlookup = station_config.config_file(\"variable_mappings\")\n slookup = pd.read_csv(station_lookup,sep=\",\",comment=\"#\",header=0,usecols=[\"id\",\"agency\",\n \"agency_id\",\"name\",\n \"x\",\"y\"]).squeeze()\n slookup[\"id\"] = slookup.id.str.lower()\n lsearch = search.lower()\n match_id = slookup[\"id\"].str.lower().str.contains(lsearch)\n match_name = slookup.name.str.lower().str.contains(lsearch)\n match_agency_id = slookup.agency_id.str.lower().str.contains(lsearch)\n match_agency = slookup.agency.str.lower().str.contains(lsearch)\n matches = match_id | match_name | match_agency_id | match_agency\n print(\"Matches:\")\n mlook =slookup.loc[matches,[\"id\",\"agency\",\"agency_id\",\"name\",\"x\",\"y\"]].sort_values(axis=0,by='id').set_index(\"id\") \n if mlook.shape[0] == 0: \n print(\"None\")\n else:\n print(mlook.to_string())\n return mlook\n \n \ndef create_arg_parser():\n parser = argparse.ArgumentParser(\"Lookup station metadata by partial string match on id or name\")\n parser.add_argument('--config',default=False,action =\"store_true\",help=\"Print configuration and location of lookup files\")\n parser.add_argument('searchphrase',nargs='?',default=\"\",help = 'Search phrase which can be blank if using --config')\n\n return parser \n\n\n\ndef main():\n parser = create_arg_parser()\n args = parser.parse_args()\n searchphrase = args.searchphrase\n if args.config:\n searchphrase = \"config\"\n if searchphrase is None and not args.config:\n raise ValueError(\"searchphrase required\")\n station_info(searchphrase)"
] |
[
[
"pandas.read_csv"
]
] |
adrn/gaia
|
[
"dac05003f7952af88697b271295a90bb0df091ec"
] |
[
"pyia/data.py"
] |
[
"# coding: utf-8\n\"\"\" Data structures. \"\"\"\n\n# Standard library\nimport pathlib\n\n# Third-party\nimport astropy.coordinates as coord\nfrom astropy.table import Table, Column\nfrom astropy.time import Time\nimport astropy.units as u\nimport numpy as np\n\nfrom .extinction import get_ext\nfrom .ruwetools import U0Interpolator\n\n__all__ = ['GaiaData']\n\n\n# This is from reading the data model\ngaia_unit_map = {\n 'ra': u.degree,\n 'dec': u.degree,\n 'parallax': u.milliarcsecond,\n 'pmra': u.milliarcsecond / u.year,\n 'pmdec': u.milliarcsecond / u.year,\n 'radial_velocity': u.km / u.s,\n 'ra_error': u.milliarcsecond,\n 'dec_error': u.milliarcsecond,\n 'parallax_error': u.milliarcsecond,\n 'pmra_error': u.milliarcsecond / u.year,\n 'pmdec_error': u.milliarcsecond / u.year,\n 'radial_velocity_error': u.km / u.s,\n 'astrometric_excess_noise': u.mas,\n 'astrometric_weight_al': 1/u.mas**2,\n 'astrometric_pseudo_colour': 1/u.micrometer,\n 'astrometric_pseudo_colour_error': 1/u.micrometer,\n 'astrometric_sigma5d_max': u.mas,\n 'phot_g_mean_flux': u.photon/u.s,\n 'phot_g_mean_flux_error': u.photon/u.s,\n 'phot_g_mean_mag': u.mag,\n 'phot_bp_mean_flux': u.photon/u.s,\n 'phot_bp_mean_flux_error': u.photon/u.s,\n 'phot_bp_mean_mag': u.mag,\n 'phot_rp_mean_flux': u.photon/u.s,\n 'phot_rp_mean_flux_error': u.photon/u.s,\n 'phot_rp_mean_mag': u.mag,\n 'bp_rp': u.mag,\n 'bp_g': u.mag,\n 'g_rp': u.mag,\n 'rv_template_teff': u.K,\n 'l': u.degree,\n 'b': u.degree,\n 'ecl_lon': u.degree,\n 'ecl_lat': u.degree,\n 'teff_val': u.K,\n 'teff_percentile_lower': u.K,\n 'teff_percentile_upper': u.K,\n 'a_g_val': u.mag,\n 'a_g_percentile_lower': u.mag,\n 'a_g_percentile_upper': u.mag,\n 'e_bp_min_rp_val': u.mag,\n 'e_bp_min_rp_percentile_lower': u.mag,\n 'e_bp_min_rp_percentile_upper': u.mag,\n 'radius_val': u.Rsun,\n 'radius_percentile_lower': u.Rsun,\n 'radius_percentile_upper': u.Rsun,\n 'lum_val': u.Lsun,\n 'lum_percentile_lower': u.Lsun,\n 'lum_percentile_upper': u.Lsun,\n 'ref_epoch': u.year\n}\n\nREF_EPOCH = {\n 'DR2': Time(2015.5, format='jyear'),\n 'EDR3': Time(2016.0, format='jyear')\n}\nLATEST_RELEASE = 'EDR3'\n\n\nclass GaiaData:\n \"\"\"Class for loading and interacting with data from the Gaia mission. 
This\n should work with data from any data release, i.e., DR1 gaia_source or TGAS,\n or DR2 gaia_source, or EDR3 gaia_source.\n\n Parameters\n ----------\n data : `astropy.table.Table`, `pandas.DataFrame`, dict_like, str\n This must be pre-loaded data as any of the types listed above, or a\n string filename containing a table that is readable by\n `astropy.table.Table.read`.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n\n if not isinstance(data, Table):\n if isinstance(data, (str, pathlib.Path)):\n data = Table.read(data, **kwargs)\n\n else:\n # the dict-like object might have Quantity's, so we want to\n # preserve any units\n data = Table(data, **kwargs)\n\n # HACK: make sure table isn't masked, until astropy supports masked\n # quantities\n if data.masked:\n cols = []\n for c in data.colnames:\n\n col = data[c]\n col.mask = None\n cols.append(Column(col))\n data = Table(cols, copy=False)\n\n # Create a copy of the default unit map\n self.units = gaia_unit_map.copy()\n\n # Store the source table\n self.data = data\n\n # Update the unit map with the table units\n self._invalid_units = dict()\n for c in data.colnames:\n if data[c].unit is not None:\n try:\n self.units[c] = u.Unit(str(data[c].unit))\n except ValueError:\n self._invalid_units[c] = data[c].unit\n\n # HACK: hard coded\n self._has_rv = ('radial_velocity' in self.data.colnames or\n 'dr2_radial_velocity' in self.data.colnames)\n\n # For caching later\n self._cache = dict()\n\n @classmethod\n def from_query(cls, query_str, login_info=None, verbose=False):\n \"\"\"\n Run the specified query and return a `GaiaData` instance with the\n returned data.\n\n This is meant only to be used for quick queries to the main Gaia science\n archive. For longer queries and more customized usage, use TAP access to\n any of the Gaia mirrors with, e.g., astroquery or pyvo.\n\n This requires ``astroquery`` to be installed.\n\n Parameters\n ----------\n query_str : str\n The string ADQL query to execute.\n login_info : dict, optional\n Username and password for the Gaia science archive as keys \"user\"\n and \"password\". If not specified, will use anonymous access, subject\n to the query limits.\n\n Returns\n -------\n gaiadata : `GaiaData`\n An instance of this object.\n\n \"\"\"\n try:\n from astroquery.gaia import Gaia\n except ImportError:\n raise ImportError('Failed to import astroquery. To use the '\n 'from_query() classmethod, you must first'\n ' install astroquery, e.g., with pip: '\n '\\n\\tpip install astroquery')\n\n if login_info is not None:\n Gaia.login(**login_info)\n\n job = Gaia.launch_job_async(query_str, verbose=verbose)\n tbl = job.get_results()\n\n return cls(tbl)\n\n @classmethod\n def from_source_id(cls, source_id, source_id_dr=None, data_dr=None,\n **kwargs):\n \"\"\"Retrieve data from a DR for a given Gaia source_id in a DR.\n\n Useful if you have, e.g., a DR2 source_id and want EDR3 data.\n\n Parameters\n ----------\n source_id : int\n The Gaia source_id\n source_id_dr : str, optional\n The data release slug (e.g., 'dr2' or 'edr3') for the input\n source_id. 
Defaults to the latest data release.\n data_dr : str, optional\n The data release slug (e.g., 'dr2' or 'edr3') to retrieve data from.\n Defaults to the latest data release.\n **kwargs\n Passed to ``from_query()``\n\n Returns\n -------\n gaiadata : `GaiaData`\n An instance of this object.\n \"\"\"\n\n join_tables = {\n 'dr1': {'dr2': \"gaiadr2.dr1_neighbourhood\"},\n 'dr2': {'edr3': \"gaiaedr3.dr2_neighbourhood\"},\n }\n source_id_prefixes = {\n 'dr1': 'dr1',\n 'dr2': 'dr2',\n 'edr3': 'dr3'\n }\n\n if source_id_dr is None:\n source_id_dr = LATEST_RELEASE.lower()\n\n if data_dr is None:\n data_dr = LATEST_RELEASE.lower()\n\n if source_id_dr == data_dr:\n query_str = f\"\"\"\n SELECT * FROM gaia{data_dr}.gaia_source AS gaia\n WHERE gaia.source_id = {source_id}\n \"\"\"\n return cls.from_query(query_str, **kwargs)\n\n dr1, dr2 = sorted([source_id_dr, data_dr])\n\n try:\n join_table = join_tables[dr1][dr2]\n source_id_pref = source_id_prefixes[source_id_dr]\n data_pref = source_id_prefixes[data_dr]\n except KeyError:\n raise KeyError(f\"Failed to find join table for {source_id_dr} \"\n f\"to {data_dr}\")\n\n query_str = f\"\"\"\n SELECT * FROM gaia{data_dr}.gaia_source AS gaia\n JOIN {join_table} AS old_gaia\n ON gaia.source_id = old_gaia.{data_pref}_source_id\n WHERE old_gaia.{source_id_pref}_source_id = {source_id}\n \"\"\"\n return cls.from_query(query_str, **kwargs)\n\n ##########################################################################\n # Python internal\n #\n def __getattr__(self, name):\n # to prevent recursion errors:\n # nedbatchelder.com/blog/201010/surprising_getattr_recursion.html\n if name in ['data', 'units']:\n raise AttributeError()\n\n lookup_name = name\n if name.startswith('radial_velocity'):\n # HACK: this should be more general...\n if ('radial_velocity' not in self.data.colnames\n and 'dr2_radial_velocity' in self.data.colnames):\n lookup_name = f'dr2_{name}'\n\n coldata = self.data[lookup_name]\n if hasattr(coldata, 'mask') and coldata.mask is not None:\n arr = coldata.filled(np.nan)\n else:\n arr = coldata\n arr = np.asarray(arr)\n\n if name in self.units:\n return arr * self.units[name]\n\n else:\n return arr\n\n def __setattr__(self, name, val):\n\n if name in ['data', 'units']:\n # needs to be here to catch the first time we enter this func.\n super().__setattr__(name, val)\n\n elif name in self.units:\n if not hasattr(val, 'unit'):\n raise ValueError('To set data for column \"{0}\", you must '\n 'provide a Quantity-like object (with units).'\n .format(name))\n self.data[name] = val\n self.units[name] = val.unit\n\n elif name in self.data.columns:\n self.data[name] = val\n\n else:\n super().__setattr__(name, val)\n\n def __dir__(self):\n return super().__dir__() + [str(k) for k in self.data.columns]\n\n def __getitem__(self, slc):\n if isinstance(slc, int):\n slc = slice(slc, slc+1)\n elif isinstance(slc, str):\n return self.__getattr__(slc)\n return self.__class__(self.data[slc])\n\n def __setitem__(self, name, val):\n if hasattr(val, 'unit'):\n self.data[name] = val.value\n self.units[name] = val.unit\n else:\n self.data[name] = val\n\n def __len__(self):\n return len(self.data)\n\n def __str__(self):\n names = ['ra', 'dec', 'parallax', 'pmra', 'pmdec']\n if self._has_rv:\n names.append('radial_velocity')\n return str(self.data[names])\n\n def __repr__(self):\n return \"<GaiaData: {0:d} rows>\".format(len(self))\n\n ##########################################################################\n # Computed and convenience quantities\n #\n @property\n def 
pm(self):\n \"\"\"2D proper motion. Has shape `(nrows, 2)`\"\"\"\n _u = self.pmra.unit\n return np.vstack((self.pmra.value, self.pmdec.to(_u).value)).T * _u\n\n @u.quantity_input(min_parallax=u.mas, equivalencies=u.parallax())\n def get_distance(self, min_parallax=None, parallax_fill_value=np.nan,\n allow_negative=False):\n \"\"\"Compute distance from parallax (by inverting the parallax) using\n `~astropy.coordinates.Distance`.\n\n Parameters\n ----------\n min_parallax : `~astropy.units.Quantity` (optional)\n If `min_parallax` specified, the parallaxes are clipped to this\n values (and it is also used to replace NaNs).\n allow_negative : bool (optional)\n This is passed through to `~astropy.coordinates.Distance`.\n\n Returns\n -------\n dist : `~astropy.coordinates.Distance`\n A ``Distance`` object with the data.\n \"\"\"\n\n plx = self.parallax.copy()\n\n if np.isnan(parallax_fill_value):\n parallax_fill_value = parallax_fill_value * u.mas\n\n if min_parallax is not None:\n clipped = plx < min_parallax\n clipped |= ~np.isfinite(plx)\n plx[clipped] = parallax_fill_value\n\n return coord.Distance(parallax=plx, allow_negative=allow_negative)\n\n @property\n def distance(self):\n \"\"\"Assumes 1/parallax. Has shape `(nrows,)`.\n\n This attribute will raise an error when there are negative or zero\n parallax values. For more flexible retrieval of distance values and\n auto-filling bad values, use the .get_distance() method.\"\"\"\n return self.get_distance()\n\n def get_radial_velocity(self, fill_value=None):\n \"\"\"Return radial velocity but with invalid values filled with the\n specified fill value.\n\n Parameters\n ----------\n fill_value : `~astropy.units.Quantity` (optional)\n If not ``None``, fill any invalid values with the specified value.\n \"\"\"\n rv = self.radial_velocity.copy()\n rv[~np.isfinite(rv)] = fill_value\n return rv\n\n @property\n def distmod(self):\n \"\"\"Distance modulus, m-M = 5 * log10(dist / (10 pc))\"\"\"\n return self.distance.distmod\n\n @property\n def vtan(self):\n \"\"\"\n Tangential velocity computed using the proper motion and inverse\n parallax as the distance. Has shape `(nrows, 2)`\n \"\"\"\n d = self.distance\n vra = (self.pmra * d).to(u.km/u.s, u.dimensionless_angles()).value\n vdec = (self.pmdec * d).to(u.km/u.s, u.dimensionless_angles()).value\n return np.vstack((vra, vdec)).T * u.km/u.s\n\n def get_cov(self, RAM_threshold=1*u.gigabyte, units=None):\n \"\"\"\n The Gaia data tables contain correlation coefficients and standard\n deviations for (ra, dec, parallax, pm_ra, pm_dec), but for most analyses\n we need covariance matrices. This converts the data provided by Gaia\n into covariance matrices.\n\n If a radial velocity exists, this also contains the radial velocity\n variance. If radial velocity doesn't exist, that diagonal element is set\n to inf.\n\n The default units of the covariance matrix are [degree, degree, mas,\n mas/yr, mas/yr, km/s], but this can be modified by passing in a\n dictionary with new units. For example, to change just the default ra,\n dec units for the covariance matrix, you can pass in::\n\n units=dict(ra=u.radian, dec=u.radian)\n\n Parameters\n ----------\n RAM_threshold : `astropy.units.Quantity`\n Raise an error if the expected covariance array is larger than the\n specified threshold. 
Set to ``None`` to disable this checking.\n \"\"\"\n\n if 'cov' in self._cache:\n if units == self._cache['cov_units']:\n return self._cache['cov']\n\n if RAM_threshold is not None:\n # Raise error if the user is going to blow up their RAM\n estimated_RAM = 6 * 6 * len(self) * 8*u.bit\n if estimated_RAM > RAM_threshold:\n raise RuntimeError('Estimated RAM usage for generating '\n 'covariance matrices is larger than the '\n 'specified threshold. Use the argument: '\n '`RAM_threshold=None` to disable this check')\n\n if units is None:\n units = dict()\n units.setdefault('ra', u.deg)\n units.setdefault('dec', u.deg)\n units.setdefault('parallax', u.mas)\n units.setdefault('pmra', u.mas/u.yr)\n units.setdefault('pmdec', u.mas/u.yr)\n units.setdefault('radial_velocity', u.km/u.s)\n\n # The full returned matrix\n C = np.zeros((len(self), 6, 6))\n\n # We handle radial_velocity separately below - doesn't have correlation\n # coefficients with the astrometric parameters\n names = ['ra', 'dec', 'parallax', 'pmra', 'pmdec']\n\n # pre-load the diagonal\n for i, name in enumerate(names):\n if name + \"_error\" in self.data.colnames:\n err = getattr(self, name + \"_error\")\n C[:, i, i] = err.to(units[name]).value ** 2\n else:\n C[:, i, i] = np.nan\n\n if self._has_rv:\n name = 'radial_velocity'\n err = getattr(self, name + \"_error\")\n C[:, 5, 5] = err.to(units[name]).value ** 2\n else:\n C[:, 5, 5] = np.inf\n\n C[:, 5, 5][np.isnan(C[:, 5, 5])] = np.inf # missing values\n\n for i, name1 in enumerate(names):\n for j, name2 in enumerate(names):\n if j <= i:\n continue\n\n if \"{0}_{1}_corr\".format(name1, name2) in self.data.colnames:\n corr = getattr(self, \"{0}_{1}_corr\".format(name1, name2))\n else:\n corr = np.nan\n\n # We don't need to worry about units here because the diagonal\n # values have already been converted\n C[:, i, j] = corr * np.sqrt(C[:, i, i] * C[:, j, j])\n C[:, j, i] = C[:, i, j]\n\n self._cache['cov'] = C\n self._cache['cov_units'] = units\n\n return self._cache['cov']\n\n def get_ebv(self, dustmaps_cls=None):\n \"\"\"Compute the E(B-V) reddening at this location\n\n This requires the `dustmaps <http://dustmaps.readthedocs.io>`_ package\n to run!\n\n Parameters\n ----------\n dustmaps_cls : ``dustmaps`` query class\n By default, ``SFDQuery``.\n \"\"\"\n if dustmaps_cls is None:\n from dustmaps.sfd import SFDQuery\n dustmaps_cls = SFDQuery\n\n c = self.get_skycoord(distance=False)\n return dustmaps_cls().query(c)\n\n def get_ext(self, ebv=None, dustmaps_cls=None):\n \"\"\"Compute the E(B-V) reddening at this location\n\n This requires the `dustmaps <http://dustmaps.readthedocs.io>`_ package\n to run!\n\n Parameters\n ----------\n dustmaps_cls : ``dustmaps`` query class\n By default, ``SFDQuery``.\n\n Returns\n -------\n A_G\n A_BP\n A_RP\n \"\"\"\n if 'ebv' not in self._cache:\n if ebv is None:\n self._cache['ebv'] = self.get_ebv(dustmaps_cls=dustmaps_cls)\n else:\n self._cache['ebv'] = ebv\n\n if 'A_G' not in self._cache:\n A_G, A_B, A_R = get_ext(self.phot_g_mean_mag.value,\n self.phot_bp_mean_mag.value,\n self.phot_rp_mean_mag.value,\n self._cache['ebv'])\n\n self._cache['A_G'] = A_G * u.mag\n self._cache['A_B'] = A_B * u.mag\n self._cache['A_R'] = A_R * u.mag\n\n return (self._cache['A_G'],\n self._cache['A_B'],\n self._cache['A_R'])\n\n def get_G0(self, *args, **kwargs):\n \"\"\"Return the extinction-corrected G-band magnitude. 
Any arguments are\n passed to ``get_ext()``.\n \"\"\"\n A, _, _ = self.get_ext(*args, **kwargs)\n return self.phot_g_mean_mag - A\n\n def get_BP0(self, *args, **kwargs):\n \"\"\"Return the extinction-corrected G_BP magnitude. Any arguments are\n passed to ``get_ext()``.\"\"\"\n _, A, _ = self.get_ext(*args, **kwargs)\n return self.phot_bp_mean_mag - A\n\n def get_RP0(self, *args, **kwargs):\n \"\"\"Return the extinction-corrected G_RP magnitude. Any arguments are\n passed to ``get_ext()``.\"\"\"\n _, _, A = self.get_ext(*args, **kwargs)\n return self.phot_rp_mean_mag - A\n\n def get_uwe(self):\n \"\"\"Compute and return the unit-weight error.\"\"\"\n return np.sqrt(self.astrometric_chi2_al /\n (self.astrometric_n_good_obs_al-5))\n\n def get_ruwe(self):\n \"\"\"Compute and return the renormalized unit-weight error.\"\"\"\n interp = U0Interpolator()\n\n bprp = self.phot_bp_mean_mag.value - self.phot_rp_mean_mag.value\n u0 = interp.get_u0(self.phot_g_mean_mag.value, bprp)\n return self.get_uwe() / u0\n\n ##########################################################################\n # Astropy connections\n #\n @property\n def skycoord(self):\n \"\"\"\n Return an `~astropy.coordinates.SkyCoord` object to represent\n all coordinates. Note: this requires Astropy v3.0 or higher!\n\n Use the ``get_skycoord()`` method for more flexible access.\n \"\"\"\n return self.get_skycoord()\n\n def get_skycoord(self, distance=None, radial_velocity=None,\n ref_epoch=REF_EPOCH[LATEST_RELEASE]):\n \"\"\"\n Return an `~astropy.coordinates.SkyCoord` object to represent\n all coordinates. Note: this requires Astropy v3.0 or higher!\n\n `ref_epoch` is used to set the `obstime` attribute on the coordinate\n objects. This is often included in the data release tables, but\n `ref_epoch` here is used if it's not.\n\n Parameters\n ----------\n distance : `~astropy.coordinate.Distance`, `~astropy.units.Quantity`, ``False``, str (optional)\n If ``None``, this inverts the parallax to get the distance from the\n Gaia data. If ``False``, distance information is ignored. If an\n astropy ``Quantity`` or ``Distance`` object, it sets the distance\n values of the output ``SkyCoord`` to whatever is passed in.\n radial_velocity : `~astropy.units.Quantity`, str (optional)\n If ``None``, this uses radial velocity data from the input Gaia\n table. If an astropy ``Quantity`` object, it sets the radial\n velocity values of the output ``SkyCoord`` to whatever is passed in.\n ref_epoch : `~astropy.time.Time`, float (optional)\n The reference epoch of the data. If not specified, this will try to\n read it from the input Gaia data table. 
If not provided, this will\n be set to whatever the most recent data release is, so, **beware**!\n\n Returns\n -------\n c : `~astropy.coordinates.SkyCoord`\n The coordinate object constructed from the input Gaia data.\n \"\"\"\n _coord_opts = (distance, radial_velocity)\n if 'coord' in self._cache:\n try:\n _check = self._cache['coord_opts'] == _coord_opts\n except ValueError: # array passed for distance or radial_velocity\n _check = False\n\n if _check:\n return self._cache['coord']\n\n kw = dict()\n if self._has_rv:\n kw['radial_velocity'] = self.radial_velocity\n\n # Reference epoch\n if 'ref_epoch' in self.data.colnames:\n obstime = Time(self.ref_epoch.value, format='jyear')\n else:\n obstime = Time(ref_epoch, format='jyear')\n\n kw['obstime'] = obstime\n\n if radial_velocity is not False and radial_velocity is not None:\n if isinstance(radial_velocity, str):\n kw['radial_velocity'] = self[radial_velocity]\n else:\n kw['radial_velocity'] = radial_velocity\n elif radial_velocity is False and 'radial_velocity' in kw:\n kw.pop('radial_velocity')\n\n if distance is None:\n kw['distance'] = self.distance\n elif distance is not False and distance is not None:\n if isinstance(distance, str):\n kw['distance'] = self[distance]\n else:\n kw['distance'] = distance\n\n self._cache['coord'] = coord.SkyCoord(ra=self.ra, dec=self.dec,\n pm_ra_cosdec=self.pmra,\n pm_dec=self.pmdec, **kw)\n self._cache['coord_opts'] = _coord_opts\n\n return self._cache['coord']\n\n def get_error_samples(self, size=1, rnd=None):\n \"\"\"Generate a sampling from the Gaia error distribution for each source.\n\n This function constructs the astrometric covariance matrix for each\n source and generates a specified number of random samples from the error\n distribution for each source. This does not handle spatially-dependent\n correlations. Samplings generated with this method can be used to, e.g.,\n propagate the Gaia errors through coordinate transformations or\n analyses.\n\n Parameters\n ----------\n size : int\n The number of random samples per soure to generate.\n rnd : ``numpy.random.RandomState``, optional\n The random state.\n\n Returns\n -------\n g_samples : `pyia.GaiaData`\n The same data table, but now each Gaia coordinate entry contains\n samples from the error distribution.\n\n \"\"\"\n if rnd is None:\n rnd = np.random.RandomState()\n\n C = self.get_cov().copy()\n rv_mask = ~np.isfinite(C[:, 5, 5])\n C[rv_mask, 5, 5] = 0.\n\n arrs = []\n for k, unit in self._cache['cov_units'].items():\n arrs.append(getattr(self, k).to_value(unit))\n y = np.stack(arrs).T\n\n samples = np.array([rnd.multivariate_normal(y[i], C[i], size=size)\n for i in range(len(y))])\n\n d = self.data.copy()\n for i, (k, unit) in enumerate(self._cache['cov_units'].items()):\n d[k] = samples[..., i] * unit\n\n return self.__class__(d)\n\n def filter(self, **kwargs):\n \"\"\"\n Filter the data based on columns and data ranges.\n\n Parameters\n ----------\n **kwargs\n Keys should be column names, values should be tuples representing\n ranges to select the column values withing. 
For example, to select\n parallaxes between 0.5 and 5, pass ``parallax=(0.5, 5)*u.mas``.\n Pass `None` to skip a filter, for example ``parallax=(None,\n 5*u.mas)`` would select all parallax values < 5 mas.\n\n Returns\n -------\n filtered_g : `pyia.GaiaData`\n The same data table, but filtered.\n \"\"\"\n mask = np.ones(len(self), dtype=bool)\n for k, (x1, x2) in kwargs.items():\n if x1 is None and x2 is None:\n raise ValueError(f\"Both range values are None for key {k}!\")\n\n if x1 is None:\n mask &= self[k] < x2\n\n elif x2 is None:\n mask &= self[k] >= x1\n\n else:\n mask &= (self[k] >= x1) & (self[k] < x2)\n\n return self[mask]\n"
] |
[
[
"numpy.sqrt",
"numpy.isfinite",
"numpy.asarray",
"numpy.isnan",
"numpy.stack",
"numpy.random.RandomState",
"numpy.vstack"
]
] |
lvwuyunlifan/crop
|
[
"7392d007a8271ff384c5c66ed5717afbc4172b4d"
] |
[
"logger.py"
] |
[
"# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514\n\nimport tensorflow as tf\n\nimport numpy as np\n\nimport scipy.misc\n\ntry:\n\n from StringIO import StringIO # Python 2.7\n\nexcept ImportError:\n\n from io import BytesIO # Python 3.x\n\n\nclass Logger(object):\n\n def __init__(self, log_dir):\n\n \"\"\"Create a summary writer logging to log_dir.\"\"\"\n\n self.writer = tf.summary.FileWriter(log_dir)\n\n def scalar_summary(self, tag, value, step):\n\n \"\"\"Log a scalar variable.\"\"\"\n\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])\n\n self.writer.add_summary(summary, step)\n\n def image_summary(self, tag, images, step):\n\n \"\"\"Log a list of images.\"\"\"\n\n img_summaries = []\n\n for i, img in enumerate(images):\n\n # Write the image to a string\n\n try:\n\n s = StringIO()\n\n except:\n\n s = BytesIO()\n\n scipy.misc.toimage(img).save(s, format=\"png\")\n\n # Create an Image object\n\n img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),\n\n height=img.shape[0],\n\n width=img.shape[1])\n\n # Create a Summary value\n\n img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))\n\n # Create and write Summary\n\n summary = tf.Summary(value=img_summaries)\n\n self.writer.add_summary(summary, step)\n\n def histo_summary(self, tag, values, step, bins=1000):\n\n \"\"\"Log a histogram of the tensor of values.\"\"\"\n\n # Create a histogram using numpy\n\n counts, bin_edges = np.histogram(values, bins=bins)\n\n # Fill the fields of the histogram proto\n\n hist = tf.HistogramProto()\n\n hist.min = float(np.min(values))\n\n hist.max = float(np.max(values))\n\n hist.num = int(np.prod(values.shape))\n\n hist.sum = float(np.sum(values))\n\n hist.sum_squares = float(np.sum(values ** 2))\n\n # Drop the start of the first bin\n\n bin_edges = bin_edges[1:]\n\n # Add bin edges and counts\n\n for edge in bin_edges:\n hist.bucket_limit.append(edge)\n\n for c in counts:\n hist.bucket.append(c)\n\n # Create and write Summary\n\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])\n\n self.writer.add_summary(summary, step)\n\n self.writer.flush()"
] |
[
[
"tensorflow.summary.FileWriter",
"numpy.min",
"numpy.max",
"tensorflow.Summary.Value",
"numpy.prod",
"tensorflow.HistogramProto",
"tensorflow.Summary",
"numpy.histogram",
"numpy.sum"
]
] |
neurodata/bilateral-connectome
|
[
"b04162f84820f81cf719e8a5ddd4dae34d8f5f41"
] |
[
"pkg/pkg/utils/toy.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom graspologic.simulations import sbm\n\n\ndef sample_toy_networks(seed=888888, ns=None, B=None):\n np.random.seed(seed)\n if ns is None:\n ns = [5, 6, 7]\n if B is None:\n B = np.array([[0.8, 0.2, 0.05], [0.05, 0.9, 0.2], [0.05, 0.05, 0.7]])\n A1, labels = sbm(ns, B, directed=True, loops=False, return_labels=True)\n A2 = sbm(ns, B, directed=True, loops=False)\n\n node_data = pd.DataFrame(index=np.arange(A1.shape[0]))\n node_data[\"labels\"] = labels + 1\n return A1, A2, node_data\n\n\ndef get_toy_palette():\n return dict(zip([1, 2, 3], sns.color_palette(\"Set2\")[3:]))\n"
] |
[
[
"numpy.arange",
"numpy.array",
"numpy.random.seed"
]
] |
DarrenZhang01/Neural_Tangents_TensorFlow
|
[
"2fd360c8b1b8c9106044034f6a8b5c2734db9c3d"
] |
[
"tf_dot_general/tf_dot_general_test.py"
] |
[
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\n\"\"\"\nTests for the general dot operation for TensorFlow.\n\nZhibo Zhang, 2020.06.30\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.python.platform import test\nimport numpy as np\nimport jax.numpy as jnp\nfrom jax import lax\nfrom tf_dot_general import *\nfrom absl.testing import parameterized\n\n\nclass TFConvGeneralTest(test.TestCase, parameterized.TestCase):\n\n\n @parameterized.parameters(\n {\"lhs\": ['i', 'j'], \"rhs\": ['j', 'k'], \"dims\": (((1,), (0,)), ((), ())),\n \"result\": \"ik\"},\n {\"lhs\": ['a', 'i', 'j'], \"rhs\": ['a', 'j', 'k'], \"dims\": \\\n (((2,), (1,)), ((0,), (0,))), \"result\": \"aik\"},\n {\"lhs\": ['a', 'b', 'i', 'j'], \"rhs\": ['a', 'b', 'j', 'k'], \"dims\": \\\n (((3,), (2,)), ((0, 1,), (0, 1,))), \"result\": \"abik\"},\n )\n def test_compose_output_rep(self, lhs, rhs, dims, result):\n contraction, batch = dims\n lhs_contraction, rhs_contraction = contraction\n lhs_batch, rhs_batch = batch\n output_rep = compose_output_rep(lhs, rhs, lhs_contraction, rhs_contraction,\n lhs_batch, rhs_batch)\n self.assertEqual(output_rep, result)\n\n @parameterized.parameters(\n {\"lhs_np\": np.ones((5, 3)), \"rhs_np\": np.ones((3, 2)),\n \"dims\": (((1,), (0,)), ((), ()))},\n {\"lhs_np\": np.ones((5, 3)), \"rhs_np\": np.ones((5, 3)),\n \"dims\": (((0, 1), (0, 1)), ((), ()))},\n {\"lhs_np\": np.ones((5, 3, 2)), \"rhs_np\": np.ones((2, 3, 2)),\n \"dims\": (((1, 2), (1, 0)), ((), ()))},\n {\"lhs_np\": np.ones((6, 5, 3)), \"rhs_np\": np.ones((6, 3, 2)),\n \"dims\": (((2,), (1,)), ((0,), (0,)))},\n {\"lhs_np\": np.ones((6, 3, 5)), \"rhs_np\": np.ones((6, 3, 2)),\n \"dims\": (((1,), (1,)), ((0,), (0,)))},\n {\"lhs_np\": np.ones((5, 3, 2, 2)), \"rhs_np\": np.ones((5, 2, 2, 6)),\n \"dims\": (((2, 3), (1, 2)), ((0,), (0,)))},\n {\"lhs_np\": np.ones((2, 2, 5, 3)), \"rhs_np\": np.ones((2, 2, 3, 2)),\n \"dims\": (((3,), (2,)), ((0, 1), (0, 1)))},\n {\"lhs_np\": np.ones((2, 2, 5, 2)), \"rhs_np\": np.ones((2, 2, 3, 2)),\n \"dims\": (((3,), (1,)), ((0,), (0,)))},\n {\"lhs_np\": np.ones((2, 2, 5, 3, 3)), \"rhs_np\": np.ones((2, 3, 2, 3, 2)),\n \"dims\": (((4,), (1,)), ((0,), (0,)))},\n )\n def test_tf_dot_general(self, lhs_np, rhs_np, dims):\n ans = lax.dot_general(lhs_np, rhs_np, dims)\n result = tf_dot_general(lhs_np, rhs_np, dims)\n self.assertAllClose(result, np.array(ans))\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] |
[
[
"numpy.array",
"tensorflow.python.platform.test.main",
"numpy.ones"
]
] |
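As a reading aid for the dimension_numbers used in the tests above: the contraction/batch tuples map onto einsum subscripts (the first test case's "ik" result string is exactly the einsum output). A small check against jax.lax.dot_general, independent of the repository's tf_dot_general, might look like:

    import numpy as np
    from jax import lax

    lhs, rhs = np.ones((5, 3)), np.ones((3, 2))
    # Contract lhs axis 1 with rhs axis 0, no batch axes: an ordinary "ij,jk->ik" product.
    dims = (((1,), (0,)), ((), ()))
    out = lax.dot_general(lhs, rhs, dims)
    np.testing.assert_allclose(np.asarray(out), np.einsum("ij,jk->ik", lhs, rhs))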
hhcho/densvis
|
[
"d65bb3133a5072356f45d2d6f4f0d16ad33032fd"
] |
[
"densmap/densmap.py"
] |
[
"import sys\nimport numpy as np\nimport argparse\nimport pickle\n\nimport densmap\nfrom sklearn.datasets import load_digits\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('-i','--input', help='Input .txt or .pkl', default='data.txt')\n parser.add_argument('-o','--outname', help='Output prefix for saving _emb.txt, _dens.txt',\n default='out')\n parser.add_argument('-f','--dens_frac', type=float, default=0.3)\n parser.add_argument('-l','--dens_lambda', type=float, default=2.0)\n parser.add_argument('-s','--var_shift', type=float, default=0.1)\n parser.add_argument('-d','--ndim', type=int, default=2, help='Embedding dimension (default: %(default)s)')\n parser.add_argument('-n','--n-epochs', type=int, help='Number of epochs', default=750)\n parser.add_argument('-k','--n-nei', type=int, default=30, help='Number of neighbors (default: %(default)s)')\n parser.add_argument('--final_dens', action='store_true', default=True)\n parser.add_argument('--no_final_dens', dest='final_dens', action='store_false')\n parser.add_argument('--outtype', choices=('pkl','txt'), default='txt', help='Output format type (default: %(default)s)')\n return parser\n\ndef main(args):\n if args.input.endswith('.txt'):\n data = np.loadtxt(args.input)\n elif args.input.endswith('.pkl'):\n data = pickle.load(open(args.input,'rb'))\n else:\n raise RuntimeError(f'File format for {args.input} not supported')\n\n if data.shape[0] < data.shape[1]:\n data = data.T\n \n emb = densmap.densMAP(verbose=True,\n n_components=args.ndim,\n n_neighbors=args.n_nei,\n n_epochs=args.n_epochs,\n dens_frac=args.dens_frac,\n dens_lambda=args.dens_lambda,\n logdist_shift=0,\n var_shift=args.var_shift,\n final_dens=args.final_dens).fit_transform(data)\n\n outname = args.outname\n if args.final_dens:\n (emb, ro, re) = emb\n rero = np.stack((ro,re)).transpose()\n\n if args.outtype=='txt':\n np.savetxt(outname+'_dens.txt',rero, fmt='%e')\n elif args.outtype=='pkl':\n with open(outname + '_dens.pkl','wb') as f:\n pickle.dump(rero, f)\n else:\n raise RuntimeError\n \n if args.outtype == 'txt':\n np.savetxt(outname+'_emb.txt',emb, fmt='%e')\n\n elif args.outtype == 'pkl':\n with open(outname + '_emb.pkl','wb') as f:\n pickle.dump(emb, f)\n else: # should not reach here\n raise RuntimeError \n\n print(\"Done\")\n\nif __name__ == '__main__':\n main(parse_args().parse_args())\n"
] |
[
[
"numpy.savetxt",
"numpy.stack",
"numpy.loadtxt"
]
] |
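The script above is a thin argparse wrapper; its core is the densMAP call itself. A sketch of the equivalent programmatic use with the script's default options (n_neighbors=30, n_epochs=750, dens_frac=0.3, dens_lambda=2.0, var_shift=0.1, final_dens=True), assuming the densmap package is importable and data.txt holds a samples-by-features matrix:

    import numpy as np
    import densmap

    data = np.loadtxt("data.txt")
    emb, ro, re = densmap.densMAP(
        n_components=2, n_neighbors=30, n_epochs=750,
        dens_frac=0.3, dens_lambda=2.0, logdist_shift=0, var_shift=0.1,
        final_dens=True,
    ).fit_transform(data)                   # final_dens=True returns (embedding, orig density, emb density)
    np.savetxt("out_emb.txt", emb, fmt="%e")
    np.savetxt("out_dens.txt", np.stack((ro, re)).T, fmt="%e")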
gkbharathy/econ_model_02
|
[
"d91ddf148b009bf79852d9aec70f3a1877e0f79a"
] |
[
"dolo/algos/value_iteration.py"
] |
[
"import time\nimport numpy as np\nimport numpy\nimport scipy.optimize\nfrom dolo.numeric.processes import DiscretizedIIDProcess\n# from dolo.numeric.decision_rules_markov import MarkovDecisionRule, IIDDecisionRule\nfrom dolo.numeric.decision_rule import DecisionRule, ConstantDecisionRule\nfrom dolo.numeric.grids import Grid, CartesianGrid, SmolyakGrid, UnstructuredGrid\nfrom dolo.misc.itprinter import IterationsPrinter\n\n\ndef constant_policy(model):\n return ConstantDecisionRule(model.calibration[\"controls\"])\n\nfrom .results import AlgoResult, ValueIterationResult\n\ndef value_iteration(model,\n grid={},\n tol=1e-6,\n maxit=500,\n maxit_howard=20,\n verbose=False,\n details=True):\n \"\"\"\n Solve for the value function and associated Markov decision rule by iterating over\n the value function.\n\n Parameters:\n -----------\n model :\n \"dtmscc\" model. Must contain a 'felicity' function.\n grid :\n grid options\n dr :\n decision rule to evaluate\n\n Returns:\n --------\n mdr : Markov decision rule\n The solved decision rule/policy function\n mdrv: decision rule\n The solved value function\n \"\"\"\n\n transition = model.functions['transition']\n felicity = model.functions['felicity']\n controls_lb = model.functions['controls_lb']\n controls_ub = model.functions['controls_ub']\n\n parms = model.calibration['parameters']\n discount = model.calibration['beta']\n\n x0 = model.calibration['controls']\n m0 = model.calibration['exogenous']\n s0 = model.calibration['states']\n r0 = felicity(m0, s0, x0, parms)\n\n process = model.exogenous\n dprocess = process.discretize()\n\n n_ms = dprocess.n_nodes() # number of exogenous states\n n_mv = dprocess.n_inodes(\n 0) # this assume number of integration nodes is constant\n\n endo_grid = model.get_grid(**grid)\n\n exo_grid = dprocess.grid\n\n mdrv = DecisionRule(exo_grid, endo_grid)\n\n grid = mdrv.endo_grid.nodes()\n N = grid.shape[0]\n n_x = len(x0)\n\n mdr = constant_policy(model)\n \n controls_0 = np.zeros((n_ms, N, n_x))\n for i_ms in range(n_ms):\n controls_0[i_ms, :, :] = mdr.eval_is(i_ms, grid)\n\n values_0 = np.zeros((n_ms, N, 1))\n # for i_ms in range(n_ms):\n # values_0[i_ms, :, :] = mdrv(i_ms, grid)\n\n mdr = DecisionRule(exo_grid, endo_grid)\n # mdr.set_values(controls_0)\n\n # THIRD: value function iterations until convergence\n it = 0\n err_v = 100\n err_v_0 = 0\n gain_v = 0.0\n err_x = 100\n err_x_0 = 0\n tol_x = 1e-5\n tol_v = 1e-7\n\n itprint = IterationsPrinter(\n ('N', int), ('Error_V', float), ('Gain_V', float), ('Error_x', float),\n ('Gain_x', float), ('Eval_n', int), ('Time', float),\n verbose=verbose)\n itprint.print_header('Start value function iterations.')\n\n while (it < maxit) and (err_v > tol or err_x > tol_x):\n\n t_start = time.time()\n it += 1\n\n mdr.set_values(controls_0)\n if it > 2:\n ev = evaluate_policy(\n model, mdr, initial_guess=mdrv, verbose=False, details=True)\n else:\n ev = evaluate_policy(model, mdr, verbose=False, details=True)\n\n mdrv = ev.solution\n for i_ms in range(n_ms):\n values_0[i_ms, :, :] = mdrv.eval_is(i_ms, grid)\n\n values = values_0.copy()\n controls = controls_0.copy()\n\n for i_m in range(n_ms):\n m = dprocess.node(i_m)\n for n in range(N):\n s = grid[n, :]\n x = controls[i_m, n, :]\n lb = controls_lb(m, s, parms)\n ub = controls_ub(m, s, parms)\n bnds = [e for e in zip(lb, ub)]\n\n def valfun(xx):\n return -choice_value(transition, felicity, i_m, s, xx,\n mdrv, dprocess, parms, discount)[0]\n\n res = scipy.optimize.minimize(valfun, x, bounds=bnds)\n controls[i_m, n, :] = res.x\n 
values[i_m, n, 0] = -valfun(x)\n\n # compute error, update value and dr\n err_x = abs(controls - controls_0).max()\n err_v = abs(values - values_0).max()\n t_end = time.time()\n elapsed = t_end - t_start\n\n values_0 = values\n controls_0 = controls\n\n gain_x = err_x / err_x_0\n gain_v = err_v / err_v_0\n\n err_x_0 = err_x\n err_v_0 = err_v\n\n itprint.print_iteration(\n N=it,\n Error_V=err_v,\n Gain_V=gain_v,\n Error_x=err_x,\n Gain_x=gain_x,\n Eval_n=ev.iterations,\n Time=elapsed)\n\n itprint.print_finished()\n\n mdr = DecisionRule(exo_grid, endo_grid)\n mdr.set_values(controls)\n mdrv.set_values(values_0)\n\n if not details:\n return mdr, mdrv\n else:\n return ValueIterationResult(\n mdr, #:AbstractDecisionRule\n mdrv, #:AbstractDecisionRule\n it, #:Int\n dprocess, #:AbstractDiscretizedProcess\n err_x<tol_x, #:Bool\n tol_x, #:Float64\n err_x, #:Float64\n err_v<tol_v, #:Bool\n tol_v, #:Float64\n err_v, #:Float64\n None, #log: #:ValueIterationLog\n None #trace: #:Union{Nothing,IterationTrace\n )\n\n\ndef choice_value(transition, felicity, i_ms, s, x, drv, dprocess, parms, beta):\n\n m = dprocess.node(i_ms)\n cont_v = 0.0\n for I_ms in range(dprocess.n_inodes(i_ms)):\n M = dprocess.inode(i_ms, I_ms)\n prob = dprocess.iweight(i_ms, I_ms)\n S = transition(m, s, x, M, parms)\n V = drv.eval_is(I_ms, S)[0]\n cont_v += prob * V\n return felicity(m, s, x, parms) + beta * cont_v\n\n\nclass EvaluationResult:\n def __init__(self, solution, iterations, tol, error):\n self.solution = solution\n self.iterations = iterations\n self.tol = tol\n self.error = error\n\n\ndef evaluate_policy(model,\n mdr,\n tol=1e-8,\n maxit=2000,\n grid={},\n verbose=True,\n initial_guess=None,\n hook=None,\n integration_orders=None,\n details=False,\n interp_type='cubic'):\n \"\"\"Compute value function corresponding to policy ``dr``\n\n Parameters:\n -----------\n\n model:\n \"dtcscc\" model. 
Must contain a 'value' function.\n\n mdr:\n decision rule to evaluate\n\n Returns:\n --------\n\n decision rule:\n value function (a function of the space similar to a decision rule\n object)\n\n \"\"\"\n\n process = model.exogenous\n dprocess = process.discretize()\n\n n_ms = dprocess.n_nodes() # number of exogenous states\n n_mv = dprocess.n_inodes(\n 0) # this assume number of integration nodes is constant\n\n x0 = model.calibration['controls']\n v0 = model.calibration['values']\n parms = model.calibration['parameters']\n n_x = len(x0)\n n_v = len(v0)\n n_s = len(model.symbols['states'])\n\n endo_grid = model.get_grid(**grid)\n exo_grid = dprocess.grid\n\n if initial_guess is not None:\n mdrv = initial_guess\n else:\n mdrv = DecisionRule(exo_grid, endo_grid, interp_type=interp_type)\n\n grid = mdrv.endo_grid.nodes()\n N = grid.shape[0]\n\n if isinstance(mdr, np.ndarray):\n controls = mdr\n else:\n controls = np.zeros((n_ms, N, n_x))\n for i_m in range(n_ms):\n controls[i_m, :, :] = mdr.eval_is(i_m, grid)\n\n values_0 = np.zeros((n_ms, N, n_v))\n if initial_guess is None:\n for i_m in range(n_ms):\n values_0[i_m, :, :] = v0[None, :]\n else:\n for i_m in range(n_ms):\n values_0[i_m, :, :] = initial_guess.eval_is(i_m, grid)\n\n val = model.functions['value']\n g = model.functions['transition']\n\n sh_v = values_0.shape\n\n err = 10\n inner_maxit = 50\n it = 0\n\n if verbose:\n headline = '|{0:^4} | {1:10} | {2:8} | {3:8} |'.format(\n 'N', ' Error', 'Gain', 'Time')\n stars = '-' * len(headline)\n print(stars)\n print(headline)\n print(stars)\n\n t1 = time.time()\n\n err_0 = np.nan\n\n verbit = (verbose == 'full')\n\n while err > tol and it < maxit:\n\n it += 1\n\n t_start = time.time()\n\n mdrv.set_values(values_0.reshape(sh_v))\n values = update_value(val, g, grid, controls, values_0, mdr, mdrv,\n dprocess, parms).reshape((-1, n_v))\n err = abs(values.reshape(sh_v) - values_0).max()\n\n err_SA = err / err_0\n err_0 = err\n\n values_0 = values.reshape(sh_v)\n\n t_finish = time.time()\n elapsed = t_finish - t_start\n\n if verbose:\n print('|{0:4} | {1:10.3e} | {2:8.3f} | {3:8.3f} |'.format(\n it, err, err_SA, elapsed))\n\n # values_0 = values.reshape(sh_v)\n\n t2 = time.time()\n\n if verbose:\n print(stars)\n print(\"Elapsed: {} seconds.\".format(t2 - t1))\n print(stars)\n\n if not details:\n return mdrv\n else:\n return EvaluationResult(mdrv, it, tol, err)\n\n\ndef update_value(val, g, s, x, v, dr, drv, dprocess, parms):\n\n N = s.shape[0]\n n_s = s.shape[1]\n\n n_ms = dprocess.n_nodes() # number of exogenous states\n n_mv = dprocess.n_inodes(\n 0) # this assume number of integration nodes is constant\n\n res = np.zeros_like(v)\n\n for i_ms in range(n_ms):\n\n m = dprocess.node(i_ms)[None, :].repeat(N, axis=0)\n\n xm = x[i_ms, :, :]\n vm = v[i_ms, :, :]\n\n for I_ms in range(n_mv):\n\n # M = P[I_ms,:][None,:]\n M = dprocess.inode(i_ms, I_ms)[None, :].repeat(N, axis=0)\n prob = dprocess.iweight(i_ms, I_ms)\n\n S = g(m, s, xm, M, parms)\n XM = dr.eval_ijs(i_ms, I_ms, S)\n VM = drv.eval_ijs(i_ms, I_ms, S)\n rr = val(m, s, xm, vm, M, S, XM, VM, parms)\n\n res[i_ms, :, :] += prob * rr\n\n return res\n"
] |
[
[
"numpy.zeros",
"numpy.zeros_like"
]
] |
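The value_iteration routine above is tied to dolo's model objects, but the fixed point it approximates is the standard Bellman recursion V(s) <- max_x [ r(s, x) + beta * E V(s') ]. A self-contained toy sketch of that recursion on a small discrete problem (plain NumPy, not dolo's API; the MDP below is made up for illustration):

    import numpy as np

    rng = np.random.default_rng(0)
    n_s, n_a, beta = 3, 2, 0.95
    reward = rng.normal(size=(n_s, n_a))                  # r(s, x)
    P = rng.dirichlet(np.ones(n_s), size=(n_s, n_a))      # P[s, x] is a distribution over s'

    V = np.zeros(n_s)
    for _ in range(1000):
        Q = reward + beta * P @ V          # Q[s, x] = r(s, x) + beta * E[V(s') | s, x]
        V_new = Q.max(axis=1)
        if np.abs(V_new - V).max() < 1e-8:
            break
        V = V_new
    policy = Q.argmax(axis=1)              # greedy decision rule, analogous to mdr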
dokato/mne-python
|
[
"a188859b57044fa158af05852bcce2870fabde91"
] |
[
"mne/decoding/transformer.py"
] |
[
"# -*- coding: utf-8 -*-\n# Authors: Mainak Jas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Romain Trachel <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport numpy as np\n\nfrom .mixin import TransformerMixin\nfrom .base import BaseEstimator\n\nfrom .. import pick_types\nfrom ..filter import filter_data, _triage_filter_params\nfrom ..time_frequency.psd import psd_array_multitaper\nfrom ..externals.six import string_types\nfrom ..utils import _check_type_picks, check_version\nfrom ..io.pick import pick_info, _pick_data_channels, _picks_by_type\nfrom ..cov import _check_scalings_user\n\n\nclass _ConstantScaler():\n \"\"\"Scale channel types using constant values.\"\"\"\n\n def __init__(self, info, scalings, do_scaling=True):\n self._scalings = scalings\n self._info = info\n self._do_scaling = do_scaling\n\n def fit(self, X, y=None):\n scalings = _check_scalings_user(self._scalings)\n picks_by_type = _picks_by_type(pick_info(\n self._info, _pick_data_channels(self._info, exclude=())))\n std = np.ones(sum(len(p[1]) for p in picks_by_type))\n if X.shape[1] != len(std):\n raise ValueError('info had %d data channels but X has %d channels'\n % (len(std), len(X)))\n if self._do_scaling: # this is silly, but necessary for completeness\n for kind, picks in picks_by_type:\n std[picks] = 1. / scalings[kind]\n self.std_ = std\n self.mean_ = np.zeros_like(std)\n return self\n\n def transform(self, X):\n return X / self.std_\n\n def inverse_transform(self, X, y=None):\n return X * self.std_\n\n def fit_transform(self, X, y=None):\n return self.fit(X, y).transform(X)\n\n\ndef _sklearn_reshape_apply(func, return_result, X, *args, **kwargs):\n \"\"\"Reshape epochs and apply function.\"\"\"\n if not isinstance(X, np.ndarray):\n raise ValueError(\"data should be an np.ndarray, got %s.\" % type(X))\n X = np.atleast_3d(X)\n orig_shape = X.shape\n X = np.reshape(X.transpose(0, 2, 1), (-1, orig_shape[1]))\n X = func(X, *args, **kwargs)\n if return_result:\n X.shape = (orig_shape[0], orig_shape[2], orig_shape[1])\n X = X.transpose(0, 2, 1)\n return X\n\n\nclass Scaler(TransformerMixin, BaseEstimator):\n u\"\"\"Standardize channel data.\n\n This class scales data for each channel. It differs from scikit-learn\n classes (e.g., :class:`sklearn.preprocessing.StandardScaler`) in that\n it scales each *channel* by estimating μ and σ using data from all\n time points and epochs, as opposed to standardizing each *feature*\n (i.e., each time point for each channel) by estimating using μ and σ\n using data from all epochs.\n\n Parameters\n ----------\n info : instance of Info | None\n The measurement info. 
Only necessary if ``scalings`` is a dict or\n None.\n scalings : dict, string, defaults to None.\n Scaling method to be applied to data channel wise.\n\n * if scalings is None (default), scales mag by 1e15, grad by 1e13,\n and eeg by 1e6.\n * if scalings is :class:`dict`, keys are channel types and values\n are scale factors.\n * if ``scalings=='median'``,\n :class:`sklearn.preprocessing.RobustScaler`\n is used (requires sklearn version 0.17+).\n * if ``scalings=='mean'``,\n :class:`sklearn.preprocessing.StandardScaler`\n is used.\n\n with_mean : boolean, True by default\n If True, center the data using mean (or median) before scaling.\n Ignored for channel-type scaling.\n with_std : boolean, True by default\n If True, scale the data to unit variance (``scalings='mean'``),\n quantile range (``scalings='median``), or using channel type\n if ``scalings`` is a dict or None).\n \"\"\"\n\n def __init__(self, info=None, scalings=None, with_mean=True,\n with_std=True): # noqa: D102\n self.info = info\n self.with_mean = with_mean\n self.with_std = with_std\n self.scalings = scalings\n\n if not (scalings is None or isinstance(scalings, (dict, str))):\n raise ValueError('scalings type should be dict, str, or None, '\n 'got %s' % type(scalings))\n if isinstance(scalings, string_types) and \\\n scalings not in ('mean', 'median'):\n raise ValueError('Invalid method for scaling, must be \"mean\" or '\n '\"median\" but got %s' % scalings)\n if scalings is None or isinstance(scalings, dict):\n self._scaler = _ConstantScaler(info, scalings, self.with_std)\n elif scalings == 'mean':\n from sklearn.preprocessing import StandardScaler\n self._scaler = StandardScaler(self.with_mean, self.with_std)\n else: # scalings == 'median':\n if not check_version('sklearn', '0.17'):\n raise ValueError(\"median requires version 0.17 of \"\n \"sklearn library\")\n from sklearn.preprocessing import RobustScaler\n self._scaler = RobustScaler(self.with_mean, self.with_std)\n\n def fit(self, epochs_data, y=None):\n \"\"\"Standardize data across channels.\n\n Parameters\n ----------\n epochs_data : array, shape (n_epochs, n_channels, n_times)\n The data to concatenate channels.\n y : array, shape (n_epochs,)\n The label for each epoch.\n\n Returns\n -------\n self : instance of Scaler\n Returns the modified instance.\n \"\"\"\n _sklearn_reshape_apply(self._scaler.fit, False, epochs_data, y=y)\n return self\n\n def transform(self, epochs_data):\n \"\"\"Standardize data across channels.\n\n Parameters\n ----------\n epochs_data : array, shape (n_epochs, n_channels, n_times)\n The data.\n\n Returns\n -------\n X : array, shape (n_epochs, n_channels, n_times)\n The data concatenated over channels.\n\n Notes\n -----\n This function makes a copy of the data before the operations and the\n memory usage may be large with big data.\n \"\"\"\n return _sklearn_reshape_apply(self._scaler.transform, True,\n epochs_data)\n\n def fit_transform(self, epochs_data, y=None):\n \"\"\"Fit to data, then transform it.\n\n Fits transformer to epochs_data and y and returns a transformed version\n of epochs_data.\n\n Parameters\n ----------\n epochs_data : array, shape (n_epochs, n_channels, n_times)\n The data.\n y : None | array, shape (n_epochs,)\n The label for each epoch.\n Defaults to None.\n\n Returns\n -------\n X : array, shape (n_epochs, n_channels, n_times)\n The data concatenated over channels.\n\n Notes\n -----\n This function makes a copy of the data before the operations and the\n memory usage may be large with big data.\n \"\"\"\n return 
self.fit(epochs_data, y).transform(epochs_data)\n\n def inverse_transform(self, epochs_data):\n \"\"\"Invert standardization of data across channels.\n\n Parameters\n ----------\n epochs_data : array, shape (n_epochs, n_channels, n_times)\n The data.\n\n Returns\n -------\n X : array, shape (n_epochs, n_channels, n_times)\n The data concatenated over channels.\n\n Notes\n -----\n This function makes a copy of the data before the operations and the\n memory usage may be large with big data.\n \"\"\"\n return _sklearn_reshape_apply(self._scaler.inverse_transform, True,\n epochs_data)\n\n\nclass Vectorizer(TransformerMixin):\n \"\"\"Transform n-dimensional array into 2D array of n_samples by n_features.\n\n This class reshapes an n-dimensional array into an n_samples * n_features\n array, usable by the estimators and transformers of scikit-learn.\n\n Examples\n --------\n clf = make_pipeline(SpatialFilter(), _XdawnTransformer(), Vectorizer(),\n LogisticRegression())\n\n Attributes\n ----------\n ``features_shape_`` : tuple\n Stores the original shape of data.\n \"\"\"\n\n def fit(self, X, y=None):\n \"\"\"Store the shape of the features of X.\n\n Parameters\n ----------\n X : array-like\n The data to fit. Can be, for example a list, or an array of at\n least 2d. The first dimension must be of length n_samples, where\n samples are the independent samples used by the estimator\n (e.g. n_epochs for epoched data).\n y : None | array, shape (n_samples,)\n Used for scikit-learn compatibility.\n\n Returns\n -------\n self : Instance of Vectorizer\n Return the modified instance.\n \"\"\"\n X = np.asarray(X)\n self.features_shape_ = X.shape[1:]\n return self\n\n def transform(self, X):\n \"\"\"Convert given array into two dimensions.\n\n Parameters\n ----------\n X : array-like\n The data to fit. Can be, for example a list, or an array of at\n least 2d. The first dimension must be of length n_samples, where\n samples are the independent samples used by the estimator\n (e.g. n_epochs for epoched data).\n\n Returns\n -------\n X : array, shape (n_samples, n_features)\n The transformed data.\n \"\"\"\n X = np.asarray(X)\n if X.shape[1:] != self.features_shape_:\n raise ValueError(\"Shape of X used in fit and transform must be \"\n \"same\")\n return X.reshape(len(X), -1)\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit the data, then transform in one step.\n\n Parameters\n ----------\n X : array-like\n The data to fit. Can be, for example a list, or an array of at\n least 2d. The first dimension must be of length n_samples, where\n samples are the independent samples used by the estimator\n (e.g. n_epochs for epoched data).\n y : None | array, shape (n_samples,)\n Used for scikit-learn compatibility.\n\n Returns\n -------\n X : array, shape (n_samples, -1)\n The transformed data.\n \"\"\"\n return self.fit(X).transform(X)\n\n def inverse_transform(self, X):\n \"\"\"Transform 2D data back to its original feature shape.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Data to be transformed back to original shape.\n\n Returns\n -------\n X : array\n The data transformed into shape as used in fit. 
The first\n dimension is of length n_samples.\n \"\"\"\n X = np.asarray(X)\n if X.ndim != 2:\n raise ValueError(\"X should be of 2 dimensions but given has %s \"\n \"dimension(s)\" % X.ndim)\n return X.reshape((len(X),) + self.features_shape_)\n\n\nclass PSDEstimator(TransformerMixin):\n \"\"\"Compute power spectrum density (PSD) using a multi-taper method.\n\n Parameters\n ----------\n sfreq : float\n The sampling frequency.\n fmin : float\n The lower frequency of interest.\n fmax : float\n The upper frequency of interest.\n bandwidth : float\n The bandwidth of the multi taper windowing function in Hz.\n adaptive : bool\n Use adaptive weights to combine the tapered spectra into PSD\n (slow, use n_jobs >> 1 to speed up computation).\n low_bias : bool\n Only use tapers with more than 90% spectral concentration within\n bandwidth.\n n_jobs : int\n Number of parallel jobs to use (only used if adaptive=True).\n normalization : str\n Either \"full\" or \"length\" (default). If \"full\", the PSD will\n be normalized by the sampling rate as well as the length of\n the signal (as in nitime).\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more).\n\n See Also\n --------\n mne.time_frequency.psd_multitaper\n \"\"\"\n\n def __init__(self, sfreq=2 * np.pi, fmin=0, fmax=np.inf, bandwidth=None,\n adaptive=False, low_bias=True, n_jobs=1,\n normalization='length', verbose=None): # noqa: D102\n self.sfreq = sfreq\n self.fmin = fmin\n self.fmax = fmax\n self.bandwidth = bandwidth\n self.adaptive = adaptive\n self.low_bias = low_bias\n self.n_jobs = n_jobs\n self.verbose = verbose\n self.normalization = normalization\n\n def fit(self, epochs_data, y):\n \"\"\"Compute power spectrum density (PSD) using a multi-taper method.\n\n Parameters\n ----------\n epochs_data : array, shape (n_epochs, n_channels, n_times)\n The data.\n y : array, shape (n_epochs,)\n The label for each epoch\n\n Returns\n -------\n self : instance of PSDEstimator\n returns the modified instance\n \"\"\"\n if not isinstance(epochs_data, np.ndarray):\n raise ValueError(\"epochs_data should be of type ndarray (got %s).\"\n % type(epochs_data))\n\n return self\n\n def transform(self, epochs_data):\n \"\"\"Compute power spectrum density (PSD) using a multi-taper method.\n\n Parameters\n ----------\n epochs_data : array, shape (n_epochs, n_channels, n_times)\n The data\n\n Returns\n -------\n psd : array, shape (n_signals, len(freqs)) or (len(freqs),)\n The computed PSD.\n \"\"\"\n if not isinstance(epochs_data, np.ndarray):\n raise ValueError(\"epochs_data should be of type ndarray (got %s).\"\n % type(epochs_data))\n psd, _ = psd_array_multitaper(\n epochs_data, sfreq=self.sfreq, fmin=self.fmin, fmax=self.fmax,\n bandwidth=self.bandwidth, adaptive=self.adaptive,\n low_bias=self.low_bias, normalization=self.normalization,\n n_jobs=self.n_jobs)\n return psd\n\n\nclass FilterEstimator(TransformerMixin):\n \"\"\"Estimator to filter RtEpochs.\n\n Applies a zero-phase low-pass, high-pass, band-pass, or band-stop\n filter to the channels selected by \"picks\".\n\n l_freq and h_freq are the frequencies below which and above which,\n respectively, to filter out of the data. 
Thus the uses are:\n\n - l_freq < h_freq: band-pass filter\n - l_freq > h_freq: band-stop filter\n - l_freq is not None, h_freq is None: low-pass filter\n - l_freq is None, h_freq is not None: high-pass filter\n\n If n_jobs > 1, more memory is required as \"len(picks) * n_times\"\n additional time points need to be temporarily stored in memory.\n\n Parameters\n ----------\n info : instance of Info\n Measurement info.\n l_freq : float | None\n Low cut-off frequency in Hz. If None the data are only low-passed.\n h_freq : float | None\n High cut-off frequency in Hz. If None the data are only\n high-passed.\n picks : array-like of int | None\n Indices of channels to filter. If None only the data (MEG/EEG)\n channels will be filtered.\n filter_length : str (Default: '10s') | int | None\n Length of the filter to use. If None or \"len(x) < filter_length\",\n the filter length used is len(x). Otherwise, if int, overlap-add\n filtering with a filter of the specified length in samples) is\n used (faster for long signals). If str, a human-readable time in\n units of \"s\" or \"ms\" (e.g., \"10s\" or \"5500ms\") will be converted\n to the shortest power-of-two length at least that duration.\n l_trans_bandwidth : float\n Width of the transition band at the low cut-off frequency in Hz.\n h_trans_bandwidth : float\n Width of the transition band at the high cut-off frequency in Hz.\n n_jobs : int | str\n Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda\n is installed properly, CUDA is initialized, and method='fft'.\n method : str\n 'fft' will use overlap-add FIR filtering, 'iir' will use IIR\n forward-backward filtering (via filtfilt).\n iir_params : dict | None\n Dictionary of parameters to use for IIR filtering.\n See mne.filter.construct_iir_filter for details. If iir_params\n is None and method=\"iir\", 4th order Butterworth will be used.\n fir_design : str\n Can be \"firwin\" (default in 0.16) to use\n :func:`scipy.signal.firwin`, or \"firwin2\" (default in 0.15 and\n before) to use :func:`scipy.signal.firwin2`. \"firwin\" uses a\n time-domain design technique that generally gives improved\n attenuation using fewer samples than \"firwin2\".\n\n ..versionadded:: 0.15\n verbose : bool, str, int, or None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more). 
Defaults to\n self.verbose.\n\n See Also\n --------\n TemporalFilter\n \"\"\"\n\n def __init__(self, info, l_freq, h_freq, picks=None, filter_length='auto',\n l_trans_bandwidth='auto', h_trans_bandwidth='auto', n_jobs=1,\n method='fft', iir_params=None, fir_design='firwin',\n verbose=None): # noqa: D102\n self.info = info\n self.l_freq = l_freq\n self.h_freq = h_freq\n self.picks = _check_type_picks(picks)\n self.filter_length = filter_length\n self.l_trans_bandwidth = l_trans_bandwidth\n self.h_trans_bandwidth = h_trans_bandwidth\n self.n_jobs = n_jobs\n self.method = method\n self.iir_params = iir_params\n self.fir_design = fir_design\n\n def fit(self, epochs_data, y):\n \"\"\"Filter data.\n\n Parameters\n ----------\n epochs_data : array, shape (n_epochs, n_channels, n_times)\n The data.\n y : array, shape (n_epochs,)\n The label for each epoch.\n\n Returns\n -------\n self : instance of FilterEstimator\n Returns the modified instance\n \"\"\"\n if not isinstance(epochs_data, np.ndarray):\n raise ValueError(\"epochs_data should be of type ndarray (got %s).\"\n % type(epochs_data))\n\n if self.picks is None:\n self.picks = pick_types(self.info, meg=True, eeg=True,\n ref_meg=False, exclude=[])\n\n if self.l_freq == 0:\n self.l_freq = None\n if self.h_freq is not None and self.h_freq > (self.info['sfreq'] / 2.):\n self.h_freq = None\n if self.l_freq is not None and not isinstance(self.l_freq, float):\n self.l_freq = float(self.l_freq)\n if self.h_freq is not None and not isinstance(self.h_freq, float):\n self.h_freq = float(self.h_freq)\n\n if self.info['lowpass'] is None or (self.h_freq is not None and\n (self.l_freq is None or\n self.l_freq < self.h_freq) and\n self.h_freq <\n self.info['lowpass']):\n self.info['lowpass'] = self.h_freq\n\n if self.info['highpass'] is None or (self.l_freq is not None and\n (self.h_freq is None or\n self.l_freq < self.h_freq) and\n self.l_freq >\n self.info['highpass']):\n self.info['highpass'] = self.l_freq\n\n return self\n\n def transform(self, epochs_data):\n \"\"\"Filter data.\n\n Parameters\n ----------\n epochs_data : array, shape (n_epochs, n_channels, n_times)\n The data.\n\n Returns\n -------\n X : array, shape (n_epochs, n_channels, n_times)\n The data after filtering\n \"\"\"\n if not isinstance(epochs_data, np.ndarray):\n raise ValueError(\"epochs_data should be of type ndarray (got %s).\"\n % type(epochs_data))\n epochs_data = np.atleast_3d(epochs_data)\n return filter_data(\n epochs_data, self.info['sfreq'], self.l_freq, self.h_freq,\n self.picks, self.filter_length, self.l_trans_bandwidth,\n self.h_trans_bandwidth, method=self.method,\n iir_params=self.iir_params, n_jobs=self.n_jobs, copy=False,\n fir_design=self.fir_design, verbose=False)\n\n\nclass UnsupervisedSpatialFilter(TransformerMixin, BaseEstimator):\n \"\"\"Use unsupervised spatial filtering across time and samples.\n\n Parameters\n ----------\n estimator : scikit-learn estimator\n Estimator using some decomposition algorithm.\n average : bool, defaults to False\n If True, the estimator is fitted on the average across samples\n (e.g. 
epochs).\n \"\"\"\n\n def __init__(self, estimator, average=False): # noqa: D102\n # XXX: Use _check_estimator #3381\n for attr in ('fit', 'transform', 'fit_transform'):\n if not hasattr(estimator, attr):\n raise ValueError('estimator must be a scikit-learn '\n 'transformer, missing %s method' % attr)\n\n if not isinstance(average, bool):\n raise ValueError(\"average parameter must be of bool type, got \"\n \"%s instead\" % type(bool))\n\n self.estimator = estimator\n self.average = average\n\n def fit(self, X, y=None):\n \"\"\"Fit the spatial filters.\n\n Parameters\n ----------\n X : array, shape (n_epochs, n_channels, n_times)\n The data to be filtered.\n y : None | array, shape (n_samples,)\n Used for scikit-learn compatibility.\n\n Returns\n -------\n self : Instance of UnsupervisedSpatialFilter\n Return the modified instance.\n \"\"\"\n if self.average:\n X = np.mean(X, axis=0).T\n else:\n n_epochs, n_channels, n_times = X.shape\n # trial as time samples\n X = np.transpose(X, (1, 0, 2)).reshape((n_channels, n_epochs *\n n_times)).T\n self.estimator.fit(X)\n return self\n\n def fit_transform(self, X, y=None):\n \"\"\"Transform the data to its filtered components after fitting.\n\n Parameters\n ----------\n X : array, shape (n_epochs, n_channels, n_times)\n The data to be filtered.\n y : None | array, shape (n_samples,)\n Used for scikit-learn compatibility.\n\n Returns\n -------\n X : array, shape (n_epochs, n_channels, n_times)\n The transformed data.\n \"\"\"\n return self.fit(X).transform(X)\n\n def transform(self, X):\n \"\"\"Transform the data to its spatial filters.\n\n Parameters\n ----------\n X : array, shape (n_epochs, n_channels, n_times)\n The data to be filtered.\n\n Returns\n -------\n X : array, shape (n_epochs, n_channels, n_times)\n The transformed data.\n \"\"\"\n return self._apply_method(X, 'transform')\n\n def inverse_transform(self, X):\n \"\"\"Inverse transform the data to its original space.\n\n Parameters\n ----------\n X : array, shape (n_epochs, n_components, n_times)\n The data to be inverted.\n\n Returns\n -------\n X : array, shape (n_epochs, n_channels, n_times)\n The transformed data.\n \"\"\"\n return self._apply_method(X, 'inverse_transform')\n\n def _apply_method(self, X, method):\n \"\"\"Vectorize time samples as trials, apply method and reshape back.\n\n Parameters\n ----------\n X : array, shape (n_epochs, n_dims, n_times)\n The data to be inverted.\n\n Returns\n -------\n X : array, shape (n_epochs, n_dims, n_times)\n The transformed data.\n \"\"\"\n n_epochs, n_channels, n_times = X.shape\n # trial as time samples\n X = np.transpose(X, [1, 0, 2])\n X = np.reshape(X, [n_channels, n_epochs * n_times]).T\n # apply method\n method = getattr(self.estimator, method)\n X = method(X)\n # put it back to n_epochs, n_dimensions\n X = np.reshape(X.T, [-1, n_epochs, n_times]).transpose([1, 0, 2])\n return X\n\n\nclass TemporalFilter(TransformerMixin):\n \"\"\"Estimator to filter data array along the last dimension.\n\n Applies a zero-phase low-pass, high-pass, band-pass, or band-stop\n filter to the channels.\n\n l_freq and h_freq are the frequencies below which and above which,\n respectively, to filter out of the data. Thus the uses are:\n\n - l_freq < h_freq: band-pass filter\n - l_freq > h_freq: band-stop filter\n - l_freq is not None, h_freq is None: low-pass filter\n - l_freq is None, h_freq is not None: high-pass filter\n\n See :func:`mne.filter.filter_data`.\n\n Parameters\n ----------\n l_freq : float | None\n Low cut-off frequency in Hz. 
If None the data are only low-passed.\n h_freq : float | None\n High cut-off frequency in Hz. If None the data are only\n high-passed.\n sfreq : float, defaults to 1.0\n Sampling frequency in Hz.\n filter_length : str | int, defaults to 'auto'\n Length of the FIR filter to use (if applicable):\n\n * int: specified length in samples.\n * 'auto' (default in 0.14): the filter length is chosen based\n on the size of the transition regions (7 times the reciprocal\n of the shortest transition band).\n * str: (default in 0.13 is \"10s\") a human-readable time in\n units of \"s\" or \"ms\" (e.g., \"10s\" or \"5500ms\") will be\n converted to that number of samples if ``phase=\"zero\"``, or\n the shortest power-of-two length at least that duration for\n ``phase=\"zero-double\"``.\n\n l_trans_bandwidth : float | str\n Width of the transition band at the low cut-off frequency in Hz\n (high pass or cutoff 1 in bandpass). Can be \"auto\"\n (default in 0.14) to use a multiple of ``l_freq``::\n\n min(max(l_freq * 0.25, 2), l_freq)\n\n Only used for ``method='fir'``.\n h_trans_bandwidth : float | str\n Width of the transition band at the high cut-off frequency in Hz\n (low pass or cutoff 2 in bandpass). Can be \"auto\"\n (default in 0.14) to use a multiple of ``h_freq``::\n\n min(max(h_freq * 0.25, 2.), info['sfreq'] / 2. - h_freq)\n\n Only used for ``method='fir'``.\n n_jobs : int | str, defaults to 1\n Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda\n is installed properly, CUDA is initialized, and method='fft'.\n method : str, defaults to 'fir'\n 'fir' will use overlap-add FIR filtering, 'iir' will use IIR\n forward-backward filtering (via filtfilt).\n iir_params : dict | None, defaults to None\n Dictionary of parameters to use for IIR filtering.\n See mne.filter.construct_iir_filter for details. If iir_params\n is None and method=\"iir\", 4th order Butterworth will be used.\n fir_window : str, defaults to 'hamming'\n The window to use in FIR design, can be \"hamming\", \"hann\",\n or \"blackman\".\n fir_design : str\n Can be \"firwin\" (default) to use :func:`scipy.signal.firwin`,\n or \"firwin2\" to use :func:`scipy.signal.firwin2`. \"firwin\" uses\n a time-domain design technique that generally gives improved\n attenuation using fewer samples than \"firwin2\".\n\n ..versionadded:: 0.15\n verbose : bool, str, int, or None, defaults to None\n If not None, override default verbose level (see :func:`mne.verbose`\n and :ref:`Logging documentation <tut_logging>` for more). 
Defaults to\n self.verbose.\n\n See Also\n --------\n FilterEstimator\n Vectorizer\n mne.filter.filter_data\n \"\"\"\n\n def __init__(self, l_freq=None, h_freq=None, sfreq=1.0,\n filter_length='auto', l_trans_bandwidth='auto',\n h_trans_bandwidth='auto', n_jobs=1, method='fir',\n iir_params=None, fir_window='hamming', fir_design='firwin',\n verbose=None): # noqa: D102\n self.l_freq = l_freq\n self.h_freq = h_freq\n self.sfreq = sfreq\n self.filter_length = filter_length\n self.l_trans_bandwidth = l_trans_bandwidth\n self.h_trans_bandwidth = h_trans_bandwidth\n self.n_jobs = n_jobs\n self.method = method\n self.iir_params = iir_params\n self.fir_window = fir_window\n self.fir_design = fir_design\n self.verbose = verbose\n\n if not isinstance(self.n_jobs, int) and self.n_jobs == 'cuda':\n raise ValueError('n_jobs must be int or \"cuda\", got %s instead.'\n % type(self.n_jobs))\n\n def fit(self, X, y=None):\n \"\"\"Do nothing (for scikit-learn compatibility purposes).\n\n Parameters\n ----------\n X : array, shape (n_epochs, n_channels, n_times) or or shape (n_channels, n_times) # noqa\n The data to be filtered over the last dimension. The channels\n dimension can be zero when passing a 2D array.\n y : None\n Not used, for scikit-learn compatibility issues.\n\n Returns\n -------\n self : instance of Filterer\n Returns the modified instance.\n \"\"\"\n return self\n\n def transform(self, X):\n \"\"\"Filter data along the last dimension.\n\n Parameters\n ----------\n X : array, shape (n_epochs, n_channels, n_times) or shape (n_channels, n_times) # noqa\n The data to be filtered over the last dimension. The channels\n dimension can be zero when passing a 2D array.\n\n Returns\n -------\n X : array, shape is same as used in input.\n The data after filtering.\n \"\"\"\n X = np.atleast_2d(X)\n\n if X.ndim > 3:\n raise ValueError(\"Array must be of at max 3 dimensions instead \"\n \"got %s dimensional matrix\" % (X.ndim))\n\n shape = X.shape\n X = X.reshape(-1, shape[-1])\n (X, self.sfreq, self.l_freq, self.h_freq, self.l_trans_bandwidth,\n self.h_trans_bandwidth, self.filter_length, _, self.fir_window,\n self.fir_design) = \\\n _triage_filter_params(X, self.sfreq, self.l_freq, self.h_freq,\n self.l_trans_bandwidth,\n self.h_trans_bandwidth, self.filter_length,\n self.method, phase='zero',\n fir_window=self.fir_window,\n fir_design=self.fir_design)\n X = filter_data(X, self.sfreq, self.l_freq, self.h_freq,\n filter_length=self.filter_length,\n l_trans_bandwidth=self.l_trans_bandwidth,\n h_trans_bandwidth=self.h_trans_bandwidth,\n n_jobs=self.n_jobs, method=self.method,\n iir_params=self.iir_params, copy=False,\n fir_window=self.fir_window, fir_design=self.fir_design,\n verbose=self.verbose)\n return X.reshape(shape)\n"
] |
[
[
"numpy.asarray",
"numpy.reshape",
"sklearn.preprocessing.RobustScaler",
"numpy.atleast_2d",
"numpy.atleast_3d",
"numpy.zeros_like",
"numpy.mean",
"numpy.transpose",
"sklearn.preprocessing.StandardScaler"
]
] |
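Several of the transformers in the file above are thin wrappers around a reshape; the Vectorizer, for instance, only records the trailing feature shape in fit() and flattens/unflattens around it. The round trip can be mirrored with plain NumPy (a sketch of the same reshape logic, not a call into mne):

    import numpy as np

    X = np.random.randn(10, 5, 20)                        # (n_epochs, n_channels, n_times)
    features_shape = X.shape[1:]                          # what Vectorizer.fit() stores
    X2d = X.reshape(len(X), -1)                           # Vectorizer.transform(): shape (10, 100)
    X_back = X2d.reshape((len(X2d),) + features_shape)    # Vectorizer.inverse_transform()
    assert np.array_equal(X, X_back)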
timothyfisherphd/CRISPR_Cancer_Chromatin_State_Activity
|
[
"91cbd8519baaeccab404574d61e21dbf0ea1f26f"
] |
[
"main02_ceres_data.py"
] |
[
"## Generating Ceres Data\nfrom collections import defaultdict\nimport pandas as pd\n\nmainDicticionary=defaultdict(list)\nstateDictionary=defaultdict(list)\ncountScoreDictionary=defaultdict(int)\nsumScoreDictionary=defaultdict(int)\nmeanScoreDictionary=defaultdict(int)\n\nn = 0\nwith open('/Users/timothyfisher/Desktop/Ernst_Lab/UNIX/Updated_Dataset/ceres.overlapsComparsionValues.tab.bed', 'r') as dictList:\n for line in dictList:\n chromosome, start, end, state, score, strand, signal, end2, color = line.strip().split()\n score = float(score)\n \n stateDictionary[state].append(score)\n \n n += 1\n \nwith open('/Users/timothyfisher/Desktop/Ernst_Lab/UNIX/Updated_Dataset/ceres.overlapsComparsionValues.tab.bed', 'w') as outfile:\n for state in stateDictionary:\n countScoreDictionary[state] = len(stateDictionary[state])\n sumScoreDictionary[state]= sum(stateDictionary[state])\n meanScoreDictionary[state]= sumScoreDictionary[state]/countScoreDictionary[state]\n mainDicticionary[state].append(stateDictionary)\n mainDicticionary[state].append(countScoreDictionary)\n mainDicticionary[state].append(sumScoreDictionary)\n mainDicticionary[state].append(meanScoreDictionary)\n outfile.write(state+','+str(meanScoreDictionary[state])+'\\n')\n \nprint(countScoreDictionary.items())\n\n\nimport numpy as np\nwith open('ceres_std_errs.csv','w') as f:\n for state, l in stateDictionary.items():\n print('{}\\t{}'.format(state,np.std(l)), file=f)\n\n\nimport numpy as np\nwith open('ceres_length.csv','w') as f:\n for state in countScoreDictionary.items():\n print('{}\\t'.format(state), file=f)\n"
] |
[
[
"numpy.std"
]
] |
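The per-state count/sum/mean/std bookkeeping in the script above can also be expressed with a pandas groupby (the script imports pandas but does not use it). A hedged alternative sketch; the column names and output paths below are illustrative, matching the fields the script unpacks from each .bed line:

    import pandas as pd

    cols = ["chromosome", "start", "end", "state", "score",
            "strand", "signal", "end2", "color"]
    df = pd.read_csv("ceres.overlapsComparsionValues.tab.bed",
                     sep=r"\s+", header=None, names=cols)
    summary = df.groupby("state")["score"].agg(["count", "sum", "mean", "std"])
    summary["mean"].to_csv("ceres_state_means.csv", header=False)
    # Note: pandas .std() uses ddof=1, whereas the script's np.std(l) defaults to ddof=0.
    summary["std"].to_csv("ceres_std_errs.csv", sep="\t", header=False)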
AnesBenmerzoug/ray
|
[
"5921e87ecd4e359fad60dab55f45855456d591e5"
] |
[
"rllib/agents/trainer.py"
] |
[
"from datetime import datetime\nimport numpy as np\nimport copy\nimport logging\nimport math\nimport os\nimport pickle\nimport time\nimport tempfile\nfrom typing import Callable, Dict, List, Optional, Type, Union\n\nimport ray\nfrom ray.exceptions import RayError\nfrom ray.rllib.agents.callbacks import DefaultCallbacks\nfrom ray.rllib.env.normalize_actions import NormalizeActionWrapper\nfrom ray.rllib.env.env_context import EnvContext\nfrom ray.rllib.models import MODEL_DEFAULTS\nfrom ray.rllib.policy import Policy\nfrom ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID\nfrom ray.rllib.evaluation.metrics import collect_metrics\nfrom ray.rllib.evaluation.worker_set import WorkerSet\nfrom ray.rllib.utils import FilterManager, deep_update, merge_dicts\nfrom ray.rllib.utils.spaces import space_utils\nfrom ray.rllib.utils.framework import try_import_tf, TensorStructType\nfrom ray.rllib.utils.annotations import override, PublicAPI, DeveloperAPI\nfrom ray.rllib.utils.from_config import from_config\nfrom ray.rllib.utils.typing import TrainerConfigDict, \\\n PartialTrainerConfigDict, EnvInfoDict, ResultDict, EnvType, PolicyID\nfrom ray.tune.registry import ENV_CREATOR, register_env, _global_registry\nfrom ray.tune.trainable import Trainable\nfrom ray.tune.trial import ExportFormat\nfrom ray.tune.resources import Resources\nfrom ray.tune.logger import Logger, UnifiedLogger\nfrom ray.tune.result import DEFAULT_RESULTS_DIR\n\ntf1, tf, tfv = try_import_tf()\n\nlogger = logging.getLogger(__name__)\n\n# Max number of times to retry a worker failure. We shouldn't try too many\n# times in a row since that would indicate a persistent cluster issue.\nMAX_WORKER_FAILURE_RETRIES = 3\n\n# yapf: disable\n# __sphinx_doc_begin__\nCOMMON_CONFIG: TrainerConfigDict = {\n # === Settings for Rollout Worker processes ===\n # Number of rollout worker actors to create for parallel sampling. Setting\n # this to 0 will force rollouts to be done in the trainer actor.\n \"num_workers\": 2,\n # Number of environments to evaluate vectorwise per worker. This enables\n # model inference batching, which can improve performance for inference\n # bottlenecked workloads.\n \"num_envs_per_worker\": 1,\n # Divide episodes into fragments of this many steps each during rollouts.\n # Sample batches of this size are collected from rollout workers and\n # combined into a larger batch of `train_batch_size` for learning.\n #\n # For example, given rollout_fragment_length=100 and train_batch_size=1000:\n # 1. RLlib collects 10 fragments of 100 steps each from rollout workers.\n # 2. These fragments are concatenated and we perform an epoch of SGD.\n #\n # When using multiple envs per worker, the fragment size is multiplied by\n # `num_envs_per_worker`. This is since we are collecting steps from\n # multiple envs in parallel. For example, if num_envs_per_worker=5, then\n # rollout workers will return experiences in chunks of 5*100 = 500 steps.\n #\n # The dataflow here can vary per algorithm. For example, PPO further\n # divides the train batch into minibatches for multi-epoch SGD.\n \"rollout_fragment_length\": 200,\n # Whether to rollout \"complete_episodes\" or \"truncate_episodes\" to\n # `rollout_fragment_length` length unrolls. Episode truncation guarantees\n # evenly sized batches, but increases variance as the reward-to-go will\n # need to be estimated at truncation boundaries.\n \"batch_mode\": \"truncate_episodes\",\n\n # === Settings for the Trainer process ===\n # Number of GPUs to allocate to the trainer process. 
Note that not all\n # algorithms can take advantage of trainer GPUs. This can be fractional\n # (e.g., 0.3 GPUs).\n \"num_gpus\": 0,\n # Training batch size, if applicable. Should be >= rollout_fragment_length.\n # Samples batches will be concatenated together to a batch of this size,\n # which is then passed to SGD.\n \"train_batch_size\": 200,\n # Arguments to pass to the policy model. See models/catalog.py for a full\n # list of the available model options.\n \"model\": MODEL_DEFAULTS,\n # Arguments to pass to the policy optimizer. These vary by optimizer.\n \"optimizer\": {},\n\n # === Environment Settings ===\n # Discount factor of the MDP.\n \"gamma\": 0.99,\n # Number of steps after which the episode is forced to terminate. Defaults\n # to `env.spec.max_episode_steps` (if present) for Gym envs.\n \"horizon\": None,\n # Calculate rewards but don't reset the environment when the horizon is\n # hit. This allows value estimation and RNN state to span across logical\n # episodes denoted by horizon. This only has an effect if horizon != inf.\n \"soft_horizon\": False,\n # Don't set 'done' at the end of the episode. Note that you still need to\n # set this if soft_horizon=True, unless your env is actually running\n # forever without returning done=True.\n \"no_done_at_end\": False,\n # Arguments to pass to the env creator.\n \"env_config\": {},\n # Environment name can also be passed via config.\n \"env\": None,\n # Unsquash actions to the upper and lower bounds of env's action space\n \"normalize_actions\": False,\n # Whether to clip rewards during Policy's postprocessing.\n # None (default): Clip for Atari only (r=sign(r)).\n # True: r=sign(r): Fixed rewards -1.0, 1.0, or 0.0.\n # False: Never clip.\n # [float value]: Clip at -value and + value.\n # Tuple[value1, value2]: Clip at value1 and value2.\n \"clip_rewards\": None,\n # Whether to clip actions to the action space's low/high range spec.\n \"clip_actions\": True,\n # Whether to use \"rllib\" or \"deepmind\" preprocessors by default\n \"preprocessor_pref\": \"deepmind\",\n # The default learning rate.\n \"lr\": 0.0001,\n\n # === Debug Settings ===\n # Whether to write episode stats and videos to the agent log dir. This is\n # typically located in ~/ray_results.\n \"monitor\": False,\n # Set the ray.rllib.* log level for the agent process and its workers.\n # Should be one of DEBUG, INFO, WARN, or ERROR. The DEBUG level will also\n # periodically print out summaries of relevant internal dataflow (this is\n # also printed out once at startup at the INFO level). When using the\n # `rllib train` command, you can also use the `-v` and `-vv` flags as\n # shorthand for INFO and DEBUG.\n \"log_level\": \"WARN\",\n # Callbacks that will be run during various phases of training. See the\n # `DefaultCallbacks` class and `examples/custom_metrics_and_callbacks.py`\n # for more usage information.\n \"callbacks\": DefaultCallbacks,\n # Whether to attempt to continue training if a worker crashes. The number\n # of currently healthy workers is reported as the \"num_healthy_workers\"\n # metric.\n \"ignore_worker_failures\": False,\n # Log system resource metrics to results. This requires `psutil` to be\n # installed for sys stats, and `gputil` for GPU metrics.\n \"log_sys_usage\": True,\n # Use fake (infinite speed) sampler. For testing only.\n \"fake_sampler\": False,\n\n # === Deep Learning Framework Settings ===\n # tf: TensorFlow\n # tfe: TensorFlow eager\n # torch: PyTorch\n \"framework\": \"tf\",\n # Enable tracing in eager mode. 
This greatly improves performance, but\n # makes it slightly harder to debug since Python code won't be evaluated\n # after the initial eager pass. Only possible if framework=tfe.\n \"eager_tracing\": False,\n\n # === Exploration Settings ===\n # Default exploration behavior, iff `explore`=None is passed into\n # compute_action(s).\n # Set to False for no exploration behavior (e.g., for evaluation).\n \"explore\": True,\n # Provide a dict specifying the Exploration object's config.\n \"exploration_config\": {\n # The Exploration class to use. In the simplest case, this is the name\n # (str) of any class present in the `rllib.utils.exploration` package.\n # You can also provide the python class directly or the full location\n # of your class (e.g. \"ray.rllib.utils.exploration.epsilon_greedy.\n # EpsilonGreedy\").\n \"type\": \"StochasticSampling\",\n # Add constructor kwargs here (if any).\n },\n # === Evaluation Settings ===\n # Evaluate with every `evaluation_interval` training iterations.\n # The evaluation stats will be reported under the \"evaluation\" metric key.\n # Note that evaluation is currently not parallelized, and that for Ape-X\n # metrics are already only reported for the lowest epsilon workers.\n \"evaluation_interval\": None,\n # Number of episodes to run per evaluation period. If using multiple\n # evaluation workers, we will run at least this many episodes total.\n \"evaluation_num_episodes\": 10,\n # Internal flag that is set to True for evaluation workers.\n \"in_evaluation\": False,\n # Typical usage is to pass extra args to evaluation env creator\n # and to disable exploration by computing deterministic actions.\n # IMPORTANT NOTE: Policy gradient algorithms are able to find the optimal\n # policy, even if this is a stochastic one. Setting \"explore=False\" here\n # will result in the evaluation workers not using this optimal policy!\n \"evaluation_config\": {\n # Example: overriding env_config, exploration, etc:\n # \"env_config\": {...},\n # \"explore\": False\n },\n # Number of parallel workers to use for evaluation. Note that this is set\n # to zero by default, which means evaluation will be run in the trainer\n # process. If you increase this, it will increase the Ray resource usage\n # of the trainer since evaluation workers are created separately from\n # rollout workers.\n \"evaluation_num_workers\": 0,\n # Customize the evaluation method. This must be a function of signature\n # (trainer: Trainer, eval_workers: WorkerSet) -> metrics: dict. See the\n # Trainer._evaluate() method to see the default implementation. 
The\n # trainer guarantees all eval workers have the latest policy state before\n # this function is called.\n \"custom_eval_function\": None,\n\n # === Advanced Rollout Settings ===\n # Use a background thread for sampling (slightly off-policy, usually not\n # advisable to turn on unless your env specifically requires it).\n \"sample_async\": False,\n\n # Experimental flag to speed up sampling and use \"trajectory views\" as\n # generic ModelV2 `input_dicts` that can be requested by the model to\n # contain different information on the ongoing episode.\n # NOTE: Only supported for PyTorch so far.\n \"_use_trajectory_view_api\": False,\n\n # Element-wise observation filter, either \"NoFilter\" or \"MeanStdFilter\".\n \"observation_filter\": \"NoFilter\",\n # Whether to synchronize the statistics of remote filters.\n \"synchronize_filters\": True,\n # Configures TF for single-process operation by default.\n \"tf_session_args\": {\n # note: overriden by `local_tf_session_args`\n \"intra_op_parallelism_threads\": 2,\n \"inter_op_parallelism_threads\": 2,\n \"gpu_options\": {\n \"allow_growth\": True,\n },\n \"log_device_placement\": False,\n \"device_count\": {\n \"CPU\": 1\n },\n \"allow_soft_placement\": True, # required by PPO multi-gpu\n },\n # Override the following tf session args on the local worker\n \"local_tf_session_args\": {\n # Allow a higher level of parallelism by default, but not unlimited\n # since that can cause crashes with many concurrent drivers.\n \"intra_op_parallelism_threads\": 8,\n \"inter_op_parallelism_threads\": 8,\n },\n # Whether to LZ4 compress individual observations\n \"compress_observations\": False,\n # Wait for metric batches for at most this many seconds. Those that\n # have not returned in time will be collected in the next train iteration.\n \"collect_metrics_timeout\": 180,\n # Smooth metrics over this many episodes.\n \"metrics_smoothing_episodes\": 100,\n # If using num_envs_per_worker > 1, whether to create those new envs in\n # remote processes instead of in the same worker. This adds overheads, but\n # can make sense if your envs can take much time to step / reset\n # (e.g., for StarCraft). Use this cautiously; overheads are significant.\n \"remote_worker_envs\": False,\n # Timeout that remote workers are waiting when polling environments.\n # 0 (continue when at least one env is ready) is a reasonable default,\n # but optimal value could be obtained by measuring your environment\n # step / reset and model inference perf.\n \"remote_env_batch_wait_ms\": 0,\n # Minimum time per train iteration (frequency of metrics reporting).\n \"min_iter_time_s\": 0,\n # Minimum env steps to optimize for per train call. This value does\n # not affect learning, only the length of train iterations.\n \"timesteps_per_iteration\": 0,\n # This argument, in conjunction with worker_index, sets the random seed of\n # each worker, so that identically configured trials will have identical\n # results. This makes experiments reproducible.\n \"seed\": None,\n # Any extra python env vars to set in the trainer process, e.g.,\n # {\"OMP_NUM_THREADS\": \"16\"}\n \"extra_python_environs_for_driver\": {},\n # The extra python environments need to set for worker processes.\n \"extra_python_environs_for_worker\": {},\n\n # === Advanced Resource Settings ===\n # Number of CPUs to allocate per worker.\n \"num_cpus_per_worker\": 1,\n # Number of GPUs to allocate per worker. This can be fractional. 
This is\n # usually needed only if your env itself requires a GPU (i.e., it is a\n # GPU-intensive video game), or model inference is unusually expensive.\n \"num_gpus_per_worker\": 0,\n # Any custom Ray resources to allocate per worker.\n \"custom_resources_per_worker\": {},\n # Number of CPUs to allocate for the trainer. Note: this only takes effect\n # when running in Tune. Otherwise, the trainer runs in the main program.\n \"num_cpus_for_driver\": 1,\n # You can set these memory quotas to tell Ray to reserve memory for your\n # training run. This guarantees predictable execution, but the tradeoff is\n # if your workload exceeeds the memory quota it will fail.\n # Heap memory to reserve for the trainer process (0 for unlimited). This\n # can be large if your are using large train batches, replay buffers, etc.\n \"memory\": 0,\n # Object store memory to reserve for the trainer process. Being large\n # enough to fit a few copies of the model weights should be sufficient.\n # This is enabled by default since models are typically quite small.\n \"object_store_memory\": 0,\n # Heap memory to reserve for each worker. Should generally be small unless\n # your environment is very heavyweight.\n \"memory_per_worker\": 0,\n # Object store memory to reserve for each worker. This only needs to be\n # large enough to fit a few sample batches at a time. This is enabled\n # by default since it almost never needs to be larger than ~200MB.\n \"object_store_memory_per_worker\": 0,\n\n # === Offline Datasets ===\n # Specify how to generate experiences:\n # - \"sampler\": generate experiences via online simulation (default)\n # - a local directory or file glob expression (e.g., \"/tmp/*.json\")\n # - a list of individual file paths/URIs (e.g., [\"/tmp/1.json\",\n # \"s3://bucket/2.json\"])\n # - a dict with string keys and sampling probabilities as values (e.g.,\n # {\"sampler\": 0.4, \"/tmp/*.json\": 0.4, \"s3://bucket/expert.json\": 0.2}).\n # - a function that returns a rllib.offline.InputReader\n \"input\": \"sampler\",\n # Specify how to evaluate the current policy. This only has an effect when\n # reading offline experiences. Available options:\n # - \"wis\": the weighted step-wise importance sampling estimator.\n # - \"is\": the step-wise importance sampling estimator.\n # - \"simulation\": run the environment in the background, but use\n # this data for evaluation only and not for learning.\n \"input_evaluation\": [\"is\", \"wis\"],\n # Whether to run postprocess_trajectory() on the trajectory fragments from\n # offline inputs. Note that postprocessing will be done using the *current*\n # policy, not the *behavior* policy, which is typically undesirable for\n # on-policy algorithms.\n \"postprocess_inputs\": False,\n # If positive, input batches will be shuffled via a sliding window buffer\n # of this number of batches. Use this if the input data is not in random\n # enough order. 
Input is delayed until the shuffle buffer is filled.\n \"shuffle_buffer_size\": 0,\n # Specify where experiences should be saved:\n # - None: don't save any experiences\n # - \"logdir\" to save to the agent log dir\n # - a path/URI to save to a custom output directory (e.g., \"s3://bucket/\")\n # - a function that returns a rllib.offline.OutputWriter\n \"output\": None,\n # What sample batch columns to LZ4 compress in the output data.\n \"output_compress_columns\": [\"obs\", \"new_obs\"],\n # Max output file size before rolling over to a new file.\n \"output_max_file_size\": 64 * 1024 * 1024,\n\n # === Settings for Multi-Agent Environments ===\n \"multiagent\": {\n # Map of type MultiAgentPolicyConfigDict from policy ids to tuples\n # of (policy_cls, obs_space, act_space, config). This defines the\n # observation and action spaces of the policies and any extra config.\n \"policies\": {},\n # Function mapping agent ids to policy ids.\n \"policy_mapping_fn\": None,\n # Optional list of policies to train, or None for all policies.\n \"policies_to_train\": None,\n # Optional function that can be used to enhance the local agent\n # observations to include more state.\n # See rllib/evaluation/observation_function.py for more info.\n \"observation_fn\": None,\n # When replay_mode=lockstep, RLlib will replay all the agent\n # transitions at a particular timestep together in a batch. This allows\n # the policy to implement differentiable shared computations between\n # agents it controls at that timestep. When replay_mode=independent,\n # transitions are replayed independently per policy.\n \"replay_mode\": \"independent\",\n },\n\n # === Logger ===\n # Define logger-specific configuration to be used inside Logger\n # Default value None allows overwriting with nested dicts\n \"logger_config\": None,\n\n # === Replay Settings ===\n # The number of contiguous environment steps to replay at once. 
This may\n # be set to greater than 1 to support recurrent models.\n \"replay_sequence_length\": 1,\n}\n# __sphinx_doc_end__\n# yapf: enable\n\n\n@DeveloperAPI\ndef with_common_config(\n extra_config: PartialTrainerConfigDict) -> TrainerConfigDict:\n \"\"\"Returns the given config dict merged with common agent confs.\n\n Args:\n extra_config (PartialTrainerConfigDict): A user defined partial config\n which will get merged with COMMON_CONFIG and returned.\n\n Returns:\n TrainerConfigDict: The merged config dict resulting of COMMON_CONFIG\n plus `extra_config`.\n \"\"\"\n return Trainer.merge_trainer_configs(\n COMMON_CONFIG, extra_config, _allow_unknown_configs=True)\n\n\n@PublicAPI\nclass Trainer(Trainable):\n \"\"\"A trainer coordinates the optimization of one or more RL policies.\n\n All RLlib trainers extend this base class, e.g., the A3CTrainer implements\n the A3C algorithm for single and multi-agent training.\n\n Trainer objects retain internal model state between calls to train(), so\n you should create a new trainer instance for each training session.\n\n Attributes:\n env_creator (func): Function that creates a new training env.\n config (obj): Algorithm-specific configuration data.\n logdir (str): Directory in which training outputs should be placed.\n \"\"\"\n # Whether to allow unknown top-level config keys.\n _allow_unknown_configs = False\n\n # List of top-level keys with value=dict, for which new sub-keys are\n # allowed to be added to the value dict.\n _allow_unknown_subkeys = [\n \"tf_session_args\", \"local_tf_session_args\", \"env_config\", \"model\",\n \"optimizer\", \"multiagent\", \"custom_resources_per_worker\",\n \"evaluation_config\", \"exploration_config\",\n \"extra_python_environs_for_driver\", \"extra_python_environs_for_worker\"\n ]\n\n # List of top level keys with value=dict, for which we always override the\n # entire value (dict), iff the \"type\" key in that value dict changes.\n _override_all_subkeys_if_type_changes = [\"exploration_config\"]\n\n @PublicAPI\n def __init__(self,\n config: TrainerConfigDict = None,\n env: str = None,\n logger_creator: Callable[[], Logger] = None):\n \"\"\"Initialize an RLLib trainer.\n\n Args:\n config (dict): Algorithm-specific configuration data.\n env (str): Name of the environment to use. Note that this can also\n be specified as the `env` key in config.\n logger_creator (func): Function that creates a ray.tune.Logger\n object. If unspecified, a default logger is created.\n \"\"\"\n\n # User provided config (this is w/o the default Trainer's\n # `COMMON_CONFIG` (see above)). 
Will get merged with COMMON_CONFIG\n # in self.setup().\n config = config or {}\n\n # Vars to synchronize to workers on each train call\n self.global_vars = {\"timestep\": 0}\n\n # Trainers allow env ids to be passed directly to the constructor.\n self._env_id = self._register_if_needed(env or config.get(\"env\"))\n\n # Create a default logger creator if no logger_creator is specified\n if logger_creator is None:\n timestr = datetime.today().strftime(\"%Y-%m-%d_%H-%M-%S\")\n logdir_prefix = \"{}_{}_{}\".format(self._name, self._env_id,\n timestr)\n\n def default_logger_creator(config):\n \"\"\"Creates a Unified logger with a default logdir prefix\n containing the agent name and the env id\n \"\"\"\n if not os.path.exists(DEFAULT_RESULTS_DIR):\n os.makedirs(DEFAULT_RESULTS_DIR)\n logdir = tempfile.mkdtemp(\n prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)\n return UnifiedLogger(config, logdir, loggers=None)\n\n logger_creator = default_logger_creator\n\n super().__init__(config, logger_creator)\n\n @classmethod\n @override(Trainable)\n def default_resource_request(\n cls, config: PartialTrainerConfigDict) -> Resources:\n cf = dict(cls._default_config, **config)\n Trainer._validate_config(cf)\n num_workers = cf[\"num_workers\"] + cf[\"evaluation_num_workers\"]\n # TODO(ekl): add custom resources here once tune supports them\n return Resources(\n cpu=cf[\"num_cpus_for_driver\"],\n gpu=cf[\"num_gpus\"],\n memory=cf[\"memory\"],\n object_store_memory=cf[\"object_store_memory\"],\n extra_cpu=cf[\"num_cpus_per_worker\"] * num_workers,\n extra_gpu=cf[\"num_gpus_per_worker\"] * num_workers,\n extra_memory=cf[\"memory_per_worker\"] * num_workers,\n extra_object_store_memory=cf[\"object_store_memory_per_worker\"] *\n num_workers)\n\n @override(Trainable)\n @PublicAPI\n def train(self) -> ResultDict:\n \"\"\"Overrides super.train to synchronize global vars.\"\"\"\n\n result = None\n for _ in range(1 + MAX_WORKER_FAILURE_RETRIES):\n try:\n result = Trainable.train(self)\n except RayError as e:\n if self.config[\"ignore_worker_failures\"]:\n logger.exception(\n \"Error in train call, attempting to recover\")\n self._try_recover()\n else:\n logger.info(\n \"Worker crashed during call to train(). 
To attempt to \"\n \"continue training without the failed worker, set \"\n \"`'ignore_worker_failures': True`.\")\n raise e\n except Exception as e:\n time.sleep(0.5) # allow logs messages to propagate\n raise e\n else:\n break\n if result is None:\n raise RuntimeError(\"Failed to recover from worker crash\")\n\n if hasattr(self, \"workers\") and isinstance(self.workers, WorkerSet):\n self._sync_filters_if_needed(self.workers)\n\n if self.config[\"evaluation_interval\"] == 1 or (\n self._iteration > 0 and self.config[\"evaluation_interval\"]\n and self._iteration % self.config[\"evaluation_interval\"] == 0):\n evaluation_metrics = self._evaluate()\n assert isinstance(evaluation_metrics, dict), \\\n \"_evaluate() needs to return a dict.\"\n result.update(evaluation_metrics)\n\n return result\n\n def _sync_filters_if_needed(self, workers: WorkerSet):\n if self.config.get(\"observation_filter\", \"NoFilter\") != \"NoFilter\":\n FilterManager.synchronize(\n workers.local_worker().filters,\n workers.remote_workers(),\n update_remote=self.config[\"synchronize_filters\"])\n logger.debug(\"synchronized filters: {}\".format(\n workers.local_worker().filters))\n\n @override(Trainable)\n def log_result(self, result: ResultDict):\n self.callbacks.on_train_result(trainer=self, result=result)\n # log after the callback is invoked, so that the user has a chance\n # to mutate the result\n Trainable.log_result(self, result)\n\n @override(Trainable)\n def setup(self, config: PartialTrainerConfigDict):\n env = self._env_id\n if env:\n config[\"env\"] = env\n # An already registered env.\n if _global_registry.contains(ENV_CREATOR, env):\n self.env_creator = _global_registry.get(ENV_CREATOR, env)\n # A class specifier.\n elif \".\" in env:\n self.env_creator = \\\n lambda env_config: from_config(env, env_config)\n # Try gym.\n else:\n import gym # soft dependency\n self.env_creator = \\\n lambda env_config: gym.make(env, **env_config)\n else:\n self.env_creator = lambda env_config: None\n\n # Merge the supplied config with the class default, but store the\n # user-provided one.\n self.raw_user_config = config\n self.config = Trainer.merge_trainer_configs(self._default_config,\n config)\n\n # Check and resolve DL framework settings.\n # Enable eager/tracing support.\n if tf1 and self.config[\"framework\"] in [\"tf2\", \"tfe\"]:\n if self.config[\"framework\"] == \"tf2\" and tfv < 2:\n raise ValueError(\"`framework`=tf2, but tf-version is < 2.0!\")\n if not tf1.executing_eagerly():\n tf1.enable_eager_execution()\n logger.info(\"Executing eagerly, with eager_tracing={}\".format(\n self.config[\"eager_tracing\"]))\n if tf1 and not tf1.executing_eagerly() and \\\n self.config[\"framework\"] != \"torch\":\n logger.info(\"Tip: set framework=tfe or the --eager flag to enable \"\n \"TensorFlow eager execution\")\n\n if self.config[\"normalize_actions\"]:\n inner = self.env_creator\n\n def normalize(env):\n import gym # soft dependency\n if not isinstance(env, gym.Env):\n raise ValueError(\n \"Cannot apply NormalizeActionActionWrapper to env of \"\n \"type {}, which does not subclass gym.Env.\", type(env))\n return NormalizeActionWrapper(env)\n\n self.env_creator = lambda env_config: normalize(inner(env_config))\n\n Trainer._validate_config(self.config)\n if not callable(self.config[\"callbacks\"]):\n raise ValueError(\n \"`callbacks` must be a callable method that \"\n \"returns a subclass of DefaultCallbacks, got {}\".format(\n self.config[\"callbacks\"]))\n self.callbacks = self.config[\"callbacks\"]()\n log_level = 
self.config.get(\"log_level\")\n if log_level in [\"WARN\", \"ERROR\"]:\n logger.info(\"Current log_level is {}. For more information, \"\n \"set 'log_level': 'INFO' / 'DEBUG' or use the -v and \"\n \"-vv flags.\".format(log_level))\n if self.config.get(\"log_level\"):\n logging.getLogger(\"ray.rllib\").setLevel(self.config[\"log_level\"])\n\n def get_scope():\n if tf1 and not tf1.executing_eagerly():\n return tf1.Graph().as_default()\n else:\n return open(os.devnull) # fake a no-op scope\n\n with get_scope():\n self._init(self.config, self.env_creator)\n\n # Evaluation setup.\n if self.config.get(\"evaluation_interval\"):\n # Update env_config with evaluation settings:\n extra_config = copy.deepcopy(self.config[\"evaluation_config\"])\n # Assert that user has not unset \"in_evaluation\".\n assert \"in_evaluation\" not in extra_config or \\\n extra_config[\"in_evaluation\"] is True\n extra_config.update({\n \"batch_mode\": \"complete_episodes\",\n \"rollout_fragment_length\": 1,\n \"in_evaluation\": True,\n })\n logger.debug(\n \"using evaluation_config: {}\".format(extra_config))\n\n self.evaluation_workers = self._make_workers(\n self.env_creator,\n self._policy_class,\n merge_dicts(self.config, extra_config),\n num_workers=self.config[\"evaluation_num_workers\"])\n self.evaluation_metrics = {}\n\n @override(Trainable)\n def cleanup(self):\n if hasattr(self, \"workers\"):\n self.workers.stop()\n if hasattr(self, \"optimizer\") and self.optimizer:\n self.optimizer.stop()\n\n @override(Trainable)\n def save_checkpoint(self, checkpoint_dir: str) -> str:\n checkpoint_path = os.path.join(checkpoint_dir,\n \"checkpoint-{}\".format(self.iteration))\n pickle.dump(self.__getstate__(), open(checkpoint_path, \"wb\"))\n\n return checkpoint_path\n\n @override(Trainable)\n def load_checkpoint(self, checkpoint_path: str):\n extra_data = pickle.load(open(checkpoint_path, \"rb\"))\n self.__setstate__(extra_data)\n\n @DeveloperAPI\n def _make_workers(self, env_creator: Callable[[EnvContext], EnvType],\n policy_class: Type[Policy], config: TrainerConfigDict,\n num_workers: int) -> WorkerSet:\n \"\"\"Default factory method for a WorkerSet running under this Trainer.\n\n Override this method by passing a custom `make_workers` into\n `build_trainer`.\n\n Args:\n env_creator (callable): A function that return and Env given an env\n config.\n policy (Type[Policy]): The Policy class to use for creating the\n policies of the workers.\n config (TrainerConfigDict): The Trainer's config.\n num_workers (int): Number of remote rollout workers to create.\n 0 for local only.\n\n Returns:\n WorkerSet: The created WorkerSet.\n \"\"\"\n return WorkerSet(\n env_creator=env_creator,\n policy_class=policy_class,\n trainer_config=config,\n num_workers=num_workers,\n logdir=self.logdir)\n\n @DeveloperAPI\n def _init(self, config: TrainerConfigDict,\n env_creator: Callable[[EnvContext], EnvType]):\n \"\"\"Subclasses should override this for custom initialization.\"\"\"\n raise NotImplementedError\n\n @DeveloperAPI\n def _evaluate(self) -> dict:\n \"\"\"Evaluates current policy under `evaluation_config` settings.\n\n Note that this default implementation does not do anything beyond\n merging evaluation_config with the normal trainer config.\n \"\"\"\n self._before_evaluate()\n\n # Broadcast the new policy weights to all evaluation workers.\n logger.info(\"Synchronizing weights to evaluation workers.\")\n weights = ray.put(self.workers.local_worker().save())\n self.evaluation_workers.foreach_worker(\n lambda w: 
w.restore(ray.get(weights)))\n self._sync_filters_if_needed(self.evaluation_workers)\n\n if self.config[\"custom_eval_function\"]:\n logger.info(\"Running custom eval function {}\".format(\n self.config[\"custom_eval_function\"]))\n metrics = self.config[\"custom_eval_function\"](\n self, self.evaluation_workers)\n if not metrics or not isinstance(metrics, dict):\n raise ValueError(\"Custom eval function must return \"\n \"dict of metrics, got {}.\".format(metrics))\n else:\n logger.info(\"Evaluating current policy for {} episodes.\".format(\n self.config[\"evaluation_num_episodes\"]))\n if self.config[\"evaluation_num_workers\"] == 0:\n for _ in range(self.config[\"evaluation_num_episodes\"]):\n self.evaluation_workers.local_worker().sample()\n else:\n num_rounds = int(\n math.ceil(self.config[\"evaluation_num_episodes\"] /\n self.config[\"evaluation_num_workers\"]))\n num_workers = len(self.evaluation_workers.remote_workers())\n num_episodes = num_rounds * num_workers\n for i in range(num_rounds):\n logger.info(\"Running round {} of parallel evaluation \"\n \"({}/{} episodes)\".format(\n i, (i + 1) * num_workers, num_episodes))\n ray.get([\n w.sample.remote()\n for w in self.evaluation_workers.remote_workers()\n ])\n\n metrics = collect_metrics(self.evaluation_workers.local_worker(),\n self.evaluation_workers.remote_workers())\n return {\"evaluation\": metrics}\n\n @DeveloperAPI\n def _before_evaluate(self):\n \"\"\"Pre-evaluation callback.\"\"\"\n pass\n\n @PublicAPI\n def compute_action(self,\n observation: TensorStructType,\n state: List[TensorStructType] = None,\n prev_action: TensorStructType = None,\n prev_reward: float = None,\n info: EnvInfoDict = None,\n policy_id: PolicyID = DEFAULT_POLICY_ID,\n full_fetch: bool = False,\n explore: bool = None) -> TensorStructType:\n \"\"\"Computes an action for the specified policy on the local Worker.\n\n Note that you can also access the policy object through\n self.get_policy(policy_id) and call compute_actions() on it directly.\n\n Args:\n observation (TensorStructType): observation from the environment.\n state (List[TensorStructType]): RNN hidden state, if any. If state\n is not None, then all of compute_single_action(...) 
is returned\n (computed action, rnn state(s), logits dictionary).\n Otherwise compute_single_action(...)[0] is returned\n (computed action).\n prev_action (TensorStructType): Previous action value, if any.\n prev_reward (float): Previous reward, if any.\n info (EnvInfoDict): info object, if any\n policy_id (PolicyID): Policy to query (only applies to\n multi-agent).\n full_fetch (bool): Whether to return extra action fetch results.\n This is always set to True if RNN state is specified.\n explore (bool): Whether to pick an exploitation or exploration\n action (default: None -> use self.config[\"explore\"]).\n\n Returns:\n any: The computed action if full_fetch=False, or\n tuple: The full output of policy.compute_actions() if\n full_fetch=True or we have an RNN-based Policy.\n \"\"\"\n if state is None:\n state = []\n preprocessed = self.workers.local_worker().preprocessors[\n policy_id].transform(observation)\n filtered_obs = self.workers.local_worker().filters[policy_id](\n preprocessed, update=False)\n\n # Figure out the current (sample) time step and pass it into Policy.\n self.global_vars[\"timestep\"] += 1\n\n result = self.get_policy(policy_id).compute_single_action(\n filtered_obs,\n state,\n prev_action,\n prev_reward,\n info,\n clip_actions=self.config[\"clip_actions\"],\n explore=explore,\n timestep=self.global_vars[\"timestep\"])\n\n if state or full_fetch:\n return result\n else:\n return result[0] # backwards compatibility\n\n def compute_actions(self,\n observations,\n state=None,\n prev_action=None,\n prev_reward=None,\n info=None,\n policy_id=DEFAULT_POLICY_ID,\n full_fetch=False,\n explore=None):\n \"\"\"Computes an action for the specified policy on the local Worker.\n\n Note that you can also access the policy object through\n self.get_policy(policy_id) and call compute_actions() on it directly.\n\n Args:\n observation (obj): observation from the environment.\n state (dict): RNN hidden state, if any. If state is not None,\n then all of compute_single_action(...) 
is returned\n (computed action, rnn state(s), logits dictionary).\n Otherwise compute_single_action(...)[0] is returned\n (computed action).\n prev_action (obj): previous action value, if any\n prev_reward (int): previous reward, if any\n info (dict): info object, if any\n policy_id (str): Policy to query (only applies to multi-agent).\n full_fetch (bool): Whether to return extra action fetch results.\n This is always set to True if RNN state is specified.\n explore (bool): Whether to pick an exploitation or exploration\n action (default: None -> use self.config[\"explore\"]).\n\n Returns:\n any: The computed action if full_fetch=False, or\n tuple: The full output of policy.compute_actions() if\n full_fetch=True or we have an RNN-based Policy.\n \"\"\"\n # Preprocess obs and states\n stateDefined = state is not None\n policy = self.get_policy(policy_id)\n filtered_obs, filtered_state = [], []\n for agent_id, ob in observations.items():\n worker = self.workers.local_worker()\n preprocessed = worker.preprocessors[policy_id].transform(ob)\n filtered = worker.filters[policy_id](preprocessed, update=False)\n filtered_obs.append(filtered)\n if state is None:\n continue\n elif agent_id in state:\n filtered_state.append(state[agent_id])\n else:\n filtered_state.append(policy.get_initial_state())\n\n # Batch obs and states\n obs_batch = np.stack(filtered_obs)\n if state is None:\n state = []\n else:\n state = list(zip(*filtered_state))\n state = [np.stack(s) for s in state]\n\n # Figure out the current (sample) time step and pass it into Policy.\n self.global_vars[\"timestep\"] += 1\n\n # Batch compute actions\n actions, states, infos = policy.compute_actions(\n obs_batch,\n state,\n prev_action,\n prev_reward,\n info,\n clip_actions=self.config[\"clip_actions\"],\n explore=explore,\n timestep=self.global_vars[\"timestep\"])\n\n # Unbatch actions for the environment\n atns, actions = space_utils.unbatch(actions), {}\n for key, atn in zip(observations, atns):\n actions[key] = atn\n\n # Unbatch states into a dict\n unbatched_states = {}\n for idx, agent_id in enumerate(observations):\n unbatched_states[agent_id] = [s[idx] for s in states]\n\n # Return only actions or full tuple\n if stateDefined or full_fetch:\n return actions, unbatched_states, infos\n else:\n return actions\n\n @property\n def _name(self) -> str:\n \"\"\"Subclasses should override this to declare their name.\"\"\"\n raise NotImplementedError\n\n @property\n def _default_config(self) -> TrainerConfigDict:\n \"\"\"Subclasses should override this to declare their default config.\"\"\"\n raise NotImplementedError\n\n @PublicAPI\n def get_policy(self, policy_id: PolicyID = DEFAULT_POLICY_ID) -> Policy:\n \"\"\"Return policy for the specified id, or None.\n\n Args:\n policy_id (str): id of policy to return.\n \"\"\"\n return self.workers.local_worker().get_policy(policy_id)\n\n @PublicAPI\n def get_weights(self, policies: List[PolicyID] = None) -> dict:\n \"\"\"Return a dictionary of policy ids to weights.\n\n Args:\n policies (list): Optional list of policies to return weights for,\n or None for all policies.\n \"\"\"\n return self.workers.local_worker().get_weights(policies)\n\n @PublicAPI\n def set_weights(self, weights: Dict[PolicyID, dict]):\n \"\"\"Set policy weights by policy id.\n\n Args:\n weights (dict): Map of policy ids to weights to set.\n \"\"\"\n self.workers.local_worker().set_weights(weights)\n\n @DeveloperAPI\n def export_policy_model(self,\n export_dir: str,\n policy_id: PolicyID = DEFAULT_POLICY_ID):\n \"\"\"Export 
policy model with given policy_id to local directory.\n\n Args:\n export_dir (string): Writable local directory.\n policy_id (string): Optional policy id to export.\n\n Example:\n >>> trainer = MyTrainer()\n >>> for _ in range(10):\n >>> trainer.train()\n >>> trainer.export_policy_model(\"/tmp/export_dir\")\n \"\"\"\n self.workers.local_worker().export_policy_model(export_dir, policy_id)\n\n @DeveloperAPI\n def export_policy_checkpoint(self,\n export_dir: str,\n filename_prefix: str = \"model\",\n policy_id: PolicyID = DEFAULT_POLICY_ID):\n \"\"\"Export tensorflow policy model checkpoint to local directory.\n\n Args:\n export_dir (string): Writable local directory.\n filename_prefix (string): file name prefix of checkpoint files.\n policy_id (string): Optional policy id to export.\n\n Example:\n >>> trainer = MyTrainer()\n >>> for _ in range(10):\n >>> trainer.train()\n >>> trainer.export_policy_checkpoint(\"/tmp/export_dir\")\n \"\"\"\n self.workers.local_worker().export_policy_checkpoint(\n export_dir, filename_prefix, policy_id)\n\n @DeveloperAPI\n def import_policy_model_from_h5(self,\n import_file: str,\n policy_id: PolicyID = DEFAULT_POLICY_ID):\n \"\"\"Imports a policy's model with given policy_id from a local h5 file.\n\n Args:\n import_file (str): The h5 file to import from.\n policy_id (string): Optional policy id to import into.\n\n Example:\n >>> trainer = MyTrainer()\n >>> trainer.import_policy_model_from_h5(\"/tmp/weights.h5\")\n >>> for _ in range(10):\n >>> trainer.train()\n \"\"\"\n self.workers.local_worker().import_policy_model_from_h5(\n import_file, policy_id)\n\n @DeveloperAPI\n def collect_metrics(self,\n selected_workers: List[\"ActorHandle\"] = None) -> dict:\n \"\"\"Collects metrics from the remote workers of this agent.\n\n This is the same data as returned by a call to train().\n \"\"\"\n return self.optimizer.collect_metrics(\n self.config[\"collect_metrics_timeout\"],\n min_history=self.config[\"metrics_smoothing_episodes\"],\n selected_workers=selected_workers)\n\n @classmethod\n def resource_help(cls, config: TrainerConfigDict) -> str:\n return (\"\\n\\nYou can adjust the resource requests of RLlib agents by \"\n \"setting `num_workers`, `num_gpus`, and other configs. 
See \"\n \"the DEFAULT_CONFIG defined by each agent for more info.\\n\\n\"\n \"The config of this agent is: {}\".format(config))\n\n @classmethod\n def merge_trainer_configs(cls,\n config1: TrainerConfigDict,\n config2: PartialTrainerConfigDict,\n _allow_unknown_configs: Optional[bool] = None\n ) -> TrainerConfigDict:\n config1 = copy.deepcopy(config1)\n if \"callbacks\" in config2 and type(config2[\"callbacks\"]) is dict:\n legacy_callbacks_dict = config2[\"callbacks\"]\n\n def make_callbacks():\n # Deprecation warning will be logged by DefaultCallbacks.\n return DefaultCallbacks(\n legacy_callbacks_dict=legacy_callbacks_dict)\n\n config2[\"callbacks\"] = make_callbacks\n if _allow_unknown_configs is None:\n _allow_unknown_configs = cls._allow_unknown_configs\n return deep_update(config1, config2, _allow_unknown_configs,\n cls._allow_unknown_subkeys,\n cls._override_all_subkeys_if_type_changes)\n\n @staticmethod\n def _validate_config(config: PartialTrainerConfigDict):\n if config.get(\"_use_trajectory_view_api\") and \\\n config.get(\"framework\") != \"torch\":\n raise ValueError(\n \"`_use_trajectory_view_api` only supported for PyTorch so \"\n \"far!\")\n elif not config.get(\"_use_trajectory_view_api\") and \\\n config.get(\"model\", {}).get(\"_time_major\"):\n raise ValueError(\"`model._time_major` only supported \"\n \"iff `_use_trajectory_view_api` is True!\")\n\n if type(config[\"input_evaluation\"]) != list:\n raise ValueError(\n \"`input_evaluation` must be a list of strings, got {}\".format(\n config[\"input_evaluation\"]))\n\n def _try_recover(self):\n \"\"\"Try to identify and remove any unhealthy workers.\n\n This method is called after an unexpected remote error is encountered\n from a worker. It issues check requests to all current workers and\n removes any that respond with error. 
If no healthy workers remain,\n an error is raised.\n \"\"\"\n\n assert hasattr(self, \"execution_plan\")\n workers = self.workers\n\n logger.info(\"Health checking all workers...\")\n checks = []\n for ev in workers.remote_workers():\n _, obj_ref = ev.sample_with_count.remote()\n checks.append(obj_ref)\n\n healthy_workers = []\n for i, obj_ref in enumerate(checks):\n w = workers.remote_workers()[i]\n try:\n ray.get(obj_ref)\n healthy_workers.append(w)\n logger.info(\"Worker {} looks healthy\".format(i + 1))\n except RayError:\n logger.exception(\"Removing unhealthy worker {}\".format(i + 1))\n try:\n w.__ray_terminate__.remote()\n except Exception:\n logger.exception(\"Error terminating unhealthy worker\")\n\n if len(healthy_workers) < 1:\n raise RuntimeError(\n \"Not enough healthy workers remain to continue.\")\n\n logger.warning(\"Recreating execution plan after failure\")\n workers.reset(healthy_workers)\n self.train_exec_impl = self.execution_plan(workers, self.config)\n\n @override(Trainable)\n def _export_model(self, export_formats: List[str],\n export_dir: str) -> Dict[str, str]:\n ExportFormat.validate(export_formats)\n exported = {}\n if ExportFormat.CHECKPOINT in export_formats:\n path = os.path.join(export_dir, ExportFormat.CHECKPOINT)\n self.export_policy_checkpoint(path)\n exported[ExportFormat.CHECKPOINT] = path\n if ExportFormat.MODEL in export_formats:\n path = os.path.join(export_dir, ExportFormat.MODEL)\n self.export_policy_model(path)\n exported[ExportFormat.MODEL] = path\n return exported\n\n def import_model(self, import_file: str):\n \"\"\"Imports a model from import_file.\n\n Note: Currently, only h5 files are supported.\n\n Args:\n import_file (str): The file to import the model from.\n\n Returns:\n A dict that maps ExportFormats to successfully exported models.\n \"\"\"\n # Check for existence.\n if not os.path.exists(import_file):\n raise FileNotFoundError(\n \"`import_file` '{}' does not exist! Can't import Model.\".\n format(import_file))\n # Get the format of the given file.\n import_format = \"h5\" # TODO(sven): Support checkpoint loading.\n\n ExportFormat.validate([import_format])\n if import_format != ExportFormat.H5:\n raise NotImplementedError\n else:\n return self.import_policy_model_from_h5(import_file)\n\n def __getstate__(self) -> dict:\n state = {}\n if hasattr(self, \"workers\"):\n state[\"worker\"] = self.workers.local_worker().save()\n if hasattr(self, \"optimizer\") and hasattr(self.optimizer, \"save\"):\n state[\"optimizer\"] = self.optimizer.save()\n return state\n\n def __setstate__(self, state: dict):\n if \"worker\" in state:\n self.workers.local_worker().restore(state[\"worker\"])\n remote_state = ray.put(state[\"worker\"])\n for r in self.workers.remote_workers():\n r.restore.remote(remote_state)\n if \"optimizer\" in state:\n self.optimizer.restore(state[\"optimizer\"])\n\n @staticmethod\n def with_updates(**overrides) -> Type[\"Trainer\"]:\n raise NotImplementedError(\n \"`with_updates` may only be called on Trainer sub-classes \"\n \"that were generated via the `ray.rllib.agents.trainer_template.\"\n \"build_trainer()` function!\")\n\n def _register_if_needed(self, env_object: Union[str, EnvType]):\n if isinstance(env_object, str):\n return env_object\n elif isinstance(env_object, type):\n name = env_object.__name__\n register_env(name, lambda config: env_object(config))\n return name\n raise ValueError(\n \"{} is an invalid env specification. 
\".format(env_object) +\n \"You can specify a custom env as either a class \"\n \"(e.g., YourEnvCls) or a registered env id (e.g., \\\"your_env\\\").\")\n"
] |
[
[
"numpy.stack"
]
] |
Archer-pro666/BAAF-Net
|
[
"663d1681d4d05ad3caaacd98e6dedfdc9caa4930"
] |
[
"helper_tf_util.py"
] |
[
"\"\"\" Wrapper functions for TensorFlow layers.\n\nAuthor: Charles R. Qi\nDate: November 2016\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\n\ndef _variable_on_cpu(name, shape, initializer, use_fp16=False):\n \"\"\"Helper to create a Variable stored on CPU memory.\n Args:\n name: name of the variable\n shape: list of ints\n initializer: initializer for Variable\n Returns:\n Variable Tensor\n \"\"\"\n with tf.device('/cpu:0'):\n dtype = tf.float16 if use_fp16 else tf.float32\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\n return var\n\n\ndef _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):\n \"\"\"Helper to create an initialized Variable with weight decay.\n\n Note that the Variable is initialized with a truncated normal distribution.\n A weight decay is added only if one is specified.\n\n Args:\n name: name of the variable\n shape: list of ints\n stddev: standard deviation of a truncated Gaussian\n wd: add L2Loss weight decay multiplied by this float. If None, weight\n decay is not added for this Variable.\n use_xavier: bool, whether to use xavier initializer\n\n Returns:\n Variable Tensor\n \"\"\"\n if use_xavier:\n initializer = tf.contrib.layers.xavier_initializer()\n var = _variable_on_cpu(name, shape, initializer)\n else:\n # initializer = tf.truncated_normal_initializer(stddev=stddev)\n with tf.device('/cpu:0'):\n var = tf.truncated_normal(shape, stddev=np.sqrt(2 / shape[-1]))\n var = tf.round(var * tf.constant(1000, dtype=tf.float32)) / tf.constant(1000, dtype=tf.float32)\n var = tf.Variable(var, name='weights')\n if wd is not None:\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\n tf.add_to_collection('losses', weight_decay)\n return var\n\n\ndef conv1d(inputs,\n num_output_channels,\n kernel_size,\n scope,\n stride=1,\n padding='SAME',\n use_xavier=True,\n stddev=1e-3,\n weight_decay=0.0,\n activation_fn=tf.nn.relu,\n bn=False,\n bn_decay=None,\n is_training=None):\n \"\"\" 1D convolution with non-linear operation.\n\n Args:\n inputs: 3-D tensor variable BxLxC\n num_output_channels: int\n kernel_size: int\n scope: string\n stride: int\n padding: 'SAME' or 'VALID'\n use_xavier: bool, use xavier_initializer if true\n stddev: float, stddev for truncated_normal init\n weight_decay: float\n activation_fn: function\n bn: bool, whether to use batch norm\n bn_decay: float or float tensor variable in [0,1]\n is_training: bool Tensor variable\n\n Returns:\n Variable tensor\n \"\"\"\n with tf.variable_scope(scope) as sc:\n num_in_channels = inputs.get_shape()[-1].value\n kernel_shape = [kernel_size,\n num_in_channels, num_output_channels]\n kernel = _variable_with_weight_decay('weights',\n shape=kernel_shape,\n use_xavier=use_xavier,\n stddev=stddev,\n wd=weight_decay)\n outputs = tf.nn.conv1d(inputs, kernel,\n stride=stride,\n padding=padding)\n biases = _variable_on_cpu('biases', [num_output_channels],\n tf.constant_initializer(0.0))\n outputs = tf.nn.bias_add(outputs, biases)\n\n if bn:\n outputs = batch_norm_for_conv1d(outputs, is_training,\n bn_decay=bn_decay, scope='bn')\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return outputs\n\n\ndef conv2d(inputs,\n num_output_channels,\n kernel_size,\n scope,\n stride=[1, 1],\n padding='SAME',\n bn=False,\n is_training=None,\n use_xavier=False,\n stddev=1e-3,\n weight_decay=0.0,\n activation_fn=tf.nn.relu,\n bn_decay=None):\n \"\"\" 2D convolution with non-linear operation.\n\n Args:\n inputs: 4-D tensor variable BxHxWxC\n 
num_output_channels: int\n kernel_size: a list of 2 ints\n scope: string\n stride: a list of 2 ints\n padding: 'SAME' or 'VALID'\n use_xavier: bool, use xavier_initializer if true\n stddev: float, stddev for truncated_normal init\n weight_decay: float\n activation_fn: function\n bn: bool, whether to use batch norm\n bn_decay: float or float tensor variable in [0,1]\n is_training: bool Tensor variable\n\n Returns:\n Variable tensor\n \"\"\"\n with tf.variable_scope(scope) as sc:\n kernel_h, kernel_w = kernel_size\n num_in_channels = inputs.get_shape()[-1].value\n kernel_shape = [kernel_h, kernel_w,\n num_in_channels, num_output_channels]\n kernel = _variable_with_weight_decay('weights',\n shape=kernel_shape,\n use_xavier=use_xavier,\n stddev=stddev,\n wd=weight_decay)\n stride_h, stride_w = stride\n outputs = tf.nn.conv2d(inputs, kernel,\n [1, stride_h, stride_w, 1],\n padding=padding)\n biases = _variable_on_cpu('biases', [num_output_channels],\n tf.constant_initializer(0.0))\n outputs = tf.nn.bias_add(outputs, biases)\n\n if bn:\n outputs = tf.layers.batch_normalization(outputs, momentum=0.99, epsilon=1e-6, training=is_training)\n if activation_fn is not None:\n outputs = tf.nn.leaky_relu(outputs, alpha=0.2)\n return outputs\n\n\ndef conv2d_transpose(inputs,\n num_output_channels,\n kernel_size,\n scope,\n stride=[1, 1],\n padding='SAME',\n use_xavier=False,\n stddev=1e-3,\n weight_decay=0.0,\n activation_fn=tf.nn.relu,\n bn=False,\n bn_decay=None,\n is_training=None):\n \"\"\" 2D convolution transpose with non-linear operation.\n\n Args:\n inputs: 4-D tensor variable BxHxWxC\n num_output_channels: int\n kernel_size: a list of 2 ints\n scope: string\n stride: a list of 2 ints\n padding: 'SAME' or 'VALID'\n use_xavier: bool, use xavier_initializer if true\n stddev: float, stddev for truncated_normal init\n weight_decay: float\n activation_fn: function\n bn: bool, whether to use batch norm\n bn_decay: float or float tensor variable in [0,1]\n is_training: bool Tensor variable\n\n Returns:\n Variable tensor\n\n Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) == a\n \"\"\"\n with tf.variable_scope(scope) as sc:\n kernel_h, kernel_w = kernel_size\n num_in_channels = inputs.get_shape()[-1].value\n kernel_shape = [kernel_h, kernel_w,\n num_output_channels, num_in_channels] # reversed to conv2d\n kernel = _variable_with_weight_decay('weights',\n shape=kernel_shape,\n use_xavier=use_xavier,\n stddev=stddev,\n wd=weight_decay)\n stride_h, stride_w = stride\n\n # from slim.convolution2d_transpose\n def get_deconv_dim(dim_size, stride_size, kernel_size, padding):\n dim_size *= stride_size\n\n if padding == 'VALID' and dim_size is not None:\n dim_size += max(kernel_size - stride_size, 0)\n return dim_size\n\n # caculate output shape\n batch_size = tf.shape(inputs)[0]\n height = tf.shape(inputs)[1]\n width = tf.shape(inputs)[2]\n out_height = get_deconv_dim(height, stride_h, kernel_h, padding)\n out_width = get_deconv_dim(width, stride_w, kernel_w, padding)\n output_shape = tf.stack([batch_size, out_height, out_width, num_output_channels], axis=0)\n\n outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,\n [1, stride_h, stride_w, 1],\n padding=padding)\n biases = _variable_on_cpu('biases', [num_output_channels],\n tf.constant_initializer(0.0))\n outputs = tf.nn.bias_add(outputs, biases)\n\n if bn:\n # outputs = batch_norm_for_conv2d(outputs, is_training,\n # bn_decay=bn_decay, scope='bn')\n outputs = tf.layers.batch_normalization(outputs, momentum=0.99, 
epsilon=1e-6, training=is_training)\n if activation_fn is not None:\n # outputs = activation_fn(outputs)\n outputs = tf.nn.leaky_relu(outputs, alpha=0.2)\n return outputs\n\n\ndef conv3d(inputs,\n num_output_channels,\n kernel_size,\n scope,\n stride=[1, 1, 1],\n padding='SAME',\n use_xavier=True,\n stddev=1e-3,\n weight_decay=0.0,\n activation_fn=tf.nn.relu,\n bn=False,\n bn_decay=None,\n is_training=None):\n \"\"\" 3D convolution with non-linear operation.\n\n Args:\n inputs: 5-D tensor variable BxDxHxWxC\n num_output_channels: int\n kernel_size: a list of 3 ints\n scope: string\n stride: a list of 3 ints\n padding: 'SAME' or 'VALID'\n use_xavier: bool, use xavier_initializer if true\n stddev: float, stddev for truncated_normal init\n weight_decay: float\n activation_fn: function\n bn: bool, whether to use batch norm\n bn_decay: float or float tensor variable in [0,1]\n is_training: bool Tensor variable\n\n Returns:\n Variable tensor\n \"\"\"\n with tf.variable_scope(scope) as sc:\n kernel_d, kernel_h, kernel_w = kernel_size\n num_in_channels = inputs.get_shape()[-1].value\n kernel_shape = [kernel_d, kernel_h, kernel_w,\n num_in_channels, num_output_channels]\n kernel = _variable_with_weight_decay('weights',\n shape=kernel_shape,\n use_xavier=use_xavier,\n stddev=stddev,\n wd=weight_decay)\n stride_d, stride_h, stride_w = stride\n outputs = tf.nn.conv3d(inputs, kernel,\n [1, stride_d, stride_h, stride_w, 1],\n padding=padding)\n biases = _variable_on_cpu('biases', [num_output_channels],\n tf.constant_initializer(0.0))\n outputs = tf.nn.bias_add(outputs, biases)\n\n if bn:\n outputs = batch_norm_for_conv3d(outputs, is_training,\n bn_decay=bn_decay, scope='bn')\n\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n return outputs\n\n\ndef fully_connected(inputs,\n num_outputs,\n scope,\n use_xavier=True,\n stddev=1e-3,\n weight_decay=0.0,\n activation_fn=tf.nn.relu,\n bn=False,\n bn_decay=None,\n is_training=None):\n \"\"\" Fully connected layer with non-linear operation.\n\n Args:\n inputs: 2-D tensor BxN\n num_outputs: int\n\n Returns:\n Variable tensor of size B x num_outputs.\n \"\"\"\n with tf.variable_scope(scope) as sc:\n num_input_units = inputs.get_shape()[-1].value\n weights = _variable_with_weight_decay('weights',\n shape=[num_input_units, num_outputs],\n use_xavier=use_xavier,\n stddev=stddev,\n wd=weight_decay)\n outputs = tf.matmul(inputs, weights)\n biases = _variable_on_cpu('biases', [num_outputs],\n tf.constant_initializer(0.0))\n outputs = tf.nn.bias_add(outputs, biases)\n\n if bn:\n outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn')\n\n if activation_fn is not None:\n # outputs = activation_fn(outputs)\n outputs = tf.nn.leaky_relu(outputs, alpha=0.2)\n return outputs\n\n\ndef max_pool2d(inputs,\n kernel_size,\n scope,\n stride=[2, 2],\n padding='VALID'):\n \"\"\" 2D max pooling.\n\n Args:\n inputs: 4-D tensor BxHxWxC\n kernel_size: a list of 2 ints\n stride: a list of 2 ints\n\n Returns:\n Variable tensor\n \"\"\"\n with tf.variable_scope(scope) as sc:\n kernel_h, kernel_w = kernel_size\n stride_h, stride_w = stride\n outputs = tf.nn.max_pool(inputs,\n ksize=[1, kernel_h, kernel_w, 1],\n strides=[1, stride_h, stride_w, 1],\n padding=padding,\n name=sc.name)\n return outputs\n\n\ndef avg_pool2d(inputs,\n kernel_size,\n scope,\n stride=[2, 2],\n padding='VALID'):\n \"\"\" 2D avg pooling.\n\n Args:\n inputs: 4-D tensor BxHxWxC\n kernel_size: a list of 2 ints\n stride: a list of 2 ints\n\n Returns:\n Variable tensor\n \"\"\"\n with 
tf.variable_scope(scope) as sc:\n kernel_h, kernel_w = kernel_size\n stride_h, stride_w = stride\n outputs = tf.nn.avg_pool(inputs,\n ksize=[1, kernel_h, kernel_w, 1],\n strides=[1, stride_h, stride_w, 1],\n padding=padding,\n name=sc.name)\n return outputs\n\n\ndef max_pool3d(inputs,\n kernel_size,\n scope,\n stride=[2, 2, 2],\n padding='VALID'):\n \"\"\" 3D max pooling.\n\n Args:\n inputs: 5-D tensor BxDxHxWxC\n kernel_size: a list of 3 ints\n stride: a list of 3 ints\n\n Returns:\n Variable tensor\n \"\"\"\n with tf.variable_scope(scope) as sc:\n kernel_d, kernel_h, kernel_w = kernel_size\n stride_d, stride_h, stride_w = stride\n outputs = tf.nn.max_pool3d(inputs,\n ksize=[1, kernel_d, kernel_h, kernel_w, 1],\n strides=[1, stride_d, stride_h, stride_w, 1],\n padding=padding,\n name=sc.name)\n return outputs\n\n\ndef avg_pool3d(inputs,\n kernel_size,\n scope,\n stride=[2, 2, 2],\n padding='VALID'):\n \"\"\" 3D avg pooling.\n\n Args:\n inputs: 5-D tensor BxDxHxWxC\n kernel_size: a list of 3 ints\n stride: a list of 3 ints\n\n Returns:\n Variable tensor\n \"\"\"\n with tf.variable_scope(scope) as sc:\n kernel_d, kernel_h, kernel_w = kernel_size\n stride_d, stride_h, stride_w = stride\n outputs = tf.nn.avg_pool3d(inputs,\n ksize=[1, kernel_d, kernel_h, kernel_w, 1],\n strides=[1, stride_d, stride_h, stride_w, 1],\n padding=padding,\n name=sc.name)\n return outputs\n\n\ndef batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):\n \"\"\" Batch normalization on convolutional maps and beyond...\n Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow\n\n Args:\n inputs: Tensor, k-D input ... x C could be BC or BHWC or BDHWC\n is_training: boolean tf.Varialbe, true indicates training phase\n scope: string, variable scope\n moments_dims: a list of ints, indicating dimensions for moments calculation\n bn_decay: float or float tensor variable, controling moving average weight\n Return:\n normed: batch-normalized maps\n \"\"\"\n with tf.variable_scope(scope) as sc:\n num_channels = inputs.get_shape()[-1].value\n beta = tf.Variable(tf.constant(0.0, shape=[num_channels]),\n name='beta', trainable=True)\n gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),\n name='gamma', trainable=True)\n batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')\n decay = bn_decay if bn_decay is not None else 0.9\n ema = tf.train.ExponentialMovingAverage(decay=decay)\n # Operator that maintains moving averages of variables.\n ema_apply_op = tf.cond(is_training,\n lambda: ema.apply([batch_mean, batch_var]),\n lambda: tf.no_op())\n\n # Update moving average and return current batch's avg and var.\n def mean_var_with_update():\n with tf.control_dependencies([ema_apply_op]):\n return tf.identity(batch_mean), tf.identity(batch_var)\n\n # ema.average returns the Variable holding the average of var.\n mean, var = tf.cond(is_training,\n mean_var_with_update,\n lambda: (ema.average(batch_mean), ema.average(batch_var)))\n normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)\n return normed\n\n\ndef batch_norm_for_fc(inputs, is_training, bn_decay, scope):\n \"\"\" Batch normalization on FC data.\n\n Args:\n inputs: Tensor, 2D BxC input\n is_training: boolean tf.Varialbe, true indicates training phase\n bn_decay: float or float tensor variable, controling moving average weight\n scope: string, variable scope\n Return:\n normed: batch-normalized maps\n \"\"\"\n return batch_norm_template(inputs, is_training, scope, [0, ], 
bn_decay)\n\n\ndef batch_norm_for_conv1d(inputs, is_training, bn_decay, scope):\n \"\"\" Batch normalization on 1D convolutional maps.\n\n Args:\n inputs: Tensor, 3D BLC input maps\n is_training: boolean tf.Varialbe, true indicates training phase\n bn_decay: float or float tensor variable, controling moving average weight\n scope: string, variable scope\n Return:\n normed: batch-normalized maps\n \"\"\"\n return batch_norm_template(inputs, is_training, scope, [0, 1], bn_decay)\n\n\ndef batch_norm_for_conv2d(inputs, is_training, bn_decay, scope):\n \"\"\" Batch normalization on 2D convolutional maps.\n\n Args:\n inputs: Tensor, 4D BHWC input maps\n is_training: boolean tf.Varialbe, true indicates training phase\n bn_decay: float or float tensor variable, controling moving average weight\n scope: string, variable scope\n Return:\n normed: batch-normalized maps\n \"\"\"\n return batch_norm_template(inputs, is_training, scope, [0, 1, 2], bn_decay)\n\n\ndef batch_norm_for_conv3d(inputs, is_training, bn_decay, scope):\n \"\"\" Batch normalization on 3D convolutional maps.\n\n Args:\n inputs: Tensor, 5D BDHWC input maps\n is_training: boolean tf.Varialbe, true indicates training phase\n bn_decay: float or float tensor variable, controling moving average weight\n scope: string, variable scope\n Return:\n normed: batch-normalized maps\n \"\"\"\n return batch_norm_template(inputs, is_training, scope, [0, 1, 2, 3], bn_decay)\n\n\ndef dropout(inputs,\n is_training,\n scope,\n keep_prob=0.5,\n noise_shape=None):\n \"\"\" Dropout layer.\n\n Args:\n inputs: tensor\n is_training: boolean tf.Variable\n scope: string\n keep_prob: float in [0,1]\n noise_shape: list of ints\n\n Returns:\n tensor variable\n \"\"\"\n with tf.variable_scope(scope) as sc:\n outputs = tf.cond(is_training,\n lambda: tf.nn.dropout(inputs, keep_prob, noise_shape),\n lambda: inputs)\n return outputs\n"
] |
[
[
"tensorflow.device",
"tensorflow.get_variable",
"numpy.sqrt",
"tensorflow.control_dependencies",
"tensorflow.nn.max_pool",
"tensorflow.stack",
"tensorflow.nn.conv2d_transpose",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.nn.l2_loss",
"tensorflow.nn.conv1d",
"tensorflow.nn.conv2d",
"tensorflow.layers.batch_normalization",
"tensorflow.Variable",
"tensorflow.nn.moments",
"tensorflow.contrib.layers.xavier_initializer",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.nn.batch_normalization",
"tensorflow.shape",
"tensorflow.identity",
"tensorflow.nn.conv3d",
"tensorflow.nn.avg_pool",
"tensorflow.no_op",
"tensorflow.nn.avg_pool3d",
"tensorflow.add_to_collection",
"tensorflow.nn.leaky_relu",
"tensorflow.nn.bias_add",
"tensorflow.nn.max_pool3d",
"tensorflow.constant",
"tensorflow.constant_initializer",
"tensorflow.variable_scope"
]
] |
AdrienCorenflos/tensorflow
|
[
"1b5220e89fecca70375b372a5bddc7f961c6a736"
] |
[
"tensorflow/python/data/util/nest_test.py"
] |
[
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for utilities working with arbitrarily nested structures.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport attr\nimport numpy as np\n\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.platform import test\n\n\nclass NestTest(test.TestCase):\n\n def testFlattenAndPack(self):\n structure = ((3, 4), 5, (6, 7, (9, 10), 8))\n flat = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\"]\n self.assertEqual(nest.flatten(structure), [3, 4, 5, 6, 7, 9, 10, 8])\n self.assertEqual(\n nest.pack_sequence_as(structure, flat), ((\"a\", \"b\"), \"c\",\n (\"d\", \"e\", (\"f\", \"g\"), \"h\")))\n point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n structure = (point(x=4, y=2), ((point(x=1, y=0),),))\n flat = [4, 2, 1, 0]\n self.assertEqual(nest.flatten(structure), flat)\n restructured_from_flat = nest.pack_sequence_as(structure, flat)\n self.assertEqual(restructured_from_flat, structure)\n self.assertEqual(restructured_from_flat[0].x, 4)\n self.assertEqual(restructured_from_flat[0].y, 2)\n self.assertEqual(restructured_from_flat[1][0][0].x, 1)\n self.assertEqual(restructured_from_flat[1][0][0].y, 0)\n\n @attr.s\n class PointAttr:\n x = attr.ib()\n y = attr.ib()\n\n structure = (PointAttr(x=4, y=2), ((PointAttr(x=1, y=0),),))\n flat = [4, 2, 1, 0]\n self.assertEqual(nest.flatten(structure), flat)\n restructured_from_flat = nest.pack_sequence_as(structure, flat)\n self.assertEqual(restructured_from_flat, structure)\n self.assertEqual(restructured_from_flat[0].x, 4)\n self.assertEqual(restructured_from_flat[0].y, 2)\n self.assertEqual(restructured_from_flat[1][0][0].x, 1)\n self.assertEqual(restructured_from_flat[1][0][0].y, 0)\n\n self.assertEqual([5], nest.flatten(5))\n self.assertEqual([np.array([5])], nest.flatten(np.array([5])))\n\n self.assertEqual(\"a\", nest.pack_sequence_as(5, [\"a\"]))\n self.assertEqual(\n np.array([5]), nest.pack_sequence_as(\"scalar\", [np.array([5])]))\n\n with self.assertRaisesRegexp(ValueError, \"Structure is a scalar\"):\n nest.pack_sequence_as(\"scalar\", [4, 5])\n\n with self.assertRaisesRegexp(TypeError, \"flat_sequence\"):\n nest.pack_sequence_as([4, 5], \"bad_sequence\")\n\n with self.assertRaises(ValueError):\n nest.pack_sequence_as([5, 6, [7, 8]], [\"a\", \"b\", \"c\"])\n\n def testFlattenDictOrder(self):\n \"\"\"`flatten` orders dicts by key, including OrderedDicts.\"\"\"\n ordered = collections.OrderedDict([(\"d\", 3), (\"b\", 1), (\"a\", 0), (\"c\", 2)])\n plain = {\"d\": 3, \"b\": 
1, \"a\": 0, \"c\": 2}\n ordered_flat = nest.flatten(ordered)\n plain_flat = nest.flatten(plain)\n self.assertEqual([0, 1, 2, 3], ordered_flat)\n self.assertEqual([0, 1, 2, 3], plain_flat)\n\n def testPackDictOrder(self):\n \"\"\"Packing orders dicts by key, including OrderedDicts.\"\"\"\n ordered = collections.OrderedDict([(\"d\", 0), (\"b\", 0), (\"a\", 0), (\"c\", 0)])\n plain = {\"d\": 0, \"b\": 0, \"a\": 0, \"c\": 0}\n seq = [0, 1, 2, 3]\n ordered_reconstruction = nest.pack_sequence_as(ordered, seq)\n plain_reconstruction = nest.pack_sequence_as(plain, seq)\n self.assertEqual(\n collections.OrderedDict([(\"d\", 3), (\"b\", 1), (\"a\", 0), (\"c\", 2)]),\n ordered_reconstruction)\n self.assertEqual({\"d\": 3, \"b\": 1, \"a\": 0, \"c\": 2}, plain_reconstruction)\n\n def testFlattenAndPackWithDicts(self):\n # A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.\n named_tuple = collections.namedtuple(\"A\", (\"b\", \"c\"))\n mess = (\n \"z\",\n named_tuple(3, 4),\n {\n \"c\": (\n 1,\n collections.OrderedDict([\n (\"b\", 3),\n (\"a\", 2),\n ]),\n ),\n \"b\": 5\n },\n 17\n )\n\n flattened = nest.flatten(mess)\n self.assertEqual(flattened, [\"z\", 3, 4, 5, 1, 2, 3, 17])\n\n structure_of_mess = (\n 14,\n named_tuple(\"a\", True),\n {\n \"c\": (\n 0,\n collections.OrderedDict([\n (\"b\", 9),\n (\"a\", 8),\n ]),\n ),\n \"b\": 3\n },\n \"hi everybody\",\n )\n\n unflattened = nest.pack_sequence_as(structure_of_mess, flattened)\n self.assertEqual(unflattened, mess)\n\n # Check also that the OrderedDict was created, with the correct key order.\n unflattened_ordered_dict = unflattened[2][\"c\"][1]\n self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)\n self.assertEqual(list(unflattened_ordered_dict.keys()), [\"b\", \"a\"])\n\n def testFlattenSparseValue(self):\n st = sparse_tensor.SparseTensorValue([[0]], [0], [1])\n single_value = st\n list_of_values = [st, st, st]\n nest_of_values = ((st), ((st), (st)))\n dict_of_values = {\"foo\": st, \"bar\": st, \"baz\": st}\n self.assertEqual([st], nest.flatten(single_value))\n self.assertEqual([[st, st, st]], nest.flatten(list_of_values))\n self.assertEqual([st, st, st], nest.flatten(nest_of_values))\n self.assertEqual([st, st, st], nest.flatten(dict_of_values))\n\n def testFlattenRaggedValue(self):\n rt = ragged_factory_ops.constant_value([[[0]], [[1]]])\n single_value = rt\n list_of_values = [rt, rt, rt]\n nest_of_values = ((rt), ((rt), (rt)))\n dict_of_values = {\"foo\": rt, \"bar\": rt, \"baz\": rt}\n self.assertEqual([rt], nest.flatten(single_value))\n self.assertEqual([[rt, rt, rt]], nest.flatten(list_of_values))\n self.assertEqual([rt, rt, rt], nest.flatten(nest_of_values))\n self.assertEqual([rt, rt, rt], nest.flatten(dict_of_values))\n\n def testIsSequence(self):\n self.assertFalse(nest.is_sequence(\"1234\"))\n self.assertFalse(nest.is_sequence([1, 3, [4, 5]]))\n self.assertTrue(nest.is_sequence(((7, 8), (5, 6))))\n self.assertFalse(nest.is_sequence([]))\n self.assertFalse(nest.is_sequence(set([1, 2])))\n ones = array_ops.ones([2, 3])\n self.assertFalse(nest.is_sequence(ones))\n self.assertFalse(nest.is_sequence(math_ops.tanh(ones)))\n self.assertFalse(nest.is_sequence(np.ones((4, 5))))\n self.assertTrue(nest.is_sequence({\"foo\": 1, \"bar\": 2}))\n self.assertFalse(\n nest.is_sequence(sparse_tensor.SparseTensorValue([[0]], [0], [1])))\n self.assertFalse(\n nest.is_sequence(ragged_factory_ops.constant_value([[[0]], [[1]]])))\n\n def testAssertSameStructure(self):\n structure1 = (((1, 2), 3), 4, (5, 6))\n structure2 = 
(((\"foo1\", \"foo2\"), \"foo3\"), \"foo4\", (\"foo5\", \"foo6\"))\n structure_different_num_elements = (\"spam\", \"eggs\")\n structure_different_nesting = (((1, 2), 3), 4, 5, (6,))\n structure_dictionary = {\"foo\": 2, \"bar\": 4, \"baz\": {\"foo\": 5, \"bar\": 6}}\n structure_dictionary_diff_nested = {\n \"foo\": 2,\n \"bar\": 4,\n \"baz\": {\n \"foo\": 5,\n \"baz\": 6\n }\n }\n nest.assert_same_structure(structure1, structure2)\n nest.assert_same_structure(\"abc\", 1.0)\n nest.assert_same_structure(\"abc\", np.array([0, 1]))\n nest.assert_same_structure(\"abc\", constant_op.constant([0, 1]))\n\n with self.assertRaisesRegexp(ValueError,\n \"don't have the same nested structure\"):\n nest.assert_same_structure(structure1, structure_different_num_elements)\n\n with self.assertRaisesRegexp(ValueError,\n \"don't have the same nested structure\"):\n nest.assert_same_structure((0, 1), np.array([0, 1]))\n\n with self.assertRaisesRegexp(ValueError,\n \"don't have the same nested structure\"):\n nest.assert_same_structure(0, (0, 1))\n\n with self.assertRaisesRegexp(ValueError,\n \"don't have the same nested structure\"):\n nest.assert_same_structure(structure1, structure_different_nesting)\n\n named_type_0 = collections.namedtuple(\"named_0\", (\"a\", \"b\"))\n named_type_1 = collections.namedtuple(\"named_1\", (\"a\", \"b\"))\n self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),\n named_type_0(\"a\", \"b\"))\n\n nest.assert_same_structure(named_type_0(3, 4), named_type_0(\"a\", \"b\"))\n\n self.assertRaises(TypeError, nest.assert_same_structure,\n named_type_0(3, 4), named_type_1(3, 4))\n\n with self.assertRaisesRegexp(ValueError,\n \"don't have the same nested structure\"):\n nest.assert_same_structure(named_type_0(3, 4), named_type_0((3,), 4))\n\n with self.assertRaisesRegexp(ValueError,\n \"don't have the same nested structure\"):\n nest.assert_same_structure(((3,), 4), (3, (4,)))\n\n structure1_list = {\"a\": ((1, 2), 3), \"b\": 4, \"c\": (5, 6)}\n structure2_list = {\"a\": ((1, 2), 3), \"b\": 4, \"d\": (5, 6)}\n with self.assertRaisesRegexp(TypeError,\n \"don't have the same sequence type\"):\n nest.assert_same_structure(structure1, structure1_list)\n nest.assert_same_structure(structure1, structure2, check_types=False)\n nest.assert_same_structure(structure1, structure1_list, check_types=False)\n with self.assertRaisesRegexp(ValueError, \"don't have the same set of keys\"):\n nest.assert_same_structure(structure1_list, structure2_list)\n with self.assertRaisesRegexp(ValueError, \"don't have the same set of keys\"):\n nest.assert_same_structure(structure_dictionary,\n structure_dictionary_diff_nested)\n nest.assert_same_structure(\n structure_dictionary,\n structure_dictionary_diff_nested,\n check_types=False)\n nest.assert_same_structure(\n structure1_list, structure2_list, check_types=False)\n\n def testMapStructure(self):\n structure1 = (((1, 2), 3), 4, (5, 6))\n structure2 = (((7, 8), 9), 10, (11, 12))\n structure1_plus1 = nest.map_structure(lambda x: x + 1, structure1)\n nest.assert_same_structure(structure1, structure1_plus1)\n self.assertAllEqual(\n [2, 3, 4, 5, 6, 7],\n nest.flatten(structure1_plus1))\n structure1_plus_structure2 = nest.map_structure(\n lambda x, y: x + y, structure1, structure2)\n self.assertEqual(\n (((1 + 7, 2 + 8), 3 + 9), 4 + 10, (5 + 11, 6 + 12)),\n structure1_plus_structure2)\n\n self.assertEqual(3, nest.map_structure(lambda x: x - 1, 4))\n\n self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))\n\n with 
self.assertRaisesRegexp(TypeError, \"callable\"):\n nest.map_structure(\"bad\", structure1_plus1)\n\n with self.assertRaisesRegexp(ValueError, \"same nested structure\"):\n nest.map_structure(lambda x, y: None, 3, (3,))\n\n with self.assertRaisesRegexp(TypeError, \"same sequence type\"):\n nest.map_structure(lambda x, y: None, ((3, 4), 5), {\"a\": (3, 4), \"b\": 5})\n\n with self.assertRaisesRegexp(ValueError, \"same nested structure\"):\n nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))\n\n with self.assertRaisesRegexp(ValueError, \"same nested structure\"):\n nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),\n check_types=False)\n\n with self.assertRaisesRegexp(ValueError, \"Only valid keyword argument\"):\n nest.map_structure(lambda x: None, structure1, foo=\"a\")\n\n with self.assertRaisesRegexp(ValueError, \"Only valid keyword argument\"):\n nest.map_structure(lambda x: None, structure1, check_types=False, foo=\"a\")\n\n def testAssertShallowStructure(self):\n inp_ab = (\"a\", \"b\")\n inp_abc = (\"a\", \"b\", \"c\")\n expected_message = (\n \"The two structures don't have the same sequence length. Input \"\n \"structure has length 2, while shallow structure has length 3.\")\n with self.assertRaisesRegexp(ValueError, expected_message):\n nest.assert_shallow_structure(inp_abc, inp_ab)\n\n inp_ab1 = ((1, 1), (2, 2))\n inp_ab2 = {\"a\": (1, 1), \"b\": (2, 2)}\n expected_message = (\n \"The two structures don't have the same sequence type. Input structure \"\n \"has type <(type|class) 'tuple'>, while shallow structure has type \"\n \"<(type|class) 'dict'>.\")\n with self.assertRaisesRegexp(TypeError, expected_message):\n nest.assert_shallow_structure(inp_ab2, inp_ab1)\n nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)\n\n inp_ab1 = {\"a\": (1, 1), \"b\": {\"c\": (2, 2)}}\n inp_ab2 = {\"a\": (1, 1), \"b\": {\"d\": (2, 2)}}\n expected_message = (\n r\"The two structures don't have the same keys. 
Input \"\n r\"structure has keys \\['c'\\], while shallow structure has \"\n r\"keys \\['d'\\].\")\n with self.assertRaisesRegexp(ValueError, expected_message):\n nest.assert_shallow_structure(inp_ab2, inp_ab1)\n\n inp_ab = collections.OrderedDict([(\"a\", 1), (\"b\", (2, 3))])\n inp_ba = collections.OrderedDict([(\"b\", (2, 3)), (\"a\", 1)])\n nest.assert_shallow_structure(inp_ab, inp_ba)\n\n def testFlattenUpTo(self):\n input_tree = (((2, 2), (3, 3)), ((4, 9), (5, 5)))\n shallow_tree = ((True, True), (False, True))\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])\n self.assertEqual(flattened_shallow_tree, [True, True, False, True])\n\n input_tree = (((\"a\", 1), ((\"b\", 2), ((\"c\", 3), ((\"d\", 4))))))\n shallow_tree = ((\"level_1\", (\"level_2\", (\"level_3\", (\"level_4\")))))\n input_tree_flattened_as_shallow_tree = nest.flatten_up_to(shallow_tree,\n input_tree)\n input_tree_flattened = nest.flatten(input_tree)\n self.assertEqual(input_tree_flattened_as_shallow_tree,\n [(\"a\", 1), (\"b\", 2), (\"c\", 3), (\"d\", 4)])\n self.assertEqual(input_tree_flattened, [\"a\", 1, \"b\", 2, \"c\", 3, \"d\", 4])\n\n ## Shallow non-list edge-case.\n # Using iterable elements.\n input_tree = [\"input_tree\"]\n shallow_tree = \"shallow_tree\"\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n input_tree = (\"input_tree_0\", \"input_tree_1\")\n shallow_tree = \"shallow_tree\"\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n # Using non-iterable elements.\n input_tree = (0,)\n shallow_tree = 9\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n input_tree = (0, 1)\n shallow_tree = 9\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n ## Both non-list edge-case.\n # Using iterable elements.\n input_tree = \"input_tree\"\n shallow_tree = \"shallow_tree\"\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n # Using non-iterable elements.\n input_tree = 0\n shallow_tree = 0\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [input_tree])\n self.assertEqual(flattened_shallow_tree, [shallow_tree])\n\n ## Input non-list edge-case.\n # Using iterable elements.\n input_tree = \"input_tree\"\n shallow_tree = (\"shallow_tree\",)\n expected_message = (\"If shallow structure is a 
sequence, input must also \"\n \"be a sequence. Input has type: <(type|class) 'str'>.\")\n with self.assertRaisesRegexp(TypeError, expected_message):\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_shallow_tree, list(shallow_tree))\n\n input_tree = \"input_tree\"\n shallow_tree = (\"shallow_tree_9\", \"shallow_tree_8\")\n with self.assertRaisesRegexp(TypeError, expected_message):\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_shallow_tree, list(shallow_tree))\n\n # Using non-iterable elements.\n input_tree = 0\n shallow_tree = (9,)\n expected_message = (\"If shallow structure is a sequence, input must also \"\n \"be a sequence. Input has type: <(type|class) 'int'>.\")\n with self.assertRaisesRegexp(TypeError, expected_message):\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_shallow_tree, list(shallow_tree))\n\n input_tree = 0\n shallow_tree = (9, 8)\n with self.assertRaisesRegexp(TypeError, expected_message):\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_shallow_tree, list(shallow_tree))\n\n # Using dict.\n input_tree = {\"a\": ((2, 2), (3, 3)), \"b\": ((4, 9), (5, 5))}\n shallow_tree = {\"a\": (True, True), \"b\": (False, True)}\n flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)\n flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)\n self.assertEqual(flattened_input_tree, [(2, 2), (3, 3), (4, 9), (5, 5)])\n self.assertEqual(flattened_shallow_tree, [True, True, False, True])\n\n def testMapStructureUpTo(self):\n ab_tuple = collections.namedtuple(\"ab_tuple\", \"a, b\")\n op_tuple = collections.namedtuple(\"op_tuple\", \"add, mul\")\n inp_val = ab_tuple(a=2, b=3)\n inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))\n out = nest.map_structure_up_to(\n inp_val, lambda val, ops: (val + ops.add) * ops.mul, inp_val, inp_ops)\n self.assertEqual(out.a, 6)\n self.assertEqual(out.b, 15)\n\n data_list = ((2, 4, 6, 8), ((1, 3, 5, 7, 9), (3, 5, 7)))\n name_list = (\"evens\", (\"odds\", \"primes\"))\n out = nest.map_structure_up_to(\n name_list, lambda name, sec: \"first_{}_{}\".format(len(sec), name),\n name_list, data_list)\n self.assertEqual(out, (\"first_4_evens\", (\"first_5_odds\", \"first_3_primes\")))\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] |
[
[
"tensorflow.python.data.util.nest.map_structure_up_to",
"tensorflow.python.framework.sparse_tensor.SparseTensorValue",
"tensorflow.python.data.util.nest.assert_same_structure",
"tensorflow.python.data.util.nest.is_sequence",
"tensorflow.python.data.util.nest.flatten_up_to",
"numpy.ones",
"tensorflow.python.data.util.nest.pack_sequence_as",
"tensorflow.python.ops.math_ops.tanh",
"tensorflow.python.platform.test.main",
"tensorflow.python.data.util.nest.map_structure",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.data.util.nest.assert_shallow_structure",
"tensorflow.python.data.util.nest.flatten",
"numpy.array",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant_value",
"tensorflow.python.framework.constant_op.constant"
]
] |
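A minimal, hedged sketch of the structure utilities exercised in the row above: the row's code uses the internal tensorflow.python.data.util.nest module, while this illustrative snippet (not part of the dataset row) uses the equivalent public tf.nest namespace; the example structure is made up.

import tensorflow as tf

structure = (((1, 2), 3), 4, (5, 6))
flat = tf.nest.flatten(structure)                      # [1, 2, 3, 4, 5, 6]
doubled = tf.nest.map_structure(lambda x: x * 2, structure)
repacked = tf.nest.pack_sequence_as(structure, flat)   # restores the original nesting
tf.nest.assert_same_structure(structure, doubled)      # same shape, new leaf values
print(flat, doubled, repacked)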
tblondelle/TransferLearningProject
|
[
"1c6a9bba2480919e22dd08756f328a47a321eafa",
"1c6a9bba2480919e22dd08756f328a47a321eafa",
"1c6a9bba2480919e22dd08756f328a47a321eafa"
] |
[
"learning/MLP_base.py",
"learning/classifiers_in_progress/Word2Vec_legacy.py",
"learning/classifiers_in_progress/Regressors.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function, division\nfrom io import open\nimport unicodedata\nimport string\nimport re\nimport random\nimport os\nimport time\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\nfrom numpy.random import permutation\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\n\nimport time\nimport math\n\n\nimport matplotlib.pyplot as plt\n\nuse_cuda = torch.cuda.is_available()\nprint(\"Utilisation de la carte graphique :\",use_cuda)\n\n\ndef asMinutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\ndef timeSince(since, percent):\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s (- %s)' % (asMinutes(s), asMinutes(rs))\n\n\nclass my_MLP(nn.Module):\n def __init__(self, input_size, hidden_size,batch_size, n_layers=1):\n super(my_MLP, self).__init__()\n self.n_layers = n_layers\n self.hidden_size = hidden_size\n self.batch_size = batch_size\n self.input_size = input_size\n\n\n self.linear1 = nn.Linear(input_size,hidden_size)\n self.linear2 = nn.Linear(hidden_size, hidden_size)\n self.linear3 = nn.Linear(hidden_size, 1)\n\n # le réseaux linéaires sert à ce que la sortie ait la bonne taille\n\n\n\n def forward(self, input):\n # Entrées :\n # input (variable(mat)) : les instances\n # Sortie\n # Variable(vect) : les prédictions\n X_int_1 = F.relu(self.linear1(input))\n X_int_2 = F.relu(self.linear2(X_int_1))\n return torch.tanh(self.linear3(X_int_2))\n\n\n\n\n\n def train_once(self, input_variable, target_variable, optimizer, criterion):\n # Réalise l'entraînement pour une seule epoch\n # Entrées :\n # - n_epochs (int) : nombre de fois qu'on applique toutes les instance de l'ensemble d'apprentissage\n # - input_variable Variable(mat) : instances d'apprentissage\n # - target_variable Variable(vect(+1|-1))) : labels\n # - optimizer (pytorch object) : le résultat de optim.SGD ou optim.Adam\n # - criterion (pytorch object) : le résultat de nn.L1Loss ou nn.MSELoss\n # Sorties :\n # none\n optimizer.zero_grad()\n input_length = input_variable.size()[0]\n\n output= self(input_variable)\n\n loss = criterion(output.view(1,-1), target_variable.view(-1))\n loss.backward()\n\n optimizer.step()\n\n return loss.data[0]\n\n\n\n def trainIters(self, n_epochs, training_pairs, te_pairs, learning_rate, print_every=1000, eval_every = 1000):\n # Réalise l'entraînement complet, à partir des ensembles d'apprentissage\n # Entrées :\n # - n_epochs (int) : nombre de fois qu'on applique toutes les instance de l'ensemble d'apprentissage\n # - training_pairs (Variable(mat), Variable(vect(+1|-1))) ) : instances d'apprentissage\n # - te_pairs (list of (Variable(vect), Variable(+1|-1))) : instances de test\n # - learning_rate (float) : devine ;)\n # - print_every (int) : imprime l'erreur moyenne toutes les print_every epochs\n # - eval_every (int) : teste le NN sur la base de test et imprime la matrice de confusion\n # Sorties :\n # none\n\n start = time.time()\n plot_losses = []\n print_loss_total = 0 # Reset every print_every\n\n #optimizer = optim.SGD(self.parameters(), lr=learning_rate)\n # Autre choix possible :\n optimizer = optim.Adam(self.parameters(), lr=learning_rate)\n\n\n criterion = nn.L1Loss()\n #criterion = nn.MSELoss()\n\n for epoch in range(1, n_epochs + 1):\n\n input_variable = 
training_pairs[0]\n target_variable = training_pairs[1]\n\n loss = self.train_once(input_variable, target_variable, optimizer, criterion)\n print_loss_total += loss\n\n if epoch % print_every == 0:\n print_loss_avg = print_loss_total / print_every\n print_loss_total = 0\n print('%s (%d %d%%) %.4f' % (timeSince(start, epoch / n_epochs),\n epoch, epoch / n_epochs * 100, print_loss_avg))\n\n if epoch % eval_every == 0:\n self.evaluateRandomly(te_pairs) # show global results\n\n\n\n\n\n\n\n def evaluateRandomly(self, pairs):\n # evaluate on all pairs, print the confusion matrix\n n_successes = 0\n n_pos = 0 # also computes the proportion of positive reviews\n\n TP,TN,FP,FN = 0,0,0,0\n for pair in pairs: # replace with pairs[:n] for testing\n output = self(pair[1])\n #success = (output[int(pair[1])] == max(output))\n note = pair[0].data[0,0]\n predicted = output.data[0]\n\n success = (note*predicted > 0)\n if success :\n n_successes += 1\n if note>0:\n TP += 1\n else:\n TN += 1\n else:\n if note>0:\n FP += 1\n else:\n FN += 1\n\n n_pos = n_pos+1 if note==1 else n_pos\n\n print('')\n print('')\n print('Confusion matrix ')\n print()\n print(\" \\t\\t Actual class\")\n print(\" \\t\\t Pos \\t Neg\")\n print(\"Predicted Pos \\t {} \\t {}\".format(TP,FN))\n print(\" Neg \\t {} \\t {}\".format(FP,TN))\n print('')\n print('\\t \\t \\t \\t Positive reviews (%)) : ',100*n_pos/len(pairs))\n print('\\t \\t \\t \\t Success rate (%) : ',100*n_successes/len(pairs))\n\n\n# overriding getData to only load 1 folder\ndef getData(folder):\n \"\"\"\n Input:\n - folder: string of the path of a folder containing txt files.\n Output:\n - listdata: list of [Y, X] (e.g. Y = 'Positive', X = \"very cool\")\n \"\"\"\n listdata = []\n\n filenames = os.listdir(folder)\n for filename in filenames[:1]: # change here\n\n with open(os.path.join(folder, filename), 'r') as f:\n for line in f:\n\n line2 = line.strip().split('\\t')\n if len(line2) == 2:\n listdata.append(line2)\n return listdata\n\n\ndef folder2data(train_filename,test_filename,balanced_tr ,balanced_te, n_features):\n # Entrées :\n # - train_filename (str) : le nom du **dossier** (et pas le nom du fichier) où se trouvent les instances d'apprentissage\n # - test_filename (str) : le nom du **dossier** (et pas le nom du fichier) où se trouvent les instances de test\n # - balanced_tr (bool) : True si l'ensemble d'apprentissage est équilibré; False s'il est laissé tel quel\n # - balanced_te (bool) : True si l'ensemble de test est équilibré; False s'il est laissé tel quel\n # - n_features (int) : nombre de variables pour coder chaque instance\n # Sorties :\n # - cuple (new_tr_pairs, new_te_pairs):\n # new_tr_pairs : (Variable(mat), Variable(vect(+1|-1))) )\n # new_te_pairs : (list of (Variable(vect), Variable(+1|-1)))\n\n tr_te_pairs = {}\n\n pairs = getData(train_filename)\n\n print(pairs[:2])\n\n\n if balanced_tr :\n #Pour un équilibrage 75/25\n pairs_using_numbers = [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]\n Positive_reviews = [(1,text) for (target,text) in pairs if target == 'Positive']\n pairs_using_numbers += Positive_reviews[:int(len(pairs_using_numbers)*3)]\n tr_pairs = pairs_using_numbers\n \"\"\"\n #Pour un équilibrage 50/50\n pairs_using_numbers = [(-1,text) for (target,text) in pairs if target == 'Negative']\n Positive_reviews = [(1,text) for (target,text) in pairs if target == 'Positive']\n pairs_using_numbers += Positive_reviews[:int(len(pairs_using_numbers))]\n tr_pairs = pairs_using_numbers\n \"\"\"\n else 
:\n pairs_using_numbers = [(1,text) for (target,text) in pairs if target == 'Positive']\n pairs_using_numbers += [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]\n tr_pairs = pairs_using_numbers\n\n pairs = getData(test_filename)\n\n print(pairs[:2])\n\n if balanced_te :\n pairs_using_numbers = [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]\n Positive_reviews = [(1,text) for (target,text) in pairs if target == 'Positive']\n pairs_using_numbers += Positive_reviews[:int(len(pairs_using_numbers))]\n te_pairs = pairs_using_numbers\n\n else :\n pairs_using_numbers = [(1,text) for (target,text) in pairs if target == 'Positive']\n pairs_using_numbers += [(-1,text) for (target,text) in pairs if (target == 'Negative' or target == 'Neutral')]\n te_pairs = pairs_using_numbers\n\n\n print([text for (_,text) in tr_pairs[:2]])\n\n tfidf_vectorizer = TfidfVectorizer(ngram_range=(1,2))\n tfidf_vectorizer.fit([ text for (_,text) in tr_pairs+te_pairs])\n\n # fitting\n X_tr_token = tfidf_vectorizer.transform([ text for (_,text) in tr_pairs])\n X_te_token = tfidf_vectorizer.transform([ text for (_,text) in te_pairs])\n\n truncatedsvd = TruncatedSVD(n_components=n_features) # prépare à projeter les données dans un espace à n_components dimensions\n truncatedsvd.fit(X_tr_token)\n truncatedsvd.fit(X_te_token)\n\n\n # Réduction de dimension\n X_tr_reduced_dim = truncatedsvd.transform(X_tr_token)\n X_te_reduced_dim = truncatedsvd.transform(X_te_token)\n\n print('part de la variance conservée :',sum(truncatedsvd.explained_variance_ratio_))\n\n new_tr_pairs = [Variable(torch.FloatTensor(X_tr_reduced_dim)),Variable(torch.FloatTensor([[note for (note,_) in tr_pairs]]))]\n\n\n new_te_pairs = []\n for i in range(len(te_pairs)):\n (note,_) = te_pairs[i]\n note = Variable(torch.FloatTensor([[note]]))\n\n vect = X_te_reduced_dim[i,:]\n variable_vect = torch.autograd.Variable(torch.Tensor(vect))\n new_te_pairs.append((note,variable_vect))\n return new_tr_pairs, new_te_pairs\n\n\n\n\n\n\n\n\n\n\n\n\n# ==================================================================\n# ================ Using the MLP in itself =========================\n# ==================================================================\n\ntraining_set_folder = \"../../data/data_books_training_set\"\ntest_set_folder = \"../../data/data_videos_testing_set\"\n#test_set_folder = \"../../data/data_books_testing_set\"\n\n\nn_features = 200\ntr_pairs,te_pairs = folder2data(training_set_folder,test_set_folder,balanced_tr = True,balanced_te = True,n_features=n_features)\n\nhidden_size = 100\nbatch_size = tr_pairs[0].data.size()[0]\n\nMLP = my_MLP(n_features, hidden_size, batch_size, n_layers = 1)\n#MLP.evaluateNpairs(te_pairs,1) # show some examples\n\n\nlr = 0.005\nN_epochs = 20000\nprint(\"learning rate\",lr)\nprint(batch_size,'instances')\nMLP.trainIters( N_epochs,tr_pairs,te_pairs,lr,500,5000)\n\nMLP.evaluateRandomly(te_pairs) # show global results\n\ntorch.save(MLP,'MLP')\n#cours ; cd 2eme_partie_S9/Transfer_learning/TransferLearningProject/learning/ ; python MLP_base.py\n\n\nprint('')\nprint('')\n\nprint(' Done')\nprint('')\nprint('')\nprint('')\n",
"from gensim.models.word2vec import Word2Vec\nimport numpy as np\nfrom nltk.stem.lancaster import LancasterStemmer\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nimport os \nimport multiprocessing\nfrom time import *\n\nDIRECTORY = \"C:/Users/Antoine/Documents/Centrale/3A/Transfer_learning/\" # The directory must contain the DataLoader script\nos.chdir(DIRECTORY)\n\nfrom data_loader import *\n\nclass W2V():\n def __init__(self,vector_size,window_size,threshold_factor,model_train_data,correlation_train_data,correlation_test_data):\n '''\n This class is used to train a word2vec model and use it to determine the note of a review\n - First, the word2vec model is trained on the model_train_data corpus\n - Then the correlations between the values of the vector and the note of the reviews from the correlation_train_data corpus are computed\n - Then we use those correlations to determine the notes of the correlation_test_data corpus, \n we compute a score, if this score is positive, so is the review and the other way around \n \n\n \n NOTE : The model_train_data corpus should contain reviews from both the train and test corpuses for increased efficiency.\n '''\n self.vector_size = vector_size\n self.window_size = window_size \n self.model = None\n self.model_train_data = model_train_data\n self.correlation_train_data = correlation_train_data\n self.correlation_test_data = correlation_test_data\n self.means = None\n self.correlations = None\n self.threshold_factor = threshold_factor\n self.threshold = 0\n def train(self,save_filename):\n '''\n Will train the Word2Vec model and save it in the file save_filename, the model can be loaded later with the load_model method\n '''\n train_data = []\n for line in self.model_train_data:\n train_data.append(line[1])\n self.model = Word2Vec(sentences=train_data,\n size=self.vector_size, \n window=self.window_size, \n negative=20,\n iter=10,\n seed=100,\n workers=multiprocessing.cpu_count())\n self.model.save(save_filename)\n def load_model(self,save_filename):\n self.model = Word2Vec.load(save_filename)\n def tokens_to_vect(self):\n '''\n Transforms the tokenized text from the train and test datasets into vectors using the Word2Vec model \n '''\n vects = []\n for line in self.correlation_train_data:\n vect_line = []\n vect_line.append(line[0])\n vect_line.append(np.zeros(self.vector_size))\n for token in line[1]:\n try:\n vect_line[1] += self.model[token]\n except:\n ()\n norm = np.linalg.norm(vect_line[1])\n if norm>0:\n vect_line[1] = vect_line[1]/norm\n vects.append(vect_line)\n self.correlation_train_data = vects\n \n vects = []\n for line in self.correlation_test_data:\n vect_line = []\n vect_line.append(line[0])\n vect_line.append(np.zeros(self.vector_size))\n for token in line[1]:\n try:\n vect_line[1] += self.model[token] \n except:\n ()\n norm = np.linalg.norm(vect_line[1])\n if norm>0:\n vect_line[1] = vect_line[1]/norm\n vects.append(vect_line)\n self.correlation_test_data = vects\n \n def compute_correlations(self):\n '''\n Computes the correlations between the values of the vectors and the notes of the reviews from the training set\n '''\n self.means = [0 for i in range(self.vector_size)]\n self.correlations = [0 for i in range(self.vector_size)]\n n = len(self.correlation_train_data)\n for line in self.correlation_train_data:\n for i in range(self.vector_size):\n self.means[i] += line[1][i]/n\n for line in self.correlation_train_data:\n for i in range(self.vector_size):\n self.correlations[i] += 
(line[1][i]-self.means[i])*(2*(line[0]>3)-1)\n def get_efficiency(self):\n '''\n Tries to guess the notes of the test set and returns the efficiency\n '''\n n = len(self.correlation_test_data)\n self.means = [0 for i in range(self.vector_size)]\n for line in self.correlation_test_data:\n for i in range(self.vector_size):\n self.means[i] += line[1][i]/n\n n_ignored = 0\n n_treated = 0\n good = 0\n for line in self.correlation_test_data:\n n_treated += 1\n score = 0\n for i in range(self.vector_size): \n score += self.correlations[i] * (line[1][i]-self.means[i])\n if abs(score)<self.threshold:\n n_ignored += 1\n else:\n if score * (2*(line[0]>3)-1) > 0:\n good += 1\n return(\"efficiency : \"+str(100*good/(n-n_ignored)) + \"% ; reviews ignored : \"+str(n_ignored) )\n def compute_threshold(self):\n '''\n Compute the threshold above which a score is considered as significant \n '''\n n = len(self.correlation_test_data)\n self.means = [0 for i in range(self.vector_size)]\n for line in self.correlation_test_data:\n for i in range(self.vector_size):\n self.means[i] += line[1][i]/n\n \n def predict(self,review):\n '''\n Returns 0 if the score of the review is too close to 0 (can not determine if positive or negative)\n Returns -1 if negative and 1 if positive\n '''\n review = review.split()\n vect = np.zeros(self.vector_size)\n for token in review:\n try:\n vect += self.model[token] \n except:\n ()\n norm = np.linalg.norm(vect)\n if norm>0:\n vect = vect/norm\n score = 0\n for i in range(self.vector_size): \n score += self.correlations[i] * (vect[i]-self.means[i])\n if abs(score)<self.threshold:\n return(0)\n return(2*(score>0)-1)\n \n \n### CLEANING \n\nloader = DataLoader(\"text_data\") #The text files that will be loaded must be contained in DIRECTORY/text_data\n\nSTOPWORDS = stopwords.words('english')\ntkr = RegexpTokenizer('[a-zA-Z0-9@]+')\nstemmer = LancasterStemmer()\n\nmodel_train_data = []\ncorrelation_train_data = []\ncorrelation_test_data = []\n\n# Loading the training data\ndata = loader.load_raw_data(\"apps.txt\")[:10000]\nfor line in data:\n line[1].strip().lower()\n\nn = len(data)\n\n# Cleaning the correlations training data\nfor i, line in enumerate(data):\n if i%int(n/100) == 0:\n print(\"cleaning train data : \", int(100*i/n),\"%\")\n tok = tkr.tokenize(line[1])\n tokens = []\n for t in tok:\n if t not in STOPWORDS:\n tokens.append(stemmer.stem(t)) \n correlation_train_data.append([line[0],tokens])\n \n# Loading the test data\ndata = loader.load_raw_data(\"cell_phones.txt\")[:10000]\nfor line in data:\n line[1].strip().lower()\n\nn = len(data)\n\n# Cleaning the test data\nfor i, line in enumerate(data):\n if i%int(n/100) == 0:\n print(\"cleaning test data : \", int(100*i/n),\"%\")\n tok = tkr.tokenize(line[1])\n tokens = []\n for t in tok:\n if t not in STOPWORDS:\n tokens.append(stemmer.stem(t)) \n correlation_test_data.append([line[0],tokens])\n\n# Creating the Word2Vec training data\nmodel_train_data = correlation_train_data + correlation_test_data\n\n### Création et apprentissage du Word2Vec et calcul des corrélations\n\nw = W2V(20,5,0.4,model_train_data,correlation_train_data,correlation_test_data)\n\nprint(\"training model\")\nw.train(\"testw2vclass\")\n\nprint(\"tokens_to_vect\")\nw.tokens_to_vect()\n\nprint(\"computing correlations\")\nw.compute_correlations()\n\nprint(\"computing threshold\")\nw.compute_threshold()\n\nprint(w.get_efficiency())\n\n\n",
"from sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error\n\n\n\n\n\nclass BaseRegressor():\n def __init__(self):\n # http://scikit-learn.org/stable/modules/linear_model.html\n self.dct = {\n 'linear':linear_model.LinearRegression(),\n 'Ridge':linear_model.Ridge(alpha = .5), # linéaire avec pénalisation de complexité\n 'lasso':linear_model.Lasso(alpha = 0.1)\n \n }\n self.successes = {}\n\n def train(self,X,Y):\n # Entrées :\n # X = numpy array (N_instances,N_features)\n # Y = numpy array (N_instances)\n # Sorties :\n # None\n \n limit = (9*X.shape[0])//10\n X_train,Y_train = X[:limit,:],Y[:limit]\n X_val,Y_val= X[limit:,:],Y[limit:]\n \n for reg_name in self.dct:\n reg = self.dct[reg_name]\n reg.fit(X_train,Y_train)\n preds = reg.predict(X_val)\n self.successes[reg_name] = 1./mean_squared_error(preds,Y_val)\n \n \n def predict(self,X):\n # Entrées :\n # X = numpy array (N_instances,N_features)\n # Sorties :\n # Y = numpy array (N_instances)\n \n predictions = np.zeros((X.shape[0],))\n \n for name in self.dct:\n reg = self.dct[name]\n predictions += reg.predict(X)*self.successes[name]\n \n predictions /= sum([self.successes[name] for name in self.successes ])\n return predictions\n \n \n \n\n\n\"\"\"\nimport numpy as np \nA = np.random.rand(100,20) \n\n\ndef f(A):\n Y = []\n for i in range(A.shape[0]) :\n y = sum([j*A[i,j] for j in range(A.shape[1])])\n Y.append(y)\n Y = np.array(Y)\n return Y\n\nY_train = f(A)\n\n\nepsilon = 0.005*(np.random.rand(100,20) -0.5)\nX_train = A + epsilon \n \n\nX_test = np.random.rand(100,20) \nY_test = f(X_test)\n \nR = BaseRegressor()\nR.train(A,Y_train)\nY_pred = R.predict(X_test)\nprint(mean_squared_error(Y_pred,Y_test))\n\n\nR = BaseRegressor()\nR.train(X_train,Y_train)\nY_pred = R.predict(X_test)\nprint(mean_squared_error(Y_pred,Y_test))\n\n\n\n\"\"\"\n\n\n\n\n\n"
] |
[
[
"sklearn.decomposition.TruncatedSVD",
"torch.Tensor",
"torch.nn.Linear",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.nn.L1Loss",
"sklearn.feature_extraction.text.TfidfVectorizer",
"torch.save"
],
[
"numpy.zeros",
"numpy.linalg.norm"
],
[
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.Ridge",
"sklearn.linear_model.LinearRegression",
"sklearn.linear_model.Lasso"
]
] |
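A hedged sketch of the TF-IDF plus TruncatedSVD feature pipeline that MLP_base.py above builds before training; the three-review corpus and the n_components value are illustrative placeholders, not taken from the row.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD

corpus = ["great book, loved it", "terrible plot, boring", "great characters, great story"]
tfidf = TfidfVectorizer(ngram_range=(1, 2))
X = tfidf.fit_transform(corpus)        # sparse matrix, shape (n_docs, n_terms)
svd = TruncatedSVD(n_components=2)     # project onto 2 latent dimensions
X_reduced = svd.fit_transform(X)       # dense array, shape (n_docs, 2)
print(X_reduced.shape, svd.explained_variance_ratio_.sum())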
chencq1234/ssds.pytorch
|
[
"340aeac3e5f15ffeee6750f40bfbd64343926fc9"
] |
[
"lib/dataset/dataset_factory.py"
] |
[
"from lib.dataset import voc\nfrom lib.dataset import coco\n\ndataset_map = {\n 'voc': voc.VOCDetection,\n 'coco': coco.COCODetection,\n }\n\ndef gen_dataset_fn(name):\n \"\"\"Returns a dataset func.\n\n Args:\n name: The name of the dataset.\n\n Returns:\n func: dataset_fn\n\n Raises:\n ValueError: If network `name` is not recognized.\n \"\"\"\n if name not in dataset_map:\n raise ValueError('The dataset unknown %s' % name)\n func = dataset_map[name]\n return func\n\n\nimport torch\nimport numpy as np\n\ndef detection_collate(batch):\n \"\"\"Custom collate fn for dealing with batches of images that have a different\n number of associated object annotations (bounding boxes).\n\n Arguments:\n batch: (tuple) A tuple of tensor images and lists of annotations\n\n Return:\n A tuple containing:\n 1) (tensor) batch of images stacked on their 0 dim\n 2) (list of tensors) annotations for a given image are stacked on 0 dim\n \"\"\"\n targets = []\n imgs = []\n for _, sample in enumerate(batch):\n for _, tup in enumerate(sample):\n if torch.is_tensor(tup):\n imgs.append(tup)\n elif isinstance(tup, type(np.empty(0))):\n annos = torch.from_numpy(tup).float()\n targets.append(annos)\n\n return (torch.stack(imgs, 0), targets)\n\nfrom lib.utils.data_augment import preproc\nfrom lib.utils.amdegroot_augmentations import SSDAugmentation\nimport torch.utils.data as data\n\n\ndef load_data(cfg, phase):\n if phase == 'train':\n dataset = dataset_map[cfg.DATASET](cfg.DATASET_DIR, cfg.TRAIN_SETS, preproc(cfg.IMAGE_SIZE,\n cfg.PIXEL_MEANS, cfg.PROB), transform=SSDAugmentation(cfg.IMAGE_SIZE,\n cfg.PIXEL_MEANS))\n data_loader = data.DataLoader(dataset, cfg.TRAIN_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,\n shuffle=True, collate_fn=detection_collate, pin_memory=True)\n if phase == 'eval':\n dataset = dataset_map[cfg.DATASET](cfg.DATASET_DIR, cfg.TEST_SETS, preproc(cfg.IMAGE_SIZE, cfg.PIXEL_MEANS, -1))\n data_loader = data.DataLoader(dataset, cfg.TEST_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,\n shuffle=False, collate_fn=detection_collate, pin_memory=True)\n if phase == 'test':\n dataset = dataset_map[cfg.DATASET](cfg.DATASET_DIR, cfg.TEST_SETS, preproc(cfg.IMAGE_SIZE, cfg.PIXEL_MEANS, -2))\n data_loader = data.DataLoader(dataset, cfg.TEST_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,\n shuffle=False, collate_fn=detection_collate, pin_memory=True)\n if phase == 'visualize':\n dataset = dataset_map[cfg.DATASET](cfg.DATASET_DIR, cfg.TEST_SETS, preproc(cfg.IMAGE_SIZE, cfg.PIXEL_MEANS, 1))\n data_loader = data.DataLoader(dataset, cfg.TEST_BATCH_SIZE, num_workers=cfg.NUM_WORKERS,\n shuffle=False, collate_fn=detection_collate, pin_memory=True)\n return data_loader\n"
] |
[
[
"torch.utils.data.DataLoader",
"torch.from_numpy",
"torch.is_tensor",
"torch.stack",
"numpy.empty"
]
] |
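A hedged sketch of the detection_collate pattern used in dataset_factory.py above: images are stacked along dim 0 while variable-length annotation arrays remain a list of tensors; the two dummy samples are invented for illustration.

import numpy as np
import torch
from torch.utils.data import DataLoader

samples = [
    (torch.zeros(3, 32, 32), np.array([[0.1, 0.1, 0.5, 0.5, 1.0]])),
    (torch.zeros(3, 32, 32), np.array([[0.2, 0.2, 0.6, 0.6, 2.0],
                                       [0.0, 0.0, 0.3, 0.3, 3.0]])),
]

def detection_collate(batch):
    # stack images, keep per-image annotations as a ragged list
    imgs = [img for img, _ in batch]
    targets = [torch.from_numpy(anno).float() for _, anno in batch]
    return torch.stack(imgs, 0), targets

loader = DataLoader(samples, batch_size=2, collate_fn=detection_collate)
images, targets = next(iter(loader))
print(images.shape, [t.shape for t in targets])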
ninamiolane/move
|
[
"83ab147ad1ebab6972591357f02fa29e186116f0"
] |
[
"move/config.py"
] |
[
"import logging\nimport torch\n\n#Set the configuration of the model \nlogging.info('Confirgure the run')\nbatch_size = 8\nlearning_rate= 3e-4\nepochs = 10\nseq_len=128\nnegative_slope = 0 #LeakyRelu\n\nlogging.info('Setup device')\nif torch.cuda.is_available():\n device = torch.device('cuda')\nelse:\n device = torch.device('cpu')"
] |
[
[
"torch.device",
"torch.cuda.is_available"
]
] |

sc0ttms/SE-TFCN
|
[
"466a2d641c6ff4184c768c1e7aaf2b8a8158ce51"
] |
[
"dataset/compute_metrics.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport argparse\nimport toml\nimport librosa\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nfrom joblib import Parallel, delayed\n\nsys.path.append(os.getcwd())\nfrom audio.metrics import SI_SDR, STOI, WB_PESQ, NB_PESQ, REGISTERED_METRICS\n\n\ndef calculate_metric(noisy_file, clean_file, sr=16000, metric_type=\"STOI\", pre_load=False):\n # get noisy, clean\n if pre_load == False:\n noisy, _ = librosa.load(noisy_file, sr=sr)\n clean, _ = librosa.load(clean_file, sr=sr)\n else:\n noisy = noisy_file\n clean = clean_file\n assert len(noisy) == len(clean)\n\n # get metric score\n if metric_type in [\"SI_SDR\"]:\n return SI_SDR(noisy, clean)\n elif metric_type in [\"STOI\"]:\n return STOI(noisy, clean, sr=sr)\n elif metric_type in [\"WB_PESQ\"]:\n return WB_PESQ(noisy, clean)\n elif metric_type in [\"NB_PESQ\"]:\n return NB_PESQ(noisy, clean)\n\n\ndef compute_metric(noisy_files, clean_files, metrics, n_folds=1, n_jobs=8, pre_load=False):\n for metric_type, _ in metrics.items():\n assert metric_type in REGISTERED_METRICS\n\n split_num = len(noisy_files) // n_folds\n score = []\n for n in range(n_folds):\n metric_score = Parallel(n_jobs=n_jobs)(\n delayed(calculate_metric)(\n noisy_file,\n clean_file,\n sr=8000 if metric_type in [\"NB_PESQ\"] else 16000,\n metric_type=metric_type,\n pre_load=pre_load,\n )\n for noisy_file, clean_file in tqdm(\n zip(\n noisy_files[n * split_num : (n + 1) * split_num],\n clean_files[n * split_num : (n + 1) * split_num],\n )\n )\n )\n score.append(np.mean(metric_score))\n metrics[metric_type] = np.mean(score)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"compute_metrics\")\n parser.add_argument(\"-c\", \"--config\", required=True, type=str, help=\"Config (*.toml).\")\n args = parser.parse_args()\n\n # get dataset path\n dataset_path = os.path.join(os.getcwd(), \"dataset_csv\")\n\n # get set path\n train_path = os.path.join(dataset_path, \"train.csv\")\n valid_path = os.path.join(dataset_path, \"valid.csv\")\n test_path = os.path.join(dataset_path, \"test.csv\")\n\n # get train files\n train_files = pd.read_csv(train_path).values\n train_noisy_files = train_files[:, 0].reshape(1, len(train_files))[0]\n train_clean_files = train_files[:, 1].reshape(1, len(train_files))[0]\n # get valid files\n valid_files = pd.read_csv(valid_path).values\n valid_noisy_files = valid_files[:, 0].reshape(1, len(valid_files))[0]\n valid_clean_files = valid_files[:, 1].reshape(1, len(valid_files))[0]\n # get test files\n test_files = pd.read_csv(test_path).values\n test_noisy_files = test_files[:, 0].reshape(1, len(test_files))[0]\n test_clean_files = test_files[:, 1].reshape(1, len(test_files))[0]\n\n # get compute metrics config\n config = toml.load(args.config)\n # get n_jobs\n n_folds = config[\"ppl\"][\"n_folds\"]\n n_jobs = config[\"ppl\"][\"n_jobs\"]\n\n # get metrics\n metrics = {\n \"SI_SDR\": [],\n \"STOI\": [],\n \"WB_PESQ\": [],\n \"NB_PESQ\": [],\n }\n\n # compute train metrics\n compute_metric(\n train_noisy_files, train_clean_files, metrics, n_folds=n_folds, n_jobs=n_jobs, pre_load=False,\n )\n # save train metrics\n df = pd.DataFrame(metrics, index=[\"train\"])\n df.to_csv(os.path.join(dataset_path, \"train_metrics.csv\"))\n\n # get metrics\n metrics = {\n \"SI_SDR\": [],\n \"STOI\": [],\n \"WB_PESQ\": [],\n \"NB_PESQ\": [],\n }\n\n # compute valid metrics\n compute_metric(\n valid_noisy_files, valid_clean_files, metrics, n_folds=n_folds, n_jobs=n_jobs, pre_load=False,\n 
)\n # save train metrics\n df = pd.DataFrame(metrics, index=[\"valid\"])\n df.to_csv(os.path.join(dataset_path, \"valid_metrics.csv\"))\n\n # get metrics\n metrics = {\n \"SI_SDR\": [],\n \"STOI\": [],\n \"WB_PESQ\": [],\n \"NB_PESQ\": [],\n }\n\n # compute test metrics\n compute_metric(\n test_noisy_files, test_clean_files, metrics, n_folds=n_folds, n_jobs=n_jobs, pre_load=False,\n )\n # save train metrics\n df = pd.DataFrame(metrics, index=[\"test\"])\n df.to_csv(os.path.join(dataset_path, \"test_metrics.csv\"))\n"
] |
[
[
"pandas.read_csv",
"numpy.mean",
"pandas.DataFrame"
]
] |
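A hedged sketch of the joblib fan-out used by compute_metric above; dummy_metric stands in for the real SI_SDR/STOI/PESQ scorers, which are not reproduced here, and the random signals are placeholders.

import numpy as np
from joblib import Parallel, delayed

def dummy_metric(noisy, clean):
    # placeholder scorer: mean squared difference between the two signals
    return float(np.mean((noisy - clean) ** 2))

rng = np.random.default_rng(0)
pairs = [(rng.normal(size=160), rng.normal(size=160)) for _ in range(8)]
scores = Parallel(n_jobs=2)(delayed(dummy_metric)(n, c) for n, c in pairs)
print(np.mean(scores))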
xwhan/fairseq-wklm
|
[
"9c7c927fca75cd2b08c0207ff7f7682ed95a98e0"
] |
[
"fairseq/modules/fb_elmo_token_embedder.py"
] |
[
"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nfrom typing import Dict, List\n\nimport torch\nfrom torch import nn\n\nfrom fairseq.models import FairseqLanguageModel\nfrom fairseq.utils import buffered_arange\n\n\nclass ElmoTokenEmbedder(nn.Module):\n \"\"\"\n This is an implementation of the ELMo module which allows learning how to combine hidden states of a language model\n to learn task-specific word representations.\n For more information see the paper here: http://arxiv.org/abs/1802.05365\n\n This implementation was inspired by the implementation in AllenNLP found here:\n https://github.com/allenai/allennlp/blob/master/tutorials/how_to/elmo.md\n \"\"\"\n\n def __init__(\n self,\n language_model: FairseqLanguageModel,\n eos: int,\n pad: int,\n tune_lm: bool = False,\n lm_frozen_layers: int = 0,\n lm_tune_embedding: bool = False,\n weights_dropout: float = 0.,\n final_dropout: float = 0.,\n layer_norm: bool = True,\n affine_layer_norm: bool = False,\n projection_dim: int = None,\n apply_softmax: bool = True,\n combine_tower_states: bool = True,\n add_final_predictive: bool = True,\n add_final_context: bool = True,\n add_bos: bool = False,\n add_eos: bool = False,\n remove_bos: bool = False,\n remove_eos: bool = False,\n char_inputs: bool = False,\n max_char_len: int = 50,\n use_boundary_tokens: bool = False,\n ):\n\n super().__init__()\n\n self.onnx_trace = False\n self.language_model = language_model\n self.eos_idx = eos\n self.padding_idx = pad\n self.tune_lm = tune_lm\n self.combine_tower_states = combine_tower_states\n self.add_final_predictive = add_final_predictive\n self.add_final_context = add_final_context\n self.add_bos = add_bos\n self.add_eos = add_eos\n self.remove_bos = remove_bos\n self.remove_eos = remove_eos\n self.char_inputs = char_inputs\n # use_boundary_tokens will only use the bos/eos of the ELMO last layer,\n # will override some other options in _lm_states and forward,\n # for the purpose of fine-tuning the language model\n self.use_boundary_tokens = use_boundary_tokens\n\n if self.use_boundary_tokens:\n # make sure the bos and eos are not remove in fine tuning case\n assert (not self.remove_bos)\n assert (not self.remove_eos)\n\n self.num_layers = len(language_model.decoder.forward_layers)\n if self.add_final_context:\n self.num_layers += 1\n if not self.combine_tower_states:\n self.num_layers *= 2\n # +1 for token embedding layer\n self.num_layers += 1\n if language_model.decoder.self_target and self.add_final_predictive:\n self.num_layers += 1\n\n self.dim = language_model.decoder.embed_dim\n if not self.use_boundary_tokens and self.combine_tower_states:\n self.dim *= 2\n self.embedding_dim = projection_dim or self.dim\n\n self.weights_dropout = nn.Dropout(weights_dropout)\n self.final_dropout = nn.Dropout(final_dropout)\n self.layer_norm = nn.LayerNorm(self.dim, elementwise_affine=affine_layer_norm) if layer_norm else None\n\n if self.use_boundary_tokens:\n self.weights = None\n self.gamma = None\n else:\n self.weights = nn.Parameter(torch.ones(self.num_layers))\n self.gamma = nn.Parameter(torch.ones(1))\n\n self.softmax = nn.Softmax(dim=0) if apply_softmax else None\n\n self.projection = nn.Linear(self.dim, projection_dim,\n bias=False) if projection_dim is not None and projection_dim != self.dim else None\n\n 
trainable_params, non_trainable_params = self._get_params_by_trainability(\n lm_frozen_layers, lm_tune_embedding\n )\n\n self.trainable_params_by_layer: List[Dict[str, nn.Parameter]] = trainable_params\n for p in non_trainable_params:\n p.requires_grad = False\n if not tune_lm:\n language_model.eval()\n\n def _get_params_by_trainability(self, lm_frozen_layers, lm_tune_embedding):\n non_lm_params = self._non_lm_parameters()\n\n if not self.tune_lm:\n # Only non-lm parameters are trainable\n return [non_lm_params], self.language_model.parameters()\n\n if not hasattr(self.language_model, \"get_layers_by_depth_for_fine_tuning\"):\n assert lm_frozen_layers == 0\n # All params are trainable\n return [dict(self.named_parameters())], []\n\n lm_params_by_layer = self._lm_parameters_by_layer()\n assert len(lm_params_by_layer) >= lm_frozen_layers + 1 # +1 for embedding\n\n trainable_lm_params = []\n non_trainable_lm_params = []\n\n if lm_tune_embedding:\n trainable_lm_params.append(lm_params_by_layer[0])\n else:\n non_trainable_lm_params.append(lm_params_by_layer[0])\n\n trainable_lm_params.extend(lm_params_by_layer[lm_frozen_layers + 1:])\n non_trainable_lm_params.extend(lm_params_by_layer[1: lm_frozen_layers + 1])\n\n trainable_params = trainable_lm_params + [non_lm_params]\n non_trainable_params = [\n p for param_dict in non_trainable_lm_params for p in param_dict.values()\n ]\n return trainable_params, non_trainable_params\n\n def _non_lm_parameters(self):\n non_lm_parameters = dict(self.named_parameters())\n for name, _ in self.language_model.named_parameters():\n del non_lm_parameters[\"language_model.%s\" % name]\n return non_lm_parameters\n\n def _lm_parameters_by_layer(self):\n lm_layers = self.language_model.get_layers_by_depth_for_fine_tuning()\n return [\n {\n \"language_model.%s.%s\" % (module_name, param_name): param\n for module_name, module in lm_layer.items()\n for param_name, param in module.named_parameters()\n }\n for lm_layer in lm_layers\n ]\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def reset_parameters(self):\n if self.projection:\n nn.init.xavier_normal_(self.projection.weight)\n if self.softmax is None:\n nn.init.constant_(self.weights, 1 / (self.num_layers * 2))\n\n def _lm_states(self, input: torch.Tensor, eos_idx_mask=None):\n \"\"\"apply the language model on the input and get internal states\n Args:\n input: the sentence tensor\n eos_idx_mask: the mask for the index of eos for each sentence\n\n Returns:\n return a list of states from the language model,\n if use_boundary_tokens, only return the last layer\n if combine_tower_states, will combine forward and backward\n \"\"\"\n if self.tune_lm:\n x, model_out = self.language_model(input, src_lengths=None)\n else:\n with torch.no_grad():\n x, model_out = self.language_model(input, src_lengths=None)\n\n if self.use_boundary_tokens:\n bos_state = x[:, 0, :]\n if eos_idx_mask is None:\n return [bos_state.unsqueeze(1)]\n eos_state = x[eos_idx_mask] # batch_size * embeding_size\n return [torch.cat((bos_state.unsqueeze(1), eos_state.unsqueeze(1)), dim=1)]\n\n assert 'inner_states' in model_out\n\n # TBC -> BTC\n states = [s.transpose(0, 1) for s in model_out['inner_states']]\n\n has_final_predictive = len(states) % 2 == 0\n\n if self.add_final_context:\n zeros = states[-1].new_zeros(states[-1].size(0), 1, states[-1].size(2))\n if states[-1].size(1) == 1:\n s1 = s2 = zeros\n else:\n s1 = torch.cat([zeros, states[-1][:, :-1, :]], dim=1)\n s2 = torch.cat([states[-1][:, 1:, :], zeros], dim=1)\n if 
has_final_predictive:\n states.insert(-1, s1)\n states.insert(-1, s2)\n else:\n states.extend([s1, s2])\n\n if self.combine_tower_states:\n new_states = [torch.cat([states[0], states[0]], dim=-1)]\n\n start = 1 # first element is the token embeddings\n end = len(states)\n if has_final_predictive:\n end -= 1\n\n for i in range(start, end, 2):\n new_states.append(torch.cat([states[i], states[i + 1]], dim=-1))\n\n if self.add_final_predictive and has_final_predictive:\n new_states.append(torch.cat([states[-1], states[-1]], dim=-1))\n\n states = new_states\n elif not self.add_final_predictive and has_final_predictive:\n states = states[:-1]\n\n return states\n\n def _with_sentence_boundaries(\n self,\n input: torch.Tensor):\n \"\"\"\n Args:\n input: the sentence Tensor\n it's bs * seq_len * num_chars in case of char input and bs*seq_len in case of token input\n\n Returns:\n tuple,\n 1) processed input,\n 2) tensor mask for the eos position of each sentence,\n None if did not add eos\n \"\"\"\n if not self.add_bos and not self.add_eos:\n return input, None\n\n zero_block = input.new(0, 0)\n block_size = (input.size(0), 1, input.size(2)) if self.char_inputs else (input.size(0), 1)\n bos_block = torch.full(block_size, self.eos_idx).type_as(input) if self.add_bos else zero_block\n pad_block = torch.full(block_size, self.padding_idx).type_as(input) if self.add_eos else zero_block\n\n # add eos in the beginning and pad to the end of the sentence\n input = torch.cat([bos_block, input, pad_block], dim=1)\n\n first_pads = None # if not add_eos, then first_pads is not valid, set to None\n if self.add_eos:\n index_block = input[:, :, 0] if self.char_inputs else input\n padding_mask = index_block.eq(self.padding_idx)\n num_pads = padding_mask.long().sum(dim=1, keepdim=True)\n max_len = input.size(1)\n\n # index of the first pad\n if self.onnx_trace:\n first_pads = torch._dim_arange(input, 1).type_as(input).view(1, -1).\\\n repeat(input.size(0), 1).eq(max_len - num_pads)\n eos_indices = first_pads\n if self.char_inputs:\n eos_indices = eos_indices.unsqueeze(2).repeat(1, 1, input.size(-1))\n input = torch.where(eos_indices, torch.Tensor([self.eos_idx]).type_as(input), input)\n else:\n first_pads = buffered_arange(max_len).type_as(input).view(1, -1).\\\n expand(input.size(0), -1).eq(max_len - num_pads)\n eos_indices = first_pads\n if self.char_inputs:\n eos_indices = eos_indices.unsqueeze(2).expand_as(input)\n input[eos_indices] = self.eos_idx\n\n return input, first_pads\n\n def _without_sentence_boundaries(\n self,\n input: torch.Tensor,\n ):\n if self.remove_bos:\n # remove first token (beginning eos)\n input = input[:, 1:]\n if self.remove_eos:\n # just remove last one to match size since downstream task\n # needs to deal with padding value\n input = input[:, :-1]\n return input\n\n def forward(\n self,\n input: torch.Tensor,\n ):\n input, eos_idx_mask = self._with_sentence_boundaries(input)\n\n states = self._lm_states(input, eos_idx_mask)\n\n if self.use_boundary_tokens:\n return states[0] # only have one element and return it\n\n if self.layer_norm is not None:\n states = [self.layer_norm(s) for s in states]\n\n if self.softmax is not None:\n w = self.softmax(self.weights)\n else:\n w = self.weights\n\n w = self.weights_dropout(w)\n\n x = states[0].new_zeros(input.size()[:2] + (self.dim,))\n for i in range(len(states)):\n x += states[i] * w[i]\n\n x = self._without_sentence_boundaries(x)\n\n if self.projection is not None:\n x = self.projection(x)\n\n x = self.gamma * x\n\n x = 
self.final_dropout(x)\n\n return x\n"
] |
[
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.ones",
"torch.full",
"torch.cat",
"torch.nn.init.constant_",
"torch.Tensor",
"torch.nn.init.xavier_normal_",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.no_grad",
"torch._dim_arange"
]
] |
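A hedged sketch of the softmax-weighted layer mixing ("scalar mix") performed in ElmoTokenEmbedder.forward above; the random layer states and tensor sizes are stand-ins for real language-model activations.

import torch
from torch import nn

num_layers, batch, seq, dim = 3, 2, 5, 8
states = [torch.randn(batch, seq, dim) for _ in range(num_layers)]

weights = nn.Parameter(torch.ones(num_layers))   # one learnable weight per layer
gamma = nn.Parameter(torch.ones(1))              # global scaling factor

w = torch.softmax(weights, dim=0)                # normalised layer weights
mixed = sum(w[i] * states[i] for i in range(num_layers))
out = gamma * mixed                              # task-specific token representation
print(out.shape)  # torch.Size([2, 5, 8])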
revsic/tf-attentive-neural-process
|
[
"efa3bb0a9b6cfebaa3c1e025a9da00aef8d0a1e2"
] |
[
"neural_process/anp.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom neural_process.module.base import Encoder, Decoder, GaussianProb\n\nclass AttentiveNP:\n \"\"\"Attentive Neural Process\n Attributes:\n z_encoder: Encoder, encoder for latent representation\n z_prob: GaussianProb, latent representation to probability distribution\n encoder: Encoder, context encoder with self attention\n cross_encoder: Encoder, cross context encoder with querying value attention\n decoder: Decoder, decoder for context and latent variable\n normal_dist: GaussianProb, converter for decoded context to probability distribution\n\n \"\"\"\n def __init__(self,\n z_output_sizes,\n enc_output_sizes,\n cross_output_sizes,\n dec_output_sizes,\n self_attention,\n cross_attention):\n \"\"\"Initializer\n Args:\n z_output_sizes: List[int], number of hidden units for latent representation encoder\n enc_output_sizes: List[int], number of hidden units for context encoder\n cross_output_sizes: List[int], number of hidden units for cross context encoder\n dec_output_sizes: List[int], number of hidden units for decoder\n self_attention: Callable[[tf.Tensor], tf.Tensor], self attention method\n cross_attention: Callable[[tf.Tensor], tf.Tensor], cross attention method\n \"\"\"\n self.z_encoder = Encoder(z_output_sizes[:-1], self_attention)\n self.z_prob = GaussianProb(z_output_sizes[-1],\n proj=np.mean(z_output_sizes[-2:]))\n\n self.encoder = Encoder(enc_output_sizes, self_attention, keepdims=True)\n self.cross_encoder = Encoder(cross_output_sizes, cross_attention)\n\n self.decoder = Decoder(dec_output_sizes[:-1])\n self.normal_dist = GaussianProb(dec_output_sizes[-1], multivariate=True)\n\n def __call__(self, context, query):\n cx, _ = context\n z_context = self.z_encoder(context, key=cx, query=cx)\n z_dist, _, _ = self.z_prob(z_context)\n latent = z_dist.sample()\n\n self_attended = self.encoder(context, key=cx, query=cx)\n cross_attended = self.cross_encoder(self_attended, key=cx, query=query)\n\n context = tf.concat([latent, cross_attended], axis=-1)\n context = tf.tile(tf.expand_dims(context, 1),\n [1, tf.shape(query)[1], 1])\n\n rep = self.decoder(context, query)\n dist, mu, sigma = self.normal_dist(rep)\n\n return dist, mu, sigma\n\n def loss(self, context, query, target):\n cx, _ = context\n dist, _, _ = self(context, query)\n log_prob = dist.log_prob(target)\n log_prob = tf.reduce_sum(log_prob)\n\n prior, _, _ = self.z_prob(self.z_encoder(context, key=cx, query=cx))\n posterior, _, _ = self.z_prob(self.z_encoder([query, target], key=query, query=query))\n\n kl = tfp.distributions.kl_divergence(prior, posterior)\n kl = tf.reduce_sum(kl)\n\n # maximize variational lower bound\n loss = -log_prob + kl\n return loss\n"
] |
[
[
"tensorflow.concat",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.expand_dims",
"numpy.mean"
]
] |
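A hedged sketch of the KL term in AttentiveNP.loss above, with explicit Normal distributions standing in for the encoder-produced prior and posterior; the locations and scales are arbitrary.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
prior = tfd.Normal(loc=tf.zeros(4), scale=tf.ones(4))
posterior = tfd.Normal(loc=0.5 * tf.ones(4), scale=0.8 * tf.ones(4))
kl = tf.reduce_sum(tfd.kl_divergence(prior, posterior))  # summed over latent dims
print(kl.numpy())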
Spinch/CarND-Capstone
|
[
"7e507df9f1cc72c76514907464ca9ca3d3ac9e85"
] |
[
"ros/src/tl_detector/light_classification/tl_classifierNN.py"
] |
[
"\nimport rospy\nimport cv2\nimport numpy as np\nfrom styx_msgs.msg import TrafficLight\nfrom darknet_ros_msgs.msg import BoundingBoxes\n\nclass TLClassifierNN(object):\n def __init__(self):\n #TODO load classifier\n self.lastBBox = [[0, 0], [0, 0]]\n self.lastBBoxT = rospy.get_time()\n\n def get_classification(self, image):\n \"\"\"Determines the color of the traffic light in the image\n\n Args:\n image (cv::Mat): image containing the traffic light\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n t = rospy.get_time()\n dt = t - self.lastBBoxT\n if dt < 0.1:\n rospy.loginfo(\"Got traffic picture past {} seconds, bbox: x {}:{}, y {}:{}\".format(dt,\n self.lastBBox[0][0], self.lastBBox[0][1], self.lastBBox[1][0], self.lastBBox[1][1]))\n else:\n return TrafficLight.UNKNOWN\n\n # Check if box is valid\n if self.lastBBox[0][0] == self.lastBBox[0][1] or self.lastBBox[1][0] == self.lastBBox[1][1]:\n return TrafficLight.UNKNOWN\n\n # Crop image\n bb_image = image[self.lastBBox[1][0]:self.lastBBox[1][1], self.lastBBox[0][0]:self.lastBBox[0][1]]\n\n height, width, channels = bb_image.shape\n\n # Partition into red, yellow and green areas of typical vertical traffic light on site\n red_area = bb_image[0:height//3, 0:width]\n yellow_area = bb_image[height//3: 2*height//3, 0:width]\n green_area = bb_image[2*height//3: height, 0:width]\n\n # Standard coefficients to convert red, yellow and green channels to grayscale\n coef_red = [0.1, 0.1, 0.8]\n coef_yellow = [0.114, 0.587, 0.299]\n coef_green = [0.1, 0.8, 0.1]\n\n # Apply coefficients\n red_area = cv2.transform(red_area, np.array(coef_red).reshape((1,3)))\n yellow_area = cv2.transform(yellow_area, np.array(coef_yellow).reshape((1,3)))\n green_area = cv2.transform(green_area, np.array(coef_green).reshape((1,3)))\n\n # Concatenate obtained grayscale images\n bb_image = np.concatenate((red_area,yellow_area,green_area),axis=0)\n\n # Reevaluate dimensions just in case\n height, width = bb_image.shape\n\n # Create mask\n mask = np.zeros((height, width), np.uint8)\n width_off = 3\n height_off = 4\n cv2.ellipse(mask, (width//2, 1*height//6), (width//2 - width_off, height//6 - height_off), 0, 0, 360, 1, -1)\n cv2.ellipse(mask, (width//2, 3*height//6), (width//2 - width_off, height//6 - height_off), 0, 0, 360, 1, -1)\n cv2.ellipse(mask, (width//2, 5*height//6), (width//2 - width_off, height//6 - height_off), 0, 0, 360, 1, -1)\n\n # Apply mask\n bb_image = np.multiply(bb_image, mask)\n\n # Cut not bright enough pixels\n bb_image = cv2.inRange(bb_image, 200, 255)\n\n # Partition into red, yellow and green areas\n red_area = bb_image[0:height//3, 0:width]\n yellow_area = bb_image[height//3: 2*height//3, 0:width]\n green_area = bb_image[2*height//3: height, 0:width]\n\n # Count the number of non-zero pixels in each area\n red_cnt = cv2.countNonZero(red_area)\n yellow_cnt = cv2.countNonZero(yellow_area)\n green_cnt = cv2.countNonZero(green_area)\n\n # Determine which color had max non-zero pixels\n if red_cnt > yellow_cnt and red_cnt > green_cnt:\n return TrafficLight.RED\n elif yellow_cnt > red_cnt and yellow_cnt > green_cnt:\n return TrafficLight.YELLOW\n # Do not differentiate green and unknown\n return TrafficLight.UNKNOWN\n\n def bboxes_cb(self, bBoxes):\n for box in bBoxes.bounding_boxes:\n # rospy.loginfo(\"Class: {}, prob: {}, x: {}:{}, y: {}:{}\".format(box.Class, box.probability, box.xmin,\n # box.xmax, box.ymin, box.ymax))\n if box.Class == 'traffic light':\n self.lastBBox = [[box.xmin, box.xmax], 
[box.ymin, box.ymax]]\n self.lastBBoxT = rospy.get_time()\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.zeros",
"numpy.multiply"
]
] |
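A hedged sketch of the channel-weighting trick in tl_classifierNN.py above: cv2.transform collapses a BGR image to one channel with custom coefficients, after which bright pixels are thresholded and counted; the synthetic red patch is invented for illustration.

import cv2
import numpy as np

bgr = np.zeros((30, 30, 3), dtype=np.uint8)
bgr[5:15, 5:15] = (0, 0, 255)                       # a red patch (BGR order)
coef_red = np.array([0.1, 0.1, 0.8]).reshape(1, 3)  # emphasise the R channel
gray = cv2.transform(bgr, coef_red)                 # single-channel weighted image
bright = cv2.inRange(gray, 200, 255)                # keep only bright pixels
print(cv2.countNonZero(bright))                     # 100 pixels in the patch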
maropu/lljvm-translator
|
[
"322fbe24a27976948c8e8081a9552152dda58b4b"
] |
[
"python/src/test/resources/pyfunc/numpy_random12_test.py"
] |
[
"import numpy as np\n\ndef numpy_random12_test(n):\n return np.random.random_sample(n)\n"
] |
[
[
"numpy.random.random_sample"
]
] |
ZachMontgomery/PolyFits
|
[
"0634bcd3a24b12a22b566a0c134cddf733d28641"
] |
[
"test/test_multivariablePolynomialFit_Function.py"
] |
[
"import numpy as np\nimport polyFits as pf\nimport json\n\nfn = './test/'\nf = open(fn+'database.txt', 'r')\ndatabase = f.readlines()\nf.close()\n\naoa, dp, cl, cd, cm = [], [], [], [], []\nfor line in database[1:]:\n aoa.append( float( line[ 8: 25] ) )\n dp.append( float( line[ 34: 51] ) )\n cl.append( float( line[ 60: 77] ) )\n cd.append( float( line[ 87:103] ) )\n cm.append( float( line[112: ] ) )\nX = np.array([aoa, dp]).T\n\nf = open(fn+'fit_CL.json', 'r')\nclDict = json.load(f)\nf.close()\nf = open(fn+'fit_CD.json', 'r')\ncdDict = json.load(f)\nf.close()\nf = open(fn+'fit_Cm.json', 'r')\ncmDict = json.load(f)\nf.close()\n\naCL, nvecCL, r2CL = pf.dict2list(clDict)\naCD, nvecCD, r2CD = pf.dict2list(cdDict)\naCm, nvecCm, r2Cm = pf.dict2list(cmDict)\n\nf = open(fn+'a5dp10.txt', 'r')\nclval = float(f.readline())\ncdval = float(f.readline())\ncmval = float(f.readline())\nf.close()\n\naoa, dp = 5.*np.pi/180., 10.*np.pi/180.\n\ndef test_simpleConstriants():\n \n aaCL, rr2CL = pf.multivariablePolynomialFit(nvecCL, X, cl, sym_same=[(0,1)], verbose=False)\n \n assert len(aCL) == len(aaCL)\n for j in range(pf.calcJ(nvecCL)):\n assert aCL[j] == aaCL[j]\n assert r2CL == rr2CL\n \n cclval = pf.multivariablePolynomialFunction(aCL, nvecCL, [aoa, dp])\n assert clval == cclval\n\ndef test_percent():\n \n aaCD, rr2CD = pf.multivariablePolynomialFit(nvecCD, X, cd, sym_diff=[(0,1)], percent=True, verbose=False)\n \n assert len(aCD) == len(aaCD)\n for j in range(pf.calcJ(nvecCD)):\n assert aCD[j] == aaCD[j]\n assert r2CD == rr2CD\n \n ccdval = pf.multivariablePolynomialFunction(aCD, nvecCD, [aoa, dp])\n assert cdval == ccdval\n\ndef test_weighting():\n \n def w(x, y, p):\n if abs(y[p]) < 0.0001:\n return 1.\n return 0.0001 / abs(y[p])\n \n aaCm, rr2Cm = pf.multivariablePolynomialFit(nvecCm, X, cm, sym_same=[(0,1)], weighting=w, verbose=False)\n \n assert len(aCm) == len(aaCm)\n for j in range(pf.calcJ(nvecCm)):\n assert aCm[j] == aaCm[j]\n assert r2Cm == rr2Cm\n \n ccmval = pf.multivariablePolynomialFunction(aCm, nvecCm, [aoa, dp])\n assert cmval == ccmval\n\n"
] |
[
[
"numpy.array"
]
] |
tpmp-inra/ipapi
|
[
"b0f6be8960a20dbf95ef9df96efdd22bd6e031c5"
] |
[
"ipt/ipt_filter_contour_by_size.py"
] |
[
"from ipso_phen.ipapi.base.ipt_abstract import IptBase\r\nfrom ipso_phen.ipapi.tools import regions\r\nimport numpy as np\r\nimport cv2\r\n\r\nimport logging\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nfrom ipso_phen.ipapi.base import ip_common as ipc\r\n\r\n\r\nclass IptFilterContourBySize(IptBase):\r\n def build_params(self):\r\n self.add_enabled_checkbox()\r\n self.add_spin_box(\r\n name=\"min_threshold\",\r\n desc=\"Lower bound limit\",\r\n default_value=0,\r\n minimum=0,\r\n maximum=100000000,\r\n hint=\"Only contours bigger than lower limit bound will be kept\",\r\n )\r\n self.add_spin_box(\r\n name=\"max_threshold\",\r\n desc=\"Upper bound limit\",\r\n default_value=100000000,\r\n minimum=0,\r\n maximum=100000000,\r\n hint=\"Only contours smaller than lower limit bound will be kept\",\r\n )\r\n self.add_roi_selector()\r\n\r\n def process_wrapper(self, **kwargs):\r\n \"\"\"\r\n Filter contour by size:\r\n 'Keep or descard contours according to their size\r\n Real time: False\r\n\r\n Keyword Arguments (in parentheses, argument name):\r\n * Activate tool (enabled): Toggle whether or not tool is active\r\n * Lower bound limit (min_threshold): Only contours bigger than lower limit bound will be kept\r\n * Upper bound limit (max_threshold): Only contours smaller than lower limit bound will be kept\r\n * Name of ROI to be used (roi_names): Operation will only be applied inside of ROI\r\n * ROI selection mode (roi_selection_mode):\r\n \"\"\"\r\n wrapper = self.init_wrapper(**kwargs)\r\n if wrapper is None:\r\n return False\r\n\r\n res = False\r\n try:\r\n if self.get_value_of(\"enabled\") == 1:\r\n mask = self.get_mask()\r\n if mask is None:\r\n logger.error(f\"FAIL {self.name}: mask must be initialized\")\r\n return\r\n\r\n lt, ut = self.get_value_of(\"min_threshold\"), self.get_value_of(\r\n \"max_threshold\"\r\n )\r\n\r\n # Get source contours\r\n contours = [\r\n c\r\n for c in ipc.get_contours(\r\n mask=mask,\r\n retrieve_mode=cv2.RETR_LIST,\r\n method=cv2.CHAIN_APPROX_SIMPLE,\r\n )\r\n if cv2.contourArea(c, True) < 0\r\n ]\r\n contours.sort(key=lambda x: cv2.contourArea(x), reverse=True)\r\n colors = ipc.build_color_steps(step_count=len(contours))\r\n\r\n dbg_img = np.dstack(\r\n (np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))\r\n )\r\n for clr, cnt in zip(colors, contours):\r\n cv2.drawContours(dbg_img, [cnt], 0, clr, -1)\r\n dbg_img = np.dstack(\r\n (\r\n cv2.bitwise_and(dbg_img[:, :, 0], mask),\r\n cv2.bitwise_and(dbg_img[:, :, 1], mask),\r\n cv2.bitwise_and(dbg_img[:, :, 2], mask),\r\n )\r\n )\r\n wrapper.store_image(\r\n image=dbg_img,\r\n text=\"all_contours\",\r\n )\r\n\r\n fnt = (cv2.FONT_HERSHEY_SIMPLEX, 0.6)\r\n for cnt in contours:\r\n area_ = cv2.contourArea(cnt)\r\n x, y, w, h = cv2.boundingRect(cnt)\r\n x += w // 2 - 10\r\n y += h // 2\r\n if area_ > 0:\r\n cv2.putText(\r\n dbg_img,\r\n f\"{area_}\",\r\n (x, y),\r\n fnt[0],\r\n fnt[1],\r\n (255, 255, 255),\r\n 2,\r\n )\r\n wrapper.store_image(\r\n image=dbg_img,\r\n text=\"all_contours_with_sizes\",\r\n )\r\n\r\n dbg_img = np.dstack(\r\n (np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))\r\n )\r\n out_mask = np.zeros_like(mask)\r\n\r\n # Discarded contours\r\n size_cnts = np.dstack(\r\n (np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))\r\n )\r\n for cnt in contours:\r\n area_ = cv2.contourArea(cnt)\r\n if area_ < lt:\r\n cv2.drawContours(size_cnts, [cnt], 0, ipc.C_RED, -1)\r\n elif area_ > ut:\r\n cv2.drawContours(size_cnts, [cnt], 0, ipc.C_BLUE, -1)\r\n else:\r\n 
cv2.drawContours(size_cnts, [cnt], 0, ipc.C_WHITE, -1)\r\n wrapper.store_image(image=size_cnts, text=\"cnts_by_size\")\r\n\r\n # Discarded contours\r\n size_cnts = np.dstack(\r\n (np.zeros_like(mask), np.zeros_like(mask), np.zeros_like(mask))\r\n )\r\n for cnt in sorted(\r\n contours, key=lambda x: cv2.contourArea(x), reverse=True\r\n ):\r\n area_ = cv2.contourArea(cnt)\r\n if area_ < lt:\r\n cv2.drawContours(size_cnts, [cnt], 0, ipc.C_RED, -1)\r\n elif area_ > ut:\r\n cv2.drawContours(size_cnts, [cnt], 0, ipc.C_BLUE, -1)\r\n else:\r\n cv2.drawContours(size_cnts, [cnt], 0, ipc.C_WHITE, -1)\r\n wrapper.store_image(image=size_cnts, text=\"cnts_by_size_reversed\")\r\n\r\n for cnt in contours:\r\n area_ = cv2.contourArea(cnt)\r\n if not (lt < area_ < ut):\r\n cv2.drawContours(dbg_img, [cnt], 0, ipc.C_RED, -1)\r\n # Discarded contours borders\r\n for cnt in contours:\r\n area_ = cv2.contourArea(cnt)\r\n if not (lt < area_ < ut):\r\n cv2.drawContours(dbg_img, [cnt], 0, ipc.C_MAROON, 4)\r\n # Kept contours\r\n for cnt in contours:\r\n area_ = cv2.contourArea(cnt)\r\n if lt < area_ < ut:\r\n cv2.drawContours(out_mask, [cnt], 0, 255, -1)\r\n cv2.drawContours(dbg_img, [cnt], 0, ipc.C_GREEN, -1)\r\n else:\r\n cv2.drawContours(out_mask, [cnt], 0, 0, -1)\r\n cv2.drawContours(dbg_img, [cnt], 0, ipc.C_RED, -1)\r\n dbg_img = np.dstack(\r\n (\r\n cv2.bitwise_and(dbg_img[:, :, 0], mask),\r\n cv2.bitwise_and(dbg_img[:, :, 1], mask),\r\n cv2.bitwise_and(dbg_img[:, :, 2], mask),\r\n )\r\n )\r\n # Discarded sizes\r\n for cnt in contours:\r\n area_ = cv2.contourArea(cnt)\r\n if not (lt < area_ < ut):\r\n x, y, w, h = cv2.boundingRect(cnt)\r\n x += w // 2 - 10\r\n y += h // 2\r\n cv2.putText(\r\n dbg_img,\r\n f\"{area_}\",\r\n (x, y),\r\n fnt[0],\r\n fnt[1],\r\n ipc.C_RED,\r\n thickness=2,\r\n )\r\n # Kept sizes\r\n for cnt in contours:\r\n area_ = cv2.contourArea(cnt)\r\n if lt < area_ < ut:\r\n x, y, w, h = cv2.boundingRect(cnt)\r\n x += w // 2 - 10\r\n y += h // 2\r\n cv2.putText(\r\n dbg_img,\r\n f\"{area_}\",\r\n (x, y),\r\n fnt[0],\r\n fnt[1],\r\n ipc.C_LIME,\r\n thickness=2,\r\n )\r\n\r\n out_mask = cv2.bitwise_and(\r\n out_mask,\r\n mask,\r\n )\r\n\r\n # Apply ROIs if needed\r\n rois = self.get_ipt_roi(\r\n wrapper=wrapper,\r\n roi_names=self.get_value_of(\"roi_names\").replace(\" \", \"\").split(\",\"),\r\n selection_mode=self.get_value_of(\"roi_selection_mode\"),\r\n )\r\n if rois:\r\n untouched_mask = regions.delete_rois(rois=rois, image=self.get_mask())\r\n self.result = cv2.bitwise_or(\r\n untouched_mask, regions.keep_rois(rois=rois, image=out_mask)\r\n )\r\n self.demo_image = cv2.bitwise_or(\r\n dbg_img,\r\n np.dstack((untouched_mask, untouched_mask, untouched_mask)),\r\n )\r\n else:\r\n self.result = out_mask\r\n self.demo_image = dbg_img\r\n\r\n wrapper.store_image(image=self.result, text=\"filtered_contours\")\r\n wrapper.store_image(image=self.demo_image, text=\"tagged_contours\")\r\n\r\n res = True\r\n else:\r\n wrapper.store_image(wrapper.current_image, \"current_image\")\r\n res = True\r\n except Exception as e:\r\n res = False\r\n logger.exception(f\"Filter contour by size FAILED, exception: {repr(e)}\")\r\n else:\r\n pass\r\n finally:\r\n return res\r\n\r\n @property\r\n def name(self):\r\n return \"Filter contour by size\"\r\n\r\n @property\r\n def package(self):\r\n return \"TPMP\"\r\n\r\n @property\r\n def real_time(self):\r\n return False\r\n\r\n @property\r\n def result_name(self):\r\n return \"mask\"\r\n\r\n @property\r\n def output_kind(self):\r\n return \"mask\"\r\n\r\n 
@property\r\n    def use_case(self):\r\n        return [ipc.ToolFamily.MASK_CLEANUP]\r\n\r\n    @property\r\n    def description(self):\r\n        return \"\"\"Keep or discard contours according to their size\"\"\"\r\n"
] |
[
[
"numpy.zeros_like",
"numpy.dstack"
]
] |
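A minimal, self-contained sketch of the pattern the listing above relies on: keep only contours whose area lies between a lower and an upper bound and rebuild a binary mask from them. The helper name `filter_mask_by_contour_area`, the OpenCV ≥ 4 `findContours` signature, and the toy input are assumptions made here for illustration; they are not part of the repository's `ipc` wrapper API.

```python
import cv2
import numpy as np

def filter_mask_by_contour_area(mask, min_area, max_area):
    # mask: single-channel uint8 image with values 0/255
    # OpenCV >= 4 returns (contours, hierarchy)
    contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    out = np.zeros_like(mask)
    for cnt in contours:
        if min_area < cv2.contourArea(cnt) < max_area:
            cv2.drawContours(out, [cnt], 0, 255, -1)  # fill kept contour
    return cv2.bitwise_and(out, mask)

if __name__ == "__main__":
    demo = np.zeros((64, 64), np.uint8)
    cv2.rectangle(demo, (5, 5), (10, 10), 255, -1)    # small blob, area ~25
    cv2.rectangle(demo, (20, 20), (50, 50), 255, -1)  # large blob, area ~900
    kept = filter_mask_by_contour_area(demo, 100, 10000)
    print(cv2.countNonZero(demo), "->", cv2.countNonZero(kept))
```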
ekhoda/optimization-tutorial
|
[
"8847625aa49813823b47165c5f457294729459b6"
] |
[
"process_data.py"
] |
[
"import pandas as pd\n\nfrom helper import load_raw_data\n\n\ndef load_data():\n return get_modified_data(load_raw_data())\n\n\ndef get_modified_data(input_df_dict):\n # Our \"parameters\" table is very simple here. So, we can create a new dictionary\n # for our parameters as follows or just modify our df a little in place.\n # There shouldn't be any performance gain here to concern us, so I went with\n # the dictionary. In the comment below, I also show the latter for illustration\n\n # input_df_dict['parameters'].set_index('attribute', inplace=True)\n\n input_param_dict = input_df_dict['parameters'].set_index('attribute')['value'].to_dict()\n return input_df_dict, input_param_dict\n\n\n# To not overkill, I only created one module here for processing the data, either input or output\ndef _create_outputs_df(opt_series, cols, name, output_df_dict):\n df = pd.DataFrame(data=opt_series, index=opt_series.index.values).reset_index()\n df.columns = cols\n output_df_dict[name] = df\n\n\ndef write_outputs(dict_of_variables, attr='varValue'):\n \"\"\"\n The outputs we want are very simple and can be achieved almost identically\n in either modules. The only difference is in the attribute name of their\n decision variable value.\n In gurobi you get it by 'your_dv.x',\n in pulp by 'your_dv.varValue',\n in cplex by 'your_dv.solution_value'.\n \"\"\"\n output_df_dict = {}\n cols = ['period', 'value']\n for name, var in dict_of_variables.items():\n opt_series = pd.Series({k + 1: getattr(v, attr) for k, v in var.items()})\n _create_outputs_df(opt_series, cols, name, output_df_dict)\n return output_df_dict\n\n\ndef write_outputs_xpress(dict_of_variables, model):\n output_df_dict = {}\n cols = ['period', 'value']\n for name, var in dict_of_variables.items():\n opt_series = pd.Series({k + 1: model.getSolution(v) for k, v in var.items()})\n _create_outputs_df(opt_series, cols, name, output_df_dict)\n return output_df_dict\n"
] |
[
[
"pandas.DataFrame"
]
] |
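A hedged illustration of the parameters-table idiom used in the record above: a two-column DataFrame (attribute, value) collapsed into a plain dictionary via `set_index(...)['value'].to_dict()`. The column names and sample values below are placeholders chosen here, not data from the repository.

```python
import pandas as pd

params_df = pd.DataFrame(
    {"attribute": ["horizon", "capacity"], "value": [12, 500]}
)
param_dict = params_df.set_index("attribute")["value"].to_dict()
print(param_dict)  # {'horizon': 12, 'capacity': 500}
```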
ericgarza70/machine-learning-book
|
[
"40520104c3d76d75ce4aa785e59e8034f74bcc8e"
] |
[
"ch16/ch16-part1-self-attention.py"
] |
[
"# coding: utf-8\n\n\nimport sys\nfrom python_environment_check import check_packages\nimport torch\nimport torch.nn.functional as F\n\n# # Machine Learning with PyTorch and Scikit-Learn \n# # -- Code Examples\n\n# ## Package version checks\n\n# Add folder to path in order to load from the check_packages.py script:\n\n\n\nsys.path.insert(0, '..')\n\n\n# Check recommended package versions:\n\n\n\n\n\nd = {\n 'torch': '1.9.0',\n}\ncheck_packages(d)\n\n\n# # Chapter 16: Transformers – Improving Natural Language Processing with Attention Mechanisms (Part 1/3)\n\n# **Outline**\n# \n# - [Adding an attention mechanism to RNNs](#Adding-an-attention-mechanism-to-RNNs)\n# - [Attention helps RNNs with accessing information](#Attention-helps-RNNs-with-accessing-information)\n# - [The original attention mechanism for RNNs](#The-original-attention-mechanism-for-RNNs)\n# - [Processing the inputs using a bidirectional RNN](#Processing-the-inputs-using-a-bidirectional-RNN)\n# - [Generating outputs from context vectors](#Generating-outputs-from-context-vectors)\n# - [Computing the attention weights](#Computing-the-attention-weights)\n# - [Introducing the self-attention mechanism](#Introducing-the-self-attention-mechanism)\n# - [Starting with a basic form of self-attention](#Starting-with-a-basic-form-of-self-attention)\n# - [Parameterizing the self-attention mechanism: scaled dot-product attention](#Parameterizing-the-self-attention-mechanism-scaled-dot-product-attention)\n# - [Attention is all we need: introducing the original transformer architecture](#Attention-is-all-we-need-introducing-the-original-transformer-architecture)\n# - [Encoding context embeddings via multi-head attention](#Encoding-context-embeddings-via-multi-head-attention)\n# - [Learning a language model: decoder and masked multi-head attention](#Learning-a-language-model-decoder-and-masked-multi-head-attention)\n# - [Implementation details: positional encodings and layer normalization](#Implementation-details-positional-encodings-and-layer-normalization)\n\n\n\n\n\n# ## Adding an attention mechanism to RNNs\n\n# ### Attention helps RNNs with accessing information\n\n\n\n\n\n\n\n\n\n# ### The original attention mechanism for RNNs\n\n\n\n\n\n# ### Processing the inputs using a bidirectional RNN\n# ### Generating outputs from context vectors\n# ### Computing the attention weights\n\n# ## Introducing the self-attention mechanism\n\n# ### Starting with a basic form of self-attention\n\n# - Assume we have an input sentence that we encoded via a dictionary, which maps the words to integers as discussed in the RNN chapter:\n\n\n\n\n\n# input sequence / sentence:\n# \"Can you help me to translate this sentence\"\n\nsentence = torch.tensor(\n [0, # can\n 7, # you \n 1, # help\n 2, # me\n 5, # to\n 6, # translate\n 4, # this\n 3] # sentence\n)\n\nsentence\n\n\n# - Next, assume we have an embedding of the words, i.e., the words are represented as real vectors.\n# - Since we have 8 words, there will be 8 vectors. 
Each vector is 16-dimensional:\n\n\n\ntorch.manual_seed(123)\nembed = torch.nn.Embedding(10, 16)\nembedded_sentence = embed(sentence).detach()\nembedded_sentence.shape\n\n\n# - The goal is to compute the context vectors $\\boldsymbol{z}^{(i)}=\\sum_{j=1}^{T} \\alpha_{i j} \\boldsymbol{x}^{(j)}$, which involve attention weights $\\alpha_{i j}$.\n# - In turn, the attention weights $\\alpha_{i j}$ involve the $\\omega_{i j}$ values\n# - Let's start with the $\\omega_{i j}$'s first, which are computed as dot-products:\n# \n# $$\\omega_{i j}=\\boldsymbol{x}^{(i)^{\\top}} \\boldsymbol{x}^{(j)}$$\n# \n# \n\n\n\nomega = torch.empty(8, 8)\n\nfor i, x_i in enumerate(embedded_sentence):\n for j, x_j in enumerate(embedded_sentence):\n omega[i, j] = torch.dot(x_i, x_j)\n\n\n# - Actually, let's compute this more efficiently by replacing the nested for-loops with a matrix multiplication:\n\n\n\nomega_mat = embedded_sentence.matmul(embedded_sentence.T)\n\n\n\n\ntorch.allclose(omega_mat, omega)\n\n\n# - Next, let's compute the attention weights by normalizing the \"omega\" values so they sum to 1\n# \n# $$\\alpha_{i j}=\\frac{\\exp \\left(\\omega_{i j}\\right)}{\\sum_{j=1}^{T} \\exp \\left(\\omega_{i j}\\right)}=\\operatorname{softmax}\\left(\\left[\\omega_{i j}\\right]_{j=1 \\ldots T}\\right)$$\n# \n# $$\\sum_{j=1}^{T} \\alpha_{i j}=1$$\n\n\n\n\nattention_weights = F.softmax(omega, dim=1)\nattention_weights.shape\n\n\n# - We can conform that the columns sum up to one:\n\n\n\nattention_weights.sum(dim=1)\n\n\n\n\n\n\n# - Now that we have the attention weights, we can compute the context vectors $\\boldsymbol{z}^{(i)}=\\sum_{j=1}^{T} \\alpha_{i j} \\boldsymbol{x}^{(j)}$, which involve attention weights $\\alpha_{i j}$\n# - For instance, to compute the context-vector of the 2nd input element (the element at index 1), we can perform the following computation:\n\n\n\nx_2 = embedded_sentence[1, :]\ncontext_vec_2 = torch.zeros(x_2.shape)\nfor j in range(8):\n x_j = embedded_sentence[j, :]\n context_vec_2 += attention_weights[1, j] * x_j\n \ncontext_vec_2\n\n\n# - Or, more effiently, using linear algebra and matrix multiplication:\n\n\n\ncontext_vectors = torch.matmul(\n attention_weights, embedded_sentence)\n\n\ntorch.allclose(context_vec_2, context_vectors[1])\n\n\n# ### Parameterizing the self-attention mechanism: scaled dot-product attention\n\n\n\n\n\n\n\ntorch.manual_seed(123)\n\nd = embedded_sentence.shape[1]\nU_query = torch.rand(d, d)\nU_key = torch.rand(d, d)\nU_value = torch.rand(d, d)\n\n\n\n\nx_2 = embedded_sentence[1]\nquery_2 = U_query.matmul(x_2)\n\n\n\n\nkey_2 = U_key.matmul(x_2)\nvalue_2 = U_value.matmul(x_2)\n\n\n\n\nkeys = U_key.matmul(embedded_sentence.T).T\ntorch.allclose(key_2, keys[1])\n\n\n\n\nvalues = U_value.matmul(embedded_sentence.T).T\ntorch.allclose(value_2, values[1])\n\n\n\n\nomega_23 = query_2.dot(keys[2])\nomega_23\n\n\n\n\nomega_2 = query_2.matmul(keys.T)\nomega_2\n\n\n\n\nattention_weights_2 = F.softmax(omega_2 / d**0.5, dim=0)\nattention_weights_2\n\n\n\n\n#context_vector_2nd = torch.zeros(values[1, :].shape)\n#for j in range(8):\n# context_vector_2nd += attention_weights_2[j] * values[j, :]\n \n#context_vector_2nd\n\n\n\n\ncontext_vector_2 = attention_weights_2.matmul(values)\ncontext_vector_2\n\n\n# ## Attention is all we need: introducing the original transformer architecture\n\n\n\n\n\n# ### Encoding context embeddings via multi-head attention \n\n\n\ntorch.manual_seed(123)\n\nd = embedded_sentence.shape[1]\none_U_query = torch.rand(d, d)\n\n\n\n\nh = 8\nmultihead_U_query 
= torch.rand(h, d, d)\nmultihead_U_key = torch.rand(h, d, d)\nmultihead_U_value = torch.rand(h, d, d)\n\n\n\n\nmultihead_query_2 = multihead_U_query.matmul(x_2)\nmultihead_query_2.shape\n\n\n\n\nmultihead_key_2 = multihead_U_key.matmul(x_2)\nmultihead_value_2 = multihead_U_value.matmul(x_2)\n\n\n\n\nmultihead_key_2[2]\n\n\n\n\nstacked_inputs = embedded_sentence.T.repeat(8, 1, 1)\nstacked_inputs.shape\n\n\n\n\nmultihead_keys = torch.bmm(multihead_U_key, stacked_inputs)\nmultihead_keys.shape\n\n\n\n\nmultihead_keys = multihead_keys.permute(0, 2, 1)\nmultihead_keys.shape\n\n\n\n\nmultihead_keys[2, 1] # index: [2nd attention head, 2nd key]\n\n\n\n\nmultihead_values = torch.matmul(multihead_U_value, stacked_inputs)\nmultihead_values = multihead_values.permute(0, 2, 1)\n\n\n\n\nmultihead_z_2 = torch.rand(8, 16)\n\n\n\n\n\n\n\n\nlinear = torch.nn.Linear(8*16, 16)\ncontext_vector_2 = linear(multihead_z_2.flatten())\ncontext_vector_2.shape\n\n\n# ### Learning a language model: decoder and masked multi-head attention\n\n\n\n\n\n# ### Implementation details: positional encodings and layer normalization\n\n\n\n\n\n# ---\n# \n# Readers may ignore the next cell.\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.empty",
"torch.zeros",
"torch.manual_seed",
"torch.nn.Embedding",
"torch.tensor",
"torch.matmul",
"torch.nn.Linear",
"torch.rand",
"torch.bmm",
"torch.allclose",
"torch.dot"
]
] |
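A condensed recap of the scaled dot-product self-attention walked through in the chapter script above, written as one sketch with toy dimensions chosen here; the variable names mirror the script but the exact shapes are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
T, d = 8, 16                       # sequence length, embedding size (toy values)
x = torch.rand(T, d)               # embedded input sequence
U_q, U_k, U_v = (torch.rand(d, d) for _ in range(3))

queries = x.matmul(U_q.T)          # q_i = U_q x_i, stacked row-wise
keys = x.matmul(U_k.T)
values = x.matmul(U_v.T)

omega = queries.matmul(keys.T)              # unnormalized attention scores
alpha = F.softmax(omega / d**0.5, dim=-1)   # scaled softmax; each row sums to 1
context = alpha.matmul(values)              # context vectors, shape (T, d)
print(context.shape, alpha.sum(dim=-1))
```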
LucasPagano/sga-
|
[
"5b4b88ebf826c2be022f34eb66d5a712b911724a"
] |
[
"scripts/train.py"
] |
[
"import argparse\nimport gc\nimport logging\nimport os\nimport sys\nimport time\n\nfrom collections import defaultdict\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom sgan.data.loader import data_loader\nfrom sgan.losses import gan_g_loss, gan_d_loss, l2_loss\nfrom sgan.losses import displacement_error, final_displacement_error\n\nfrom sgan.models import TrajectoryGenerator, TrajectoryDiscriminator\nfrom sgan.utils import int_tuple, bool_flag, get_total_norm\nfrom sgan.utils import relative_to_abs, get_dset_path\n\ntorch.backends.cudnn.benchmark = True\n\nparser = argparse.ArgumentParser()\nFORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)\nlogger = logging.getLogger(__name__)\n\n# Dataset options\nparser.add_argument('--dataset_name', default='trajectory_forecasting_benchmark', type=str)\nparser.add_argument('--delim', default=' ')\nparser.add_argument('--loader_num_workers', default=4, type=int)\nparser.add_argument('--obs_len', default=8, type=int)\nparser.add_argument('--pred_len', default=8, type=int)\nparser.add_argument('--skip', default=1, type=int)\n\n# Optimization\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--num_iterations', default=10000, type=int)\nparser.add_argument('--num_epochs', default=200, type=int)\n\n# Model Options\nparser.add_argument('--embedding_dim', default=16, type=int)\nparser.add_argument('--num_layers', default=1, type=int)\nparser.add_argument('--dropout', default=0, type=float)\nparser.add_argument('--batch_norm', default=0, type=bool_flag)\nparser.add_argument('--mlp_dim', default=64, type=int)\n\n# Generator Options\nparser.add_argument('--encoder_h_dim_g', default=32, type=int)\nparser.add_argument('--decoder_h_dim_g', default=64, type=int)\nparser.add_argument('--noise_dim', default=8, type=int_tuple)\nparser.add_argument('--noise_type', default='gaussian')\nparser.add_argument('--noise_mix_type', default='gloval')\nparser.add_argument('--clipping_threshold_g', default=1.5, type=float)\nparser.add_argument('--g_learning_rate', default=1e-3, type=float)\nparser.add_argument('--g_steps', default=1, type=int)\n\n# Pooling Options\nparser.add_argument('--pooling_type', default='pool_net')\nparser.add_argument('--pool_every_timestep', default=0, type=bool_flag)\n\n# Pool Net Option\nparser.add_argument('--bottleneck_dim', default=32, type=int)\n\n# Social Pooling Options\nparser.add_argument('--neighborhood_size', default=2.0, type=float)\nparser.add_argument('--grid_size', default=8, type=int)\n\n# Discriminator Options\nparser.add_argument('--d_type', default='local', type=str)\nparser.add_argument('--encoder_h_dim_d', default=64, type=int)\nparser.add_argument('--d_learning_rate', default=1e-3, type=float)\nparser.add_argument('--d_steps', default=2, type=int)\nparser.add_argument('--clipping_threshold_d', default=0, type=float)\n\n# Loss Options\nparser.add_argument('--l2_loss_weight', default=1, type=float)\nparser.add_argument('--best_k', default=10, type=int)\n\n# Output\nparser.add_argument('--output_dir', default=os.getcwd())\nparser.add_argument('--print_every', default=50, type=int)\nparser.add_argument('--checkpoint_every', default=100, type=int)\nparser.add_argument('--checkpoint_name', default='checkpoint')\nparser.add_argument('--checkpoint_start_from', default=None)\nparser.add_argument('--restore_from_checkpoint', default=0, type=int)\nparser.add_argument('--num_samples_check', 
default=5000, type=int)\n\n# Misc\nparser.add_argument('--use_gpu', default=1, type=int)\nparser.add_argument('--timing', default=0, type=int)\nparser.add_argument('--gpu_num', default=\"0\", type=str)\n\n\ndef init_weights(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.kaiming_normal_(m.weight)\n\n\ndef get_dtypes(args):\n long_dtype = torch.LongTensor\n float_dtype = torch.FloatTensor\n if args.use_gpu == 1:\n long_dtype = torch.cuda.LongTensor\n float_dtype = torch.cuda.FloatTensor\n return long_dtype, float_dtype\n\n\ndef main(args):\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_num\n train_path = get_dset_path(args.dataset_name, 'train')\n val_path = get_dset_path(args.dataset_name, 'val')\n\n long_dtype, float_dtype = get_dtypes(args)\n\n logger.info(\"Initializing train dataset\")\n train_dset, train_loader = data_loader(args, train_path)\n logger.info(\"Initializing val dataset\")\n _, val_loader = data_loader(args, val_path)\n\n iterations_per_epoch = len(train_dset) / args.batch_size / args.d_steps\n if args.num_epochs:\n args.num_iterations = int(iterations_per_epoch * args.num_epochs)\n\n logger.info(\n 'There are {} iterations per epoch'.format(iterations_per_epoch)\n )\n generator = TrajectoryGenerator(\n obs_len=args.obs_len,\n pred_len=args.pred_len,\n embedding_dim=args.embedding_dim,\n encoder_h_dim=args.encoder_h_dim_g,\n decoder_h_dim=args.decoder_h_dim_g,\n mlp_dim=args.mlp_dim,\n num_layers=args.num_layers,\n noise_dim=args.noise_dim,\n noise_type=args.noise_type,\n noise_mix_type=args.noise_mix_type,\n pooling_type=args.pooling_type,\n pool_every_timestep=args.pool_every_timestep,\n dropout=args.dropout,\n bottleneck_dim=args.bottleneck_dim,\n neighborhood_size=args.neighborhood_size,\n grid_size=args.grid_size,\n batch_norm=args.batch_norm)\n\n generator.apply(init_weights)\n generator.type(float_dtype).train()\n logger.info('Here is the generator:')\n logger.info(generator)\n\n discriminator = TrajectoryDiscriminator(\n obs_len=args.obs_len,\n pred_len=args.pred_len,\n embedding_dim=args.embedding_dim,\n h_dim=args.encoder_h_dim_d,\n mlp_dim=args.mlp_dim,\n num_layers=args.num_layers,\n dropout=args.dropout,\n batch_norm=args.batch_norm,\n d_type=args.d_type)\n\n discriminator.apply(init_weights)\n discriminator.type(float_dtype).train()\n logger.info('Here is the discriminator:')\n logger.info(discriminator)\n\n g_loss_fn = gan_g_loss\n d_loss_fn = gan_d_loss\n\n optimizer_g = optim.Adam(generator.parameters(), lr=args.g_learning_rate)\n optimizer_d = optim.Adam(\n discriminator.parameters(), lr=args.d_learning_rate\n )\n\n # Maybe restore from checkpoint\n restore_path = None\n if args.checkpoint_start_from is not None:\n restore_path = args.checkpoint_start_from\n elif args.restore_from_checkpoint == 1:\n restore_path = os.path.join(args.output_dir,\n '%s_with_model.pt' % args.checkpoint_name)\n\n if restore_path is not None and os.path.isfile(restore_path):\n logger.info('Restoring from checkpoint {}'.format(restore_path))\n checkpoint = torch.load(restore_path)\n generator.load_state_dict(checkpoint['g_state'])\n discriminator.load_state_dict(checkpoint['d_state'])\n optimizer_g.load_state_dict(checkpoint['g_optim_state'])\n optimizer_d.load_state_dict(checkpoint['d_optim_state'])\n t = checkpoint['counters']['t']\n epoch = checkpoint['counters']['epoch']\n checkpoint['restore_ts'].append(t)\n else:\n # Starting from scratch, so initialize checkpoint data structure\n t, epoch = 0, 0\n checkpoint = {\n 'args': 
args.__dict__,\n 'G_losses': defaultdict(list),\n 'D_losses': defaultdict(list),\n 'losses_ts': [],\n 'metrics_val': defaultdict(list),\n 'metrics_train': defaultdict(list),\n 'sample_ts': [],\n 'restore_ts': [],\n 'norm_g': [],\n 'norm_d': [],\n 'counters': {\n 't': None,\n 'epoch': None,\n },\n 'g_state': None,\n 'g_optim_state': None,\n 'd_state': None,\n 'd_optim_state': None,\n 'g_best_state': None,\n 'd_best_state': None,\n 'best_t': None,\n 'g_best_nl_state': None,\n 'd_best_state_nl': None,\n 'best_t_nl': None,\n }\n t0 = None\n while t < args.num_iterations:\n gc.collect()\n d_steps_left = args.d_steps\n g_steps_left = args.g_steps\n epoch += 1\n logger.info('Starting epoch {}'.format(epoch))\n for batch in train_loader:\n if args.timing == 1:\n torch.cuda.synchronize()\n t1 = time.time()\n\n # Decide whether to use the batch for stepping on discriminator or\n # generator; an iteration consists of args.d_steps steps on the\n # discriminator followed by args.g_steps steps on the generator.\n if d_steps_left > 0:\n step_type = 'd'\n losses_d = discriminator_step(args, batch, generator,\n discriminator, d_loss_fn,\n optimizer_d)\n checkpoint['norm_d'].append(\n get_total_norm(discriminator.parameters()))\n d_steps_left -= 1\n elif g_steps_left > 0:\n step_type = 'g'\n losses_g = generator_step(args, batch, generator,\n discriminator, g_loss_fn,\n optimizer_g)\n checkpoint['norm_g'].append(\n get_total_norm(generator.parameters())\n )\n g_steps_left -= 1\n\n if args.timing == 1:\n torch.cuda.synchronize()\n t2 = time.time()\n logger.info('{} step took {}'.format(step_type, t2 - t1))\n\n # Skip the rest if we are not at the end of an iteration\n if d_steps_left > 0 or g_steps_left > 0:\n continue\n\n if args.timing == 1:\n if t0 is not None:\n logger.info('Interation {} took {}'.format(\n t - 1, time.time() - t0\n ))\n t0 = time.time()\n\n # Maybe save loss\n if t % args.print_every == 0:\n logger.info('t = {} / {}'.format(t + 1, args.num_iterations))\n for k, v in sorted(losses_d.items()):\n logger.info(' [D] {}: {:.3f}'.format(k, v))\n checkpoint['D_losses'][k].append(v)\n for k, v in sorted(losses_g.items()):\n logger.info(' [G] {}: {:.3f}'.format(k, v))\n checkpoint['G_losses'][k].append(v)\n checkpoint['losses_ts'].append(t)\n\n # Maybe save a checkpoint\n if t > 0 and t % args.checkpoint_every == 0:\n checkpoint['counters']['t'] = t\n checkpoint['counters']['epoch'] = epoch\n checkpoint['sample_ts'].append(t)\n\n # Check stats on the validation set\n logger.info('Checking stats on val ...')\n metrics_val = check_accuracy(\n args, val_loader, generator, discriminator, d_loss_fn\n )\n logger.info('Checking stats on train ...')\n metrics_train = check_accuracy(\n args, train_loader, generator, discriminator,\n d_loss_fn, limit=True\n )\n\n for k, v in sorted(metrics_val.items()):\n logger.info(' [val] {}: {:.3f}'.format(k, v))\n checkpoint['metrics_val'][k].append(v)\n for k, v in sorted(metrics_train.items()):\n logger.info(' [train] {}: {:.3f}'.format(k, v))\n checkpoint['metrics_train'][k].append(v)\n\n min_ade = min(checkpoint['metrics_val']['ade'])\n min_ade_nl = min(checkpoint['metrics_val']['ade_nl'])\n\n if metrics_val['ade'] == min_ade:\n logger.info('New low for avg_disp_error')\n checkpoint['best_t'] = t\n checkpoint['g_best_state'] = generator.state_dict()\n checkpoint['d_best_state'] = discriminator.state_dict()\n\n if metrics_val['ade_nl'] == min_ade_nl:\n logger.info('New low for avg_disp_error_nl')\n checkpoint['best_t_nl'] = t\n checkpoint['g_best_nl_state'] = 
generator.state_dict()\n checkpoint['d_best_nl_state'] = discriminator.state_dict()\n\n # Save another checkpoint with model weights and\n # optimizer state\n checkpoint['g_state'] = generator.state_dict()\n checkpoint['g_optim_state'] = optimizer_g.state_dict()\n checkpoint['d_state'] = discriminator.state_dict()\n checkpoint['d_optim_state'] = optimizer_d.state_dict()\n checkpoint_path = os.path.join(\n args.output_dir, '%s_with_model.pt' % args.checkpoint_name\n )\n logger.info('Saving checkpoint to {}'.format(checkpoint_path))\n torch.save(checkpoint, checkpoint_path)\n logger.info('Done.')\n\n # Save a checkpoint with no model weights by making a shallow\n # copy of the checkpoint excluding some items\n checkpoint_path = os.path.join(\n args.output_dir, '%s_no_model.pt' % args.checkpoint_name)\n logger.info('Saving checkpoint to {}'.format(checkpoint_path))\n key_blacklist = [\n 'g_state', 'd_state', 'g_best_state', 'g_best_nl_state',\n 'g_optim_state', 'd_optim_state', 'd_best_state',\n 'd_best_nl_state'\n ]\n small_checkpoint = {}\n for k, v in checkpoint.items():\n if k not in key_blacklist:\n small_checkpoint[k] = v\n torch.save(small_checkpoint, checkpoint_path)\n logger.info('Done.')\n\n t += 1\n d_steps_left = args.d_steps\n g_steps_left = args.g_steps\n if t >= args.num_iterations:\n break\n\n\ndef discriminator_step(\n args, batch, generator, discriminator, d_loss_fn, optimizer_d\n):\n batch = [tensor.cuda() for tensor in batch]\n (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,\n loss_mask, seq_start_end) = batch\n losses = {}\n loss = torch.zeros(1).to(pred_traj_gt)\n\n generator_out = generator(obs_traj, obs_traj_rel, seq_start_end)\n\n pred_traj_fake_rel = generator_out\n pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])\n\n traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)\n traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)\n traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)\n traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)\n\n scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)\n scores_real = discriminator(traj_real, traj_real_rel, seq_start_end)\n\n # Compute loss with optional gradient penalty\n data_loss = d_loss_fn(scores_real, scores_fake)\n losses['D_data_loss'] = data_loss.item()\n loss += data_loss\n losses['D_total_loss'] = loss.item()\n\n optimizer_d.zero_grad()\n loss.backward()\n if args.clipping_threshold_d > 0:\n nn.utils.clip_grad_norm_(discriminator.parameters(),\n args.clipping_threshold_d)\n optimizer_d.step()\n\n return losses\n\n\ndef generator_step(\n args, batch, generator, discriminator, g_loss_fn, optimizer_g\n):\n batch = [tensor.cuda() for tensor in batch]\n (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel, non_linear_ped,\n loss_mask, seq_start_end) = batch\n losses = {}\n loss = torch.zeros(1).to(pred_traj_gt)\n g_l2_loss_rel = []\n\n loss_mask = loss_mask[:, args.obs_len:]\n\n for _ in range(args.best_k):\n generator_out = generator(obs_traj, obs_traj_rel, seq_start_end)\n\n pred_traj_fake_rel = generator_out\n pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])\n\n if args.l2_loss_weight > 0:\n g_l2_loss_rel.append(args.l2_loss_weight * l2_loss(\n pred_traj_fake_rel,\n pred_traj_gt_rel,\n loss_mask,\n mode='raw'))\n\n g_l2_loss_sum_rel = torch.zeros(1).to(pred_traj_gt)\n if args.l2_loss_weight > 0:\n g_l2_loss_rel = torch.stack(g_l2_loss_rel, dim=1)\n for start, end in seq_start_end.data:\n _g_l2_loss_rel = 
g_l2_loss_rel[start:end]\n _g_l2_loss_rel = torch.sum(_g_l2_loss_rel, dim=0)\n _g_l2_loss_rel = torch.min(_g_l2_loss_rel) / torch.sum(\n loss_mask[start:end])\n g_l2_loss_sum_rel += _g_l2_loss_rel\n losses['G_l2_loss_rel'] = g_l2_loss_sum_rel.item()\n loss += g_l2_loss_sum_rel\n\n traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)\n traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)\n\n scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)\n discriminator_loss = g_loss_fn(scores_fake)\n\n loss += discriminator_loss\n losses['G_discriminator_loss'] = discriminator_loss.item()\n losses['G_total_loss'] = loss.item()\n\n optimizer_g.zero_grad()\n loss.backward()\n if args.clipping_threshold_g > 0:\n nn.utils.clip_grad_norm_(\n generator.parameters(), args.clipping_threshold_g\n )\n optimizer_g.step()\n\n return losses\n\n\ndef check_accuracy(\n args, loader, generator, discriminator, d_loss_fn, limit=False\n):\n d_losses = []\n metrics = {}\n g_l2_losses_abs, g_l2_losses_rel = ([],) * 2\n disp_error, disp_error_l, disp_error_nl = ([],) * 3\n f_disp_error, f_disp_error_l, f_disp_error_nl = ([],) * 3\n total_traj, total_traj_l, total_traj_nl = 0, 0, 0\n loss_mask_sum = 0\n generator.eval()\n with torch.no_grad():\n for batch in loader:\n batch = [tensor.cuda() for tensor in batch]\n (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_gt_rel,\n non_linear_ped, loss_mask, seq_start_end) = batch\n linear_ped = 1 - non_linear_ped\n loss_mask = loss_mask[:, args.obs_len:]\n\n pred_traj_fake_rel = generator(\n obs_traj, obs_traj_rel, seq_start_end\n )\n pred_traj_fake = relative_to_abs(pred_traj_fake_rel, obs_traj[-1])\n\n g_l2_loss_abs, g_l2_loss_rel = cal_l2_losses(\n pred_traj_gt, pred_traj_gt_rel, pred_traj_fake,\n pred_traj_fake_rel, loss_mask\n )\n ade, ade_l, ade_nl = cal_ade(\n pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped\n )\n\n fde, fde_l, fde_nl = cal_fde(\n pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped\n )\n\n traj_real = torch.cat([obs_traj, pred_traj_gt], dim=0)\n traj_real_rel = torch.cat([obs_traj_rel, pred_traj_gt_rel], dim=0)\n traj_fake = torch.cat([obs_traj, pred_traj_fake], dim=0)\n traj_fake_rel = torch.cat([obs_traj_rel, pred_traj_fake_rel], dim=0)\n\n scores_fake = discriminator(traj_fake, traj_fake_rel, seq_start_end)\n scores_real = discriminator(traj_real, traj_real_rel, seq_start_end)\n\n d_loss = d_loss_fn(scores_real, scores_fake)\n d_losses.append(d_loss.item())\n\n g_l2_losses_abs.append(g_l2_loss_abs.item())\n g_l2_losses_rel.append(g_l2_loss_rel.item())\n disp_error.append(ade.item())\n disp_error_l.append(ade_l.item())\n disp_error_nl.append(ade_nl.item())\n f_disp_error.append(fde.item())\n f_disp_error_l.append(fde_l.item())\n f_disp_error_nl.append(fde_nl.item())\n\n loss_mask_sum += torch.numel(loss_mask.data)\n total_traj += pred_traj_gt.size(1)\n total_traj_l += torch.sum(linear_ped).item()\n total_traj_nl += torch.sum(non_linear_ped).item()\n if limit and total_traj >= args.num_samples_check:\n break\n\n metrics['d_loss'] = sum(d_losses) / len(d_losses)\n metrics['g_l2_loss_abs'] = sum(g_l2_losses_abs) / loss_mask_sum\n metrics['g_l2_loss_rel'] = sum(g_l2_losses_rel) / loss_mask_sum\n\n metrics['ade'] = sum(disp_error) / (total_traj * args.pred_len)\n metrics['fde'] = sum(f_disp_error) / total_traj\n if total_traj_l != 0:\n metrics['ade_l'] = sum(disp_error_l) / (total_traj_l * args.pred_len)\n metrics['fde_l'] = sum(f_disp_error_l) / total_traj_l\n else:\n metrics['ade_l'] = 0\n metrics['fde_l'] = 0\n 
if total_traj_nl != 0:\n metrics['ade_nl'] = sum(disp_error_nl) / (\n total_traj_nl * args.pred_len)\n metrics['fde_nl'] = sum(f_disp_error_nl) / total_traj_nl\n else:\n metrics['ade_nl'] = 0\n metrics['fde_nl'] = 0\n\n generator.train()\n return metrics\n\n\ndef cal_l2_losses(\n pred_traj_gt, pred_traj_gt_rel, pred_traj_fake, pred_traj_fake_rel,\n loss_mask\n):\n g_l2_loss_abs = l2_loss(\n pred_traj_fake, pred_traj_gt, loss_mask, mode='sum'\n )\n g_l2_loss_rel = l2_loss(\n pred_traj_fake_rel, pred_traj_gt_rel, loss_mask, mode='sum'\n )\n return g_l2_loss_abs, g_l2_loss_rel\n\n\ndef cal_ade(pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped):\n ade = displacement_error(pred_traj_fake, pred_traj_gt)\n ade_l = displacement_error(pred_traj_fake, pred_traj_gt, linear_ped)\n ade_nl = displacement_error(pred_traj_fake, pred_traj_gt, non_linear_ped)\n return ade, ade_l, ade_nl\n\n\ndef cal_fde(\n pred_traj_gt, pred_traj_fake, linear_ped, non_linear_ped\n):\n fde = final_displacement_error(pred_traj_fake[-1], pred_traj_gt[-1])\n fde_l = final_displacement_error(\n pred_traj_fake[-1], pred_traj_gt[-1], linear_ped\n )\n fde_nl = final_displacement_error(\n pred_traj_fake[-1], pred_traj_gt[-1], non_linear_ped\n )\n return fde, fde_l, fde_nl\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n main(args)\n"
] |
[
[
"torch.cuda.synchronize",
"torch.load",
"torch.cat",
"torch.zeros",
"torch.min",
"torch.sum",
"torch.numel",
"torch.no_grad",
"torch.save",
"torch.stack",
"torch.nn.init.kaiming_normal_"
]
] |
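A small, standalone sketch of the alternating update schedule the training script above implements: each iteration spends `d_steps` batches on the discriminator, then `g_steps` batches on the generator. The function below only builds the schedule (no models or losses) and its name and defaults are assumptions for illustration.

```python
def alternate_steps(batches, d_steps=2, g_steps=1):
    d_left, g_left = d_steps, g_steps
    schedule = []
    for batch in batches:
        if d_left > 0:
            schedule.append(("d", batch))
            d_left -= 1
        elif g_left > 0:
            schedule.append(("g", batch))
            g_left -= 1
        if d_left == 0 and g_left == 0:   # one full iteration finished
            d_left, g_left = d_steps, g_steps
    return schedule

print(alternate_steps(range(6)))  # [('d', 0), ('d', 1), ('g', 2), ('d', 3), ...]
```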
BryanYehuda/CompressionMethodComparison
|
[
"79db365b46242e49116f92bb871545c0fce26635"
] |
[
"CompressionCheck.py"
] |
[
"from math import log10, sqrt\nimport cv2\nimport numpy as np\n \ndef PSNR(original, compressed):\n mse = np.mean((original - compressed) ** 2)\n if(mse == 0):\n return 100\n max_pixel = 255.0\n psnr = 20 * log10(max_pixel / sqrt(mse))\n return psnr\n\ndef SNR(original, compressed):\n mse = np.mean((original - compressed) ** 2)\n if(mse == 0):\n return 100\n snr = 20 * log10(np.mean(original) / sqrt(mse))\n return snr\n \ndef main():\n original = cv2.imread(\"raw.png\")\n compressed = cv2.imread(\"lossy.png\", 1)\n mse = np.mean((original - compressed) ** 2)\n snr = SNR(original, compressed)\n psnr = PSNR(original, compressed)\n print(f\"MSE value is {mse}\")\n print(f\"SNR value is {snr} dB\")\n print(f\"PSNR value is {psnr} dB\")\n \nif __name__ == \"__main__\":\n main()"
] |
[
[
"numpy.mean"
]
] |
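A hedged variant of the PSNR computation in the record above. Casting to float before differencing avoids uint8 wrap-around in the MSE term; returning infinity for identical images is a choice made here (the record returns 100 instead).

```python
import numpy as np
from math import log10, sqrt

def psnr(original, compressed, max_pixel=255.0):
    diff = original.astype(np.float64) - compressed.astype(np.float64)
    mse = np.mean(diff ** 2)
    return float("inf") if mse == 0 else 20 * log10(max_pixel / sqrt(mse))

a = np.full((4, 4), 10, np.uint8)
b = np.full((4, 4), 12, np.uint8)
print(psnr(a, b))  # ~42.1 dB
```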
A03ki/f-AnoGAN
|
[
"fecd9672f8f216e2d9ee618b2a03ed6b6d2fa3ba"
] |
[
"fanogan/test_anomaly_detection.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom torch.utils.model_zoo import tqdm\n\n\ndef test_anomaly_detection(opt, generator, discriminator, encoder,\n dataloader, device, kappa=1.0):\n generator.load_state_dict(torch.load(\"results/generator\"))\n discriminator.load_state_dict(torch.load(\"results/discriminator\"))\n encoder.load_state_dict(torch.load(\"results/encoder\"))\n\n generator.to(device).eval()\n discriminator.to(device).eval()\n encoder.to(device).eval()\n\n criterion = nn.MSELoss()\n\n with open(\"results/score.csv\", \"w\") as f:\n f.write(\"label,img_distance,anomaly_score,z_distance\\n\")\n\n for (img, label) in tqdm(dataloader):\n\n real_img = img.to(device)\n\n real_z = encoder(real_img)\n fake_img = generator(real_z)\n fake_z = encoder(fake_img)\n\n real_feature = discriminator.forward_features(real_img)\n fake_feature = discriminator.forward_features(fake_img)\n\n # Scores for anomaly detection\n img_distance = criterion(fake_img, real_img)\n loss_feature = criterion(fake_feature, real_feature)\n anomaly_score = img_distance + kappa * loss_feature\n\n z_distance = criterion(fake_z, real_z)\n\n with open(\"results/score.csv\", \"a\") as f:\n f.write(f\"{label.item()},{img_distance},\"\n f\"{anomaly_score},{z_distance}\\n\")\n"
] |
[
[
"torch.nn.MSELoss",
"torch.utils.model_zoo.tqdm",
"torch.load"
]
] |
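A brief recap of the anomaly score computed in the record above: image-space reconstruction error plus kappa times the feature-space error. The stand-in tensors below replace the generator, encoder, and discriminator outputs purely for illustration; only the scoring formula follows the listing.

```python
import torch
import torch.nn as nn

def anomaly_score(real_img, fake_img, real_feat, fake_feat, kappa=1.0):
    criterion = nn.MSELoss()
    img_distance = criterion(fake_img, real_img)
    feat_distance = criterion(fake_feat, real_feat)
    return img_distance + kappa * feat_distance

x = torch.rand(1, 1, 8, 8)
x_hat = x + 0.1 * torch.randn_like(x)             # stand-in for generator(encoder(x))
f, f_hat = torch.rand(1, 16), torch.rand(1, 16)   # stand-in discriminator features
print(anomaly_score(x, x_hat, f, f_hat).item())
```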
metehancekic/wireless-fingerprinting
|
[
"41872761260b3fc26f33acec983220e8b4d9f42f"
] |
[
"preproc/preproc_wifi.py"
] |
[
"'''\nContains code for fractionally spaced equalization, preamble detection\nAlso includes a modified version of Teledyne's data read and preprocessing code\n'''\n\nimport numpy as np\nimport os\nimport json\nimport csv\nimport math\nimport fractions\nimport resampy\nfrom tqdm import tqdm, trange\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom scipy.fftpack import fft, ifft, fftshift, ifftshift\nimport ipdb\nfrom sklearn.preprocessing import normalize\n\n\ndef preprocess_wifi(data_dict, sample_duration, sample_rate, preprocess_type=1, progress=True):\n '''\n Detects preamble and extract its\n '''\n\n signal_indices = range(len(data_dict['data_file']))\n if progress is True:\n signal_indices = tqdm(signal_indices)\n\n flag = 0\n\n for i in signal_indices:\n signal = data_dict['signal'][i]\n orig_sample_rate = data_dict['capture_sample_rate'][i]\n start_index = 0\n end_index = math.ceil(sample_duration * orig_sample_rate)\n\n if orig_sample_rate == np.int(200e6):\n if (preprocess_type == 2) or (preprocess_type == 3):\n lowFreq = data_dict['freq_lower_edge'][i]\n upFreq = data_dict['freq_upper_edge'][i]\n Fc = data_dict['capture_frequency'][i]\n signal, flag_i = detect_frame(signal, lowFreq, upFreq, Fc, verbose=False)\n flag = flag + flag_i\n if preprocess_type == 3:\n signal = frac_eq_preamble(signal)\n\n start_index = np.int(start_index)\n end_index = np.int(end_index)\n\n if (preprocess_type == 1) or (preprocess_type == 2) or (orig_sample_rate != np.int(200e6)):\n signal = signal[start_index:end_index] # extract needed section of signal\n\n with np.errstate(all='raise'):\n try:\n signal = signal / rms(signal) # normalize signal\n except FloatingPointError:\n # print('data_file = '+str(data_dict['data_file'][i]) + ',\\t reference_number = '+str(data_dict['reference_number'][i]))\n try:\n # print('Normalization error. RMS = {}, Max = {}, Min = {}, Data size = {}'.format(rms(signal), np.abs(signal).min(), np.abs(signal).max(), signal.shape))\n signal += 1.0/np.sqrt(2*signal.size) + 1.0/np.sqrt(2*signal.size)*1j\n except FloatingPointError:\n # print('i = {}, signal.shape = {}'.format(i, signal.shape))\n # print('start_index = {}, end_index = {}'.format(start_index, end_index))\n signal_size = end_index - start_index\n signal = np.ones([signal_size]) * (1.0 + 1.0*1j)/np.sqrt(2*signal_size)\n\n if (preprocess_type == 1) or (orig_sample_rate != np.int(200e6)):\n freq_shift = (data_dict['freq_upper_edge'][i] +\n data_dict['freq_lower_edge'][i])/2 - data_dict['capture_frequency'][i]\n # baseband signal w.r.t. 
center frequency\n signal = shift_frequency(signal, freq_shift, orig_sample_rate)\n # filter and downsample signal\n signal = resample(signal, orig_sample_rate, sample_rate)\n\n if (preprocess_type == 2):\n signal = resample(signal, orig_sample_rate, sample_rate)\n\n data_dict['signal'][i] = signal\n # data_dict['freq_lower_edge'][i] = -sample_rate/2.\n # data_dict['freq_upper_edge'][i] = sample_rate/2.\n # data_dict['sample_start'][i] = 0\n # data_dict['sample_count'][i] = len(signal)\n data_dict['center_frequency'][i] = (\n data_dict['freq_upper_edge'][i] + data_dict['freq_lower_edge'][i])/2.\n data_dict['sample_rate'][i] = sample_rate\n\n if (preprocess_type == 2) or (preprocess_type == 3):\n print('Successful frame detection on {:.2f}% of signals'.format(\n 100.0-flag*100.0/len(data_dict['data_file'])))\n\n return data_dict\n\n\ndef frac_eq_preamble(rx, verbose=False):\n '''\n Fractionally equalize preamble\n https://ieeexplore.ieee.org/document/489269\n '''\n\n # print('Hello!')\n\n Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,\n 1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])\n stf_64 = ifft(ifftshift(Stf_64))\n # stf = stf_64[:16]\n\n Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,\n 1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])\n ltf = ifft(ifftshift(Ltf))\n\n tx = np.concatenate((stf_64[:-32], stf_64, stf_64, ltf[-32:], ltf, ltf))\n L = 160\n N = 320\n\n rx = rx.reshape([-1, 1])\n\n R = np.zeros([L, L]) + 0j\n p = np.zeros([L, 1]) + 0j\n for i in range(N):\n j = 10*i\n R += rx[j:j+L].dot(rx[j:j+L].conj().T)\n p += rx[j:j+L] * tx[i].conj()\n\n c, residuals, rank, sing = np.linalg.lstsq(R, p)\n # h = c[::-1].conj()\n # rx_eq = np.convolve(h, rx, mode='full')[np.int(L/2):-np.int(L/2)]\n # signal_eq = rx_eq[::10][:1600]\n\n signal_eq = np.zeros([N, 1]) + 0j\n for i in range(N):\n j = 10*i\n signal_eq[i] = rx[j:j+L].T.dot(c.conj())\n\n return signal_eq.flatten()\n\n\ndef detect_frame(complex_signal, lowFreq, upFreq, Fc, verbose=False):\n '''\n Detects preamble and extract its\n '''\n\n Fs = 200e6\n flag = 0\n\n # ----------------------------------------------------\n # Filter out-of-band noise\n # ----------------------------------------------------\n\n N = complex_signal.shape[0]\n if N % 2 != 0:\n complex_signal = complex_signal[:-1]\n N -= 1\n low_ind = np.int((lowFreq-Fc)*(N/Fs) + N/2)\n up_ind = np.int((upFreq-Fc)*(N/Fs) + N/2)\n lag = np.int((-Fc + (lowFreq+upFreq)/2)*(N/Fs) + N/2) - np.int(N/2)\n X = fftshift(fft(complex_signal))\n X[:low_ind] = 0 + 0j\n X[up_ind:] = 0 + 0j\n X = np.roll(X, -lag)\n complex_signal = ifft(ifftshift(X))\n\n # ----------------------------------------------------\n # Coarse frame detection (using STF)\n # ----------------------------------------------------\n\n guard_band_upsamp = np.int(2e-6*Fs) # 2 usec\n n_win = 1600-160 # ?\n lag = 160\n search_length_stf_upsamp = min(2*guard_band_upsamp+1, np.int(complex_signal.size))\n autocorr_stf_upsamp = np.zeros(search_length_stf_upsamp)\n a = np.zeros(search_length_stf_upsamp)+0j\n p = np.zeros(search_length_stf_upsamp)\n for n in range(search_length_stf_upsamp):\n sig1 = complex_signal[n:n+n_win].reshape(1, -1)\n sig2 = complex_signal[n+lag:n+n_win+lag].conj().reshape(1, -1)\n a[n] = sig1.dot(sig2.T)\n 
# p[n] = np.sum(np.abs(sig1)**2)\n p[n] = np.sqrt(np.sum(np.abs(sig1)**2)*np.sum(np.abs(sig2)**2))\n autocorr_stf_upsamp = np.abs(a)/p\n frame_start_autocorr_upsamp = np.argmax(autocorr_stf_upsamp)\n\n # ----------------------------------------------------\n # Guard band sanity check\n # ----------------------------------------------------\n\n n_short_upsamp = 1600\n\n if frame_start_autocorr_upsamp <= 2*guard_band_upsamp:\n # sig3 = complex_signal[frame_start_autocorr_upsamp+np.int(n_short_upsamp/2):frame_start_autocorr_upsamp+n_short_upsamp-160].conj().copy()\n # sig4 = complex_signal[frame_start_autocorr_upsamp+np.int(n_short_upsamp/2)+160:frame_start_autocorr_upsamp+n_short_upsamp].copy()\n # df1_upsamp = 1/160 * np.angle(sig3.dot(sig4.T))\n # complex_signal[frame_start_autocorr_upsamp:] *= np.exp(-1j*np.arange(0,complex_signal.size - frame_start_autocorr_upsamp)*df1_upsamp).flatten()\n if verbose == True:\n print('Autocorr prediction = {}'.format(frame_start_autocorr_upsamp))\n # print('Freq offset_upsamp = {:.2f} KHz'.format(df1_upsamp* 2e8 / (2*np.pi*1e3)))\n else:\n if verbose == True:\n print('Autocorr detection failed\\n Prediction = {}'.format(frame_start_autocorr_upsamp))\n frame_start_autocorr_upsamp = guard_band_upsamp\n # df1_upsamp = 0\n flag = 1\n\n return complex_signal[frame_start_autocorr_upsamp:], flag\n\n\ndef offset_compensate_preamble(preamble_in, fs=200e6, verbose=False, option=1):\n \"\"\"\n Function that strips out the effect of the offset from the preamble. \n\n df = 1/16 arg(sum_{n=0}^{N_short - 1 - 16} s[n]* s'[n+16] )\n s[n] <---- s[n]* e^(j.n.df)\n\n Inputs:\n preamble - Preamble containing effects of the channel and Tx nonlinearities\n (320 samples)\n fs - Sampling frequency\n [Verbose] - Verbose\n ### NotImplemented: freq_offset - Dict containing freq offset\n\n Output: \n preamble_eq - Preamble with the channel stripped out (320 samples)\n ### NotImplemented: preamble_eq_offset - Equalized preamble with frequency offset\n\n \"\"\"\n\n # if fs!=20e6:\n # raise NotImplementedError\n\n preamble = preamble_in.copy()\n\n if fs == 200e6:\n if preamble.size != 3200:\n raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))\n\n n_short = 1600 # Length of short preamble\n n_long = 1600 # Length of long preamble\n\n L = 160 # length of single short sequence\n N = 640 # length of single long sequnce\n\n # ----------------------------------------------------\n # Frequency offset correction\n # ----------------------------------------------------\n # Coarse estimation\n # sig3 = preamble[n_short//2: n_short-L].conj().copy()\n # sig4 = preamble[n_short//2 + L: n_short].copy()\n sig3 = preamble[: n_short-L].conj().copy()\n sig4 = preamble[L: n_short].copy()\n df1 = 1./L * np.angle(sig3.dot(sig4.T))\n preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()\n\n # Fine estimation\n sig5 = preamble[n_short + 2*L: n_short + 2*L + N].conj().copy()\n sig6 = preamble[n_short + N+2*L: n_short + n_long].reshape(1, -1).copy()\n df2 = 1./N * np.angle(sig5.dot(sig6.T))\n preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()\n freq_offset = np.array([df1, df2])\n\n elif fs == 20e6:\n\n if preamble.size != 320:\n raise Exception('Size of preamble is {}, but it should be 320.'.format(preamble.size))\n\n n_short = 160 # Length of short preamble\n n_long = 160 # Length of long preamble\n\n L = 16 # length of single short sequence\n N = 64 # length of single long sequence\n\n # ----------------------------------------------------\n # 
Frequency offset correction\n # ----------------------------------------------------\n # Coarse estimation\n sig3 = preamble[np.int(n_short/2):n_short-L].conj().copy()\n sig4 = preamble[np.int(n_short/2)+L:n_short].copy()\n df1 = 1./L * np.angle(sig3.dot(sig4.T))\n preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()\n\n # Fine estimation\n sig5 = preamble[n_short+32:n_short+32+N].conj().copy()\n sig6 = preamble[n_short+N+32:n_short+n_long].reshape(1, -1).copy()\n df2 = 1./N * np.angle(sig5.dot(sig6.T))\n preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()\n freq_offset = np.array([df1, df2])\n\n if option == 1:\n return preamble\n elif option == 2:\n return preamble, freq_offset\n else:\n raise NotImplementedError\n\n\ndef get_residuals_preamble(preamble_in, fs, method='subtraction', channel_method='frequency', verbose=False, label=''):\n \"\"\"\n Function that reconstructs the preamble fed into this function with the channel and CFO effects\n and returns the difference between original preamble and reconstructed one (residuals):\n Inputs:\n preamble - Preamble containing effects of the channel and Tx nonlinearities\n (3200 samples)\n ### NotImplemented: freq_offset - Dict containing freq offset\n\n Output: \n preamble_eq - Preamble with the channel stripped out (320 samples)\n ### NotImplemented: preamble_eq_offset - Equalized preamble with frequency offset\n\n \"\"\"\n # if fs!=20e6:\n # raise NotImplementedError\n\n preamble = preamble_in.copy()\n preamble_orig = preamble_in.copy()\n\n if fs == 200e6:\n if preamble.size != 3200:\n raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))\n\n n_short = 1600\n n_long = 1600\n\n L = 160\n N = 640\n\n # ----------------------------------------------------\n # Frequency offset correction\n # ----------------------------------------------------\n sig3 = preamble[: n_short-L].conj().copy()\n sig4 = preamble[L: n_short].copy()\n df1 = 1./L * np.angle(sig3.dot(sig4.T))\n preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()\n\n # Fine estimation\n sig5 = preamble[n_short + 2*L: n_short + 2*L + N].conj().copy()\n sig6 = preamble[n_short + N+2*L: n_short + n_long].reshape(1, -1).copy()\n df2 = 1./N * np.angle(sig5.dot(sig6.T))\n preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()\n freq_offset = np.array([df1, df2])\n\n cfo_total = np.multiply(np.exp(1j*np.arange(0, preamble.size)*df1).flatten(),\n np.exp(1j*np.arange(0, preamble.size)*df2).flatten())\n\n # ------------------------------------------------------------------------\n # LTI channel estimation (with delay spread <= length of cyclic prefix)\n # ------------------------------------------------------------------------\n\n Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,\n 1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])\n\n Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,\n 1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])\n\n Ltf1_rx = fftshift(\n fft(preamble[n_short+np.int(n_long/5):n_short+np.int(n_long/5 + n_long*2/5)]))\n Ltf2_rx = fftshift(fft(preamble[n_short+np.int(n_long/5 + n_long*2/5):n_short+n_long]))\n Ltf_mid_rx = fftshift(\n fft(preamble[n_short + 2*L - np.int(L/2):n_short + 2*L+N - 
np.int(L/2)]))\n\n Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2\n\n ind_all = np.arange(-32, 32) + (N//2)\n\n H_hat = np.zeros((N)) + 1j*np.zeros((N))\n\n # ipdb.set_trace()\n\n Ltf_interpolated = np.concatenate(\n (np.zeros(32*9) + 1j * np.zeros(32*9), Ltf, np.zeros(32*9) + 1j * np.zeros(32*9)))\n\n H_hat[ind_all] = Ltf_avg_rx[ind_all]*Ltf # because Ltf is 1's and 0's\n\n h_hat = np.roll(ifft(ifftshift(H_hat)), -N//2)\n # H_1_hat[ind_all] = Ltf_1_rx[ind_all]*Ltf\n # H_2_hat[ind_all] = Ltf_2_rx[ind_all]*Ltf\n\n # H_hat[ind_all] = Ltf/Ltf_avg_rx[ind_all]\n\n # ltf_1_interpolated = ifft(ifftshift(H_1_hat*Ltf_interpolated))\n # ltf_2_interpolated = ifft(ifftshift(H_2_hat*Ltf_interpolated))\n # ltf_total = np.concatenate((ltf_1_interpolated[-N//2:], ltf_1_interpolated, ltf_2_interpolated))\n\n # ltf_interpolated = ifft(ifftshift(H_hat * Ltf_interpolated))\n\n if channel_method == 'time':\n ltf_interpolated = ifft(ifftshift(Ltf_interpolated))\n ltf_total = np.concatenate(\n (ltf_interpolated[-N//2:], ltf_interpolated, ltf_interpolated))\n\n Stf_64_interpolated = np.concatenate(\n (np.zeros(32*9) + 1j * np.zeros(32*9), Stf_64, np.zeros(32*9) + 1j * np.zeros(32*9)))\n stf_64_interpolated = ifft(ifftshift(Stf_64_interpolated))\n stf_total = np.concatenate(\n (stf_64_interpolated[-N//2:], stf_64_interpolated, stf_64_interpolated))\n\n preamble_constructed = cfo_total * (np.convolve(np.concatenate((stf_total, ltf_total)), h_hat)[\n N//2-1:-N//2])/rms(np.convolve(np.concatenate((stf_total, ltf_total)), h_hat)[N//2-1:-N//2])\n elif channel_method == 'frequency':\n ltf_interpolated = ifft(ifftshift(H_hat * Ltf_interpolated))\n ltf_total = np.concatenate(\n (ltf_interpolated[-N//2:], ltf_interpolated, ltf_interpolated))\n\n Stf_64_interpolated = np.concatenate(\n (np.zeros(32*9) + 1j * np.zeros(32*9), Stf_64, np.zeros(32*9) + 1j * np.zeros(32*9)))\n stf_64_interpolated = ifft(ifftshift(H_hat * Stf_64_interpolated))\n stf_total = np.concatenate(\n (stf_64_interpolated[-N//2:], stf_64_interpolated, stf_64_interpolated))\n\n preamble_constructed = cfo_total * np.concatenate((stf_total, ltf_total))\n\n # stf_ch_cfo = ifft(ifftshift(fftshift(fft(preamble_constructed[N//2:N+N//2]))*H_hat))\n # ltf_ch_cfo = ifft(ifftshift(fftshift(fft(preamble_constructed[n_short+N//2:n_short+N//2+N]))*H_hat))\n\n # stf_total_cfo_ch_added = np.concatenate((stf_ch_cfo[-N//2:], stf_ch_cfo, stf_ch_cfo))\n # ltf_total_cfo_ch_added = np.concatenate((ltf_ch_cfo[-N//2:], ltf_ch_cfo, ltf_ch_cfo))\n # preamble_constructed = np.concatenate((stf_total_cfo_ch_added, ltf_total_cfo_ch_added))\n\n if method == 'division':\n residuals = preamble_orig/(preamble_constructed+0.001)\n elif method == 'subtraction':\n residuals = preamble_orig - preamble_constructed\n\n # # ----------------------------------------------------\n # # Preamble equalization\n # # ----------------------------------------------------\n # ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + (N//2)\n # ind_null = np.concatenate((np.array([0]), np.arange(-(N//2), -32), np.arange(32, (N//2)) )) + (N//2)\n # ind_pilots = np.array([-21, -7, 7, 21]) + (N//2)\n\n # mask_data = np.ones(N)\n # mask_data_pilots = np.ones(N)\n # mask_data[list(np.concatenate((ind_guard, ind_null, ind_pilots)))] = 0\n # mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0\n # ind_all_all = np.arange(-(N//2), (N//2)) + N//2\n # ind_data = ind_all_all[mask_data==1]\n # ind_data_pilots = ind_all_all[mask_data_pilots==1]\n\n # h_hat = ifft(ifftshift(H_hat))\n\n # Stf_1_eq = 
fftshift(fft(preamble[n_short-2*N:n_short-N]))\n # Stf_2_eq = fftshift(fft(preamble[n_short-N:n_short]))\n # Ltf_1_eq = fftshift(fft(preamble[n_short+n_long-2*N:n_short+n_long-N]))\n # Ltf_2_eq = fftshift(fft(preamble[n_short+n_long-N:n_short+n_long]))\n\n # Stf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)\n # Stf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)\n # Ltf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)\n # Ltf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)\n\n # Stf_1_eq[ind_guard] = 0\n # Stf_2_eq[ind_guard] = 0\n # Ltf_1_eq[ind_guard] = 0\n # Ltf_2_eq[ind_guard] = 0\n\n # Stf_1_eq[ind_null] = 0\n # Stf_2_eq[ind_null] = 0\n # Ltf_1_eq[ind_null] = 0\n # Ltf_2_eq[ind_null] = 0\n\n # # Sanity check\n # Ltf_1_eq = Ltf\n # Ltf_2_eq = Ltf\n # Stf_1_eq = Stf_64\n # Stf_2_eq = Stf_64\n\n # stf_1_eq = ifft(ifftshift(Stf_1_eq))\n # stf_2_eq = ifft(ifftshift(Stf_2_eq))\n # ltf_1_eq = ifft(ifftshift(Ltf_1_eq))\n # ltf_2_eq = ifft(ifftshift(Ltf_2_eq))\n\n # preamble_eq = np.concatenate((stf_1_eq[:-(N//4)], stf_1_eq, stf_2_eq[:-(N//4)], stf_2_eq, ltf_1_eq[:-(N//2)], ltf_1_eq, ltf_2_eq))\n\n return residuals, preamble_constructed # , h_hat, H_hat\n\n\ndef basic_equalize_preamble(preamble_in, fs, verbose=False, label=''):\n \"\"\"\n Function that strips out the effect of the channel from the preamble. \n It does the following:\n 1. LTI channel estimation (with delay spread <= length of cyclic prefix)\n 2. Remove the channel estimate from the preamble\n\n Inputs:\n preamble - Preamble containing effects of the channel and Tx nonlinearities\n (320 samples)\n ### NotImplemented: freq_offset - Dict containing freq offset\n\n Output: \n preamble_eq - Preamble with the channel stripped out (320 samples)\n ### NotImplemented: preamble_eq_offset - Equalized preamble with frequency offset\n\n \"\"\"\n # if fs!=20e6:\n # raise NotImplementedError\n\n preamble = preamble_in.copy()\n\n if fs == 200e6:\n if preamble.size != 3200:\n raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))\n\n n_short = 1600\n n_long = 1600\n\n L = 160\n N = 640\n\n # ----------------------------------------------------\n # Frequency offset correction\n # ----------------------------------------------------\n # sig3 = preamble[np.int(n_short/2):n_short-L].conj().copy()\n # sig4 = preamble[np.int(n_short/2)+L:n_short].copy()\n # df1 = 1/L * np.angle(sig3.dot(sig4.T))\n # preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()\n\n # sig5 = preamble[n_short+2*L:n_short+2*L+N].conj().copy()\n # sig6 = preamble[n_short+N+2*L:n_short+n_long].reshape(1,-1).copy()\n # df2 = 1/N * np.angle(sig5.dot(sig6.T))\n # preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()\n\n # ------------------------------------------------------------------------\n # LTI channel estimation (with delay spread <= length of cyclic prefix)\n # ------------------------------------------------------------------------\n\n Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,\n 1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])\n\n Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,\n 1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])\n\n Ltf1_rx = fftshift(\n 
fft(preamble[n_short+np.int(n_long/5):n_short+np.int(n_long/5 + n_long*2/5)]))\n Ltf2_rx = fftshift(fft(preamble[n_short+np.int(n_long/5 + n_long*2/5):n_short+n_long]))\n Ltf_mid_rx = fftshift(\n fft(preamble[n_short + 2*L - np.int(L/2):n_short + 2*L+N - np.int(L/2)]))\n\n Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2\n # Ltf_avg_rx = Ltf1_rx\n # Ltf_avg_rx = Ltf2_rx\n\n # Ltf_mid_rx = Ltf_avg_rx\n\n # AA = np.zeros((N, N)) + 0j\n # for m in range(N):\n # for n in range(L+1):\n # AA[m, n] = Ltf[m] * np.exp(-1j*2*np.pi*m*n/N)\n # A = AA[:, :L+1] * np.exp(1j*np.pi*np.arange(L+1)).reshape(1, -1)\n\n # ind_all = np.arange(-32, 32) + 32\n # ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + 32\n # ind_null = np.array([0]) + 32\n # mask_data_pilots = np.ones(64)\n # mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0\n # ind_data_pilots = ind_all[mask_data_pilots==1]\n\n # h_hat_small, residuals, rank, singular_values = np.linalg.lstsq(A[ind_data_pilots,:], Ltf_mid_rx[ind_data_pilots], rcond=None)\n\n # h_hat = np.zeros(N)+0j\n # h_hat[:L+1] = h_hat_small\n # # h_hat = np.roll(h_hat, -np.int(L/2))\n # H_hat = fftshift(fft(h_hat))\n\n ind_all = np.arange(-32, 32) + (N//2)\n\n H_hat = np.zeros((N)) + 1j*np.zeros((N))\n\n # ipdb.set_trace()\n\n H_hat[ind_all] = Ltf_avg_rx[ind_all]*Ltf\n # H_hat[ind_all] = Ltf/Ltf_avg_rx[ind_all]\n\n if verbose is True:\n freq = np.arange(-32, 32)\n\n # H_hat_coarse = Ltf_mid_rx*Ltf\n H_hat_coarse = H_hat[ind_all]\n h_hat_coarse = ifft(ifftshift(H_hat_coarse))\n\n plt.figure(figsize=[10, 3])\n plt.subplot(1, 2, 1)\n plt.stem(freq, np.abs(H_hat_coarse))\n plt.grid(True)\n plt.title('Magnitude')\n plt.xlabel('Frequency bin')\n plt.subplot(1, 2, 2)\n # plt.stem(freq, np.unwrap(np.angle(H_hat)))\n plt.stem(freq, np.angle(H_hat_coarse))\n plt.title('Phase')\n plt.xlabel('Frequency bin')\n plt.suptitle('Coarse estimation'+label)\n plt.grid(True)\n plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])\n\n plt.figure(figsize=[10, 3])\n plt.subplot(1, 2, 1)\n plt.stem(np.abs(h_hat_coarse))\n plt.title('Magnitude')\n plt.xlabel('Time (in samples)')\n plt.grid(True)\n plt.subplot(1, 2, 2)\n # plt.stem(np.unwrap(np.angle(h_hat)))\n plt.stem(np.angle(h_hat_coarse))\n plt.title('Phase')\n plt.xlabel('Time (in samples)')\n plt.grid(True)\n plt.suptitle('Coarse estimation'+label)\n plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])\n\n # plt.figure(figsize=[10, 3])\n # plt.subplot(1,2,1)\n # plt.stem(freq, np.abs(H_hat))\n # plt.grid(True)\n # plt.title('Magnitude')\n # plt.xlabel('Frequency bin')\n # plt.subplot(1,2,2)\n # # plt.stem(freq, np.unwrap(np.angle(H_hat)))\n # plt.stem(freq, np.angle(H_hat))\n # plt.title('Phase')\n # plt.xlabel('Frequency bin')\n # plt.suptitle('Frequency domain least squares estimation')\n # plt.grid(True)\n # plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])\n\n # plt.figure(figsize=[10, 3])\n # plt.subplot(1,2,1)\n # plt.stem(np.abs(h_hat))\n # plt.title('Magnitude')\n # plt.xlabel('Time (in samples)')\n # plt.grid(True)\n # plt.subplot(1,2,2)\n # # plt.stem(np.unwrap(np.angle(h_hat)))\n # plt.stem(np.angle(h_hat))\n # plt.title('Phase')\n # plt.xlabel('Time (in samples)')\n # plt.grid(True)\n # plt.suptitle('Frequency domain least squares estimation')\n # plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])\n\n plt.show()\n\n # ----------------------------------------------------\n # Preamble equalization\n # ----------------------------------------------------\n ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + (N//2)\n 
ind_null = np.concatenate(\n (np.array([0]), np.arange(-(N//2), -32), np.arange(32, (N//2)))) + (N//2)\n ind_pilots = np.array([-21, -7, 7, 21]) + (N//2)\n\n mask_data = np.ones(N)\n mask_data_pilots = np.ones(N)\n mask_data[list(np.concatenate((ind_guard, ind_null, ind_pilots)))] = 0\n mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0\n ind_all_all = np.arange(-(N//2), (N//2)) + N//2\n ind_data = ind_all_all[mask_data == 1]\n ind_data_pilots = ind_all_all[mask_data_pilots == 1]\n\n Stf_1_eq = fftshift(fft(preamble[n_short-2*N:n_short-N]))\n Stf_2_eq = fftshift(fft(preamble[n_short-N:n_short]))\n Ltf_1_eq = fftshift(fft(preamble[n_short+n_long-2*N:n_short+n_long-N]))\n Ltf_2_eq = fftshift(fft(preamble[n_short+n_long-N:n_short+n_long]))\n\n Stf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)\n Stf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)\n Ltf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)\n Ltf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)\n\n Stf_1_eq[ind_guard] = 0\n Stf_2_eq[ind_guard] = 0\n Ltf_1_eq[ind_guard] = 0\n Ltf_2_eq[ind_guard] = 0\n\n Stf_1_eq[ind_null] = 0\n Stf_2_eq[ind_null] = 0\n Ltf_1_eq[ind_null] = 0\n Ltf_2_eq[ind_null] = 0\n\n # # Sanity check\n # Ltf_1_eq = Ltf\n # Ltf_2_eq = Ltf\n # Stf_1_eq = Stf_64\n # Stf_2_eq = Stf_64\n\n if verbose is True:\n\n Stf_1_eq_down = Stf_1_eq[ind_all]\n Stf_2_eq_down = Stf_2_eq[ind_all]\n Ltf_1_eq_down = Ltf_1_eq[ind_all]\n Ltf_2_eq_down = Ltf_2_eq[ind_all]\n\n plt.figure(figsize=[13, 4.8])\n plt.subplot(1, 3, 1)\n plt.scatter(Stf_1_eq_down.real, Stf_1_eq_down.imag)\n plt.title('Equalized STF - 1')\n plt.subplot(1, 3, 2)\n plt.scatter(Stf_2_eq_down.real, Stf_2_eq_down.imag)\n plt.title('Equalized STF - 2')\n plt.subplot(1, 3, 3)\n plt.scatter(Stf_64.real, Stf_64.imag)\n plt.title('Actual STF')\n plt.suptitle('Signal constellations')\n plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])\n\n plt.figure(figsize=[13, 4.8])\n plt.subplot(1, 3, 1)\n plt.scatter(Ltf_1_eq_down.real, Ltf_1_eq_down.imag)\n plt.title('Equalized LTF - 1')\n plt.subplot(1, 3, 2)\n plt.scatter(Ltf_2_eq_down.real, Ltf_2_eq_down.imag)\n plt.title('Equalized LTF - 2')\n plt.subplot(1, 3, 3)\n plt.scatter(Ltf.real, Ltf.imag)\n plt.title('Actual LTF')\n plt.suptitle('Signal constellations')\n plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])\n plt.show()\n\n # ipdb.set_trace()\n\n stf_1_eq = ifft(ifftshift(Stf_1_eq))\n stf_2_eq = ifft(ifftshift(Stf_2_eq))\n ltf_1_eq = ifft(ifftshift(Ltf_1_eq))\n ltf_2_eq = ifft(ifftshift(Ltf_2_eq))\n\n # preamble_eq = np.concatenate((stf_1_eq[:-(N//2)], stf_1_eq, stf_2_eq, ltf_1_eq[:-(N//2)], ltf_1_eq, ltf_2_eq))\n preamble_eq = np.concatenate(\n (stf_1_eq[-(N//4):], stf_1_eq, stf_2_eq[-(N//4):], stf_2_eq, ltf_1_eq[-(N//2):], ltf_1_eq, ltf_2_eq))\n\n # import pdb\n # pdb.set_trace()\n\n # shift = freq_offset['shift_coarse']\n # df1 = freq_offset['carrier_coarse']\n # df2 = freq_offset['carrier_fine']\n\n # preamble_eq_offset = preamble_eq.copy()\n\n # Add in coarse carrier freq offset, taking the shift into account\n # if shift>=0:\n # preamble_eq_offset[shift:] = preamble_eq[shift:] * np.exp(1j*np.arange(0,preamble_eq.size - shift)*df1).flatten()\n # else:\n # preamble_eq_offset= preamble_eq * np.exp(1j*(np.arange(0, preamble_eq.size)+shift)*df1).flatten()\n\n # # Add in fine carrier freq offset\n # preamble_eq_offset *= np.exp(1j*np.arange(0, preamble_eq.size)*df2).flatten()\n\n # return preamble_eq, preamble_eq_offset\n\n elif fs == 20e6:\n\n if preamble.size != 320:\n raise 
Exception('Size of preamble is {}, but it should be 320.'.format(preamble.size))\n\n n_short = 160\n n_long = 160\n\n # ----------------------------------------------------\n # Frequency offset correction\n # ----------------------------------------------------\n # sig3 = preamble[np.int(n_short/2):n_short-16].conj().copy()\n # sig4 = preamble[np.int(n_short/2)+16:n_short].copy()\n # df1 = 1/16 * np.angle(sig3.dot(sig4.T))\n # preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()\n\n # sig5 = preamble[n_short+32:n_short+32+64].conj().copy()\n # sig6 = preamble[n_short+64+32:n_short+n_long].reshape(1,-1).copy()\n # df2 = 1/64 * np.angle(sig5.dot(sig6.T))\n # preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()\n\n # ------------------------------------------------------------------------\n # LTI channel estimation (with delay spread <= length of cyclic prefix)\n # ------------------------------------------------------------------------\n\n Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,\n 1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])\n\n Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,\n 1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])\n\n L = 16\n N = 64\n\n Ltf1_rx = fftshift(\n fft(preamble[n_short+np.int(n_long/5):n_short+np.int(n_long/5 + n_long*2/5)]))\n Ltf2_rx = fftshift(fft(preamble[n_short+np.int(n_long/5 + n_long*2/5):n_short+n_long]))\n Ltf_mid_rx = fftshift(\n fft(preamble[n_short + 2*L - np.int(L/2):n_short + 2*L+N - np.int(L/2)]))\n Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2\n\n # Ltf_mid_rx = Ltf_avg_rx\n\n AA = np.zeros((N, N)) + 0j\n for m in range(N):\n for n in range(L+1):\n AA[m, n] = Ltf[m] * np.exp(-1j*2*np.pi*m*n/N)\n A = AA[:, :L+1] * np.exp(1j*np.pi*np.arange(L+1)).reshape(1, -1)\n\n ind_all = np.arange(-32, 32) + 32\n ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + 32\n ind_null = np.array([0]) + 32\n mask_data_pilots = np.ones(64)\n mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0\n ind_data_pilots = ind_all[mask_data_pilots == 1]\n\n h_hat_small, residuals, rank, singular_values = np.linalg.lstsq(\n A[ind_data_pilots, :], Ltf_mid_rx[ind_data_pilots], rcond=None)\n\n h_hat = np.zeros(N)+0j\n h_hat[:L+1] = h_hat_small\n # h_hat = np.roll(h_hat, -np.int(L/2))\n H_hat = fftshift(fft(h_hat))\n\n H_hat = Ltf_avg_rx*Ltf\n\n if verbose is True:\n freq = np.arange(-32, 32)\n\n H_hat_coarse = Ltf_mid_rx*Ltf\n h_hat_coarse = ifft(ifftshift(H_hat_coarse))\n\n plt.figure(figsize=[10, 3])\n plt.subplot(1, 2, 1)\n plt.stem(freq, np.abs(H_hat_coarse))\n plt.grid(True)\n plt.title('Magnitude')\n plt.xlabel('Frequency bin')\n plt.subplot(1, 2, 2)\n # plt.stem(freq, np.unwrap(np.angle(H_hat)))\n plt.stem(freq, np.angle(H_hat_coarse))\n plt.title('Phase')\n plt.xlabel('Frequency bin')\n plt.suptitle('Coarse estimation')\n plt.grid(True)\n plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])\n\n plt.figure(figsize=[10, 3])\n plt.subplot(1, 2, 1)\n plt.stem(np.abs(h_hat_coarse))\n plt.title('Magnitude')\n plt.xlabel('Time (in samples)')\n plt.grid(True)\n plt.subplot(1, 2, 2)\n # plt.stem(np.unwrap(np.angle(h_hat)))\n plt.stem(np.angle(h_hat_coarse))\n plt.title('Phase')\n plt.xlabel('Time (in samples)')\n plt.grid(True)\n 
plt.suptitle('Coarse estimation')\n plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])\n\n plt.figure(figsize=[10, 3])\n plt.subplot(1, 2, 1)\n plt.stem(freq, np.abs(H_hat))\n plt.grid(True)\n plt.title('Magnitude')\n plt.xlabel('Frequency bin')\n plt.subplot(1, 2, 2)\n # plt.stem(freq, np.unwrap(np.angle(H_hat)))\n plt.stem(freq, np.angle(H_hat))\n plt.title('Phase')\n plt.xlabel('Frequency bin')\n plt.suptitle('Frequency domain least squares estimation')\n plt.grid(True)\n plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])\n\n plt.figure(figsize=[10, 3])\n plt.subplot(1, 2, 1)\n plt.stem(np.abs(h_hat))\n plt.title('Magnitude')\n plt.xlabel('Time (in samples)')\n plt.grid(True)\n plt.subplot(1, 2, 2)\n # plt.stem(np.unwrap(np.angle(h_hat)))\n plt.stem(np.angle(h_hat))\n plt.title('Phase')\n plt.xlabel('Time (in samples)')\n plt.grid(True)\n plt.suptitle('Frequency domain least squares estimation')\n plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])\n\n # plt.show()\n\n # ----------------------------------------------------\n # Preamble equalization\n # ----------------------------------------------------\n ind_all = np.arange(-32, 32) + 32\n ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + 32\n ind_null = np.array([0]) + 32\n ind_pilots = np.array([-21, -7, 7, 21]) + 32\n mask_data = np.ones(64)\n mask_data_pilots = np.ones(64)\n mask_data[list(np.concatenate((ind_guard, ind_null, ind_pilots)))] = 0\n mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0\n ind_data = ind_all[mask_data == 1]\n ind_data_pilots = ind_all[mask_data_pilots == 1]\n\n Stf_1_eq = fftshift(fft(preamble[n_short-2*N:n_short-N]))\n Stf_2_eq = fftshift(fft(preamble[n_short-N:n_short]))\n Ltf_1_eq = fftshift(fft(preamble[n_short+n_long-2*N:n_short+n_long-N]))\n Ltf_2_eq = fftshift(fft(preamble[n_short+n_long-N:n_short+n_long]))\n\n Stf_1_eq[ind_data_pilots] /= H_hat[ind_data_pilots]\n Stf_2_eq[ind_data_pilots] /= H_hat[ind_data_pilots]\n Ltf_1_eq[ind_data_pilots] /= H_hat[ind_data_pilots]\n Ltf_2_eq[ind_data_pilots] /= H_hat[ind_data_pilots]\n\n Stf_1_eq[ind_guard] = 0\n Stf_2_eq[ind_guard] = 0\n Ltf_1_eq[ind_guard] = 0\n Ltf_2_eq[ind_guard] = 0\n\n Stf_1_eq[ind_null] = 0\n Stf_2_eq[ind_null] = 0\n Ltf_1_eq[ind_null] = 0\n Ltf_2_eq[ind_null] = 0\n\n # # Sanity check\n # Ltf_1_eq = Ltf\n # Ltf_2_eq = Ltf\n # Stf_1_eq = Stf_64\n # Stf_2_eq = Stf_64\n\n if verbose is True:\n\n plt.figure(figsize=[13, 4.8])\n plt.subplot(1, 3, 1)\n plt.scatter(Stf_1_eq.real, Stf_1_eq.imag)\n plt.title('Equalized STF - 1')\n plt.subplot(1, 3, 2)\n plt.scatter(Stf_2_eq.real, Stf_2_eq.imag)\n plt.title('Equalized STF - 2')\n plt.subplot(1, 3, 3)\n plt.scatter(Stf_64.real, Stf_64.imag)\n plt.title('Actual STF')\n plt.suptitle('Signal constellations')\n plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])\n\n plt.figure(figsize=[13, 4.8])\n plt.subplot(1, 3, 1)\n plt.scatter(Ltf_1_eq.real, Ltf_1_eq.imag)\n plt.title('Equalized LTF - 1')\n plt.subplot(1, 3, 2)\n plt.scatter(Ltf_2_eq.real, Ltf_2_eq.imag)\n plt.title('Equalized LTF - 2')\n plt.subplot(1, 3, 3)\n plt.scatter(Ltf.real, Ltf.imag)\n plt.title('Actual LTF')\n plt.suptitle('Signal constellations')\n plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])\n plt.show()\n\n stf_1_eq = ifft(ifftshift(Stf_1_eq))\n stf_2_eq = ifft(ifftshift(Stf_2_eq))\n ltf_1_eq = ifft(ifftshift(Ltf_1_eq))\n ltf_2_eq = ifft(ifftshift(Ltf_2_eq))\n\n preamble_eq = np.concatenate(\n (stf_1_eq[-32:], stf_1_eq, stf_2_eq, ltf_1_eq[-32:], ltf_1_eq, ltf_2_eq))\n\n # shift = freq_offset['shift_coarse']\n # 
df1 = freq_offset['carrier_coarse']\n # df2 = freq_offset['carrier_fine']\n\n # preamble_eq_offset = preamble_eq.copy()\n\n # Add in coarse carrier freq offset, taking the shift into account\n # if shift>=0:\n # preamble_eq_offset[shift:] = preamble_eq[shift:] * np.exp(1j*np.arange(0,preamble_eq.size - shift)*df1).flatten()\n # else:\n # preamble_eq_offset= preamble_eq * np.exp(1j*(np.arange(0, preamble_eq.size)+shift)*df1).flatten()\n\n # # Add in fine carrier freq offset\n # preamble_eq_offset *= np.exp(1j*np.arange(0, preamble_eq.size)*df2).flatten()\n\n # return preamble_eq, preamble_eq_offset\n\n return preamble_eq\n\n\ndef rms(x):\n # Root mean squared value\n return np.sqrt(np.mean(x * np.conjugate(x)))\n\n\ndef shift_frequency(vector, freq_shift, fs):\n # Shift frequency of time-series signal by specified amount\n # vector: complex time-series signal\n # freq_shift: frequency shift amount\n # fs: sampling frequency of complex signal\n\n t = np.arange(0, np.size(vector)) / fs # define time axis\n\n # Sqrt(2) factor ensures that the power of the frequency downconverted signal\n # is equal to the power of its passband counterpart\n modulation = np.exp(-1j * 2 * np.pi * freq_shift * t) / np.sqrt(2) # frequency shift factor\n\n return vector * modulation # baseband signal\n\n\ndef resample(vector, fs, dfs):\n # Resample signal from original sample rate to desired sample rate\n # fs: original sampling frequency\n # dfs: desired sampling frequency\n\n fs = int(round(fs)) # convert to integers\n dfs = int(round(dfs))\n cfs = lcm(fs, dfs) # common sampling frequency\n\n if cfs > fs:\n # Upsample from start-Hz to common-Hz\n vector = resampy.resample(vector, fs, cfs, filter='kaiser_best')\n\n # Downsample from common-Hz to desired-Hz\n return resampy.resample(vector, cfs, dfs, filter='kaiser_best')\n\n\ndef lcm(a, b):\n # Least common multiple of a and b\n return a * int(b / fractions.gcd(a, b)) if a and b else 0\n\n\ndef get_sliding_window(x, window_size=10, stride=1, fs=200e6, fs_natural=20e6):\n shape_ = x.shape\n\n window_size_samples = np.int(window_size * (fs/fs_natural))\n stride_samples = np.int(stride * (fs/fs_natural))\n\n # sliding_window = [None] * ((shape_[1]-100+10)//10)\n\n for i in tqdm(np.arange(0, shape_[1] - window_size_samples + stride_samples, stride_samples)):\n if i == 0:\n y = x[:, i:i + window_size_samples, :].copy()\n else:\n y = np.concatenate((y, x[:, i:i + window_size_samples, :]), axis=0)\n\n return y\n\n\ndef read_wifi(files, base_data_directory, device_map, progress=True):\n '''\n Read wifi data frin data directory\n '''\n\n csv = files['csv_objects'].items()\n if progress is True:\n csv = tqdm(csv)\n\n data_dict = dict(signal={}, device_key={}, # Complex signal and device label [0, N-1] from device_map\n sample_rate={}, capture_sample_rate={}, capture_frequency={}, capture_hw={},\n center_frequency={}, freq_lower_edge={}, freq_upper_edge={},\n reference_number={}, data_file={}, sample_start={}, sample_count={},\n device_type={}, device_id={}, device_manufacturer={}\n )\n\n signal_index = 0\n for file, signal_list in csv:\n # Example:\n # file = 'adsb_gfi_3_dataset/10_sigmf_files_dataset/A-23937.sigmf-data'\n # signal_list = ['A-23937-34', 'A-23937-54']\n\n # check to see if the first character in \"file\" is a slash:\n while file[0] == '/' or file[0] == '\\\\':\n file = file[1:]\n # if 'Windows' in platform():\n # file = file.replace(\"/\", \"\\\\\")\n\n data_file = os.path.join(base_data_directory, file)\n metadata_file = data_file.replace('sigmf-data', 
'sigmf-meta')\n\n all_signals = json.load(open(metadata_file))\n capture = dict(capture_sample_rate=all_signals['global']['core:sample_rate'],\n sample_rate=all_signals['global']['core:sample_rate'],\n capture_hw=all_signals['global']['core:hw'],\n capture_frequency=all_signals['capture'][0]['core:frequency'],\n data_file=data_file)\n\n for signal_name in signal_list:\n # data_dict['reference_number'][signal_index] = signal_name\n\n for key, value in capture.items():\n data_dict[key][signal_index] = value\n\n capture_properties = all_signals['capture']\n signal_properties = get_json_signal(\n all_signals['annotations'], capture_properties[0], signal_name, type='wifi')\n\n for key, value in signal_properties.items():\n data_dict[key][signal_index] = value\n device_id = signal_properties['device_id']\n data_dict['device_key'][signal_index] = device_map[device_id]\n\n filename = data_dict['data_file'][signal_index]\n start_sample = data_dict['sample_start'][signal_index]\n sample_count = data_dict['sample_count'][signal_index]\n data, buffer_start, buffer_end = read_sample(\n filename, start_sample, sample_count, desired_buffer=0)\n data_dict['signal'][signal_index] = data\n\n data_dict['center_frequency'][signal_index] = data_dict['capture_frequency'][signal_index]\n\n # ipdb.set_trace()\n\n signal_index = signal_index + 1\n\n return data_dict\n\n\ndef parse_input_files(input_csv, devices_csv):\n '''\n Parser for wifi dataset\n '''\n device_list = [] # a list of the devices to be trained/tested with\n device_map = {} # a reverse map from device name to index\n csv_objects = {} # a dictionary with filenames for keys, lists of signals as values\n\n with open(devices_csv) as devices_csv_file:\n devices_reader = csv.reader(devices_csv_file, delimiter=',')\n for device in devices_reader:\n device_list.append(device[0])\n\n for i, device in enumerate(device_list):\n device_map[device] = i\n\n with open(input_csv) as input_csv_file:\n input_reader = csv.reader(input_csv_file, delimiter=',')\n for row in input_reader:\n csv_objects[row[0]] = row[1:]\n\n return {'device_list': device_list,\n 'device_map': device_map,\n 'csv_objects': csv_objects}\n\n\ndef get_json_signal(json_annotations, capture, signal_id, type=None):\n '''\n Get signal from json\n '''\n\n for signal in json_annotations:\n if signal != {} and signal['capture_details:signal_reference_number'] == signal_id:\n if 'rfml:label' in signal:\n signal_label = signal['rfml:label']\n if type is None:\n type = signal_label[0]\n else:\n signal_label = tuple(None, None, None)\n if type is None:\n type = \"unknown\"\n\n if type == \"wifi\":\n return {'freq_lower_edge': signal['core:freq_lower_edge'],\n 'freq_upper_edge': signal['core:freq_upper_edge'],\n 'sample_start': signal['core:sample_start'],\n 'sample_count': signal['core:sample_count'],\n 'device_type': signal_label[0],\n 'device_manufacturer': signal_label[1],\n 'device_id': signal_label[2]}\n elif type == \"ADS-B\":\n return{'snr': signal['capture_details:SNRdB'],\n 'reference_number': signal['capture_details:signal_reference_number'],\n 'freq_lower_edge': capture['core:freq_lower_edge'],\n 'freq_upper_edge': capture['core:freq_upper_edge'],\n 'sample_start': signal['core:sample_start'],\n 'sample_count': signal['core:sample_count'],\n 'device_type': signal_label[0],\n 'device_id': signal_label[1]}\n else:\n print('Unknown signal type', type)\n return None\n return None\n\n\ndef read_sample(filename, start_sample, sample_count, desired_buffer):\n ''' \n Read samples\n '''\n\n 
buffer_start = min(desired_buffer, start_sample)\n buffer_end = desired_buffer\n sample_count += (buffer_start + buffer_end)\n\n with open(filename, \"rb\") as f:\n # Seek to startSample\n f.seek((start_sample - buffer_start) * 4) # 4bytes per sample (2x16 bit ints)\n\n # Read in as ints\n raw = np.fromfile(f, dtype='int16', count=2*sample_count)\n\n samples_read = int(raw.size / 2)\n buffer_end -= (sample_count - samples_read)\n\n # Convert interleaved ints into two planes, real and imaginary\n array = raw.reshape([samples_read, 2])\n\n # convert the array to complex\n array = array[:, 0] + 1j*array[:, 1]\n\n return array, buffer_start, buffer_end\n"
] |
[
[
"numpy.sqrt",
"numpy.concatenate",
"numpy.int",
"scipy.fftpack.fft",
"numpy.angle",
"numpy.exp",
"numpy.roll",
"numpy.conjugate",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"numpy.size",
"numpy.argmax",
"matplotlib.pyplot.subplot",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.linalg.lstsq",
"numpy.errstate",
"numpy.array",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"numpy.fromfile",
"numpy.abs",
"matplotlib.pyplot.scatter",
"numpy.ones",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"scipy.fftpack.ifftshift"
]
] |
tszssong/HRNet-Image-Classification
|
[
"6d8ee24aedf2e0b3134102c221a29fb9b0ce2e1b"
] |
[
"tools/train.py"
] |
[
"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# Modified by Ke Sun ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport pprint\nimport shutil\nimport sys\n\nimport torch\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nfrom tensorboardX import SummaryWriter\n\nimport _init_paths\nimport models\nfrom config import config\nfrom config import update_config\nfrom core.function import train\nfrom core.function import validate\nfrom utils.modelsummary import get_model_summary\nfrom utils.utils import get_optimizer\nfrom utils.utils import save_checkpoint\nfrom utils.utils import create_logger\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train classification network')\n \n parser.add_argument('--cfg',\n help='experiment configure file name',\n required=True,\n type=str)\n\n parser.add_argument('--modelDir',\n help='model directory',\n type=str,\n default='')\n parser.add_argument('--logDir',\n help='log directory',\n type=str,\n default='')\n parser.add_argument('--dataDir',\n help='data directory',\n type=str,\n default='')\n parser.add_argument('--testModel',\n help='testModel',\n type=str,\n default='')\n\n args = parser.parse_args()\n update_config(config, args)\n\n return args\n\ndef main():\n args = parse_args()\n\n logger, final_output_dir, tb_log_dir = create_logger(\n config, args.cfg, 'train')\n\n logger.info(pprint.pformat(args))\n logger.info(pprint.pformat(config))\n\n # cudnn related setting\n cudnn.benchmark = config.CUDNN.BENCHMARK\n torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC\n torch.backends.cudnn.enabled = config.CUDNN.ENABLED\n\n model = eval('models.'+config.MODEL.NAME+'.get_cls_net')(\n config)\n\n dump_input = torch.rand(\n (1, 3, config.MODEL.IMAGE_SIZE[1], config.MODEL.IMAGE_SIZE[0])\n )\n logger.info(get_model_summary(model, dump_input))\n\n # copy model file\n this_dir = os.path.dirname(__file__)\n models_dst_dir = os.path.join(final_output_dir, 'models')\n if os.path.exists(models_dst_dir):\n shutil.rmtree(models_dst_dir)\n shutil.copytree(os.path.join(this_dir, '../lib/models'), models_dst_dir)\n\n writer_dict = {\n 'writer': SummaryWriter(log_dir=tb_log_dir),\n 'train_global_steps': 0,\n 'valid_global_steps': 0,\n }\n\n gpus = list(config.GPUS)\n print(\"gpus:\",gpus,type(gpus))\n DEVICE = torch.device(\"cuda:%d\"%config.GPUS[0] if torch.cuda.is_available() else \"cpu\")\n \n model = torch.nn.DataParallel(model, device_ids=gpus).cuda()\n model = model.to(DEVICE)\n\n # define loss function (criterion) and optimizer\n criterion = torch.nn.CrossEntropyLoss().cuda()\n\n optimizer = get_optimizer(config, model)\n\n best_perf = 0.0\n best_model = False\n last_epoch = config.TRAIN.BEGIN_EPOCH\n if config.TRAIN.RESUME:\n model_state_file = os.path.join(final_output_dir,\n 'checkpoint.pth.tar')\n if os.path.isfile(model_state_file):\n checkpoint = torch.load(model_state_file)\n last_epoch = checkpoint['epoch']\n best_perf = checkpoint['perf']\n model.module.load_state_dict(checkpoint['state_dict'])\n 
optimizer.load_state_dict(checkpoint['optimizer'])\n logger.info(\"=> loaded checkpoint (epoch {})\"\n .format(checkpoint['epoch']))\n best_model = True\n \n if isinstance(config.TRAIN.LR_STEP, list):\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,\n last_epoch-1\n )\n else:\n lr_scheduler = torch.optim.lr_scheduler.StepLR(\n optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,\n last_epoch-1\n )\n\n # Data loading code\n traindir = os.path.join(config.DATASET.ROOT, config.DATASET.TRAIN_SET)\n valdir = os.path.join(config.DATASET.ROOT, config.DATASET.TEST_SET)\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(config.MODEL.IMAGE_SIZE[0]),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n )\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=config.TRAIN.BATCH_SIZE_PER_GPU*len(gpus),\n shuffle=True,\n num_workers=config.WORKERS,\n pin_memory=True\n )\n\n valid_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose([\n transforms.Resize(int(config.MODEL.IMAGE_SIZE[0] / 0.875)),\n transforms.CenterCrop(config.MODEL.IMAGE_SIZE[0]),\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=config.TEST.BATCH_SIZE_PER_GPU*len(gpus),\n shuffle=False,\n num_workers=config.WORKERS,\n pin_memory=True\n )\n\n for epoch in range(last_epoch, config.TRAIN.END_EPOCH):\n lr_scheduler.step()\n # train for one epoch\n train(config, train_loader, model, DEVICE, criterion, optimizer, epoch,\n final_output_dir, tb_log_dir, writer_dict)\n # evaluate on validation set\n perf_indicator = validate(config, valid_loader, model, criterion,\n final_output_dir, tb_log_dir, writer_dict)\n\n if perf_indicator > best_perf:\n best_perf = perf_indicator\n best_model = True\n else:\n best_model = False\n\n logger.info('=> saving checkpoint to {}'.format(final_output_dir))\n save_checkpoint({\n 'epoch': epoch + 1,\n 'model': config.MODEL.NAME,\n 'state_dict': model.module.state_dict(),\n 'perf': perf_indicator,\n 'optimizer': optimizer.state_dict(),\n }, best_model, final_output_dir, filename='checkpoint.pth.tar')\n\n final_model_state_file = os.path.join(final_output_dir,\n 'final_state.pth.tar')\n logger.info('saving final model state to {}'.format(\n final_model_state_file))\n torch.save(model.module.state_dict(), final_model_state_file)\n writer_dict['writer'].close()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.nn.CrossEntropyLoss",
"torch.load",
"torch.rand",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.optim.lr_scheduler.StepLR"
]
] |
eaidova/UNITER
|
[
"5b4c9faf8ed922176b20d89ac56a3e0b39374a22"
] |
[
"model/model.py"
] |
[
"\"\"\"\nCopyright (c) Microsoft Corporation.\nLicensed under the MIT license.\n\nPytorch modules\nsome classes are modified from HuggingFace\n(https://github.com/huggingface/transformers)\n\"\"\"\nimport copy\nimport json\nimport logging\nfrom io import open\n\nimport torch\nfrom torch import nn\nfrom apex.normalization.fused_layer_norm import FusedLayerNorm\n\nfrom .layer import BertLayer, BertPooler\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UniterConfig(object):\n \"\"\"Configuration class to store the configuration of a `UniterModel`.\n \"\"\"\n def __init__(self,\n vocab_size_or_config_json_file,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02):\n \"\"\"Constructs UniterConfig.\n Args:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in\n `UniterModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer\n encoder.\n num_attention_heads: Number of attention heads for each attention\n layer in the Transformer encoder.\n intermediate_size: The size of the \"intermediate\" (i.e.\n feed-forward) layer in the Transformer encoder.\n hidden_act: The non-linear activation function (function or string)\n in the encoder and pooler. If string, \"gelu\", \"relu\" and\n \"swish\" are supported.\n hidden_dropout_prob: The dropout probabilitiy for all fully\n connected layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob: The dropout ratio for the attention\n probabilities.\n max_position_embeddings: The maximum sequence length that this\n model might ever be used with. 
Typically set this to something\n large just in case (e.g., 512 or 1024 or 2048).\n type_vocab_size: The vocabulary size of the `token_type_ids` passed\n into `UniterModel`.\n initializer_range: The sttdev of the truncated_normal_initializer\n for initializing all weight matrices.\n \"\"\"\n if isinstance(vocab_size_or_config_json_file, str):\n with open(vocab_size_or_config_json_file,\n \"r\", encoding='utf-8') as reader:\n json_config = json.loads(reader.read())\n for key, value in json_config.items():\n self.__dict__[key] = value\n elif isinstance(vocab_size_or_config_json_file, int):\n self.vocab_size = vocab_size_or_config_json_file\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n else:\n raise ValueError(\"First argument must be either a vocabulary size \"\n \"(int) or the path to a pretrained model config \"\n \"file (str)\")\n\n @classmethod\n def from_dict(cls, json_object):\n \"\"\"Constructs a `UniterConfig` from a\n Python dictionary of parameters.\"\"\"\n config = UniterConfig(vocab_size_or_config_json_file=-1)\n for key, value in json_object.items():\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n \"\"\"Constructs a `UniterConfig` from a json file of parameters.\"\"\"\n with open(json_file, \"r\", encoding='utf-8') as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\nclass UniterPreTrainedModel(nn.Module):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for dowloading and loading pretrained models.\n \"\"\"\n def __init__(self, config, *inputs, **kwargs):\n super().__init__()\n if not isinstance(config, UniterConfig):\n raise ValueError(\n \"Parameter config in `{}(config)` should be an instance of \"\n \"class `UniterConfig`. 
To create a model from a Google \"\n \"pretrained model use \"\n \"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(\n self.__class__.__name__, self.__class__.__name__\n ))\n self.config = config\n\n def init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses\n # truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0,\n std=self.config.initializer_range)\n elif isinstance(module, FusedLayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n @classmethod\n def from_pretrained(cls, config_file, state_dict, *inputs, **kwargs):\n \"\"\"\n Instantiate a UniterPreTrainedModel from a pre-trained model file or a\n pytorch state dict.\n Params:\n config_file: config json file\n state_dict: an state dictionnary\n *inputs, **kwargs: additional input for the specific Uniter class\n \"\"\"\n # Load config\n config = UniterConfig.from_json_file(config_file)\n logger.info(\"Model config {}\".format(config))\n # Instantiate model.\n model = cls(config, *inputs, **kwargs)\n # Load from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if 'gamma' in key:\n new_key = key.replace('gamma', 'weight')\n if 'beta' in key:\n new_key = key.replace('beta', 'bias')\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=''):\n local_metadata = ({} if metadata is None\n else metadata.get(prefix[:-1], {}))\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys,\n unexpected_keys, error_msgs)\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + '.')\n start_prefix = ''\n if not hasattr(model, 'bert') and any(s.startswith('bert.')\n for s in state_dict.keys()):\n start_prefix = 'bert.'\n load(model, prefix=start_prefix)\n if len(missing_keys) > 0:\n logger.info(\"Weights of {} not initialized from \"\n \"pretrained model: {}\".format(\n model.__class__.__name__, missing_keys))\n if len(unexpected_keys) > 0:\n logger.info(\"Weights from pretrained model not used in \"\n \"{}: {}\".format(\n model.__class__.__name__, unexpected_keys))\n if len(error_msgs) > 0:\n raise RuntimeError('Error(s) in loading state_dict for '\n '{}:\\n\\t{}'.format(\n model.__class__.__name__,\n \"\\n\\t\".join(error_msgs)))\n return model\n\n\nclass UniterTextEmbeddings(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size,\n config.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings,\n config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size,\n config.hidden_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model\n # variable name and be able to load any TensorFlow checkpoint file\n self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)\n 
self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids, position_ids, token_type_ids=None):\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = (words_embeddings\n + position_embeddings\n + token_type_embeddings)\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass UniterImageEmbeddings(nn.Module):\n def __init__(self, config, img_dim):\n super().__init__()\n self.img_linear = nn.Linear(img_dim, config.hidden_size)\n self.img_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)\n self.pos_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)\n self.pos_linear = nn.Linear(7, config.hidden_size)\n self.mask_embedding = nn.Embedding(2, img_dim, padding_idx=0)\n\n # tf naming convention for layer norm\n self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, img_feat, img_pos_feat, type_embeddings, img_masks=None):\n if img_masks is not None:\n self.mask_embedding.weight.data[0, :].fill_(0)\n mask = self.mask_embedding(img_masks.long())\n img_feat = img_feat + mask\n\n transformed_im = self.img_layer_norm(self.img_linear(img_feat))\n transformed_pos = self.pos_layer_norm(self.pos_linear(img_pos_feat))\n embeddings = transformed_im + transformed_pos + type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass UniterEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n layer = BertLayer(config)\n self.layer = nn.ModuleList([copy.deepcopy(layer)\n for _ in range(config.num_hidden_layers)])\n\n def forward(self, input_, attention_mask,\n output_all_encoded_layers=True):\n all_encoder_layers = []\n hidden_states = input_\n for layer_module in self.layer:\n hidden_states = layer_module(hidden_states, attention_mask)\n if output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n if not output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n return all_encoder_layers\n\n\nclass UniterModel(UniterPreTrainedModel):\n \"\"\" Modification for Joint Vision-Language Encoding\n \"\"\"\n def __init__(self, config, img_dim):\n super().__init__(config)\n self.embeddings = UniterTextEmbeddings(config)\n self.img_embeddings = UniterImageEmbeddings(config, img_dim)\n self.encoder = UniterEncoder(config)\n self.pooler = BertPooler(config)\n self.apply(self.init_weights)\n\n def _compute_txt_embeddings(self, input_ids, position_ids,\n txt_type_ids=None):\n output = self.embeddings(input_ids, position_ids, txt_type_ids)\n return output\n\n def _compute_img_embeddings(self, img_feat, img_pos_feat, img_masks=None,\n img_type_ids=None):\n if img_type_ids is None:\n img_type_ids = torch.ones_like(img_feat[:, :, 0].long())\n img_type_embeddings = self.embeddings.token_type_embeddings(\n img_type_ids)\n output = self.img_embeddings(img_feat, img_pos_feat,\n img_type_embeddings, img_masks)\n return output\n\n def _compute_img_txt_embeddings(self, input_ids, position_ids,\n img_feat, img_pos_feat,\n gather_index, img_masks=None,\n txt_type_ids=None, img_type_ids=None):\n txt_emb = self._compute_txt_embeddings(\n input_ids, position_ids, txt_type_ids)\n img_emb = self._compute_img_embeddings(\n 
img_feat, img_pos_feat, img_masks, img_type_ids)\n # align back to most compact input\n gather_index = gather_index.unsqueeze(-1).expand(\n -1, -1, self.config.hidden_size)\n embedding_output = torch.gather(torch.cat([txt_emb, img_emb], dim=1),\n dim=1, index=gather_index)\n return embedding_output\n\n def forward(self, input_ids, position_ids,\n img_feat, img_pos_feat,\n attention_mask, gather_index=None, img_masks=None,\n output_all_encoded_layers=True,\n txt_type_ids=None, img_type_ids=None):\n # compute self-attention mask\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n extended_attention_mask = extended_attention_mask.to(\n dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n # embedding layer\n if input_ids is None:\n # image only\n embedding_output = self._compute_img_embeddings(\n img_feat, img_pos_feat, img_masks, img_type_ids)\n elif img_feat is None:\n # text only\n embedding_output = self._compute_txt_embeddings(\n input_ids, position_ids, txt_type_ids)\n else:\n embedding_output = self._compute_img_txt_embeddings(\n input_ids, position_ids,\n img_feat, img_pos_feat,\n gather_index, img_masks, txt_type_ids, img_type_ids)\n\n encoded_layers = self.encoder(\n embedding_output, extended_attention_mask,\n output_all_encoded_layers=output_all_encoded_layers)\n if not output_all_encoded_layers:\n encoded_layers = encoded_layers[-1]\n return encoded_layers\n"
] |
[
[
"torch.nn.Dropout",
"torch.cat",
"torch.zeros_like",
"torch.nn.Embedding",
"torch.nn.Linear"
]
] |
cybercore-co-ltd/Onnx2Caffe
|
[
"aa4a90b7539e2b5ee0ad42f507021585da58be80"
] |
[
"tools/verify_caffe_model.py"
] |
[
"import argparse\nimport numpy as np\nimport onnx\nimport onnxruntime as rt\nimport torch\nimport os\nimport mmcv\nimport caffe\n\nfrom terminaltables import AsciiTable\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('onnx_checkpoint', help='onnx checkpoint file')\n parser.add_argument('caffe_checkpoint', help='caffe checkpoint file')\n parser.add_argument('--input_img', type=str, help='Images for input')\n parser.add_argument(\n '--shape',\n type=int,\n nargs='+',\n default=[1280, 768],\n help='input image size')\n args = parser.parse_args()\n return args\n\ndef imread_img(img_path):\n\n # read image\n one_img = mmcv.imread(img_path, 'color')\n one_img = mmcv.imresize(one_img, input_shape[1:]).transpose(2, 1, 0)\n one_img = one_img/255\n one_img = torch.from_numpy(one_img).unsqueeze(0).float()\n\n return one_img\n\ndef get_onnx_pred(onnx_model_path, one_img):\n\n onnx_model = onnx.load(onnx_model_path)\n onnx.checker.check_model(onnx_model)\n \n\n # get onnx output\n input_all = [node.name for node in onnx_model.graph.input]\n input_initializer = [\n node.name for node in onnx_model.graph.initializer\n ]\n net_feed_input = list(set(input_all) - set(input_initializer))\n assert (len(net_feed_input) == 1)\n sess = rt.InferenceSession(onnx_model_path)\n onnx_result = sess.run(\n None, {net_feed_input[0]: one_img.detach().numpy()})\n\n onnx_result_dict = dict()\n output_name = [node.name for node in onnx_model.graph.output]\n for i,name in enumerate(output_name):\n onnx_result_dict[name]=onnx_result[i]\n input_name = net_feed_input[0]\n return onnx_result_dict, input_name\n\ndef get_caffe_pred(model, input_name, inputs):\n\n caffe_model.blobs[input_name].data[...] = inputs\n caffe_outs = caffe_model.forward()\n \n return caffe_outs\n\ndef compute_relative_err_onnx2caffe(onnx_result, caffe_outs):\n\n total_err = 0\n table_data = [\n ['Output', 'MAE', 'Relative_err']\n ]\n for k,v in onnx_result.items():\n\n # calculate the mae error between onnx and caffe model\n mae_err = (np.abs(v - caffe_outs[k])).sum()\n\n # calculate the relative err mae/norm(onnx)\n norm_onnx = (np.linalg.norm(v)).sum()\n rel_err = mae_err/norm_onnx\n total_err = total_err + rel_err\n\n # table result\n table_data.append([k, mae_err, rel_err])\n\n table = AsciiTable(table_data)\n print(table.table)\n\n return total_err\n\ndef get_onnx_outputname(onnx_model_path):\n \n model = onnx.load(onnx_model_path)\n output_name = [node.name for node in model.graph.output]\n\n return output_name\n\nif __name__ == '__main__':\n \n args = parse_args()\n if len(args.shape) == 1:\n input_shape = (1, 3, args.shape[0], args.shape[0])\n elif len(args.shape) == 2:\n input_shape = (\n 3,\n ) + tuple(args.shape)\n else:\n raise ValueError('invalid input shape')\n \n # generate the random image for testing\n if args.input_img is None:\n input_data = torch.randn(1, (*input_shape))\n else:\n input_data = imread_img(args.input_img)\n\n # get the name of output branch\n output_name = get_onnx_outputname(args.onnx_checkpoint)\n\n # get onnx results\n onnx_result, input_name = get_onnx_pred(args.onnx_checkpoint, input_data)\n \n # Create caffe model\n prototxt_path=args.caffe_checkpoint.replace('.caffemodel','.prototxt')\n caffe_model = caffe.Net(prototxt_path, caffe.TEST)\n caffe_model.copy_from(args.caffe_checkpoint)\n # get caffe results\n caffe_result = get_caffe_pred(caffe_model, input_name, input_data)\n\n # compute the err between pytorch model and converted onnx model\n total_err = 
compute_relative_err_onnx2caffe(onnx_result, caffe_result)\n\n print(f'TOTAL ERR BETWEEN CAFFE MODEL AND ONNX MODEL : TOTAL_ERR {total_err} ')\n\n\n"
] |
[
[
"torch.randn",
"torch.from_numpy",
"numpy.linalg.norm",
"numpy.abs"
]
] |
lsternlicht/tia
|
[
"fe74d1876260a946e52bd733bc32da0698749f2c"
] |
[
"tia/tests/test_rlab_table.py"
] |
[
"import unittest\n\nimport pandas as pd\nimport pandas.util.testing as pdtest\n\nimport tia.rlab.table as tbl\n\n\nclass TestTable(unittest.TestCase):\n def setUp(self):\n self.df1 = df1 = pd.DataFrame({'A': [.55, .65], 'B': [1234., -5678.]}, index=['I1', 'I2'])\n # Multi-index frame with multi-index\n cols = pd.MultiIndex.from_arrays([['LEFT', 'LEFT', 'RIGHT', 'RIGHT'], ['A', 'B', 'A', 'B']])\n idx = pd.MultiIndex.from_arrays([['TOP', 'BOTTOM'], ['I1', 'I2']])\n self.mdf1 = pd.DataFrame([[.55, 1234., .55, 1234.], [.65, -5678., .65, -5678.]], columns=cols, index=idx)\n\n def test_span_iter(self):\n s = pd.Series([1, 1, 1, 3, 2, 2])\n items = list(tbl.span_iter(s))\n self.assertEqual(items, [(0, 2), (4, 5)])\n # reverse and ensure it does not break it\n s = s[::-1]\n items = list(tbl.span_iter(s))\n self.assertEqual(items, [(0, 2), (4, 5)])\n\n def test_level_iter(self):\n l1 = ['L_11', 'L_12']\n l2 = ['L_21', 'L_22']\n l3 = ['L_31', 'L_32']\n midx = pd.MultiIndex.from_arrays([l1, l2, l3], names=['1', '2', '3'])\n actual = list(tbl.level_iter(midx))\n expected = [(0, 0, 'L_11'), (0, 1, 'L_12'), (1, 0, 'L_21'), (1, 1, 'L_22'), (2, 0, 'L_31'), (2, 1, 'L_32')]\n self.assertEqual(actual, expected)\n\n actual = list(tbl.level_iter(midx, levels=[0, 2]))\n expected = [(0, 0, 'L_11'), (0, 1, 'L_12'), (2, 0, 'L_31'), (2, 1, 'L_32')]\n self.assertEqual(actual, expected)\n\n actual = list(tbl.level_iter(midx, levels=0))\n expected = [(0, 0, 'L_11'), (0, 1, 'L_12')]\n self.assertEqual(actual, expected)\n\n def test_region_formatter_iloc(self):\n tf = tbl.TableFormatter(self.df1)\n region = tf.cells\n region.apply_format(lambda x: 'A')\n expected = pd.DataFrame([['A', 'A'], ['A', 'A']], index=[1, 2], columns=[1, 2])\n pdtest.assert_frame_equal(tf.cells.formatted_values, expected)\n #\n # Use the location\n #\n region = region.iloc[:, 1]\n region.apply_format(lambda x: 'B')\n expected = pd.DataFrame([['A', 'B'], ['A', 'B']], index=[1, 2], columns=[1, 2])\n pdtest.assert_frame_equal(tf.cells.formatted_values, expected)\n # Get single cell\n region = region.iloc[1]\n region.apply_format(lambda x: 'D')\n expected = pd.DataFrame([['A', 'B'], ['A', 'D']], index=[1, 2], columns=[1, 2])\n pdtest.assert_frame_equal(tf.cells.formatted_values, expected)\n # Get single cell\n region = tf.cells.iloc[1, 0]\n region.apply_format(lambda x: 'C')\n expected = pd.DataFrame([['A', 'B'], ['C', 'D']], index=[1, 2], columns=[1, 2])\n pdtest.assert_frame_equal(tf.cells.formatted_values, expected)\n\n def test_region_empty(self):\n tf = tbl.TableFormatter(self.df1)\n empty = tf['ALL'].empty_frame()\n empty.apply_format(lambda x: x)\n\n def test_detect_spans(self):\n tf = tbl.TableFormatter(self.mdf1)\n tf.header.detect_colspans()\n self.assertEqual(['SPAN', (2, 0), (3, 0)], tf.style_cmds[0])\n self.assertEqual(['SPAN', (4, 0), (5, 0)], tf.style_cmds[1])\n\n tf = tbl.TableFormatter(self.mdf1.T)\n tf.index.detect_rowspans()\n self.assertEqual(['SPAN', (0, 2), (0, 3)], tf.style_cmds[0])\n self.assertEqual(['SPAN', (0, 4), (0, 5)], tf.style_cmds[1])\n\n def test_match(self):\n tf = tbl.TableFormatter(self.mdf1)\n vcopy = tf.formatted_values.copy()\n tf.cells.match_column_labels(['A']).percent_format(precision=1)\n vcopy.iloc[2, 4] = '55.0% ' # padded for neg\n vcopy.iloc[3, 4] = '65.0% '\n vcopy.iloc[2, 2] = '55.0% '\n vcopy.iloc[3, 2] = '65.0% '\n pdtest.assert_frame_equal(vcopy, tf.formatted_values)\n\n def test_period_index(self):\n df = pd.DataFrame({'x': [1., 2.], 'y': [3., 4.]}, index=pd.date_range('1/1/2015', freq='M', 
periods=2).to_period())\n tf = tbl.TableFormatter(df)\n # expected values\n vcopy = tf.formatted_values.copy()\n vcopy.iloc[1, 1] = '1 '\n vcopy.iloc[2, 1] = '2 '\n vcopy.iloc[1, 2] = '3 '\n vcopy.iloc[2, 2] = '4 '\n vcopy.iloc[1, 0] = '01/2015'\n vcopy.iloc[2, 0] = '02/2015'\n # buld the format\n tf.cells.int_format()\n tf.index.apply_format(lambda x: x.strftime('%m/%Y'))\n pdtest.assert_frame_equal(vcopy, tf.formatted_values)\n # Test when it is the columns\n dfT = df.T\n tfT = tbl.TableFormatter(dfT)\n vcopy = tfT.formatted_values.copy()\n vcopy.iloc[1, 1] = '1 '\n vcopy.iloc[1, 2] = '2 '\n vcopy.iloc[2, 1] = '3 '\n vcopy.iloc[2, 2] = '4 '\n vcopy.iloc[0, 1] = '01/2015'\n vcopy.iloc[0, 2] = '02/2015'\n # buld the format\n tfT.cells.int_format()\n tfT.header.apply_format(lambda x: x.strftime('%m/%Y'))\n pdtest.assert_frame_equal(vcopy, tfT.formatted_values)\n\n"
] |
[
[
"pandas.Series",
"pandas.DataFrame",
"pandas.MultiIndex.from_arrays",
"pandas.util.testing.assert_frame_equal",
"pandas.date_range"
]
] |
onlyrico/PyABSA
|
[
"d0905eb5253eaa564d2244cd777e3a734bca777a"
] |
[
"pyabsa/core/apc/classic/__bert__/dataset_utils/data_utils_for_training.py"
] |
[
"# -*- coding: utf-8 -*-\n# file: data_utils.py\n# author: songyouwei <[email protected]>\n# Copyright (C) 2018. All Rights Reserved.\n\nimport os\nimport pickle\n\nimport numpy as np\nimport tqdm\nfrom findfile import find_file\nfrom google_drive_downloader.google_drive_downloader import GoogleDriveDownloader as gdd\nfrom torch.utils.data import Dataset\nfrom transformers import AutoTokenizer\n\nfrom pyabsa.core.apc.classic.__glove__.dataset_utils.dependency_graph import prepare_dependency_graph\nfrom pyabsa.core.apc.dataset_utils.apc_utils import load_apc_datasets\nfrom pyabsa.utils.pyabsa_utils import check_and_fix_labels\n\n\ndef prepare_glove840_embedding(glove_path):\n glove840_id = '1G-vd6W1oF9ByyJ-pzp9dcqKnr_plh4Em'\n if not os.path.exists(glove_path):\n os.mkdir(glove_path)\n elif os.path.isfile(glove_path):\n return glove_path\n elif os.path.isdir(glove_path):\n embedding_file = None\n dir_path = os.path.dirname(glove_path)\n if find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip'):\n embedding_file = find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip')[0]\n elif find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip'):\n embedding_file = find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip')[0]\n elif find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip'):\n embedding_file = find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip')[0]\n\n if embedding_file:\n print('Find potential embedding files: {}'.format(embedding_file))\n return embedding_file\n zip_glove_path = os.path.join(glove_path, '__glove__.840B.300d.txt.zip')\n print('No GloVe embedding found at {},'\n ' downloading __glove__.840B.300d.txt (2GB transferred / 5.5GB unzipped)...'.format(glove_path))\n gdd.download_file_from_google_drive(file_id=glove840_id,\n dest_path=zip_glove_path,\n unzip=True\n )\n glove_path = find_file(glove_path, 'txt', exclude_key='.zip')\n return glove_path\n\n\ndef build_tokenizer(dataset_list, max_seq_len, dat_fname, opt):\n if os.path.exists(os.path.join(opt.dataset_path, dat_fname)):\n print('Loading tokenizer on {}'.format(os.path.join(opt.dataset_path, dat_fname)))\n tokenizer = pickle.load(open(os.path.join(opt.dataset_path, dat_fname), 'rb'))\n else:\n text = ''\n for dataset_type in dataset_list:\n for file in dataset_list[dataset_type]:\n fin = open(file, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n lines = fin.readlines()\n fin.close()\n for i in range(0, len(lines), 3):\n text_left, _, text_right = [s.lower().strip() for s in lines[i].partition(\"$T$\")]\n aspect = lines[i + 1].lower().strip()\n text_raw = text_left + \" \" + aspect + \" \" + text_right\n text += text_raw + \" \"\n\n tokenizer = Tokenizer(max_seq_len)\n tokenizer.fit_on_text(text)\n pickle.dump(tokenizer, open(os.path.join(opt.dataset_path, dat_fname), 'wb'))\n return tokenizer\n\n\ndef _load_word_vec(path, word2idx=None, embed_dim=300):\n fin = open(path, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n word_vec = {}\n for line in tqdm.tqdm(fin, postfix='Loading embedding file...'):\n tokens = line.rstrip().split()\n word, vec = ' '.join(tokens[:-embed_dim]), tokens[-embed_dim:]\n if word in word2idx.keys():\n word_vec[word] = np.asarray(vec, dtype='float32')\n return word_vec\n\n\ndef build_embedding_matrix(word2idx, embed_dim, dat_fname, opt):\n if os.path.exists(os.path.join(opt.dataset_path, dat_fname)):\n print('Loading cached embedding_matrix for {}'.format(os.path.join(opt.dataset_path, dat_fname)))\n embedding_matrix = 
pickle.load(open(os.path.join(opt.dataset_path, dat_fname), 'rb'))\n else:\n print('Extracting embedding_matrix for {}'.format(dat_fname))\n glove_path = prepare_glove840_embedding(opt.dataset_path)\n embedding_matrix = np.zeros((len(word2idx) + 2, embed_dim)) # idx 0 and len(word2idx)+1 are all-zeros\n\n word_vec = _load_word_vec(glove_path, word2idx=word2idx, embed_dim=embed_dim)\n\n for word, i in tqdm.tqdm(word2idx.items(), postfix='Building embedding_matrix {}'.format(dat_fname)):\n vec = word_vec.get(word)\n if vec is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = vec\n pickle.dump(embedding_matrix, open(os.path.join(opt.dataset_path, dat_fname), 'wb'))\n return embedding_matrix\n\n\ndef pad_and_truncate(sequence, maxlen, dtype='int64', padding='post', truncating='post', value=0):\n x = (np.ones(maxlen) * value).astype(dtype)\n if truncating == 'pre':\n trunc = sequence[-maxlen:]\n else:\n trunc = sequence[:maxlen]\n trunc = np.asarray(trunc, dtype=dtype)\n if padding == 'post':\n x[:len(trunc)] = trunc\n else:\n x[-len(trunc):] = trunc\n return x\n\n\nclass Tokenizer(object):\n def __init__(self, max_seq_len, lower=True):\n self.lower = lower\n self.max_seq_len = max_seq_len\n self.word2idx = {}\n self.idx2word = {}\n self.idx = 1\n\n def fit_on_text(self, text):\n if self.lower:\n text = text.lower()\n words = text.split()\n for word in words:\n if word not in self.word2idx:\n self.word2idx[word] = self.idx\n self.idx2word[self.idx] = word\n self.idx += 1\n\n def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):\n if self.lower:\n text = text.lower()\n words = text.split()\n unknownidx = len(self.word2idx) + 1\n sequence = [self.word2idx[w] if w in self.word2idx else unknownidx for w in words]\n if len(sequence) == 0:\n sequence = [0]\n if reverse:\n sequence = sequence[::-1]\n return pad_and_truncate(sequence, self.max_seq_len, padding=padding, truncating=truncating)\n\n\nclass Tokenizer4Pretraining:\n def __init__(self, max_seq_len, pretrained_bert_name):\n self.tokenizer = AutoTokenizer.from_pretrained(pretrained_bert_name)\n self.max_seq_len = max_seq_len\n\n def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):\n sequence = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))\n if len(sequence) == 0:\n sequence = [0]\n if reverse:\n sequence = sequence[::-1]\n return pad_and_truncate(sequence, self.max_seq_len, padding=padding, truncating=truncating)\n\n\nclass BERTBaselineABSADataset(Dataset):\n bert_baseline_input_colses = {\n 'lstm_bert': ['text_indices'],\n 'td_lstm_bert': ['left_with_aspect_indices', 'right_with_aspect_indices'],\n 'tc_lstm_bert': ['left_with_aspect_indices', 'right_with_aspect_indices', 'aspect_indices'],\n 'atae_lstm_bert': ['text_indices', 'aspect_indices'],\n 'ian_bert': ['text_indices', 'aspect_indices'],\n 'memnet_bert': ['context_indices', 'aspect_indices'],\n 'ram_bert': ['text_indices', 'aspect_indices', 'left_indices'],\n 'cabasc_bert': ['text_indices', 'aspect_indices', 'left_with_aspect_indices', 'right_with_aspect_indices'],\n 'tnet_lf_bert': ['text_indices', 'aspect_indices', 'aspect_boundary'],\n 'aoa_bert': ['text_indices', 'aspect_indices'],\n 'mgan_bert': ['text_indices', 'aspect_indices', 'left_indices'],\n 'asgcn_bert': ['text_indices', 'aspect_indices', 'left_indices', 'dependency_graph'],\n }\n\n def __init__(self, dataset_list, tokenizer, opt):\n\n lines = load_apc_datasets(dataset_list)\n\n all_data = []\n 
label_set = set()\n\n if not os.path.exists(opt.dataset_path):\n os.mkdir(os.path.join(os.getcwd(), opt.dataset_path))\n opt.dataset_path = os.path.join(os.getcwd(), opt.dataset_path)\n graph_path = prepare_dependency_graph(dataset_list, opt.dataset_path, opt.max_seq_len)\n fin = open(graph_path, 'rb')\n idx2graph = pickle.load(fin)\n\n for i in tqdm.tqdm(range(0, len(lines), 3), postfix='building word indices...'):\n text_left, _, text_right = [s.lower().strip() for s in lines[i].partition(\"$T$\")]\n aspect = lines[i + 1].lower().strip()\n polarity = lines[i + 2].strip()\n\n text_indices = tokenizer.text_to_sequence('[CLS] ' + text_left + ' ' + aspect + ' ' + text_right + \" [SEP]\")\n context_indices = tokenizer.text_to_sequence(text_left + text_right)\n left_indices = tokenizer.text_to_sequence(text_left)\n left_with_aspect_indices = tokenizer.text_to_sequence('[CLS] ' + text_left + \" \" + aspect + \" [SEP]\")\n right_indices = tokenizer.text_to_sequence(text_right, reverse=False)\n right_with_aspect_indices = tokenizer.text_to_sequence(aspect + \" \" + text_right, reverse=False)\n aspect_indices = tokenizer.text_to_sequence(aspect)\n aspect_len = np.sum(aspect_indices != 0)\n left_len = min(opt.max_seq_len - aspect_len, np.sum(left_indices != 0))\n left_indices = np.concatenate((left_indices[:left_len], np.asarray([0] * (opt.max_seq_len - left_len))))\n aspect_boundary = np.asarray([left_len, left_len + aspect_len - 1], dtype=np.int64)\n polarity = int(polarity)\n\n dependency_graph = np.pad(idx2graph[i],\n ((0, max(0, opt.max_seq_len - idx2graph[i].shape[0])),\n (0, max(0, opt.max_seq_len - idx2graph[i].shape[0]))),\n 'constant')\n dependency_graph = dependency_graph[:, range(0, opt.max_seq_len)]\n dependency_graph = dependency_graph[range(0, opt.max_seq_len), :]\n\n data = {\n 'text_indices': text_indices\n if 'text_indices' in opt.model.inputs else 0,\n\n 'context_indices': context_indices\n if 'context_indices' in opt.model.inputs else 0,\n\n 'left_indices': left_indices\n if 'left_indices' in opt.model.inputs else 0,\n\n 'left_with_aspect_indices': left_with_aspect_indices\n if 'left_with_aspect_indices' in opt.model.inputs else 0,\n\n 'right_indices': right_indices\n if 'right_indices' in opt.model.inputs else 0,\n\n 'right_with_aspect_indices': right_with_aspect_indices\n if 'right_with_aspect_indices' in opt.model.inputs else 0,\n\n 'aspect_indices': aspect_indices\n if 'aspect_indices' in opt.model.inputs else 0,\n\n 'aspect_boundary': aspect_boundary\n if 'aspect_boundary' in opt.model.inputs else 0,\n\n 'dependency_graph': dependency_graph\n if 'dependency_graph' in opt.model.inputs else 0,\n\n 'polarity': polarity,\n }\n\n label_set.add(polarity)\n\n all_data.append(data)\n\n check_and_fix_labels(label_set, 'polarity', all_data)\n opt.polarities_dim = len(label_set)\n\n self.data = all_data\n\n def __getitem__(self, index):\n return self.data[index]\n\n def __len__(self):\n return len(self.data)\n"
] |
[
[
"numpy.asarray",
"numpy.sum",
"numpy.ones"
]
] |
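For quick reference, a self-contained sketch of the fixed-length indexing scheme implemented by pad_and_truncate() and Tokenizer.text_to_sequence() in the file above. The toy vocabulary and the maxlen value are illustrative only; they are not taken from any dataset used by the file.

# Standalone sketch of the post-padding / post-truncation scheme used above.
import numpy as np

def pad_and_truncate(sequence, maxlen, dtype='int64', padding='post', truncating='post', value=0):
    x = (np.ones(maxlen) * value).astype(dtype)
    trunc = sequence[-maxlen:] if truncating == 'pre' else sequence[:maxlen]
    trunc = np.asarray(trunc, dtype=dtype)
    if padding == 'post':
        x[:len(trunc)] = trunc
    else:
        x[-len(trunc):] = trunc
    return x

word2idx = {'the': 1, 'food': 2, 'was': 3, 'great': 4}   # toy vocabulary
# unknown words map to len(word2idx) + 1, mirroring Tokenizer.text_to_sequence
tokens = [word2idx.get(w, len(word2idx) + 1) for w in 'the food was great !'.split()]
print(pad_and_truncate(tokens, maxlen=8))                # -> [1 2 3 4 5 0 0 0]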
akashpattnaik/pre-ictal-similarity
|
[
"85f963aa0c6d2d0a6e971ffa005c400e136a0a76"
] |
[
"code/05-soz_subgraph.py"
] |
[
"# %%\n# %load_ext autoreload\n# %autoreload 2\n# Imports and environment setup\nimport numpy as np\nimport sys\nimport os\nfrom numpy.core.fromnumeric import sort\nimport pandas as pd\nimport json\nfrom scipy.io import loadmat\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom os.path import join as ospj\nfrom scipy.stats import zscore\nimport time\nfrom kneed import KneeLocator\nfrom scipy.stats import mannwhitneyu\n\ncode_path = os.path.dirname(os.path.realpath(__file__))\n\nsys.path.append(ospj(code_path, 'tools'))\n\nfrom plot_spectrogram import plot_spectrogram\nfrom movmean import movmean\nfrom pull_sz_starts import pull_sz_starts\nfrom pull_patient_localization import pull_patient_localization\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom time2ind import time2ind\n\nfrom fastdtw import fastdtw\nfrom scipy.spatial.distance import euclidean\nfrom sklearn.decomposition import NMF\nfrom sklearn.metrics.cluster import adjusted_rand_score\n\nimport warnings\nfrom sklearn.exceptions import ConvergenceWarning\nwarnings.filterwarnings(action='ignore', category=ConvergenceWarning)\n\n# Get paths from config file and metadata\nwith open(ospj(code_path, \"config.json\")) as f:\n config = json.load(f)\nrepo_path = config['repositoryPath']\nmetadata_path = config['metadataPath']\npalette = config['lightColors']\nDTW_FLAG = config['flags'][\"DTW_FLAG\"]\nelectrodes = config['electrodes']\nbands = config['bands']\n\ndata_path = ospj(repo_path, 'data')\nfigure_path = ospj(repo_path, 'figures')\n\nmetadata_fname = ospj(metadata_path, \"DATA_MASTER.json\")\nwith open(metadata_fname) as f:\n metadata = json.load(f)['PATIENTS']\n\nseizure_metadata = pd.read_excel(ospj(data_path, \"seizure_metadata.xlsx\"))\n# flags\nSAVE_PLOT = True\n\nNMF_FLAG = True\nFIXED_PREICTAL_SEC = 60 * 30\nLEAD_SZ_WINDOW_SEC = (FIXED_PREICTAL_SEC + 60 * 15) # 15 min buffer\n\ndef soz_state(H, soz_electrodes, metric=\"max_all\", is_zscore=False):\n '''\n soz_mask: soz electrodes are true and non_soz electrodes are false\n\n metric: determines how to find soz state. 
max_all takes the state where soz \n channels have higher bandpower in all frequency bands\n\n '''\n n_components = H.shape[0]\n n_electrodes = soz_electrodes.shape[0] \n\n # reshape to (component, frequency band, electrode)\n component_arr = np.reshape(H, (n_components, -1, n_electrodes))\n if is_zscore:\n component_z = np.zeros(component_arr.shape)\n for i_comp in range(n_components):\n component_z[i_comp, :, :] = zscore(component_arr[i_comp, :, :], axis=1)\n component_arr = component_z\n # sort to put non-soz first\n sort_soz_inds = np.argsort(soz_electrodes)\n n_soz = np.sum(soz_electrodes)\n n_non_soz = n_electrodes - n_soz\n\n n_iter = 10000\n\n u_stats = np.zeros(n_components)\n null_z = np.zeros(n_components)\n\n for i_comp in range(n_components):\n # randomly resample electrodes and take the mean bandpower of sample\n means = np.zeros(n_iter)\n for iter in range(n_iter):\n means[iter] = np.mean(component_arr[i_comp, :, np.random.choice(n_electrodes, n_soz)])\n # append true soz\n means = np.append(means, np.mean(component_arr[i_comp, :, soz_electrodes]))\n # calculate z_score of true soz and save\n null_z[i_comp] = zscore(means)[-1]\n\n\n sz_u_stats = np.zeros(component_arr.shape[1])\n for i in range(component_arr.shape[1]):\n stat, p = mannwhitneyu(component_arr[i_comp][i, soz_electrodes], component_arr[i_comp][i, ~soz_electrodes])\n sz_u_stats[i] = stat\n u_stats[i_comp] = np.max(sz_u_stats)\n\n pt_soz_state_resamp = np.argmax(np.abs(null_z))\n pt_soz_state_u = np.argmax(u_stats)\n\n pct_non_zero = np.sum(component_arr[pt_soz_state_u,:,:] == 0) / np.size(component_arr[pt_soz_state_u,:,:])\n var = np.max(np.var(component_arr[pt_soz_state_u,:,:], axis=1))\n return pt_soz_state_resamp, pt_soz_state_u, pct_non_zero, var\n\npatient_localization_mat = loadmat(ospj(metadata_path, 'patient_localization_final.mat'))['patient_localization']\npatients, labels, ignore, resect, gm_wm, coords, region, soz = pull_patient_localization(ospj(metadata_path, 'patient_localization_final.mat'))\n\n# %%\n# Plot the NMF subgraphs and expression\nfor index, row in seizure_metadata.iterrows():\n# for index, row in patient_cohort.iterrows():\n# if row['Ignore']:\n# continue\n\n pt = row[\"Patient\"]\n pt_data_path = ospj(data_path, pt)\n\n sz_num = row[\"Seizure number\"]\n remaining_sz_ids = np.load(ospj(pt_data_path, \"remaining_sz_ids.npy\"))\n if sz_num not in remaining_sz_ids:\n continue\n if row[\"Seizure category\"] == \"Other\":\n continue\n\n print(\"Calculating dissimilarity for seizure {}, {}\".format(sz_num, pt))\n\n t_sec = np.load(ospj(pt_data_path, \"lead_sz_t_sec_band-{}_elec-{}.npy\".format(bands, electrodes)))\n sz_id = np.load(ospj(pt_data_path, \"lead_sz_sz_id_band-{}_elec-{}.npy\".format(bands, electrodes)))\n W = np.load(ospj(pt_data_path, \"nmf_expression_band-{}_elec-{}_sz-{}.npy\".format(bands, electrodes, sz_num)))\n H = np.load(ospj(pt_data_path, \"nmf_components_band-{}_elec-{}_sz_{}.npy\".format(bands, electrodes, sz_num)))\n n_components = H.shape[0]\n\n # pull and format electrode metadata\n electrodes_mat = loadmat(ospj(pt_data_path, \"selected_electrodes_elec-{}.mat\".format(electrodes)))\n target_electrode_region_inds = electrodes_mat['targetElectrodesRegionInds'][0]\n pt_index = patients.index(pt)\n sz_starts = pull_sz_starts(pt, metadata)\n\n # find seizure onset zone and state with most seizure onset zone\n soz_electrodes = np.array(np.squeeze(soz[pt_index][target_electrode_region_inds, :]), dtype=bool)\n pt_soz_state_resamp, pt_soz_state_u, pct_non_zero, var = 
soz_state(H, soz_electrodes)\n \n seizure_metadata.at[index, 'SOZ Sensitive State (resampling)'] = pt_soz_state_resamp\n seizure_metadata.at[index, 'SOZ Sensitive State (mann-whitney)'] = pt_soz_state_u\n seizure_metadata.at[index, 'SOZ Sensitive State (mann-whitney)'] = pt_soz_state_u\n seizure_metadata.at[index, 'Ratio of non-zero component entries'] = pct_non_zero\n seizure_metadata.at[index, 'Maximum variance across bands'] = var\n\n np.save(ospj(pt_data_path, \"soz_electrodes_band-{}_elec-{}.npy\".format(bands, electrodes)), soz_electrodes)\n\nseizure_metadata.to_excel(ospj(data_path, \"seizure_metadata_with_soz_subgraph.xlsx\"))\n# %%\n"
] |
[
[
"numpy.abs",
"numpy.random.choice",
"numpy.reshape",
"numpy.squeeze",
"scipy.stats.zscore",
"numpy.max",
"numpy.size",
"numpy.argmax",
"numpy.mean",
"scipy.stats.mannwhitneyu",
"numpy.var",
"numpy.argsort",
"numpy.zeros",
"numpy.sum"
]
] |
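A minimal, self-contained sketch of the resampling step inside soz_state() in 05-soz_subgraph.py above: the mean loading of the true SOZ electrodes is z-scored against a null built from random electrode subsets of the same size. All arrays below are synthetic stand-ins for the NMF component matrix H and the soz_electrodes mask, and the iteration count is reduced from the file's 10000 to keep the demo quick.

# Resampling null for the SOZ-sensitive subgraph score (synthetic data).
import numpy as np
from scipy.stats import zscore

rng = np.random.default_rng(0)
n_electrodes, n_bands = 50, 6
component = rng.random((n_bands, n_electrodes))   # one NMF subgraph (bands x electrodes)
soz = np.zeros(n_electrodes, dtype=bool)
soz[:8] = True                                    # pretend the first 8 electrodes are SOZ
component[:, soz] += 0.5                          # give SOZ electrodes higher loading

n_iter, n_soz = 2000, int(soz.sum())              # original script uses n_iter = 10000
means = np.array([component[:, rng.choice(n_electrodes, n_soz)].mean()
                  for _ in range(n_iter)])        # null: random electrode subsets
means = np.append(means, component[:, soz].mean())  # append the true SOZ mean last
print('z-score of true SOZ vs. null:', zscore(means)[-1])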
tkuri/irradiance_estimation
|
[
"3f7e0e8d4772222faad7257a70a8dec0198e4810"
] |
[
"models/variation/pix2pix_tm2_mc_full_in2_model.py"
] |
[
"import torch\nfrom .base_model import BaseModel\nfrom . import networks\nfrom torch.nn import functional as F\n\nclass Pix2PixTm2McFullIn2Model(BaseModel):\n \"\"\" This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.\n\n The model training requires '--dataset_mode aligned' dataset.\n By default, it uses a '--netG unet256' U-Net generator,\n a '--netD basic' discriminator (PatchGAN),\n and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper).\n\n pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf\n \"\"\"\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n \"\"\"Add new dataset-specific options, and rewrite default values for existing options.\n\n Parameters:\n parser -- original option parser\n is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n\n For pix2pix, we do not use image buffer\n The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1\n By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.\n \"\"\"\n # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)\n parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned3')\n if is_train:\n parser.set_defaults(pool_size=0, gan_mode='vanilla')\n parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')\n\n return parser\n\n def __init__(self, opt):\n \"\"\"Initialize the pix2pix class.\n\n Parameters:\n opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n \n self.visual_names = ['real_A', 'fake_B', 'real_B', 'real_C', 'real_C_itp', 'ltm_slice00', 'ltm_slice12', 'ltm_slice24', 'matrix_1_0', 'matrix_1_1', 'matrix_1_2', 'matrix_1_3', 'matrix_2_0', 'matrix_2_1', 'matrix_2_2', 'matrix_2_3']\n # specify the models you want to save to the disk. 
The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>\n if self.isTrain:\n # self.model_names = ['G', 'D']\n self.model_names = ['G', 'G2', 'D']\n else: # during test time, only load G\n self.model_names = ['G', 'G2']\n\n # define networks (both generator and discriminator)\n self.output_nc = opt.output_nc\n self.light_res = opt.light_res\n self.intermediate_nc = opt.intermediate_nc\n print('opt.output_nc', opt.output_nc)\n print('light_res', self.light_res)\n print('intermediate_nc', self.intermediate_nc)\n\n self.netG = networks.define_G(opt.input_nc + opt.input2_nc, opt.output_nc*self.intermediate_nc, opt.ngf, 'unet_256_lastrelu', opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n \n self.netG2 = networks.define_G(opt.input_nc + opt.input2_nc, self.intermediate_nc, opt.ngf, 'unet_256_lastrelu', opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n\n if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc\n self.netD = networks.define_D(opt.input_nc + opt.input2_nc + opt.output_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain:\n # define loss functions\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\n self.criterionL1 = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_G2 = torch.optim.Adam(self.netG2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_G2)\n self.optimizers.append(self.optimizer_D)\n \n\n\n def set_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input (dict): include the data itself and its metadata information.\n\n The option 'direction' can be used to swap images in domain A and domain B.\n \"\"\"\n AtoB = self.opt.direction == 'AtoB'\n self.real_A = input['A' if AtoB else 'B'].to(self.device)\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.real_C = input['C'].to(self.device)\n # self.real_C_itp = F.interpolate(self.real_C, (self.light_res, self.light_res), mode='bicubic', align_corners=False)\n self.real_C_itp = F.interpolate(self.real_C, (self.light_res, self.light_res), mode='bilinear', align_corners=False)\n self.real_C_itp_flat = self.real_C_itp.view(-1, self.light_res**2, 1) # [1, lsxls, 1]\n self.real_C_itp = torch.clamp((F.interpolate(self.real_C_itp, (self.real_C.size(-2), self.real_C.size(-1)), mode='nearest')-0.5)/0.5, min=-1.0, max=1.0)\n self.real_AC = torch.cat([self.real_A, self.real_C], dim=1)\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\n \n def forward(self):\n # print(\"test\")\n \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n sub_matrix1 = self.netG(self.real_AC) # [1, 3xmc, 256, 256]\n sub_matrix2 = self.netG2(self.real_AC) # [1, mc, 256, 256]\n sub_matrix2 = F.interpolate(sub_matrix2, (self.light_res, self.light_res), mode='bilinear', align_corners=False)# [1, mc, ls, ls]\n\n self.sub_matrix_1 = sub_matrix1.clone()\n self.sub_matrix_2 = sub_matrix2.clone()\n \n 
self.matrix_1 = torch.clamp((sub_matrix1*self.matrix_1_gain-0.5)/0.5, min=-1.0, max=1.0)\n self.matrix_1_0 = self.matrix_1[:, [0, self.intermediate_nc, self.intermediate_nc*2], :, :]\n self.matrix_1_1 = self.matrix_1[:, [1, 1 + self.intermediate_nc, 1 + self.intermediate_nc*2], :, :]\n self.matrix_1_2 = self.matrix_1[:, [2, 2 + self.intermediate_nc, 3 + self.intermediate_nc*2], :, :]\n self.matrix_1_3 = self.matrix_1[:, [3, 3 + self.intermediate_nc, 3 + self.intermediate_nc*2], :, :]\n self.matrix_2 = torch.clamp((F.interpolate(sub_matrix2, (self.real_B.size(-2), self.real_B.size(-1)), mode='nearest')*self.matrix_2_gain-0.5)/0.5, min=-1.0, max=1.0)\n self.matrix_2_0 = torch.unsqueeze(self.matrix_2[:, 0, :, :], 1)\n self.matrix_2_1 = torch.unsqueeze(self.matrix_2[:, 1, :, :], 1)\n self.matrix_2_2 = torch.unsqueeze(self.matrix_2[:, 2, :, :], 1)\n self.matrix_2_3 = torch.unsqueeze(self.matrix_2[:, 3, :, :], 1)\n \n sub_matrix1 = sub_matrix1.view(-1, sub_matrix1.size(1), sub_matrix1.size(2)*sub_matrix1.size(3)) # [1, 3xmc, 256x256]\n sub_matrix2 = sub_matrix2.view(-1, sub_matrix2.size(1), sub_matrix2.size(2)*sub_matrix2.size(3)) # [1, mc, lsxls]\n sub_matrix1 = torch.transpose(sub_matrix1, 1, 2) # [1, 256x256, 3xmc]\n sm1R = sub_matrix1[:, :, 0:self.intermediate_nc] # [1, 256x256, mc]\n sm1G = sub_matrix1[:, :, self.intermediate_nc:self.intermediate_nc*2]\n sm1B = sub_matrix1[:, :, self.intermediate_nc*2:self.intermediate_nc*3]\n bufR = torch.matmul(sm1R, sub_matrix2) # [1, 256x256, lsxls]\n bufG = torch.matmul(sm1G, sub_matrix2)\n bufB = torch.matmul(sm1B, sub_matrix2)\n trans_matrix = torch.cat([bufR, bufG, bufB], dim=1) # [1, 3x256x256, lsxls]\n\n ltm = torch.transpose(trans_matrix, 1, 2) #[25, 25, 3x256x256]\n ltm = ltm.reshape(ltm.size(0), ltm.size(1)*self.real_B.size(1), self.real_B.size(2)*self.real_B.size(3)) #[25, 25x3, 256x256]\n ltm = ltm.reshape(ltm.size(0), ltm.size(1), self.real_B.size(2), self.real_B.size(3)) #[25, 25x3, 256, 256]\n\n self.ltm_slice00 = torch.clamp((ltm[:, 0:3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0) # [25, 3, 256, 256]\n self.ltm_slice12 = torch.clamp((ltm[:, 3*12:3*12+3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0) # [25, 3, 256, 256]\n self.ltm_slice24 = torch.clamp((ltm[:, 3*24:3*24+3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0) # [25, 3, 256, 256]\n\n\n # trans_matrix = torch.matmul(sub_matrix1, sub_matrix2) #[1, 3x256x256, lsxls]\n # print('trans_matrix:', trans_matrix.size())\n tmR = trans_matrix[:, 0:256**2, :] # [1, 256x256, lsxls]\n tmG = trans_matrix[:, 256**2:(256**2)*2, :]\n tmB = trans_matrix[:, (256**2)*2:(256**2)*3, :]\n # print('tmR:', tmR.size())\n bufR = torch.matmul(tmR, self.real_C_itp_flat) # [1, 256x256, 1]\n bufG = torch.matmul(tmG, self.real_C_itp_flat)\n bufB = torch.matmul(tmB, self.real_C_itp_flat)\n # print('bufR:', bufR.size())\n buf = torch.cat([bufR, bufG, bufB], dim=2) # [1, 256x256, 3]\n buf = torch.transpose(buf, 1, 2) # [1, 3, 256x256]\n buf = (buf - 0.5) / 0.5\n buf = torch.clamp(buf, min=-1.0, max=1.0)\n # print('buf:', buf.size())\n self.fake_B = buf.view(self.real_B.size()) # [1, 3, 256, 256]\n\n def forward_linebuf(self):\n \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n sub_matrix1 = self.netG(self.real_AC) # [1, 3, 256, 256]\n sub_matrix2 = self.netG2(self.real_AC) # [1, 1, 256, 256]\n sub_matrix2 = F.interpolate(sub_matrix2, (self.light_res, self.light_res), mode='bilinear', align_corners=False)\n self.fake_B = torch.zeros_like(self.real_B)\n sub_matrix2 = sub_matrix2.view(-1, 1, 
sub_matrix2.size(-2)*sub_matrix2.size(-1)) * 0.5 + 0.5 # [1, 1, 256x256]\n \n for l in range(sub_matrix1.size(2)):\n sub_matrix1_buf = sub_matrix1[:, :, l, :].reshape(-1, sub_matrix1.size(1)*sub_matrix1.size(3), 1) * 0.5 + 0.5 # [1, 3x256, 1]\n trans_matrix = torch.matmul(sub_matrix1_buf, sub_matrix2) #[1, 3x256, 256x256]\n # print('trans_matrix:', trans_matrix.size())\n tmR = trans_matrix[:, 0:256, :] # [1, 256, 256x256]\n tmG = trans_matrix[:, 256:256*2, :]\n tmB = trans_matrix[:, 256*2:256*3, :]\n # print('self.real_C_itp_flat:', self.real_C_itp_flat.size())\n # print('tmR:', tmR.size())\n bufR = torch.matmul(tmR, self.real_C_itp_flat * 10.0) # [1, 256, 1]\n bufG = torch.matmul(tmG, self.real_C_itp_flat * 10.0)\n bufB = torch.matmul(tmB, self.real_C_itp_flat * 10.0)\n # print('bufR:', bufR.size())\n buf = torch.cat([bufR, bufG, bufB], dim=2) # [1, 256, 3]\n buf = torch.transpose(buf, 1, 2) # [1, 3, 256]\n buf = (buf - 0.5) / 0.5\n buf = buf.reshape(self.fake_B.size(0), self.fake_B.size(1), self.fake_B.size(3))\n # print('buf:', buf.size())\n # print('fake_B:', self.fake_B.size())\n self.fake_B[:, :, l, :] = buf # [1, 3, 1, 256] <- [1,3,256]\n\n\n def backward_D(self):\n \"\"\"Calculate GAN loss for the discriminator\"\"\"\n # Fake; stop backprop to the generator by detaching fake_B\n # fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator\n # pred_fake = self.netD(fake_AB.detach())\n fake_ACB = torch.cat((self.real_AC, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator\n pred_fake = self.netD(fake_ACB.detach())\n self.loss_D_fake = self.criterionGAN(pred_fake, False)\n # Real\n # real_AB = torch.cat((self.real_A, self.real_B), 1)\n # pred_real = self.netD(real_AB)\n real_ACB = torch.cat((self.real_AC, self.real_B), 1)\n pred_real = self.netD(real_ACB)\n self.loss_D_real = self.criterionGAN(pred_real, True)\n # combine loss and calculate gradients\n self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5\n self.loss_D.backward()\n\n def backward_G(self):\n \"\"\"Calculate GAN and L1 loss for the generator\"\"\"\n # First, G(A) should fake the discriminator\n # fake_AB = torch.cat((self.real_A, self.fake_B), 1)\n # pred_fake = self.netD(fake_AB)\n fake_ACB = torch.cat((self.real_AC, self.fake_B), 1)\n pred_fake = self.netD(fake_ACB)\n self.loss_G_GAN = self.criterionGAN(pred_fake, True)\n # Second, G(A) = B\n self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1\n # combine loss and calculate gradients\n self.loss_G = self.loss_G_GAN + self.loss_G_L1\n self.loss_G.backward()\n\n def optimize_parameters(self):\n self.forward() # compute fake images: G(A)\n # update D\n self.set_requires_grad(self.netD, True) # enable backprop for D\n self.optimizer_D.zero_grad() # set D's gradients to zero\n self.backward_D() # calculate gradients for D\n self.optimizer_D.step() # update D's weights\n # update G\n self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G\n # self.optimizer_G.zero_grad() # set G's gradients to zero\n # self.backward_G() # calculate graidents for G\n # self.optimizer_G.step() # udpate G's weights\n\n self.optimizer_G.zero_grad() # set G's gradients to zero\n self.optimizer_G2.zero_grad() # set G's gradients to zero\n self.backward_G() # calculate graidents for G\n self.optimizer_G.step() # udpate G's weights\n self.optimizer_G2.step() # udpate G's weights\n"
] |
[
[
"torch.transpose",
"torch.cat",
"torch.zeros_like",
"torch.unsqueeze",
"torch.matmul",
"torch.nn.functional.interpolate",
"torch.clamp",
"torch.nn.L1Loss"
]
] |
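A hedged, shape-only sketch of the two-factor light-transport product computed in forward() of pix2pix_tm2_mc_full_in2_model.py above: a per-pixel factor is multiplied with a per-light factor and the resulting transport matrix is applied to a flattened illumination map. The tensor sizes are toy values, not the model's actual resolution or light grid.

# Low-rank light-transport product with toy shapes (one colour channel only).
import torch

B, HW, mc, L = 1, 16, 4, 9            # batch, pixels, intermediate channels, light samples
sub_matrix1 = torch.rand(B, HW, mc)   # stand-in for one colour slice of netG's output
sub_matrix2 = torch.rand(B, mc, L)    # stand-in for netG2's downsampled output
light = torch.rand(B, L, 1)           # flattened light pattern (real_C_itp_flat analogue)

transport = torch.matmul(sub_matrix1, sub_matrix2)   # [B, HW, L] low-rank transport matrix
rendered = torch.matmul(transport, light)            # [B, HW, 1] relit pixel intensities
rendered = torch.clamp((rendered - 0.5) / 0.5, min=-1.0, max=1.0)
print(rendered.shape)                                # torch.Size([1, 16, 1])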
LI-Mingyu/GraphScope-MY
|
[
"942060983d3f7f8d3a3377467386e27aba285b33"
] |
[
"python/tests/unittest/test_context.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\n\nimport pandas as pd\nimport pytest\nimport vineyard.io\n\nfrom graphscope import lpa\nfrom graphscope import sssp\nfrom graphscope.framework.app import AppAssets\nfrom graphscope.framework.errors import InvalidArgumentError\n\n\ndef test_simple_context_to_numpy(simple_context):\n out = simple_context.to_numpy(\"v.id\")\n assert out.shape == (40521,)\n out = simple_context.to_numpy(\"v.data\")\n assert out.shape == (40521,)\n # selector of `e` is not done yet.\n # out = simple_context.to_numpy('e.src')\n # out = simple_context.to_numpy('e.dst')\n # out = simple_context.to_numpy('e.data')\n out = simple_context.to_numpy(\"r\")\n assert out.shape == (40521,)\n\n\ndef test_simple_context_to_dataframe(simple_context):\n out = simple_context.to_dataframe({\"id\": \"v.id\", \"data\": \"v.data\", \"result\": \"r\"})\n assert out.shape == (40521, 3)\n\n\ndef test_simple_context_to_vineyard_tensor(simple_context, p2p_project_directed_graph):\n out = simple_context.to_vineyard_tensor(\"v.id\")\n assert out is not None\n out = simple_context.to_vineyard_tensor(\"r\")\n assert out is not None\n\n has_path = AppAssets(algo=\"sssp_has_path\", context=\"tensor\")\n ctx = has_path(\n p2p_project_directed_graph._project_to_simple(), source=6, target=3728\n )\n assert ctx.to_vineyard_tensor(axis=0) is not None\n\n\ndef test_simple_context_to_vineyard_dataframe(\n simple_context, p2p_project_directed_graph\n):\n out = simple_context.to_vineyard_dataframe(\n {\"id\": \"v.id\", \"data\": \"v.data\", \"result\": \"r\"}\n )\n assert out is not None\n\n\ndef test_property_context_to_numpy(property_context):\n out = property_context.to_numpy(\"v:v0.dist\")\n assert out.shape == (40521,)\n out = property_context.to_numpy(\"r:v1.dist_1\")\n assert out.shape == (40786,)\n\n\ndef test_property_context_to_dataframe(property_context):\n out = property_context.to_dataframe({\"id\": \"v:v0.id\", \"result\": \"r:v0.dist_0\"})\n assert out.shape == (40521, 2)\n out = property_context.to_dataframe({\"id\": \"v:v1.id\", \"result\": \"r:v1.dist_1\"})\n assert out.shape == (40786, 2)\n\n\ndef test_property_context_output(property_context):\n property_context.output_to_client(\n fd=\"/tmp/r0\", selector={\"id\": \"v:v0.id\", \"result\": \"r:v0.dist_0\"}\n )\n out = pd.read_csv(\"/tmp/r0\")\n assert out.shape == (40521, 2)\n\n\ndef test_property_context_to_vineyard_tensor(property_context):\n out = property_context.to_vineyard_tensor(\"v:v0.id\")\n assert out is not None\n\n\ndef test_property_context_to_vineyard_dataframe(graphscope_session, property_context):\n out = property_context.to_vineyard_dataframe(\n {\"id\": \"v:v0.id\", \"data\": \"v:v0.dist\", \"result\": \"r:v0.dist_0\"}\n )\n assert out is not None\n\n\ndef test_add_column(arrow_property_graph, property_context):\n g2 = arrow_property_graph.add_column(\n property_context, 
{\"result_0\": \"r:v0.dist_0\", \"result_1\": \"r:v1.dist_1\"}\n )\n assert \"result_0\" in [p.name for p in g2.schema.get_vertex_properties(\"v0\")]\n assert \"result_1\" in [p.name for p in g2.schema.get_vertex_properties(\"v1\")]\n\n\ndef test_context_output(simple_context):\n simple_context.output(\n fd=\"file:///tmp/rlt.csv\",\n selector={\"id\": \"v.id\", \"data\": \"v.data\", \"result\": \"r\"},\n )\n\n\ndef test_add_column_after_computation(arrow_property_graph):\n sg = arrow_property_graph.project(vertices={\"v0\": [\"id\"]}, edges={\"e0\": [\"weight\"]})\n ret = sssp(sg, 20)\n g2 = arrow_property_graph.add_column(\n ret, {\"id_col\": \"v.id\", \"data_col\": \"v.data\", \"result_col\": \"r\"}\n )\n assert \"id_col\" in [p.name for p in g2.schema.get_vertex_properties(\"v0\")]\n assert \"data_col\" in [p.name for p in g2.schema.get_vertex_properties(\"v0\")]\n assert \"result_col\" in [p.name for p in g2.schema.get_vertex_properties(\"v0\")]\n\n\ndef test_lpa(arrow_property_graph_lpa):\n ret = (\n lpa(arrow_property_graph_lpa, max_round=20)\n .to_dataframe(\n {\"node\": \"v:v0.id\", \"label0\": \"r:v0.label_0\", \"label1\": \"r:v0.label_1\"}\n )\n .sort_values(by=[\"node\"])\n )\n\n\[email protected](\"NIGHTLY\" not in os.environ, reason=\"Run in nightly CI\")\ndef test_error_on_selector(property_context):\n with pytest.raises(KeyError, match=\"non_exist_label\"):\n out = property_context.to_numpy(\"v:non_exist_label.id\")\n with pytest.raises(KeyError, match=\"non_exist_prop\"):\n out = property_context.to_numpy(\"v:v0.non_exist_prop\")\n with pytest.raises(\n InvalidArgumentError,\n match=\"Selector in labeled vertex data context cannot be None\",\n ):\n out = property_context.to_numpy(selector=None)\n with pytest.raises(ValueError, match=\"not enough values to unpack\"):\n out = property_context.to_numpy(\"xxx\")\n with pytest.raises(SyntaxError, match=\"Invalid selector\"):\n out = property_context.to_numpy(\"xxx:a.b\")\n"
] |
[
[
"pandas.read_csv"
]
] |
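A GraphScope-free sketch of the output-and-verify pattern used by test_property_context_output() above: write a selector result to CSV, read it back with pandas.read_csv, and assert its shape. The file name and columns here are made up for illustration.

# Round-trip a small result table through CSV, mirroring the test's check.
import os
import tempfile

import pandas as pd

result = pd.DataFrame({'id': [1, 2, 3], 'result': [0.0, 1.5, 2.25]})
path = os.path.join(tempfile.gettempdir(), 'r0_example.csv')
result.to_csv(path, index=False)

out = pd.read_csv(path)
assert out.shape == (3, 2)
print(out)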
proximal-dg/proximal_dg
|
[
"000e925c7daab099b2c3735f99e65e6b2a00a799"
] |
[
"torchgan/metrics/proximal_duality_gap.py"
] |
[
"import torch\nimport torch.nn.functional as F\nimport torchvision\nimport copy\nimport time\nimport os\nfrom ..utils import reduce\nfrom .metric import EvaluationMetric\nfrom torchgan.trainer import *\nimport torch.multiprocessing as mp\nimport numpy as np\nfrom ray import tune\nfrom torch.optim import Adam\n__all__ = [\"ProximalDualityGap\"]\n\n\nclass ProximalDualityGap(EvaluationMetric):\n r\"\"\"\n Computes the DualityGap of a Model.\n \n Args:\n \n optimizer : The optimizer to be used for DG estimation ('SGD','Adam')\n n_iter : The no. steps in M1 and M2 estimation (int)\n perturb : Use perturbed DG (Boolean)\n \"\"\"\n\n def __init__(self,perturbation=False,network_params=None,generator_loss=None,discriminator_loss=None,evaluation_loss=None,proximal_evaluation_loss=None,train_dataloader=None,eval_dataloader=None,n_iter=10,log_dir=\"./\",sample_size=28,n_row=7,verbose=False):\n \n super(ProximalDualityGap, self).__init__()\n self.perturbation = perturbation\n self.n_iter = n_iter\n self.network_params = network_params\n self.generator_loss = generator_loss\n self.discriminator_loss = discriminator_loss\n self.evaluation_loss = evaluation_loss\n self.proximal_evaluation_loss = proximal_evaluation_loss if proximal_evaluation_loss is not None else evaluation_loss\n self.train_dataloader = train_dataloader\n self.eval_dataloader = eval_dataloader if eval_dataloader is not None else train_dataloader\n self.log_dir = log_dir\n self.sample_size = sample_size\n self.n_row = n_row\n self.set_arg_map({\"ckpt_dir\":\"checkpoints\" , \"ckpt_no\":\"last_retained_checkpoint\"})\n self.verbose = verbose\n self.evaluation_loss.eval_only = True\n self.history = []\n \n \n def preprocess(self, x):\n r\"\"\"\n Preprocessor for the trainer object\n\n Args:\n x (torch.Tensor) : Instance of class BaseTrainer\n\n Returns:\n Trainer class after preprocessing\n \"\"\"\n return x\n \n def attempt_deviation(self,trainer):\n \n trainer(self.train_dataloader)\n trainer.losses[type(self.evaluation_loss).__name__] = self.evaluate\n trainer._store_loss_maps()\n\n batch_score = []\n for data in self.eval_dataloader:\n \n if type(data) is tuple or type(data) is list:\n trainer.real_inputs = data[0].to(trainer.device)\n trainer.labels = data[1].to(trainer.device)\n elif type(data) is torch.Tensor:\n trainer.real_inputs = data.to(trainer.device)\n else:\n trainer.real_inputs = data\n batch_score.append(-1*self.evaluate.train_ops(**trainer._get_arguments(trainer.loss_arg_maps[type(self.evaluation_loss).__name__])) )\n return np.mean(batch_score)\n\n def calculate_score(self,load_path=None,m1_dir=None,m2_dir=None,perturb_std=1e-3):\n r\"\"\"\n Computes the duality gap for a given trainer instance.\n\n Args:\n load_path (str) : Path to load the Instance of class BaseTrainer\n m1_dir (str) : Path to save the logs for estimating M1\n m2_dir (str) : Path to save the logs for estimating M2\n\n Returns:\n The Duality Gap.\n \"\"\"\n\n disc_trainer = Trainer(self.network_params,[self.discriminator_loss],log_dir=os.path.join(m1_dir,\"logs\"),recon=os.path.join(m1_dir,\"images\"),checkpoints=os.path.join(m1_dir,\"ckpts\",\"model_\"),n_critic=1,sample_size=self.sample_size,nrow=self.n_row,verbose=self.verbose) \n disc_trainer.load_model(load_path,model_only=True)\n disc_trainer.epochs = self.n_iter\n disc_trainer.loss_information[\"generator_iters\"] = 1\n disc_trainer.tune_report = \"DG\"\n\n if(perturb_std>0):\n with torch.no_grad():\n for x in disc_trainer.discriminator.parameters():\n 
x.add_(torch.normal(mean=0,std=perturb_std,size=x.size(),device=disc_trainer.device))\n \n gen_trainer = Trainer(self.network_params,[self.generator_loss],log_dir=os.path.join(m2_dir,\"logs\"),recon=os.path.join(m2_dir,\"images\"),checkpoints=os.path.join(m2_dir,\"ckpts\",\"model_\"),n_critic=1,sample_size=self.sample_size,nrow=self.n_row,verbose=self.verbose) \n gen_trainer.load_model(load_path,model_only=True)\n gen_trainer.epochs = self.n_iter\n gen_trainer.loss_information[\"discriminator_iters\"] = 1\n gen_trainer.tune_report = \"DG\"\n\n\n if(perturb_std>0):\n with torch.no_grad():\n for x in gen_trainer.generator.parameters():\n x.add_(torch.normal(mean=0,std=perturb_std,size=x.size(),device=gen_trainer.device))\n \n if(self.verbose):\n print(\"__\"*10,\"\\n{:30s}\\n\".format(\"Estimating M1\"),\"__\"*10)\n self.evaluate = self.evaluation_loss\n M1 = self.attempt_deviation(disc_trainer)\n if(self.verbose):\n print(\"M1 : \",M1)\n print(\"__\"*10,\"\\n{:30s}\\n\".format(\"Estimating M2\"),\"__\"*10)\n # M2 = 0\n self.evaluate = self.proximal_evaluation_loss\n M2 = self.attempt_deviation(gen_trainer)\n if(self.verbose):\n print(\"M2 : \",M2)\n disc_trainer.complete()\n gen_trainer.complete()\n \n return abs(M1 - M2)\n\n def metric_ops(self,ckpt_dir=None,ckpt_no=None):\n r\"\"\"Defines the set of operations necessary to compute the ClassifierScore.\n\n Args:\n generator (torchgan.models.Generator): The generator which needs to be evaluated.\n device (torch.device): Device on which the generator is present.\n\n Returns:\n The Classifier Score (scalar quantity)\n \"\"\"\n if(self.verbose):\n print(\"==\"*60,\"\\n{:^120s}\\n\".format(\"Estimating Proximal Duality Gap\"),\"==\"*60)\n load_path = ckpt_dir + str(ckpt_no-1)+ \".model\"\n m1_dir = os.path.join(self.log_dir,\"proximal_duality_gap\",\"M1\",\"iter_{}\".format(ckpt_no))\n m2_dir = os.path.join(self.log_dir,\"proximal_duality_gap\",\"M2\",\"iter_{}\".format(ckpt_no))\n\n start_time = time.time()\n score = self.calculate_score(load_path=load_path,m1_dir=m1_dir,m2_dir=m2_dir)\n time_taken = time.time()-start_time\n \n if(self.verbose):\n print(\"__\"*60,\"\\n{:^50s} : {}\\n\".format(\"Proximal Duality Gap\",score),\"__\"*60)\n self.history.append(abs(score))\n\n tune.report(score=np.mean(self.history))\n return score\n"
] |
[
[
"torch.no_grad",
"numpy.mean"
]
] |
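A minimal sketch of the weight-perturbation step in calculate_score() above, which nudges every parameter with Gaussian noise under torch.no_grad() before the local M1/M2 optimisation. The small nn.Linear is only a stand-in for the trainer's discriminator or generator.

# Perturb a copied network's weights in place before local re-optimisation.
import torch
import torch.nn as nn

net = nn.Linear(8, 1)      # placeholder for disc_trainer.discriminator
perturb_std = 1e-3

if perturb_std > 0:
    with torch.no_grad():
        for x in net.parameters():
            x.add_(torch.normal(mean=0.0, std=perturb_std, size=x.size()))

print([tuple(p.shape) for p in net.parameters()])   # weights unchanged in shape, jittered in value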
danielhettegger-rl/stable-baselines3
|
[
"23de12e95d96b7bb6136c6a338e407ae7db7c545"
] |
[
"stable_baselines3/sac/policies.py"
] |
[
"import warnings\nfrom typing import Any, Dict, List, Optional, Tuple, Type, Union\n\nimport gym\nimport torch as th\nfrom torch import nn\n\nfrom stable_baselines3.common.distributions import SquashedDiagGaussianDistribution, StateDependentNoiseDistribution\nfrom stable_baselines3.common.policies import BaseModel, BasePolicy, ContinuousCritic, register_policy\nfrom stable_baselines3.common.preprocessing import get_action_dim\nfrom stable_baselines3.common.torch_layers import (\n BaseFeaturesExtractor,\n CombinedExtractor,\n FlattenExtractor,\n NatureCNN,\n create_mlp,\n get_actor_critic_arch,\n)\nfrom stable_baselines3.common.type_aliases import Schedule\n\n# CAP the standard deviation of the actor\nLOG_STD_MAX = 2\nLOG_STD_MIN = -20\n\n\nclass Actor(BasePolicy):\n \"\"\"\n Actor network (policy) for SAC.\n\n :param observation_space: Obervation space\n :param action_space: Action space\n :param net_arch: Network architecture\n :param features_extractor: Network to extract features\n (a CNN when using images, a nn.Flatten() layer otherwise)\n :param features_dim: Number of features\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE.\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. 
In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n net_arch: List[int],\n features_extractor: nn.Module,\n features_dim: int,\n activation_fn: Type[nn.Module] = nn.ReLU,\n use_sde: bool = False,\n log_std_init: float = -3,\n full_std: bool = True,\n sde_net_arch: Optional[List[int]] = None,\n use_expln: bool = False,\n clip_mean: float = 2.0,\n normalize_images: bool = True,\n ):\n super(Actor, self).__init__(\n observation_space,\n action_space,\n features_extractor=features_extractor,\n normalize_images=normalize_images,\n squash_output=True,\n )\n\n # Save arguments to re-create object at loading\n self.use_sde = use_sde\n self.sde_features_extractor = None\n self.net_arch = net_arch\n self.features_dim = features_dim\n self.activation_fn = activation_fn\n self.log_std_init = log_std_init\n self.sde_net_arch = sde_net_arch\n self.use_expln = use_expln\n self.full_std = full_std\n self.clip_mean = clip_mean\n\n if sde_net_arch is not None:\n warnings.warn(\"sde_net_arch is deprecated and will be removed in SB3 v2.4.0.\", DeprecationWarning)\n\n action_dim = get_action_dim(self.action_space)\n latent_pi_net = create_mlp(features_dim, -1, net_arch, activation_fn)\n self.latent_pi = nn.Sequential(*latent_pi_net)\n last_layer_dim = net_arch[-1] if len(net_arch) > 0 else features_dim\n\n if self.use_sde:\n self.action_dist = StateDependentNoiseDistribution(\n action_dim, full_std=full_std, use_expln=use_expln, learn_features=True, squash_output=True\n )\n self.mu, self.log_std = self.action_dist.proba_distribution_net(\n latent_dim=last_layer_dim, latent_sde_dim=last_layer_dim, log_std_init=log_std_init\n )\n # Avoid numerical issues by limiting the mean of the Gaussian\n # to be in [-clip_mean, clip_mean]\n if clip_mean > 0.0:\n self.mu = nn.Sequential(self.mu, nn.Hardtanh(min_val=-clip_mean, max_val=clip_mean))\n else:\n self.action_dist = SquashedDiagGaussianDistribution(action_dim)\n self.mu = nn.Linear(last_layer_dim, action_dim)\n self.log_std = nn.Linear(last_layer_dim, action_dim)\n\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n data = super()._get_constructor_parameters()\n\n data.update(\n dict(\n net_arch=self.net_arch,\n features_dim=self.features_dim,\n activation_fn=self.activation_fn,\n use_sde=self.use_sde,\n log_std_init=self.log_std_init,\n full_std=self.full_std,\n use_expln=self.use_expln,\n features_extractor=self.features_extractor,\n clip_mean=self.clip_mean,\n )\n )\n return data\n\n def get_std(self) -> th.Tensor:\n \"\"\"\n Retrieve the standard deviation of the action distribution.\n Only useful when using gSDE.\n It corresponds to ``th.exp(log_std)`` in the normal case,\n but is slightly different when using ``expln`` function\n (cf StateDependentNoiseDistribution doc).\n\n :return:\n \"\"\"\n msg = \"get_std() is only available when using gSDE\"\n assert isinstance(self.action_dist, StateDependentNoiseDistribution), msg\n return self.action_dist.get_std(self.log_std)\n\n def reset_noise(self, batch_size: int = 1) -> None:\n \"\"\"\n Sample new weights for the exploration matrix, when using gSDE.\n\n :param batch_size:\n \"\"\"\n msg = \"reset_noise() is only available when using gSDE\"\n assert isinstance(self.action_dist, StateDependentNoiseDistribution), msg\n 
self.action_dist.sample_weights(self.log_std, batch_size=batch_size)\n\n def get_action_dist_params(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, Dict[str, th.Tensor]]:\n \"\"\"\n Get the parameters for the action distribution.\n\n :param obs:\n :return:\n Mean, standard deviation and optional keyword arguments.\n \"\"\"\n features = self.extract_features(obs)\n latent_pi = self.latent_pi(features)\n mean_actions = self.mu(latent_pi)\n\n if self.use_sde:\n return mean_actions, self.log_std, dict(latent_sde=latent_pi)\n # Unstructured exploration (Original implementation)\n log_std = self.log_std(latent_pi)\n # Original Implementation to cap the standard deviation\n log_std = th.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)\n return mean_actions, log_std, {}\n\n def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:\n mean_actions, log_std, kwargs = self.get_action_dist_params(obs)\n # Note: the action is squashed\n return self.action_dist.actions_from_params(mean_actions, log_std, deterministic=deterministic, **kwargs)\n\n def action_log_prob(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:\n mean_actions, log_std, kwargs = self.get_action_dist_params(obs)\n # return action and associated log prob\n return self.action_dist.log_prob_from_params(mean_actions, log_std, **kwargs)\n\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n return self.forward(observation, deterministic)\n\n\nclass SACPolicy(BasePolicy):\n \"\"\"\n Policy class (with both actor and critic) for SAC.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. 
In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether to share or not the features extractor\n between the actor and the critic (this saves computation time)\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n use_sde: bool = False,\n log_std_init: float = -3,\n sde_net_arch: Optional[List[int]] = None,\n use_expln: bool = False,\n clip_mean: float = 2.0,\n features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n n_critics: int = 2,\n share_features_extractor: bool = True,\n ):\n super(SACPolicy, self).__init__(\n observation_space,\n action_space,\n features_extractor_class,\n features_extractor_kwargs,\n optimizer_class=optimizer_class,\n optimizer_kwargs=optimizer_kwargs,\n squash_output=True,\n )\n\n if net_arch is None:\n if features_extractor_class == NatureCNN:\n net_arch = []\n else:\n net_arch = [256, 256]\n\n actor_arch, critic_arch = get_actor_critic_arch(net_arch)\n\n self.net_arch = net_arch\n self.activation_fn = activation_fn\n self.net_args = {\n \"observation_space\": self.observation_space,\n \"action_space\": self.action_space,\n \"net_arch\": actor_arch,\n \"activation_fn\": self.activation_fn,\n \"normalize_images\": normalize_images,\n }\n self.actor_kwargs = self.net_args.copy()\n\n if sde_net_arch is not None:\n warnings.warn(\"sde_net_arch is deprecated and will be removed in SB3 v2.4.0.\", DeprecationWarning)\n\n sde_kwargs = {\n \"use_sde\": use_sde,\n \"log_std_init\": log_std_init,\n \"use_expln\": use_expln,\n \"clip_mean\": clip_mean,\n }\n self.actor_kwargs.update(sde_kwargs)\n self.critic_kwargs = self.net_args.copy()\n self.critic_kwargs.update(\n {\n \"n_critics\": n_critics,\n \"net_arch\": critic_arch,\n \"share_features_extractor\": share_features_extractor,\n }\n )\n\n self.actor, self.actor_target = None, None\n self.critic, self.critic_target = None, None\n self.share_features_extractor = share_features_extractor\n\n self._build(lr_schedule)\n\n def _build(self, lr_schedule: Schedule) -> None:\n self.actor = self.make_actor()\n self.actor.optimizer = self.optimizer_class(self.actor.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)\n\n if self.share_features_extractor:\n self.critic = self.make_critic(features_extractor=self.actor.features_extractor)\n # Do not optimize the shared features extractor with the critic loss\n # otherwise, there are gradient computation issues\n critic_parameters = [param for name, param in self.critic.named_parameters() if \"features_extractor\" not in name]\n else:\n # Create a 
separate features extractor for the critic\n # this requires more memory and computation\n self.critic = self.make_critic(features_extractor=None)\n critic_parameters = self.critic.parameters()\n\n # Critic target should not share the features extractor with critic\n self.critic_target = self.make_critic(features_extractor=None)\n self.critic_target.load_state_dict(self.critic.state_dict())\n\n self.critic.optimizer = self.optimizer_class(critic_parameters, lr=lr_schedule(1), **self.optimizer_kwargs)\n\n # Target networks should always be in eval mode\n self.critic_target.set_training_mode(False)\n\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n data = super()._get_constructor_parameters()\n\n data.update(\n dict(\n net_arch=self.net_arch,\n activation_fn=self.net_args[\"activation_fn\"],\n use_sde=self.actor_kwargs[\"use_sde\"],\n log_std_init=self.actor_kwargs[\"log_std_init\"],\n use_expln=self.actor_kwargs[\"use_expln\"],\n clip_mean=self.actor_kwargs[\"clip_mean\"],\n n_critics=self.critic_kwargs[\"n_critics\"],\n lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone\n optimizer_class=self.optimizer_class,\n optimizer_kwargs=self.optimizer_kwargs,\n features_extractor_class=self.features_extractor_class,\n features_extractor_kwargs=self.features_extractor_kwargs,\n )\n )\n return data\n\n def reset_noise(self, batch_size: int = 1) -> None:\n \"\"\"\n Sample new weights for the exploration matrix, when using gSDE.\n\n :param batch_size:\n \"\"\"\n self.actor.reset_noise(batch_size=batch_size)\n\n def make_actor(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> Actor:\n actor_kwargs = self._update_features_extractor(self.actor_kwargs, features_extractor)\n return Actor(**actor_kwargs).to(self.device)\n\n def make_critic(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> ContinuousCritic:\n critic_kwargs = self._update_features_extractor(self.critic_kwargs, features_extractor)\n return ContinuousCritic(**critic_kwargs).to(self.device)\n\n def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:\n return self._predict(obs, deterministic=deterministic)\n\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n return self.actor(observation, deterministic)\n\n def set_training_mode(self, mode: bool) -> None:\n \"\"\"\n Put the policy in either training or evaluation mode.\n\n This affects certain modules, such as batch normalisation and dropout.\n\n :param mode: if true, set to training mode, else set to evaluation mode\n \"\"\"\n self.actor.set_training_mode(mode)\n self.critic.set_training_mode(mode)\n self.training = mode\n\n\nMlpPolicy = SACPolicy\n\n\nclass IPTSACPolicy(SACPolicy):\n \"\"\"\n Policy Class for Interactive Policy Transfer (IPT) version of SAC.\n\n Most Parameters are passed through to the SAC policy class.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. 
If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether to share or not the features extractor\n between the actor and the critic (this saves computation time)\n :param teacher_policy: The policy, which is used to interactively guide the training process.\n :param ipt_weight_schedule: The schedule for the weight of the teacher policy.\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n lr_schedule: Schedule,\n teacher_policy: BaseModel = None, \n ipt_weight_schedule: Schedule = None, \n **kwargs\n ):\n super().__init__(observation_space, action_space, lr_schedule, **kwargs)\n self.teacher_policy = teacher_policy\n self.ipt_weight_schedule = ipt_weight_schedule\n if ipt_weight_schedule is not None:\n self.ipt_weight = ipt_weight_schedule(1)\n else:\n self.ipt_weight = 0.0\n \n def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:\n return self._predict(obs, deterministic=deterministic)\n\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n if self.ipt_weight == 0 or deterministic:\n return self.actor(observation, deterministic)\n \n mean_actions, log_std, kwargs = self.actor.get_action_dist_params(observation)\n \n # Note: the action is squashed\n actor_noise = self.actor.action_dist.actions_from_params(\n th.zeros_like(mean_actions),\n log_std,\n deterministic=deterministic,\n **kwargs\n )\n\n teacher_action = self.teacher_policy.forward(observation)\n \n action = (self.ipt_weight * teacher_action + (1.0 - self.ipt_weight) * mean_actions) + actor_noise\n\n return action\n\n def update_schedules(self, current_progress_remaining):\n if self.ipt_weight_schedule is not None:\n self.ipt_weight = self.ipt_weight_schedule(current_progress_remaining)\n\nclass CnnPolicy(SACPolicy):\n \"\"\"\n Policy class (with both actor and critic) for SAC.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. 
If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param features_extractor_class: Features extractor to use.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether to share or not the features extractor\n between the actor and the critic (this saves computation time)\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n use_sde: bool = False,\n log_std_init: float = -3,\n sde_net_arch: Optional[List[int]] = None,\n use_expln: bool = False,\n clip_mean: float = 2.0,\n features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n n_critics: int = 2,\n share_features_extractor: bool = True,\n ):\n super(CnnPolicy, self).__init__(\n observation_space,\n action_space,\n lr_schedule,\n net_arch,\n activation_fn,\n use_sde,\n log_std_init,\n sde_net_arch,\n use_expln,\n clip_mean,\n features_extractor_class,\n features_extractor_kwargs,\n normalize_images,\n optimizer_class,\n optimizer_kwargs,\n n_critics,\n share_features_extractor,\n )\n\n\nclass MultiInputPolicy(SACPolicy):\n \"\"\"\n Policy class (with both actor and critic) for SAC.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param sde_net_arch: Network architecture for extracting features\n when using gSDE. If None, the latent features from the policy will be used.\n Pass an empty list to use the states as features.\n :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. 
In practice, ``exp()`` is usually enough.\n :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.\n :param features_extractor_class: Features extractor to use.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n :param n_critics: Number of critic networks to create.\n :param share_features_extractor: Whether to share or not the features extractor\n between the actor and the critic (this saves computation time)\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.spaces.Space,\n action_space: gym.spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n use_sde: bool = False,\n log_std_init: float = -3,\n sde_net_arch: Optional[List[int]] = None,\n use_expln: bool = False,\n clip_mean: float = 2.0,\n features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n n_critics: int = 2,\n share_features_extractor: bool = True,\n ):\n super(MultiInputPolicy, self).__init__(\n observation_space,\n action_space,\n lr_schedule,\n net_arch,\n activation_fn,\n use_sde,\n log_std_init,\n sde_net_arch,\n use_expln,\n clip_mean,\n features_extractor_class,\n features_extractor_kwargs,\n normalize_images,\n optimizer_class,\n optimizer_kwargs,\n n_critics,\n share_features_extractor,\n )\n\n\nregister_policy(\"MlpPolicy\", MlpPolicy)\nregister_policy(\"IptMlpPolicy\", IPTSACPolicy)\nregister_policy(\"CnnPolicy\", CnnPolicy)\nregister_policy(\"MultiInputPolicy\", MultiInputPolicy)\n"
] |
[
[
"torch.nn.Sequential",
"torch.zeros_like",
"torch.nn.Linear",
"torch.clamp",
"torch.nn.Hardtanh"
]
] |
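A stripped-down sketch of the log-standard-deviation clamping performed in Actor.get_action_dist_params() above, with a plain tanh squash standing in for SquashedDiagGaussianDistribution. The feature size, batch size, and the two linear heads are toy stand-ins, not the policy's real architecture.

# Clamp log_std to [LOG_STD_MIN, LOG_STD_MAX], sample, then squash into [-1, 1].
import torch
import torch.nn as nn

LOG_STD_MIN, LOG_STD_MAX = -20, 2
latent_dim, action_dim = 16, 2
mu_head = nn.Linear(latent_dim, action_dim)        # stand-in for self.mu
log_std_head = nn.Linear(latent_dim, action_dim)   # stand-in for self.log_std

latent_pi = torch.randn(4, latent_dim)             # batch of latent policy features
mean_actions = mu_head(latent_pi)
log_std = torch.clamp(log_std_head(latent_pi), LOG_STD_MIN, LOG_STD_MAX)
actions = torch.tanh(mean_actions + log_std.exp() * torch.randn_like(mean_actions))
print(actions.min().item() >= -1.0, actions.max().item() <= 1.0)   # True True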
ssahn3087/pedestrian_detection
|
[
"d9a6cb9d10246941cff8575c803ab60b3a9d7d04"
] |
[
"train.py"
] |
[
"import os\nimport torch\nimport numpy as np\nimport math\nfrom torch.autograd import Variable\nfrom datetime import datetime\nfrom faster_rcnn import network\nfrom faster_rcnn.network import init_data, data_to_variable\nfrom faster_rcnn.network import train_net_params, print_weight_grad\nfrom faster_rcnn.faster_rcnn_vgg import FasterRCNN as FasterRCNN_VGG\nfrom faster_rcnn.faster_rcnn_res import FasterRCNN as FasterRCNN_RES\nfrom faster_rcnn.utils.timer import Timer\nfrom val import test, id_match_test\nfrom faster_rcnn.roi_data_layer.sampler import sampler\nfrom faster_rcnn.roi_data_layer.roidb import extract_roidb\nfrom faster_rcnn.roi_data_layer.roibatchLoader import roibatchLoader\nfrom faster_rcnn.fast_rcnn.config import cfg, cfg_from_file\n\ntry:\n from termcolor import cprint\nexcept ImportError:\n cprint = None\n\ntry:\n from pycrayon import CrayonClient\nexcept ImportError:\n CrayonClient = None\n\n\ndef log_print(text, color='blue', on_color=None, attrs=None):\n if cprint is not None:\n cprint(text, color=color, on_color=on_color, attrs=attrs)\n else:\n print(text)\n\n\n# hyper-parameters\n# ------------\nimdb_name = 'voc_2007_trainval'\ntest_name = 'voc_2007_test'\n# imdb_name = 'coco_2017_train'\n# test_name = 'coco_2017_val'\n# imdb_name = 'CaltechPedestrians_train'\n# test_name = 'CaltechPedestrians_test'\n\n\ncfg_file = 'experiments/cfgs/faster_rcnn_end2end.yml'\nmodel_dir = 'data/pretrained_model/'\noutput_dir = 'models/saved_model3'\npre_model_name = 'voc_2007_trainval_14_vgg16_0.7_b1.h5'\npretrained_model = model_dir + pre_model_name\n\n\nstart_epoch = 1\nend_epoch = 10\nlr_decay_step = 5\nlr_decay = 0.1\nrand_seed = 1024\n\n\n_DEBUG = True\nuse_tensorboard = True\nremove_all_log = True # remove all historical experiments in TensorBoard\nexp_name = None # the previous experiment name in TensorBoard\n\n# ------------\n\nif rand_seed is not None:\n np.random.seed(rand_seed)\n\n# load config\ncfg_from_file(cfg_file)\nfg_thresh = cfg.TRAIN.RPN_POSITIVE_OVERLAP\nis_resnet = cfg.RESNET.IS_TRUE\nbatch_size = cfg.TRAIN.IMS_PER_BATCH\nlr = cfg.TRAIN.LEARNING_RATE\nmomentum = cfg.TRAIN.MOMENTUM\n\ndisp_interval = cfg.TRAIN.DISPLAY\nlog_interval = cfg.TRAIN.LOG_IMAGE_ITERS\nsave_interval = cfg.TRAIN.SNAPSHOT_ITERS\n# load data\nimdb, roidb, ratio_list, ratio_index = extract_roidb(imdb_name)\ntest_imdb, test_roidb, _, _ = extract_roidb(test_name)\ntrain_size = len(roidb)\nsampler_batch = sampler(train_size, batch_size, cfg.TRIPLET.IS_TRUE)\ndataset = roibatchLoader(imdb, roidb, ratio_list, ratio_index, batch_size,\n imdb.num_classes, training=True)\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n sampler=sampler_batch, num_workers=0)\n\n\n# load net\nif is_resnet:\n model_name = cfg.RESNET.MODEL\n cfg.TRAIN.DOUBLE_BIAS = False\n cfg.TRAIN.WEIGHT_DECAY = 0.0001\n net = FasterRCNN_RES(classes=imdb.classes, debug=_DEBUG)\n net.init_module()\nelse:\n model_name = 'vgg16'\n net = FasterRCNN_VGG(classes=imdb.classes, debug=_DEBUG)\n net.init_module()\nif cfg.TRIPLET.IS_TRUE:\n model_name += '_' + cfg.TRIPLET.LOSS\n# network.load_net(pretrained_model, net)\n# person_key = 15 (pascal_voc) user_defined_coco_set = 1\n#network.load_net_pedestrians(pretrained_model, net, person_key=15)\n\nblob = init_data(is_cuda=True)\n\n# set net to be prepared to train\nnet.cuda()\nparams = train_net_params(net, cfg, lr)\noptimizer = torch.optim.SGD(params, momentum=momentum)\n\n\ndef make_dir(output_dir):\n if not os.path.exists(output_dir):\n 
os.makedirs(output_dir)\nmake_dir(output_dir)\n\n# tensorboad\nuse_tensorboard = use_tensorboard and CrayonClient is not None\nif use_tensorboard:\n print('TENSORBOARD IS ON')\n cc = CrayonClient(hostname='127.0.0.1')\n\n if remove_all_log:\n cc.remove_all_experiments()\n if exp_name is None:\n name = '{}_{}'.format(imdb_name, model_name)\n exp_name = datetime.now().strftime(name+'_%m-%d_%H-%M')\n exp = cc.create_experiment(exp_name)\n else:\n exp = cc.open_experiment(exp_name)\n\niters_per_epoch = int(train_size / batch_size)\n# training\ntrain_loss = 0\nprevious_precision = 0.\ndescend = 0\nstep_cnt = 0\ncnt = 0\nre_cnt = False\n\nt = Timer()\nt.tic()\nfrom math import isnan\nfor epoch in range(start_epoch, end_epoch+1):\n pf, tot = 0., 0\n tp, fp, tn, fg, bg, tp_box, fg_box = 0., 0., 0., 0., 0., 0., 0.\n rpn_cls, rpn_box, rcnn_cls, rcnn_box, sim_loss = 0., 0., 0., 0., 0.\n net.train()\n if epoch > 1 and (epoch-1) % lr_decay_step == 0:\n lr *= lr_decay\n params = train_net_params(net, cfg, lr)\n optimizer = torch.optim.SGD(params, momentum=momentum)\n\n data_iter = iter(dataloader)\n for step in range(iters_per_epoch):\n\n # get one batch\n data = next(data_iter)\n (im_data, im_info, gt_boxes, num_boxes) = data_to_variable(blob, data)\n # forward\n net.zero_grad()\n net(im_data, im_info, gt_boxes, num_boxes)\n if _DEBUG:\n tp += float(net.tp)\n tn += float(net.tn)\n fp += float(net.fp)\n fg += net.fg_cnt\n bg += net.bg_cnt\n tp_box += float(net.rpn.tp)\n fg_box += net.rpn.fg_box\n rpn_box += net.rpn.cross_entropy.data.cpu().numpy()[0]\n rpn_cls += net.rpn.loss_box.data.cpu().numpy()[0]\n rcnn_box += net.loss_box.data.cpu().numpy()[0]\n rcnn_cls += net.cross_entropy.data.cpu().numpy()[0]\n sim_loss += net.triplet_loss.data.cpu().numpy()[0] if cfg.TRIPLET.IS_TRUE else 0.\n loss = net.rpn.loss + net.loss\n if isnan(loss):\n print(gt_boxes)\n print(net.rpn.loss, net.loss)\n train_loss += loss.data[0]\n step_cnt += 1\n cnt += 1\n\n # backward\n optimizer.zero_grad() # clear grad\n loss.backward()\n network.clip_gradient(net, 10.)\n # print_weight_grad(net)\n optimizer.step()\n\n if step % disp_interval == 0 and step > 0:\n duration = t.toc(average=False)\n fps = step_cnt / duration\n\n log_text = 'step %d, loss: %.4f, fps: %.2f (%.2fs per batch) --[epoch %2d] --[iter %4d/%4d]' % (\n step, train_loss / step_cnt, fps, 1./fps, epoch, step, iters_per_epoch)\n log_print(log_text, color='green', attrs=['bold'])\n\n if _DEBUG:\n if fg == 0 or bg == 0:\n pass\n else:\n tot += 1\n pf += tp/fg*100\n match_rate = net.match/net.set * 100. if cfg.TRIPLET.IS_TRUE else 0.\n log_print('\\tEP: %.2f%% PR: %.2f%% TP: %.2f%%, TF: %.2f%%, fg/bg=(%d/%d), TD: %.2f%%' %\n (tp_box/fg_box*100, tp/(tp+fp)*100, tp/fg*100., tn/bg*100., fg/step_cnt, bg/step_cnt, match_rate))\n log_print('\\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box: %.4f, sim_loss: %.4f' % (\n rpn_cls/step_cnt, rpn_box/step_cnt, rcnn_cls/step_cnt, rcnn_box/step_cnt, sim_loss/step_cnt )\n )\n re_cnt = True\n if use_tensorboard and cnt % log_interval == 0 and cnt > 0:\n exp.add_scalar_value('train_loss', train_loss / step_cnt, step=cnt)\n exp.add_scalar_value('learning_rate', lr, step=cnt)\n if _DEBUG:\n\n match_rate = net.match / net.set * 100. 
if cfg.TRIPLET.IS_TRUE else 0.\n triplet_loss = net.triplet_loss.data.cpu().numpy() if cfg.TRIPLET.IS_TRUE else 0.\n exp.add_scalar_value('true_positive', tp/fg*100., step=cnt)\n exp.add_scalar_value('true_negative', tn/bg*100., step=cnt)\n exp.add_scalar_value('precision', tp / (tp+fp) * 100., step=cnt)\n exp.add_scalar_value('true_distance', match_rate, step=cnt)\n losses = {'rpn_cls': float(rpn_cls/step_cnt),\n 'rpn_box': float(rpn_box/step_cnt),\n 'rcnn_cls': float(rcnn_cls/step_cnt),\n 'rcnn_box': float(rcnn_box/step_cnt),\n 'sim_loss': float(sim_loss/step_cnt)}\n exp.add_scalar_dict(losses, step=cnt)\n\n if re_cnt:\n train_loss = 0\n tp, fp, tn, fg, bg, tp_box, fg_box = 0., 0., 0., 0, 0, 0., 0\n rpn_cls, rpn_box, rcnn_cls, rcnn_box, sim_loss = 0., 0., 0., 0., 0.\n net.reset_match_count()\n step_cnt = 0\n t.tic()\n re_cnt = False\n\n # if epoch % save_interval == 0 and cnt > 0:\n save_dir = os.path.join(output_dir, model_name)\n make_dir(save_dir)\n save_name = os.path.join(save_dir, '{}_{}_{}_{}_b{}.h5'\n .format(imdb_name, epoch, model_name, fg_thresh, batch_size))\n network.save_net(save_name, net)\n print('save model: {}'.format(save_name))\n if pf/tot > 80:\n print('Entering Test Phase ...')\n f = open('PrecisionAndRecall.txt', 'a')\n prec, rec = test(save_name, net, test_imdb, test_roidb)\n match = id_match_test(save_name, net, test_imdb, test_roidb, cfg.TRIPLET.LOSS) if cfg.TRIPLET.IS_TRUE else 0.\n f.write(save_name + ' ----[prec: {:.2f}%, rec: {:.2f}%] / {:.2f}%\\n'.format(prec, rec, match))\n f.close()\n if previous_precision == 0.:\n previous_precision = prec\n else:\n if previous_precision > prec:\n print('Precision decreased {:.2f}% -> {:.2f}% ...' \\\n .format(previous_precision, prec))\n import warnings\n warnings.warn('test set Precision decreased. Keep Watching')\n else:\n previous_precision = prec\n"
] |
[
[
"torch.utils.data.DataLoader",
"numpy.random.seed",
"torch.optim.SGD"
]
] |
cameronliang/BayesVP
|
[
"3a38e6fc8b85f96f402289fde74f996971edec93"
] |
[
"bayesvp/tests/test_likelihood.py"
] |
[
"import unittest\nimport os\nimport sys\nimport numpy as np\n\nfrom bayesvp.config import DefineParams\nfrom bayesvp.likelihood import Posterior\nfrom bayesvp.utilities import get_bayesvp_Dir\n\n###############################################################################\n# TEST CASE 1: OVI line with stock config file and spectrum\n###############################################################################\n\nclass TCPosterior(unittest.TestCase):\n\n def setUp(self):\n\n # read example config file\n code_path = get_bayesvp_Dir()\n self.config_ex = code_path + '/data/example/config_OVI.dat'\n self.config_params = DefineParams(self.config_ex)\n self.posterior = Posterior(self.config_params)\n\n def tearDown(self):\n try:\n import shutil\n shutil.rmtree(self.config_params.output_path)\n except OSError as oserr:\n print(oserr)\n\n ###########################################################################\n # Basic Tests for likelihood, prior and posterior\n ###########################################################################\n\n def test_default_no_continuum(self):\n self.assertFalse(self.config_params.cont_normalize)\n\n def test_lnlike(self):\n vp_params = np.array([15,20,0]) # logN, b, z\n correct = -344.55470583729573\n self.assertEqual(self.posterior.lnlike(vp_params),correct)\n\n def test_prior(self):\n vp_params = np.array([15,20,0])\n correct = 0 \n self.assertEqual(self.posterior.lnprior(vp_params),correct)\n\n # Outside of prior (logN)\n vp_params = np.array([19,20,0])\n correct = -np.inf\n self.assertEqual(self.posterior.lnprior(vp_params),correct)\n\n # Outside of prior (b)\n vp_params = np.array([15,-10,0])\n correct = -np.inf\n self.assertEqual(self.posterior.lnprior(vp_params),correct)\n\n # Outside of prior (z)\n vp_params = np.array([10,20,-1])\n correct = -np.inf\n self.assertEqual(self.posterior.lnprior(vp_params),correct)\n\n\n def test_call(self):\n vp_params = np.array([15,20,0])\n correct = -344.55470583729573\n self.assertEqual(self.posterior.__call__(vp_params),correct)\n\n vp_params = np.array([10,20,-1])\n correct = -np.inf\n self.assertEqual(self.posterior.__call__(vp_params),correct)\n\n\n\nif __name__ == '__main__':\n unittest.main()"
] |
[
[
"numpy.array"
]
] |
Aravind-Suresh/CVJyo
|
[
"6cb324fb538a50939335fd28ee90e23fbb32f2c0"
] |
[
"cvjyo.py"
] |
[
"import cv2\nimport numpy as np\nimport sys\nimport math\n\ndef markPoints(pts, img):\n for pt in pts:\n cv2.circle(img, tuple((pt[0], pt[1])), 2, 0, -1)\n\ndef contourAreaComparator(cnt1, cnt2):\n\tif cv2.contourArea(cnt1) > cv2.contourArea(cnt2):\n\t\treturn 1\n\telse:\n\t\treturn -1\n\ndef orderClockwise(ptsO, pt):\n\tpts = ptsO - np.asarray(pt)\n\tpts = np.array(pts, dtype=np.float32)\n\tslopes = []\n\tfor p in pts:\n\t\tif p[0] > 0:\n\t\t\tslopes.append(math.atan(p[1]/p[0]))\n\t\telse:\n\t\t\tslopes.append(math.pi + math.atan(p[1]/p[0]))\n\tptsSorted = [y for x, y in sorted(zip(list(slopes), list(np.arange(len(ptsO)))))]\n\tptsSorted = ptsO[ptsSorted]\n\treturn ptsSorted\n\nimg = cv2.imread(sys.argv[1], 0)\nimg = cv2.GaussianBlur(img, (5, 5), 0)\nheight,width = img.shape\n\n_,otsu = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\ncv2.imshow(\"img\", otsu); cv2.waitKey(0);\n\nimgAnd = cv2.bitwise_and(img, otsu)\ncv2.imshow(\"img\", imgAnd); cv2.waitKey(0);\n\n_, contours, hierarchy = cv2.findContours(otsu, 1, 2)\n\narea = []\nfor cnt in contours:\n area.append(cv2.contourArea(cnt))\n\narea = np.array(area)\nidx = np.max(area)\nidx = np.where(area==idx)[0][0]\ncnt = contours[idx]\n\nhull = cv2.convexHull(cnt, returnPoints = False)\ndefects = cv2.convexityDefects(cnt, hull)\n\nfor d in defects:\n s, e, f, appr = d[0]\n cv2.circle(imgAnd, tuple(cnt[f][0]), 2, 255, -1)\n\ndt = cv2.distanceTransform(otsu, cv2.DIST_L2, 3)\ncv2.normalize(dt, dt, 0.0, 1.0, cv2.NORM_MINMAX);\ncv2.imshow(\"img\", dt);cv2.waitKey(0)\n\nidx = np.where(dt==np.max(dt))\npt = (idx[1][0], idx[0][0])\n\ndefPts = cnt[defects[:, 0, 2]]\ndefPts = defPts.reshape(-1,2)\n\nthrDistTop = int(0.4*height)\nthrDistLeft = int(0.2*width)\ndefPts = defPts[np.where(defPts[:, 1] > thrDistTop)[0]]\ndefPts = defPts[np.where(defPts[:, 0] > thrDistLeft)[0]]\n\n#markPoints(defPts, img)\n#cv2.imshow(\"img\", img); cv2.waitKey(0)\n\ndefPtsC = defPts.copy()\ndefPts = orderClockwise(defPtsC, pt)\n\n# ii = 0\n# for p in defPts:\n# \tcv2.putText(img, str(ii), (p[0], p[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, 255)\n# \tii = ii + 1\n\n# cv2.imshow(\"img\", img); cv2.waitKey(0)\n\nboundImg = np.zeros((height,width), np.uint8)\ncv2.fillPoly(boundImg, [defPts], 255)\nimgRoi = cv2.bitwise_and(img, boundImg)\nimgRoi = cv2.adaptiveThreshold(imgRoi, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\nkernel = np.ones((5,5),np.uint8)\nboundImg = cv2.erode(boundImg,kernel,iterations = 1)\nimgRoi = cv2.bitwise_and(imgRoi, boundImg)\ncv2.imshow(\"img\", imgRoi); cv2.waitKey(0)\n\nimgRoiC = imgRoi.copy()\n_, contours, hierarchy = cv2.findContours(imgRoiC, 1, 2)\n\ncontours.sort(contourAreaComparator)\nl = len(contours)\nll = np.arange(l-6, l-1)\n\nimgColor = cv2.imread(sys.argv[1])\n\nfor idx in ll:\n\tcv2.drawContours(imgRoi, contours, idx, 127, 3)\n\tcv2.drawContours(imgColor, contours, idx, (0, 0, 255), 3)\n\ncv2.imshow(\"img\", imgColor); cv2.waitKey(0)"
] |
[
[
"numpy.asarray",
"numpy.arange",
"numpy.ones",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] |
JuanCRCano/AmericanOpt_Methods
|
[
"38a4de4da20337e629ab47edf2d2e7e134586264"
] |
[
"options/valuation.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport math as mt\nfrom sklearn.linear_model import LinearRegression\n\ndef Binomial_Tree(Spot, Strike, Vencimiento, Volatilidad, TLibre_Riesgo, Call_Put, Tasa_Foranea=0, Tasa_Dividendo=0,\n Ramificaciones_Arbol=100, Modelo=\"Cox Equity\"):\n if Modelo == \"Cox Equity\":\n ConfigModelo = TLibre_Riesgo - Tasa_Dividendo\n if Modelo == \"Cox Futuros\":\n ConfigModelo = 0\n if Modelo == \"Cox Divisas\":\n ConfigModelo = TLibre_Riesgo - Tasa_Foranea\n\n Arbol_Subyacente = np.zeros((Ramificaciones_Arbol + 1, Ramificaciones_Arbol + 1))\n Arbol_Derivado = np.zeros((Ramificaciones_Arbol + 1, Ramificaciones_Arbol + 1))\n\n Vencimiento = Vencimiento / 365.0\n Steps = Vencimiento / Ramificaciones_Arbol\n Up = mt.exp(Volatilidad * mt.sqrt(Steps))\n Down = mt.exp(-Volatilidad * mt.sqrt(Steps))\n P = (mt.exp(ConfigModelo * Steps) - Down) / (Up - Down)\n\n # Obtener las ultimas ramas del arbol binomial del precio del subyacente\n Arbol_Subyacente[0, 0] = Spot\n\n for i in range(1, Ramificaciones_Arbol + 1):\n Arbol_Subyacente[i, 0] = Arbol_Subyacente[i - 1, 0] * Up\n for j in range(1, i + 1):\n Arbol_Subyacente[i, j] = Arbol_Subyacente[i - 1, j - 1] * Down\n\n for j in range(Ramificaciones_Arbol + 1):\n Arbol_Derivado[Ramificaciones_Arbol, j] = max(0,\n Call_Put * (Arbol_Subyacente[Ramificaciones_Arbol, j] - Strike))\n\n for m in range(Ramificaciones_Arbol + 1):\n i = Ramificaciones_Arbol - m - 1\n for j in range(i + 1):\n Arbol_Derivado[i, j] = max(Call_Put * (Arbol_Subyacente[i, j] - Strike),\n (P * Arbol_Derivado[i + 1, j] + (1 - P) * Arbol_Derivado[i + 1, j + 1]) * mt.exp(\n -TLibre_Riesgo * Steps))\n\n # return pd.concat([pd.DataFrame(Arbol_Subyacente).replace(0,\"\"),pd.DataFrame(Arbol_Derivado).replace(0,\"\")])\n return Arbol_Derivado[0, 0]\n\n\ndef Trinomial_Tree(Spot, Strike, Vencimiento, Volatilidad, TLibre_Riesgo, Call_Put, Tasa_Foranea=0, Tasa_Dividendo=0,\n Ramificaciones_Arbol=100, Modelo=\"Cox Equity\"):\n if Modelo == \"Cox Equity\":\n ConfigModelo = TLibre_Riesgo - Tasa_Dividendo\n if Modelo == \"Cox Futuros\":\n ConfigModelo = 0\n if Modelo == \"Cox Divisas\":\n ConfigModelo = TLibre_Riesgo - Tasa_Foranea\n\n Arbol_Subyacente = np.zeros((Ramificaciones_Arbol + 1, (2 * Ramificaciones_Arbol) + 1))\n Arbol_Derivado = np.zeros((Ramificaciones_Arbol + 1, (2 * Ramificaciones_Arbol) + 1))\n\n Vencimiento = Vencimiento / 365.0\n Steps = Vencimiento / Ramificaciones_Arbol\n Up = mt.exp(Volatilidad * mt.sqrt(2 * Steps))\n Down = mt.exp(-Volatilidad * mt.sqrt(2 * Steps))\n Pu = ((mt.exp(TLibre_Riesgo * Steps / 2) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2))) / (\n mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2)))) ** 2\n Pd = ((mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(TLibre_Riesgo * Steps / 2)) / (\n mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2)))) ** 2\n Pm = 1 - (Pu + Pd)\n\n # Obtener las ultimas ramas del arbol binomial del precio del subyacente\n Arbol_Subyacente[0, 0] = Spot\n\n for i in range(1, Ramificaciones_Arbol + 1):\n Arbol_Subyacente[i, 0] = Arbol_Subyacente[i - 1, 0] * Up\n for j in range(1, (2 * i)):\n Arbol_Subyacente[i, j] = Arbol_Subyacente[i - 1, j - 1]\n Arbol_Subyacente[i, j + 1] = Arbol_Subyacente[i - 1, j - 1] * Down\n\n for j in range((2 * Ramificaciones_Arbol) + 1):\n Arbol_Derivado[Ramificaciones_Arbol, j] = max(Call_Put * (Arbol_Subyacente[Ramificaciones_Arbol, j] - Strike),\n 0)\n\n for m in range(Ramificaciones_Arbol + 1):\n i = 
Ramificaciones_Arbol - m - 1\n for j in range((2 * i) + 1):\n Arbol_Derivado[i, j] = max(Call_Put * (Arbol_Subyacente[i, j] - Strike), (\n Pu * Arbol_Derivado[i + 1, j] + Pm * Arbol_Derivado[i + 1, j + 1] + Pd * Arbol_Derivado[\n i + 1, j + 2]) * mt.exp(-TLibre_Riesgo * Steps))\n\n # return pd.concat([pd.DataFrame(Arbol_Subyacente).replace(0,\"\"),pd.DataFrame(Arbol_Derivado).replace(0,\"\")])\n return Arbol_Derivado[0, 0]\n\ndef LSM(Spot,Strike,Vencimiento,Volatilidad,TLibre_Riesgo,Call_Put,NumSim=10,CambiosXDia=1):\n\n Deltat = 1/(Vencimiento*CambiosXDia) # Asumo N Cambios en el precio del subyacente por cada día\n Caminos_Subyacente = np.zeros((NumSim,(Vencimiento*CambiosXDia)+1))\n v = Volatilidad/mt.sqrt(365/Vencimiento) # Se ajusta v pues v es anualizada\n r = TLibre_Riesgo/(365/Vencimiento) # Se ajusta r pues r es anualizada\n\n for m in range(0,NumSim):\n Caminos_Subyacente[m,0] = Spot\n for t in range(1,(Vencimiento*CambiosXDia)+1):\n Caminos_Subyacente[m,t] = Caminos_Subyacente[m,t-1]*mt.exp((r - (v**2)/2)*Deltat + np.random.normal(0,1)*mt.sqrt((v**2)*Deltat))\n\n Caminos_Derivado = np.zeros((NumSim,(Vencimiento*CambiosXDia)+1))\n Caminos_Derivado[:,(Vencimiento*CambiosXDia)] = np.maximum((Caminos_Subyacente[:,(Vencimiento*CambiosXDia)] - Strike)*Call_Put,0)\n\n for t in range((Vencimiento*CambiosXDia)-1,-1,-1):\n Caminos_Derivado[:,t] = Caminos_Derivado[:,t+1]*mt.exp(-r*Deltat) # Valor de Continuidad Observado (HV)\n Caminos_EnEl_Dinero = ((Caminos_Subyacente[:,t]-Strike)*Call_Put>0)\n if Caminos_EnEl_Dinero.sum()>0:\n Tabla_Regresion = np.zeros((Caminos_EnEl_Dinero.sum(),4))\n Tabla_Regresion[:,0] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero] #np.vectorize(mt.exp)(-Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]/2)\n Tabla_Regresion[:,1] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]**2 #np.vectorize(mt.exp)(-Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]/2)*(1-Caminos_Subyacente[:,t][Caminos_EnEl_Dinero])\n Tabla_Regresion[:,2] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]**3 #np.vectorize(mt.exp)(-Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]/2)*(1-2*Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]+(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]**2)/2)\n Modelo = LinearRegression().fit(Tabla_Regresion[:,0:3],Caminos_Derivado[:,t][Caminos_EnEl_Dinero])\n #print(Modelo.score(Tabla_Regresion[:,0:3],Caminos_Derivado[:,t][Caminos_EnEl_Dinero]))\n Tabla_Regresion[:,3] = Modelo.intercept_ + Modelo.coef_[0]*Tabla_Regresion[:,0] + Modelo.coef_[1]*Tabla_Regresion[:,1] + Modelo.coef_[2]*Tabla_Regresion[:,2] # Valor de Continuidad Esperado\n # Your next line is: Si E[HV]<EV entonces EV, HV En otro caso (OV)\n Caminos_Derivado[np.where(Caminos_EnEl_Dinero==True),t] = np.where(Tabla_Regresion[:,3]<(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]-Strike)*Call_Put,(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]-Strike)*Call_Put,Caminos_Derivado[:,t][Caminos_EnEl_Dinero])\n #Caminos_Derivado[np.where((Caminos_EnEl_Dinero==True)&(Tabla_Regresion[:,3]<(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]-Strike)*Call_Put)),t+1] = 0\n\n #return pd.DataFrame(Caminos_Subyacente)\n return Caminos_Derivado[:,0].mean()"
] |
[
[
"numpy.maximum",
"numpy.random.normal",
"sklearn.linear_model.LinearRegression",
"numpy.zeros",
"numpy.where"
]
] |
othesoluciones/TFM
|
[
"8ed46985604c83c517612b38326b39a61b4cf102"
] |
[
"static/generaMapas/generaCalendarioPolinico.py"
] |
[
"#Conectamos a la base de datos\nimport base64\nimport json\nfrom pymongo import MongoClient as Connection\n\ncadenaCon= 'mongodb://othesoluciones:'+base64.b64decode(\"b3RoZXNvbHVjaW9uZXM=\")+'@ds029635.mlab.com:29635/othesoluciones1'\nMONGODB_URI =cadenaCon\nconexion = Connection(MONGODB_URI)\ndb = conexion.othesoluciones1\n\nimport pandas as pd\n\n#Calendario polinico (http://encuentralainspiracion.es/la-alergia-respiratoria/tipos-de-alergenos/alergia-al-polen/calendario-de-polinizacion/)\ncolumnas =['Mes','Nivel']\ndatos = [(1,0),(2,0),(3,1),(4,2),(5,2),(6,2),(7,1),(8,0),(9,0),(10,0),(11,0),(12,0)]\n\ndf=pd.DataFrame(datos,columns=columnas)\n\nrecordsdf = json.loads(df.T.to_json()).values()\ndb.calendarioPolen.insert_many(recordsdf)\nconexion.close()"
] |
[
[
"pandas.DataFrame"
]
] |
insoo223/openCVhowse
|
[
"d8885ab4f87a9d577fd660e60d41222dc2156332"
] |
[
"chapter07/detect_car_bow_svm_sliding_window.py"
] |
[
"import cv2\nimport numpy as np\nimport os\n\nfrom non_max_suppression import non_max_suppression_fast as nms\n\nif not os.path.isdir('CarData'):\n print('CarData folder not found. Please download and unzip '\n 'http://l2r.cs.uiuc.edu/~cogcomp/Data/Car/CarData.tar.gz '\n 'or https://github.com/gcr/arc-evaluator/raw/master/CarData.tar.gz '\n 'into the same folder as this script.')\n exit(1)\n\nBOW_NUM_TRAINING_SAMPLES_PER_CLASS = 10\nSVM_NUM_TRAINING_SAMPLES_PER_CLASS = 100\n\nSVM_SCORE_THRESHOLD = 1.8\nNMS_OVERLAP_THRESHOLD = 0.15\n\nsift = cv2.xfeatures2d.SIFT_create()\n\nFLANN_INDEX_KDTREE = 1\nindex_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\nsearch_params = {}\nflann = cv2.FlannBasedMatcher(index_params, search_params)\n\nbow_kmeans_trainer = cv2.BOWKMeansTrainer(12)\nbow_extractor = cv2.BOWImgDescriptorExtractor(sift, flann)\n\ndef get_pos_and_neg_paths(i):\n pos_path = 'CarData/TrainImages/pos-%d.pgm' % (i+1)\n neg_path = 'CarData/TrainImages/neg-%d.pgm' % (i+1)\n return pos_path, neg_path\n\ndef add_sample(path):\n img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n keypoints, descriptors = sift.detectAndCompute(img, None)\n if descriptors is not None:\n bow_kmeans_trainer.add(descriptors)\n\nfor i in range(BOW_NUM_TRAINING_SAMPLES_PER_CLASS):\n pos_path, neg_path = get_pos_and_neg_paths(i)\n add_sample(pos_path)\n add_sample(neg_path)\n\nvoc = bow_kmeans_trainer.cluster()\nbow_extractor.setVocabulary(voc)\n\ndef extract_bow_descriptors(img):\n features = sift.detect(img)\n return bow_extractor.compute(img, features)\n\ntraining_data = []\ntraining_labels = []\nfor i in range(SVM_NUM_TRAINING_SAMPLES_PER_CLASS):\n pos_path, neg_path = get_pos_and_neg_paths(i)\n pos_img = cv2.imread(pos_path, cv2.IMREAD_GRAYSCALE)\n pos_descriptors = extract_bow_descriptors(pos_img)\n if pos_descriptors is not None:\n training_data.extend(pos_descriptors)\n training_labels.append(1)\n neg_img = cv2.imread(neg_path, cv2.IMREAD_GRAYSCALE)\n neg_descriptors = extract_bow_descriptors(neg_img)\n if neg_descriptors is not None:\n training_data.extend(neg_descriptors)\n training_labels.append(-1)\n\nsvm = cv2.ml.SVM_create()\nsvm.setType(cv2.ml.SVM_C_SVC)\nsvm.setC(50)\nsvm.train(np.array(training_data), cv2.ml.ROW_SAMPLE,\n np.array(training_labels))\n\ndef pyramid(img, scale_factor=1.25, min_size=(200, 80),\n max_size=(600, 600)):\n h, w = img.shape\n min_w, min_h = min_size\n max_w, max_h = max_size\n while w >= min_w and h >= min_h:\n if w <= max_w and h <= max_h:\n yield img\n w /= scale_factor\n h /= scale_factor\n img = cv2.resize(img, (int(w), int(h)),\n interpolation=cv2.INTER_AREA)\n\ndef sliding_window(img, step=20, window_size=(100, 40)):\n img_h, img_w = img.shape\n window_w, window_h = window_size\n for y in range(0, img_w, step):\n for x in range(0, img_h, step):\n roi = img[y:y+window_h, x:x+window_w]\n roi_h, roi_w = roi.shape\n if roi_w == window_w and roi_h == window_h:\n yield (x, y, roi)\n\nfor test_img_path in ['CarData/TestImages/test-0.pgm',\n 'CarData/TestImages/test-1.pgm',\n '../images/car.jpg',\n '../images/haying.jpg',\n '../images/statue.jpg',\n '../images/woodcutters.jpg']:\n img = cv2.imread(test_img_path)\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n pos_rects = []\n for resized in pyramid(gray_img):\n for x, y, roi in sliding_window(resized):\n descriptors = extract_bow_descriptors(roi)\n if descriptors is None:\n continue\n prediction = svm.predict(descriptors)\n if prediction[1][0][0] == 1.0:\n raw_prediction = svm.predict(\n descriptors, 
flags=cv2.ml.STAT_MODEL_RAW_OUTPUT)\n score = -raw_prediction[1][0][0]\n if score > SVM_SCORE_THRESHOLD:\n h, w = roi.shape\n scale = gray_img.shape[0] / float(resized.shape[0])\n pos_rects.append([int(x * scale),\n int(y * scale),\n int((x+w) * scale),\n int((y+h) * scale),\n score])\n pos_rects = nms(np.array(pos_rects), NMS_OVERLAP_THRESHOLD)\n for x0, y0, x1, y1, score in pos_rects:\n cv2.rectangle(img, (int(x0), int(y0)), (int(x1), int(y1)),\n (0, 255, 255), 2)\n text = '%.2f' % score\n cv2.putText(img, text, (int(x0), int(y0) - 20),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)\n cv2.imshow(test_img_path, img)\ncv2.waitKey(0)\n"
] |
[
[
"numpy.array"
]
] |
prokhn/onti-2019-bigdata
|
[
"b9296141958f544177388be94072efce7bdc7814"
] |
[
"experiments_dikower/controllers/drlbox/net/q_net.py"
] |
[
"\nimport tensorflow as tf\nfrom drlbox.common.namescope import TF_NAMESCOPE\nfrom drlbox.net.net_base import RLNet\n\n\nclass QNet(RLNet):\n\n def set_model(self, model):\n self.model = model\n self.weights = model.weights\n self.ph_state, = model.inputs\n self.tf_values, = model.outputs\n\n def set_loss(self):\n with tf.name_scope(TF_NAMESCOPE):\n ph_action = tf.placeholder(tf.int32, [None])\n onehot_act = tf.one_hot(ph_action, depth=self.tf_values.shape[1])\n ph_target = tf.placeholder(tf.float32, [None])\n value_act = tf.reduce_sum(self.tf_values * onehot_act, axis=1)\n\n # loss\n self.tf_loss = tf.losses.huber_loss(ph_target, value_act,\n reduction=tf.losses.Reduction.NONE)\n\n # error for prioritization: abs td error\n self.tf_error = tf.abs(ph_target - value_act)\n\n # kfac loss list\n self.kfac_loss_list = [('normal_predictive', (self.tf_values,))]\n\n # placeholder list\n self.ph_train_list = [self.ph_state, ph_action, ph_target]\n\n def action_values(self, state):\n return self.sess.run(self.tf_values, feed_dict={self.ph_state: state})\n\n"
] |
[
[
"tensorflow.reduce_sum",
"tensorflow.losses.huber_loss",
"tensorflow.placeholder",
"tensorflow.name_scope",
"tensorflow.one_hot",
"tensorflow.abs"
]
] |
NeutralKaon/spec2nii
|
[
"52f0dc42ad176fdbb173ac051803372909e9971c"
] |
[
"spec2nii/nifti_orientation.py"
] |
[
"import numpy as np\nfrom scipy.spatial.transform import Rotation\n\n\nclass NIFTIOrient:\n def __init__(self, affine):\n self.Q44 = affine\n qb, qc, qd, qx, qy, qz, dx, dy, dz, qfac = nifti_mat44_to_quatern(affine)\n self.qb = qb\n self.qc = qc\n self.qd = qd\n self.qx = qx\n self.qy = qy\n self.qz = qz\n self.dx = dx\n self.dy = dy\n self.dz = dz\n self.qfac = qfac\n\n\ndef calc_affine(angles, dimensions, shift):\n\n scalingMat = np.diag(dimensions)\n rot = Rotation.from_euler('xyz', angles, degrees=True)\n m33 = rot.as_matrix() @ scalingMat\n m44 = np.zeros((4, 4))\n m44[0:3, 0:3] = m33\n m44[3, 3] = 1.0\n m44[0:3, 3] = shift\n\n return m44\n\n\ndef nifti_mat44_to_quatern(R):\n \"\"\"4x4 affine to quaternion representation.\"\"\"\n # offset outputs are read out of input matrix\n qx = R[0, 3]\n qy = R[1, 3]\n qz = R[2, 3]\n\n # load 3x3 matrix into local variables\n r11 = R[0, 0]\n r12 = R[0, 1]\n r13 = R[0, 2]\n r21 = R[1, 0]\n r22 = R[1, 1]\n r23 = R[1, 2]\n r31 = R[2, 0]\n r32 = R[2, 1]\n r33 = R[2, 2]\n\n # compute lengths of each column; these determine grid spacings\n xd = np.sqrt(r11 * r11 + r21 * r21 + r31 * r31)\n yd = np.sqrt(r12 * r12 + r22 * r22 + r32 * r32)\n zd = np.sqrt(r13 * r13 + r23 * r23 + r33 * r33)\n\n # if a column length is zero, patch the trouble\n if xd == 0.0:\n r11 = 1.0\n r21 = 0.0\n r31 = 0.0\n xd = 1.0\n if yd == 0.0:\n r22 = 1.0\n r12 = 0.0\n r32 = 0.0\n yd = 1.0\n if zd == 0.0:\n r33 = 1.0\n r13 = 0.0\n r23 = 0.0\n zd = 1.0\n\n # assign the output lengths\n dx = xd\n dy = yd\n dz = zd\n\n # normalize the columns\n r11 /= xd\n r21 /= xd\n r31 /= xd\n r12 /= yd\n r22 /= yd\n r32 /= yd\n r13 /= zd\n r23 /= zd\n r33 /= zd\n\n zd = r11 * r22 * r33\\\n - r11 * r32 * r23\\\n - r21 * r12 * r33\\\n + r21 * r32 * r13\\\n + r31 * r12 * r23\\\n - r31 * r22 * r13\n # zd should be -1 or 1\n\n if zd > 0: # proper\n qfac = 1.0\n else: # improper ==> flip 3rd column\n qfac = -1.0\n r13 *= -1.0\n r23 *= -1.0\n r33 *= -1.0\n\n # now, compute quaternion parameters\n a = r11 + r22 + r33 + 1.0\n if a > 0.5: # simplest case\n a = 0.5 * np.sqrt(a)\n b = 0.25 * (r32 - r23) / a\n c = 0.25 * (r13 - r31) / a\n d = 0.25 * (r21 - r12) / a\n else: # trickier case\n xd = 1.0 + r11 - (r22 + r33) # 4*b*b\n yd = 1.0 + r22 - (r11 + r33) # 4*c*c\n zd = 1.0 + r33 - (r11 + r22) # 4*d*d\n if xd > 1.0:\n b = 0.5 * np.sqrt(xd)\n c = 0.25 * (r12 + r21) / b\n d = 0.25 * (r13 + r31) / b\n a = 0.25 * (r32 - r23) / b\n elif yd > 1.0:\n c = 0.5 * np.sqrt(yd)\n b = 0.25 * (r12 + r21) / c\n d = 0.25 * (r23 + r32) / c\n a = 0.25 * (r13 - r31) / c\n else:\n d = 0.5 * np.sqrt(zd)\n b = 0.25 * (r13 + r31) / d\n c = 0.25 * (r23 + r32) / d\n a = 0.25 * (r21 - r12) / d\n\n if a < 0.0:\n b = -b\n c = -c\n d = -d\n\n qb = b\n qc = c\n qd = d\n return qb, qc, qd, qx, qy, qz, dx, dy, dz, qfac\n"
] |
[
[
"numpy.diag",
"scipy.spatial.transform.Rotation.from_euler",
"numpy.zeros",
"numpy.sqrt"
]
] |
DavidBraun777/TensorNetwork
|
[
"55942a12a859a8c6f8be473e623dbf0ddfd790b5"
] |
[
"tensornetwork/backends/backend_test.py"
] |
[
"\"\"\"Tests for graphmode_tensornetwork.\"\"\"\nimport builtins\nimport sys\nimport pytest\nimport numpy as np\n\n\ndef clean_tensornetwork_modules():\n for mod in list(sys.modules.keys()):\n if mod.startswith('tensornetwork'):\n sys.modules.pop(mod, None)\n\n\[email protected](autouse=True)\ndef clean_backend_import():\n #never do this outside testing\n clean_tensornetwork_modules()\n yield # use as teardown\n clean_tensornetwork_modules()\n\n\[email protected]\ndef no_backend_dependency(monkeypatch):\n import_orig = builtins.__import__\n\n # pylint: disable=redefined-builtin\n def mocked_import(name, globals, locals, fromlist, level):\n if name in ['torch', 'tensorflow', 'jax']:\n raise ImportError()\n return import_orig(name, globals, locals, fromlist, level)\n\n monkeypatch.setattr(builtins, '__import__', mocked_import)\n\n\[email protected]('no_backend_dependency')\ndef test_backend_pytorch_missing_cannot_initialize_backend():\n with pytest.raises(ImportError):\n # pylint: disable=import-outside-toplevel\n from tensornetwork.backends.pytorch.pytorch_backend import PyTorchBackend\n PyTorchBackend()\n\n\[email protected]('no_backend_dependency')\ndef test_backend_tensorflow_missing_cannot_initialize_backend():\n with pytest.raises(ImportError):\n # pylint: disable=import-outside-toplevel\n from tensornetwork.backends.tensorflow.tensorflow_backend \\\n import TensorFlowBackend\n TensorFlowBackend()\n\n\[email protected]('no_backend_dependency')\ndef test_backend_jax_missing_cannot_initialize_backend():\n with pytest.raises(ImportError):\n # pylint: disable=import-outside-toplevel\n from tensornetwork.backends.jax.jax_backend import JaxBackend\n JaxBackend()\n\n\[email protected]('no_backend_dependency')\ndef test_config_backend_missing_can_import_config():\n #not sure why config is imported here?\n #pylint: disable=import-outside-toplevel\n #pylint: disable=unused-variable\n import tensornetwork.config\n with pytest.raises(ImportError):\n #pylint: disable=import-outside-toplevel\n #pylint: disable=unused-variable\n import torch\n with pytest.raises(ImportError):\n #pylint: disable=import-outside-toplevel\n #pylint: disable=unused-variable\n import tensorflow as tf\n with pytest.raises(ImportError):\n #pylint: disable=import-outside-toplevel\n #pylint: disable=unused-variable\n import jax\n\n\[email protected]('no_backend_dependency')\ndef test_import_tensornetwork_without_backends():\n #pylint: disable=import-outside-toplevel\n #pylint: disable=unused-variable\n import tensornetwork\n #pylint: disable=import-outside-toplevel\n import tensornetwork.backends.pytorch.pytorch_backend\n #pylint: disable=import-outside-toplevel\n import tensornetwork.backends.tensorflow.tensorflow_backend\n #pylint: disable=import-outside-toplevel\n import tensornetwork.backends.jax.jax_backend\n #pylint: disable=import-outside-toplevel\n import tensornetwork.backends.numpy.numpy_backend\n with pytest.raises(ImportError):\n #pylint: disable=import-outside-toplevel\n #pylint: disable=unused-variable\n import torch\n with pytest.raises(ImportError):\n #pylint: disable=unused-variable\n #pylint: disable=import-outside-toplevel\n import tensorflow as tf\n with pytest.raises(ImportError):\n #pylint: disable=unused-variable\n #pylint: disable=import-outside-toplevel\n import jax\n\n\[email protected]('no_backend_dependency')\ndef test_basic_numpy_network_without_backends():\n #pylint: disable=import-outside-toplevel\n import tensornetwork\n net = tensornetwork.TensorNetwork(backend=\"numpy\")\n a = 
net.add_node(np.ones((10,)))\n b = net.add_node(np.ones((10,)))\n edge = net.connect(a[0], b[0])\n final_node = net.contract(edge)\n assert final_node.tensor == np.array(10.)\n with pytest.raises(ImportError):\n #pylint: disable=unused-variable\n #pylint: disable=import-outside-toplevel\n import torch\n with pytest.raises(ImportError):\n #pylint: disable=unused-variable\n #pylint: disable=import-outside-toplevel\n import tensorflow as tf\n with pytest.raises(ImportError):\n #pylint: disable=unused-variable\n #pylint: disable=import-outside-toplevel\n import jax\n\n\[email protected]('no_backend_dependency')\ndef test_basic_network_without_backends_raises_error():\n #pylint: disable=import-outside-toplevel\n import tensornetwork\n with pytest.raises(ImportError):\n tensornetwork.TensorNetwork(backend=\"jax\")\n with pytest.raises(ImportError):\n tensornetwork.TensorNetwork(backend=\"tensorflow\")\n with pytest.raises(ImportError):\n tensornetwork.TensorNetwork(backend=\"pytorch\")\n"
] |
[
[
"numpy.array",
"numpy.ones"
]
] |
jvc2688/cpm
|
[
"409e9ada39fc6238a63a75fb8474a3af70410347"
] |
[
"cpm/code/leastSquareSolver.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function\n\n__all__ = [\"linear_least_squares\"]\n\nimport numpy as np\nfrom scipy import linalg\n\n\ndef linear_least_squares(A, y, yvar=None, l2=None):\n \"\"\"\n Solve a linear system as fast as possible.\n \n :param A: ``(ndata, nbasis)``\n The basis matrix.\n \n :param y: ``(ndata)``\n The observations.\n \n :param yvar:\n The observational variance of the points ``y``.\n \n :param l2:\n The L2 regularization strength. Can be a scalar or a vector (of length\n ``A.shape[1]``).\n \n \"\"\"\n # Incorporate the observational uncertainties.\n if yvar is not None:\n CiA = A / yvar[:, None]\n Ciy = y / yvar[:, None]\n else:\n CiA = A\n Ciy = y\n \n # Compute the pre-factor.\n AT = A.T\n ATA = np.dot(AT, CiA)\n \n # Incorporate any L2 regularization.\n if l2 is not None:\n if np.isscalar(l2):\n l2 = l2 + np.zeros(A.shape[1])\n ATA[np.diag_indices_from(ATA)] += l2\n \n # Solve the equations overwriting the temporary arrays for speed.\n factor = linalg.cho_factor(ATA, overwrite_a=True)\n return linalg.cho_solve(factor, np.dot(AT, Ciy), overwrite_b=True)"
] |
[
[
"numpy.dot",
"scipy.linalg.cho_factor",
"numpy.diag_indices_from",
"numpy.isscalar",
"numpy.zeros"
]
] |
Jimmy2027/torchio
|
[
"98e5f4f379e877fa20c49f93645a3d0e0834f650"
] |
[
"torchio/data/inference/aggregator.py"
] |
[
"from typing import Tuple\nimport torch\nimport numpy as np\nfrom ...utils import to_tuple\nfrom ...torchio import TypeData, TypeTuple\nfrom ..subject import Subject\n\n\nclass GridAggregator:\n r\"\"\"Aggregate patches for dense inference.\n\n This class is typically used to build a volume made of batches after\n inference of patches extracted by a :py:class:`~torchio.data.GridSampler`.\n\n Args:\n sample: Instance of:py:class:`~torchio.data.subject.Subject`\n from which patches will be extracted (probably using a\n :py:class:`~torchio.data.GridSampler`).\n patch_overlap: Tuple of integers :math:`(d_o, h_o, w_o)` specifying the\n overlap between patches. If a single number\n :math:`n` is provided, :math:`d_o = h_o = w_o = n`.\n out_channels: Number of channels in the output tensor.\n\n .. note:: Adapted from NiftyNet. See `this NiftyNet tutorial\n <https://niftynet.readthedocs.io/en/dev/window_sizes.html>`_ for more\n information.\n \"\"\"\n def __init__(\n self,\n sample: Subject,\n patch_overlap: TypeTuple,\n out_channels: int = 1,\n ):\n self._output_tensor = torch.zeros(out_channels, *sample.shape)\n self.patch_overlap = to_tuple(patch_overlap, length=3)\n\n @staticmethod\n def _crop_batch(\n patches: torch.Tensor,\n location: np.ndarray,\n border: Tuple[int, int, int],\n ) -> Tuple[TypeData, np.ndarray]:\n location = location.astype(np.int)\n batch_shape = patches.shape\n spatial_shape = batch_shape[2:] # ignore batch and channels dim\n num_dimensions = 3\n for idx in range(num_dimensions):\n location[:, idx] = location[:, idx] + border[idx]\n location[:, idx + 3] = location[:, idx + 3] - border[idx]\n cropped_shape = np.max(location[:, 3:6] - location[:, 0:3], axis=0)\n diff = spatial_shape - cropped_shape\n left = np.floor(diff / 2).astype(np.int)\n i_ini, j_ini, k_ini = left\n i_fin, j_fin, k_fin = left + cropped_shape\n batch = patches[\n :, # batch dimension\n :, # channels dimension\n i_ini:i_fin,\n j_ini:j_fin,\n k_ini:k_fin,\n ]\n return batch, location\n\n def _ensure_output_dtype(self, tensor: torch.Tensor) -> None:\n \"\"\"Make sure the output tensor type is the same as the input patches.\"\"\"\n if self._output_tensor.dtype != tensor.dtype:\n self._output_tensor = self._output_tensor.type(tensor.dtype)\n\n def add_batch(self, patches: torch.Tensor, locations: TypeData) -> None:\n patches = patches.cpu()\n self._ensure_output_dtype(patches)\n location_init = np.copy(locations)\n init_ones = np.ones_like(patches)\n patches, _ = self._crop_batch(\n patches, location_init, self.patch_overlap)\n location_init = np.copy(locations)\n _, locations = self._crop_batch(\n init_ones, location_init, self.patch_overlap)\n for patch, location in zip(patches, locations):\n i_ini, j_ini, k_ini, i_fin, j_fin, k_fin = location\n channels = len(patch)\n for channel in range(channels):\n self._output_tensor[channel, i_ini:i_fin, j_ini:j_fin, k_ini:k_fin] = patch[channel]\n\n def get_output_tensor(self) -> torch.Tensor:\n return self._output_tensor\n"
] |
[
[
"numpy.ones_like",
"torch.zeros",
"numpy.max",
"numpy.copy",
"numpy.floor"
]
] |
life-game-player/Hephaestus
|
[
"0c695193d8d2d8c70061e2e26ec8c718544342c6"
] |
[
"services/models/mnemosyne.py"
] |
[
"import torch\n\n\ndef create(\n host, user, passwd,\n module, operator, operation, result\n):\n \"\"\"\n Operation:\n 1: Create\n 2: Modify\n 3: Query\n 4: Delete\n\n Result:\n 0: Succeeded\n 1: Failed\n \"\"\"\n conn = torch.connect(host, user, passwd, 'hephaestus')\n list_sql = list()\n list_sql.append(\n \"INSERT INTO mnemosyne(module, operator, operation, result) \"\n \"VALUES('{}', {}, {}, {})\".format(module, operator, operation, result)\n )\n torch.execute_list(conn, list_sql)\n"
] |
[
[
"torch.execute_list",
"torch.connect"
]
] |
HERMINDERSINGH1234/ML_Extra_Resolution_Increases
|
[
"1fefceeab83f03fa8194cb63f78c5dbf7e90aeae"
] |
[
"LPIPSmodels/dist_model.py"
] |
[
"\r\nfrom __future__ import absolute_import\r\n\r\nimport sys\r\nsys.path.append('..')\r\nsys.path.append('.')\r\nimport numpy as np\r\nimport torch\r\nfrom torch import nn\r\nimport os\r\nfrom collections import OrderedDict\r\nfrom torch.autograd import Variable\r\nimport itertools\r\nfrom .base_model import BaseModel\r\nfrom scipy.ndimage import zoom\r\nimport fractions\r\nimport functools\r\nimport skimage.transform\r\nfrom IPython import embed\r\n\r\nfrom . import networks_basic as networks\r\nfrom . import util\r\n\r\nclass DistModel(BaseModel):\r\n def name(self):\r\n return self.model_name\r\n\r\n def initialize(self, model='net-lin', net='alex', pnet_rand=False, pnet_tune=False, model_path=None, colorspace='Lab', use_gpu=True, printNet=False, spatial=False, spatial_shape=None, spatial_order=1, spatial_factor=None, is_train=False, lr=.0001, beta1=0.5, version='0.1'):\r\n '''\r\n INPUTS\r\n model - ['net-lin'] for linearly calibrated network\r\n ['net'] for off-the-shelf network\r\n ['L2'] for L2 distance in Lab colorspace\r\n ['SSIM'] for ssim in RGB colorspace\r\n net - ['squeeze','alex','vgg']\r\n model_path - if None, will look in weights/[NET_NAME].pth\r\n colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM\r\n use_gpu - bool - whether or not to use a GPU\r\n printNet - bool - whether or not to print network architecture out\r\n spatial - bool - whether to output an array containing varying distances across spatial dimensions\r\n spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below).\r\n spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images.\r\n spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear).\r\n is_train - bool - [True] for training mode\r\n lr - float - initial learning rate\r\n beta1 - float - initial momentum term for adam\r\n version - 0.1 for latest, 0.0 was original\r\n '''\r\n BaseModel.initialize(self, use_gpu=use_gpu)\r\n\r\n self.model = model\r\n self.net = net\r\n self.use_gpu = use_gpu\r\n self.is_train = is_train\r\n self.spatial = spatial\r\n self.spatial_shape = spatial_shape\r\n self.spatial_order = spatial_order\r\n self.spatial_factor = spatial_factor\r\n\r\n self.model_name = '%s [%s]'%(model,net)\r\n if(self.model == 'net-lin'): # pretrained net + linear layer\r\n self.net = networks.PNetLin(use_gpu=use_gpu,pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,use_dropout=True,spatial=spatial,version=version)\r\n kw = {}\r\n if not use_gpu:\r\n kw['map_location'] = 'cpu'\r\n if(model_path is None):\r\n import inspect\r\n # model_path = './PerceptualSimilarity/weights/v%s/%s.pth'%(version,net)\r\n model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', 'v%s/%s.pth'%(version,net)))\r\n\r\n if(not is_train):\r\n print('Loading model from: %s'%model_path)\r\n self.net.load_state_dict(torch.load(model_path, **kw))\r\n\r\n elif(self.model=='net'): # pretrained network\r\n assert not self.spatial, 'spatial argument not supported yet for uncalibrated networks'\r\n self.net = networks.PNet(use_gpu=use_gpu,pnet_type=net)\r\n self.is_fake_net = True\r\n elif(self.model in ['L2','l2']):\r\n self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing\r\n self.model_name = 'L2'\r\n elif(self.model in ['DSSIM','dssim','SSIM','ssim']):\r\n self.net = 
networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace)\r\n self.model_name = 'SSIM'\r\n else:\r\n raise ValueError(\"Model [%s] not recognized.\" % self.model)\r\n\r\n self.parameters = list(self.net.parameters())\r\n\r\n if self.is_train: # training mode\r\n # extra network on top to go from distances (d0,d1) => predicted human judgment (h*)\r\n self.rankLoss = networks.BCERankingLoss(use_gpu=use_gpu)\r\n self.parameters+=self.rankLoss.parameters\r\n self.lr = lr\r\n self.old_lr = lr\r\n self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))\r\n else: # test mode\r\n self.net.eval()\r\n\r\n if(printNet):\r\n print('---------- Networks initialized -------------')\r\n networks.print_network(self.net)\r\n print('-----------------------------------------------')\r\n\r\n def forward_pair(self,in1,in2,retPerLayer=False):\r\n if(retPerLayer):\r\n return self.net.forward(in1,in2, retPerLayer=True)\r\n else:\r\n return self.net.forward(in1,in2)\r\n\r\n def forward(self, in0, in1, retNumpy=True):\r\n ''' Function computes the distance between image patches in0 and in1\r\n INPUTS\r\n in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]\r\n retNumpy - [False] to return as torch.Tensor, [True] to return as numpy array\r\n OUTPUT\r\n computed distances between in0 and in1\r\n '''\r\n\r\n self.input_ref = in0\r\n self.input_p0 = in1\r\n\r\n if(self.use_gpu):\r\n self.input_ref = self.input_ref.cuda()\r\n self.input_p0 = self.input_p0.cuda()\r\n\r\n self.var_ref = Variable(self.input_ref,requires_grad=True)\r\n self.var_p0 = Variable(self.input_p0,requires_grad=True)\r\n\r\n self.d0 = self.forward_pair(self.var_ref, self.var_p0)\r\n self.loss_total = self.d0\r\n\r\n def convert_output(d0):\r\n if(retNumpy):\r\n ans = d0.cpu().data.numpy()\r\n if not self.spatial:\r\n ans = ans.flatten()\r\n else:\r\n assert(ans.shape[0] == 1 and len(ans.shape) == 4)\r\n return ans[0,...].transpose([1, 2, 0]) # Reshape to usual numpy image format: (height, width, channels)\r\n return ans\r\n else:\r\n return d0\r\n\r\n if self.spatial:\r\n L = [convert_output(x) for x in self.d0]\r\n spatial_shape = self.spatial_shape\r\n if spatial_shape is None:\r\n if(self.spatial_factor is None):\r\n spatial_shape = (in0.size()[2],in0.size()[3])\r\n else:\r\n spatial_shape = (max([x.shape[0] for x in L])*self.spatial_factor, max([x.shape[1] for x in L])*self.spatial_factor)\r\n \r\n L = [skimage.transform.resize(x, spatial_shape, order=self.spatial_order, mode='edge') for x in L]\r\n \r\n L = np.mean(np.concatenate(L, 2) * len(L), 2)\r\n return L\r\n else:\r\n return convert_output(self.d0)\r\n\r\n # ***** TRAINING FUNCTIONS *****\r\n def optimize_parameters(self):\r\n self.forward_train()\r\n self.optimizer_net.zero_grad()\r\n self.backward_train()\r\n self.optimizer_net.step()\r\n self.clamp_weights()\r\n\r\n def clamp_weights(self):\r\n for module in self.net.modules():\r\n if(hasattr(module, 'weight') and module.kernel_size==(1,1)):\r\n module.weight.data = torch.clamp(module.weight.data,min=0)\r\n\r\n def set_input(self, data):\r\n self.input_ref = data['ref']\r\n self.input_p0 = data['p0']\r\n self.input_p1 = data['p1']\r\n self.input_judge = data['judge']\r\n\r\n if(self.use_gpu):\r\n self.input_ref = self.input_ref.cuda()\r\n self.input_p0 = self.input_p0.cuda()\r\n self.input_p1 = self.input_p1.cuda()\r\n self.input_judge = self.input_judge.cuda()\r\n\r\n self.var_ref = Variable(self.input_ref,requires_grad=True)\r\n self.var_p0 = 
Variable(self.input_p0,requires_grad=True)\r\n self.var_p1 = Variable(self.input_p1,requires_grad=True)\r\n\r\n def forward_train(self): # run forward pass\r\n self.d0 = self.forward_pair(self.var_ref, self.var_p0)\r\n self.d1 = self.forward_pair(self.var_ref, self.var_p1)\r\n self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge)\r\n\r\n # var_judge\r\n self.var_judge = Variable(1.*self.input_judge).view(self.d0.size())\r\n\r\n self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)\r\n return self.loss_total\r\n\r\n def backward_train(self):\r\n torch.mean(self.loss_total).backward()\r\n\r\n def compute_accuracy(self,d0,d1,judge):\r\n ''' d0, d1 are Variables, judge is a Tensor '''\r\n d1_lt_d0 = (d1<d0).cpu().data.numpy().flatten()\r\n judge_per = judge.cpu().numpy().flatten()\r\n return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)\r\n\r\n def get_current_errors(self):\r\n retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),\r\n ('acc_r', self.acc_r)])\r\n\r\n for key in retDict.keys():\r\n retDict[key] = np.mean(retDict[key])\r\n\r\n return retDict\r\n\r\n def get_current_visuals(self):\r\n zoom_factor = 256/self.var_ref.data.size()[2]\r\n\r\n ref_img = util.tensor2im(self.var_ref.data)\r\n p0_img = util.tensor2im(self.var_p0.data)\r\n p1_img = util.tensor2im(self.var_p1.data)\r\n\r\n ref_img_vis = zoom(ref_img,[zoom_factor, zoom_factor, 1],order=0)\r\n p0_img_vis = zoom(p0_img,[zoom_factor, zoom_factor, 1],order=0)\r\n p1_img_vis = zoom(p1_img,[zoom_factor, zoom_factor, 1],order=0)\r\n\r\n return OrderedDict([('ref', ref_img_vis),\r\n ('p0', p0_img_vis),\r\n ('p1', p1_img_vis)])\r\n\r\n def save(self, path, label):\r\n self.save_network(self.net, path, '', label)\r\n self.save_network(self.rankLoss.net, path, 'rank', label)\r\n\r\n def update_learning_rate(self,nepoch_decay):\r\n lrd = self.lr / nepoch_decay\r\n lr = self.old_lr - lrd\r\n\r\n for param_group in self.optimizer_net.param_groups:\r\n param_group['lr'] = lr\r\n\r\n print('update lr [%s] decay: %f -> %f' % (type,self.old_lr, lr))\r\n self.old_lr = lr\r\n\r\n\r\n\r\ndef score_2afc_dataset(data_loader,func):\r\n ''' Function computes Two Alternative Forced Choice (2AFC) score using\r\n distance function 'func' in dataset 'data_loader'\r\n INPUTS\r\n data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside\r\n func - callable distance function - calling d=func(in0,in1) should take 2\r\n pytorch tensors with shape Nx3xXxY, and return numpy array of length N\r\n OUTPUTS\r\n [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators\r\n [1] - dictionary with following elements\r\n d0s,d1s - N arrays containing distances between reference patch to perturbed patches \r\n gts - N array in [0,1], preferred patch selected by human evaluators\r\n (closer to \"0\" for left patch p0, \"1\" for right patch p1,\r\n \"0.6\" means 60pct people preferred right patch, 40pct preferred left)\r\n scores - N array in [0,1], corresponding to what percentage function agreed with humans\r\n CONSTS\r\n N - number of test triplets in data_loader\r\n '''\r\n\r\n d0s = []\r\n d1s = []\r\n gts = []\r\n\r\n # bar = pb.ProgressBar(max_value=data_loader.load_data().__len__())\r\n for (i,data) in enumerate(data_loader.load_data()):\r\n d0s+=func(data['ref'],data['p0']).tolist()\r\n d1s+=func(data['ref'],data['p1']).tolist()\r\n gts+=data['judge'].cpu().numpy().flatten().tolist()\r\n # bar.update(i)\r\n\r\n d0s = np.array(d0s)\r\n d1s = np.array(d1s)\r\n 
gts = np.array(gts)\r\n scores = (d0s<d1s)*(1.-gts) + (d1s<d0s)*gts + (d1s==d0s)*.5\r\n\r\n return(np.mean(scores), dict(d0s=d0s,d1s=d1s,gts=gts,scores=scores))\r\n\r\ndef score_jnd_dataset(data_loader,func):\r\n ''' Function computes JND score using distance function 'func' in dataset 'data_loader'\r\n INPUTS\r\n data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside\r\n func - callable distance function - calling d=func(in0,in1) should take 2\r\n pytorch tensors with shape Nx3xXxY, and return numpy array of length N\r\n OUTPUTS\r\n [0] - JND score in [0,1], mAP score (area under precision-recall curve)\r\n [1] - dictionary with following elements\r\n ds - N array containing distances between two patches shown to human evaluator\r\n sames - N array containing fraction of people who thought the two patches were identical\r\n CONSTS\r\n N - number of test triplets in data_loader\r\n '''\r\n\r\n ds = []\r\n gts = []\r\n\r\n # bar = pb.ProgressBar(max_value=data_loader.load_data().__len__())\r\n for (i,data) in enumerate(data_loader.load_data()):\r\n ds+=func(data['p0'],data['p1']).tolist()\r\n gts+=data['same'].cpu().numpy().flatten().tolist()\r\n # bar.update(i)\r\n\r\n sames = np.array(gts)\r\n ds = np.array(ds)\r\n\r\n sorted_inds = np.argsort(ds)\r\n ds_sorted = ds[sorted_inds]\r\n sames_sorted = sames[sorted_inds]\r\n\r\n TPs = np.cumsum(sames_sorted)\r\n FPs = np.cumsum(1-sames_sorted)\r\n FNs = np.sum(sames_sorted)-TPs\r\n\r\n precs = TPs/(TPs+FPs)\r\n recs = TPs/(TPs+FNs)\r\n score = util.voc_ap(recs,precs)\r\n\r\n return(score, dict(ds=ds,sames=sames))\r\n"
] |
[
[
"torch.optim.Adam",
"torch.mean",
"torch.load",
"scipy.ndimage.zoom",
"numpy.cumsum",
"numpy.concatenate",
"numpy.mean",
"numpy.argsort",
"torch.clamp",
"numpy.array",
"numpy.sum",
"torch.autograd.Variable"
]
] |
abtinshahidi/astromodels
|
[
"580e972ccc69f4fad57e22030923ee27f9d59ee3"
] |
[
"astromodels/sources/extended_source.py"
] |
[
"import collections\n\nimport astropy.units as u\nimport numpy as np\n\nfrom astromodels.core.spectral_component import SpectralComponent\nfrom astromodels.core.tree import Node\nfrom astromodels.core.units import get_units\nfrom astromodels.functions.functions import Constant\nfrom astromodels.sources.source import Source, EXTENDED_SOURCE\nfrom astromodels.utils.pretty_list import dict_to_list\n\n\nclass ExtendedSource(Source, Node):\n\n def __init__(self, source_name, spatial_shape, spectral_shape=None, components=None):\n\n # Check that we have all the required information\n # and set the units\n\n current_u = get_units()\n\n if spatial_shape.n_dim == 2:\n\n # Now gather the component(s)\n\n # We need either a single component, or a list of components, but not both\n # (that's the ^ symbol)\n\n assert (spectral_shape is not None) ^ (components is not None), \"You have to provide either a single \" \\\n \"component, or a list of components \" \\\n \"(but not both).\"\n\n # If the user specified only one component, make a list of one element with a default name (\"main\")\n\n if spectral_shape is not None:\n\n components = [SpectralComponent(\"main\", spectral_shape)]\n\n # Components in this case have energy as x and differential flux as y\n\n diff_flux_units = (current_u.energy * current_u.area * current_u.time) ** (-1)\n\n # Now set the units of the components\n for component in components:\n\n component.shape.set_units(current_u.energy, diff_flux_units)\n\n # Set the units of the brightness\n spatial_shape.set_units(current_u.angle, current_u.angle, current_u.angle**(-2))\n\n elif spatial_shape.n_dim == 3:\n\n # If there is no spectral component then assume that the input is a template, which will provide the\n # spectrum by itself. We just use a renormalization (a bias)\n\n if spectral_shape is None and components is None:\n\n # This is a template. 
Add a component which is just a renormalization\n\n spectral_shape = Constant()\n components = [SpectralComponent(\"main\", spectral_shape)]\n\n # set the units\n diff_flux_units = (current_u.energy * current_u.area * current_u.time *\n current_u.angle**2) ** (-1)\n spatial_shape.set_units(current_u.angle, current_u.angle, current_u.energy, diff_flux_units)\n\n else:\n\n # the spectral shape has been given, so this is a case where the spatial template gives an\n # energy-dependent shape and the spectral components give the spectrum\n\n assert (spectral_shape is not None) ^ (components is not None), \"You can provide either a single \" \\\n \"component, or a list of components \" \\\n \"(but not both).\"\n\n if spectral_shape is not None:\n\n components = [SpectralComponent(\"main\", spectral_shape)]\n\n # Assign units\n diff_flux_units = (current_u.energy * current_u.area * current_u.time) ** (-1)\n\n # Now set the units of the components\n for component in components:\n component.shape.set_units(current_u.energy, diff_flux_units)\n\n # Set the unit of the spatial template\n spatial_shape.set_units(current_u.angle, current_u.angle, current_u.energy, current_u.angle**(-2))\n\n else:\n\n raise RuntimeError(\"The spatial shape must have either 2 or 3 dimensions.\")\n\n # Here we have a list of components\n\n Source.__init__(self, components, EXTENDED_SOURCE)\n\n # A source is also a Node in the tree\n\n Node.__init__(self, source_name)\n\n # Add the spatial shape as a child node, with an explicit name\n self._spatial_shape = spatial_shape\n self._add_child(self._spatial_shape)\n\n # Add the same node also with the name of the function\n #self._add_child(self._shape, self._shape.__name__)\n\n # Add a node called 'spectrum'\n\n spectrum_node = Node('spectrum')\n spectrum_node._add_children(self._components.values())\n\n self._add_child(spectrum_node)\n\n @property\n def spatial_shape(self):\n \"\"\"\n A generic name for the spatial shape.\n\n :return: the spatial shape instance\n \"\"\"\n\n return self._spatial_shape\n\n def get_spatially_integrated_flux( self, energies):\n \n \"\"\"\n Returns total flux of source at the given energy\n :param energies: energies (array or float)\n :return: differential flux at given energy\n \"\"\"\n \n if not isinstance(energies, np.ndarray):\n energies = np.array(energies, ndmin=1)\n\n # Get the differential flux from the spectral components\n\n results = [self.spatial_shape.get_total_spatial_integral(energies) * component.shape(energies) for component in self.components.values()]\n\n if isinstance(energies, u.Quantity):\n\n # Slow version with units\n\n # We need to sum like this (slower) because using np.sum will not preserve the units\n # (thanks astropy.units)\n\n differential_flux = sum(results)\n\n else:\n\n # Fast version without units, where x is supposed to be in the same units as currently defined in\n # units.get_units()\n\n differential_flux = np.sum(results, 0)\n\n return differential_flux\n\n\n def __call__(self, lon, lat, energies):\n \"\"\"\n Returns brightness of source at the given position and energy\n :param lon: longitude (array or float)\n :param lat: latitude (array or float)\n :param energies: energies (array or float)\n :return: differential flux at given position and energy\n \"\"\"\n\n assert type(lat) == type(lon) and type(lon) == type(energies), \"Type mismatch in input of call\"\n\n if not isinstance(lat, np.ndarray):\n\n lat = np.array(lat, ndmin=1)\n lon = np.array(lon, ndmin=1)\n energies = np.array(energies, ndmin=1)\n\n # Get 
the differential flux from the spectral components\n\n results = [component.shape(energies) for component in self.components.values()]\n\n if isinstance(energies, u.Quantity):\n\n # Slow version with units\n\n # We need to sum like this (slower) because using np.sum will not preserve the units\n # (thanks astropy.units)\n\n differential_flux = sum(results)\n\n else:\n\n # Fast version without units, where x is supposed to be in the same units as currently defined in\n # units.get_units()\n\n differential_flux = np.sum(results, 0)\n\n # Get brightness from spatial model\n\n if self._spatial_shape.n_dim == 2:\n\n brightness = self._spatial_shape(lon, lat)\n\n # In this case the spectrum is the same everywhere\n n_points = lat.shape[0]\n n_energies = differential_flux.shape[0]\n\n # The following is a little obscure, but it is 6x faster than doing a for loop\n\n cube = np.repeat(differential_flux, n_points).reshape(n_energies, n_points).T\n result = (cube.T * brightness).T\n\n else:\n\n result = self._spatial_shape(lon, lat, energies) * differential_flux\n\n # Do not clip the output, otherwise it will not be possible to use ext. sources\n # with negative fluxes\n\n return np.squeeze(result)\n \n def has_free_parameters(self):\n \"\"\"\n Returns True or False whether there is any parameter in this source\n\n :return:\n \"\"\"\n\n for component in self._components.values():\n\n for par in component.shape.parameters.values():\n\n if par.free:\n\n return True\n\n for par in self.spatial_shape.parameters.values():\n\n if par.free:\n\n return True\n\n return False\n\n @property\n def free_parameters(self):\n \"\"\"\n Returns a dictionary of free parameters for this source\n We use the parameter path as the key because it's \n guaranteed to be unique, unlike the parameter name.\n\n :return:\n \"\"\"\n free_parameters = collections.OrderedDict()\n\n for component in self._components.values():\n\n for par in component.shape.parameters.values():\n\n if par.free:\n\n free_parameters[par.path] = par\n\n for par in self.spatial_shape.parameters.values():\n\n if par.free:\n\n free_parameters[par.path] = par\n\n return free_parameters\n\n @property\n def parameters(self):\n \"\"\"\n Returns a dictionary of all parameters for this source.\n We use the parameter path as the key because it's \n guaranteed to be unique, unlike the parameter name.\n\n :return:\n \"\"\"\n all_parameters = collections.OrderedDict()\n\n for component in self._components.values():\n\n for par in component.shape.parameters.values():\n\n all_parameters[par.path] = par\n\n for par in self.spatial_shape.parameters.values():\n\n all_parameters[par.path] = par\n\n return all_parameters\n\n def _repr__base(self, rich_output=False):\n \"\"\"\n Representation of the object\n\n :param rich_output: if True, generates HTML, otherwise text\n :return: the representation\n \"\"\"\n\n # Make a dictionary which will then be transformed in a list\n\n repr_dict = collections.OrderedDict()\n\n key = '%s (extended source)' % self.name\n\n repr_dict[key] = collections.OrderedDict()\n repr_dict[key]['shape'] = self._spatial_shape.to_dict(minimal=True)\n repr_dict[key]['spectrum'] = collections.OrderedDict()\n\n for component_name, component in self.components.iteritems():\n repr_dict[key]['spectrum'][component_name] = component.to_dict(minimal=True)\n\n return dict_to_list(repr_dict, rich_output)\n\n def get_boundaries(self):\n \"\"\"\n Returns the boundaries for this extended source\n\n :return: a tuple of tuples ((min. lon, max. 
lon), (min lat, max lat))\n \"\"\"\n return self._spatial_shape.get_boundaries()\n"
] |
[
[
"numpy.squeeze",
"numpy.array",
"numpy.repeat",
"numpy.sum"
]
] |
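For reference, the `__call__` method in the code above replaces a per-point loop with `np.repeat(...).reshape(...).T` when building the flux cube; the short numpy sketch below reproduces that step on toy arrays to show it matches the explicit loop (array names and values here are illustrative, not taken from the source).

import numpy as np

# Differential flux per energy (n_energies,) and per-point brightness (n_points,); toy values.
differential_flux = np.array([1.0, 2.0, 3.0])   # n_energies = 3
brightness = np.array([0.5, 1.5])               # n_points = 2

n_energies = differential_flux.shape[0]
n_points = brightness.shape[0]

# Vectorised form used in the source: repeat, reshape, transpose.
cube = np.repeat(differential_flux, n_points).reshape(n_energies, n_points).T
result = (cube.T * brightness).T                # shape (n_points, n_energies)

# Equivalent explicit loop, for comparison.
expected = np.array([[b * f for f in differential_flux] for b in brightness])

assert np.allclose(result, expected)
print(result)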
hysunflower/Serving
|
[
"50d0c2900f3385b049f76b91e38cc69d8e8a102d"
] |
[
"python/paddle_serving_app/local_predict.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n\nimport os\nimport google.protobuf.text_format\nimport numpy as np\nimport argparse\nimport paddle.fluid as fluid\nimport paddle.inference as inference\nfrom .proto import general_model_config_pb2 as m_config\nfrom paddle.fluid.core import PaddleTensor\nfrom paddle.fluid.core import AnalysisConfig\nfrom paddle.fluid.core import create_paddle_predictor\nimport logging\n\nlogging.basicConfig(format=\"%(asctime)s - %(levelname)s - %(message)s\")\nlogger = logging.getLogger(\"fluid\")\nlogger.setLevel(logging.INFO)\n\n\nclass LocalPredictor(object):\n \"\"\"\n Prediction in the current process of the local environment, in process\n call, Compared with RPC/HTTP, LocalPredictor has better performance, \n because of no network and packaging load.\n \"\"\"\n\n def __init__(self):\n self.feed_names_ = []\n self.fetch_names_ = []\n self.feed_types_ = {}\n self.fetch_types_ = {}\n self.feed_shapes_ = {}\n self.feed_names_to_idx_ = {}\n self.fetch_names_to_idx_ = {}\n self.fetch_names_to_type_ = {}\n\n def load_model_config(self,\n model_path,\n use_gpu=False,\n gpu_id=0,\n use_profile=False,\n thread_num=1,\n mem_optim=True,\n ir_optim=False,\n use_trt=False,\n use_lite=False,\n use_xpu=False,\n use_feed_fetch_ops=False):\n \"\"\"\n Load model config and set the engine config for the paddle predictor\n \n Args:\n model_path: model config path.\n use_gpu: calculating with gpu, False default.\n gpu_id: gpu id, 0 default.\n use_profile: use predictor profiles, False default.\n thread_num: thread nums, default 1. 
\n mem_optim: memory optimization, True default.\n ir_optim: open calculation chart optimization, False default.\n use_trt: use nvidia TensorRT optimization, False default\n use_lite: use Paddle-Lite Engint, False default\n use_xpu: run predict on Baidu Kunlun, False default\n use_feed_fetch_ops: use feed/fetch ops, False default.\n \"\"\"\n client_config = \"{}/serving_server_conf.prototxt\".format(model_path)\n model_conf = m_config.GeneralModelConfig()\n f = open(client_config, 'r')\n model_conf = google.protobuf.text_format.Merge(\n str(f.read()), model_conf)\n config = AnalysisConfig(model_path)\n logger.info(\"load_model_config params: model_path:{}, use_gpu:{},\\\n gpu_id:{}, use_profile:{}, thread_num:{}, mem_optim:{}, ir_optim:{},\\\n use_trt:{}, use_lite:{}, use_xpu: {}, use_feed_fetch_ops:{}\".format(\n model_path, use_gpu, gpu_id, use_profile, thread_num, mem_optim,\n ir_optim, use_trt, use_lite, use_xpu, use_feed_fetch_ops))\n\n self.feed_names_ = [var.alias_name for var in model_conf.feed_var]\n self.fetch_names_ = [var.alias_name for var in model_conf.fetch_var]\n self.feed_names_to_idx_ = {}\n self.fetch_names_to_idx_ = {}\n\n for i, var in enumerate(model_conf.feed_var):\n self.feed_names_to_idx_[var.alias_name] = i\n self.feed_types_[var.alias_name] = var.feed_type\n self.feed_shapes_[var.alias_name] = var.shape\n\n for i, var in enumerate(model_conf.fetch_var):\n self.fetch_names_to_idx_[var.alias_name] = i\n self.fetch_names_to_type_[var.alias_name] = var.fetch_type\n\n if use_profile:\n config.enable_profile()\n if mem_optim:\n config.enable_memory_optim()\n config.switch_ir_optim(ir_optim)\n config.set_cpu_math_library_num_threads(thread_num)\n config.switch_use_feed_fetch_ops(use_feed_fetch_ops)\n config.delete_pass(\"conv_transpose_eltwiseadd_bn_fuse_pass\")\n\n if not use_gpu:\n config.disable_gpu()\n else:\n config.enable_use_gpu(100, gpu_id)\n if use_trt:\n config.enable_tensorrt_engine(\n workspace_size=1 << 20,\n max_batch_size=32,\n min_subgraph_size=3,\n use_static=False,\n use_calib_mode=False)\n\n if use_lite:\n config.enable_lite_engine(\n precision_mode=inference.PrecisionType.Float32,\n zero_copy=True,\n passes_filter=[],\n ops_filter=[])\n\n if use_xpu:\n # 2MB l3 cache\n config.enable_xpu(8 * 1024 * 1024)\n\n self.predictor = create_paddle_predictor(config)\n\n def predict(self, feed=None, fetch=None, batch=False, log_id=0):\n \"\"\"\n Predict locally\n\n Args:\n feed: feed var\n fetch: fetch var\n batch: batch data or not, False default.If batch is False, a new\n dimension is added to header of the shape[np.newaxis].\n log_id: for logging\n\n Returns:\n fetch_map: dict \n \"\"\"\n if feed is None or fetch is None:\n raise ValueError(\"You should specify feed and fetch for prediction\")\n fetch_list = []\n if isinstance(fetch, str):\n fetch_list = [fetch]\n elif isinstance(fetch, list):\n fetch_list = fetch\n else:\n raise ValueError(\"Fetch only accepts string and list of string\")\n\n feed_batch = []\n if isinstance(feed, dict):\n feed_batch.append(feed)\n elif isinstance(feed, list):\n feed_batch = feed\n else:\n raise ValueError(\"Feed only accepts dict and list of dict\")\n\n int_slot_batch = []\n float_slot_batch = []\n int_feed_names = []\n float_feed_names = []\n int_shape = []\n float_shape = []\n fetch_names = []\n counter = 0\n batch_size = len(feed_batch)\n\n for key in fetch_list:\n if key in self.fetch_names_:\n fetch_names.append(key)\n\n if len(fetch_names) == 0:\n raise ValueError(\n \"Fetch names should not be empty or out of saved 
fetch list.\")\n return {}\n\n input_names = self.predictor.get_input_names()\n for name in input_names:\n if isinstance(feed[name], list):\n feed[name] = np.array(feed[name]).reshape(self.feed_shapes_[\n name])\n if self.feed_types_[name] == 0:\n feed[name] = feed[name].astype(\"int64\")\n elif self.feed_types_[name] == 1:\n feed[name] = feed[name].astype(\"float32\")\n elif self.feed_types_[name] == 2:\n feed[name] = feed[name].astype(\"int32\")\n else:\n raise ValueError(\"local predictor receives wrong data type\")\n input_tensor = self.predictor.get_input_tensor(name)\n if \"{}.lod\".format(name) in feed:\n input_tensor.set_lod([feed[\"{}.lod\".format(name)]])\n if batch == False:\n input_tensor.copy_from_cpu(feed[name][np.newaxis, :])\n else:\n input_tensor.copy_from_cpu(feed[name])\n output_tensors = []\n output_names = self.predictor.get_output_names()\n for output_name in output_names:\n output_tensor = self.predictor.get_output_tensor(output_name)\n output_tensors.append(output_tensor)\n outputs = []\n self.predictor.zero_copy_run()\n for output_tensor in output_tensors:\n output = output_tensor.copy_to_cpu()\n outputs.append(output)\n fetch_map = {}\n for i, name in enumerate(fetch):\n fetch_map[name] = outputs[i]\n if len(output_tensors[i].lod()) > 0:\n fetch_map[name + \".lod\"] = np.array(output_tensors[i].lod()[\n 0]).astype('int32')\n return fetch_map\n"
] |
[
[
"numpy.array"
]
] |
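A minimal usage sketch for the `LocalPredictor` class above, assuming the package is importable as `paddle_serving_app.local_predict` (inferred from the file path) and using a placeholder model directory and feed/fetch names; only `load_model_config` and `predict`, whose signatures appear in the code, are called.

import numpy as np
from paddle_serving_app.local_predict import LocalPredictor  # import path assumed from the file location

predictor = LocalPredictor()
# "uci_housing_model" and the feed/fetch names below are placeholders, not taken from the source;
# the directory must contain a serving_server_conf.prototxt as required by load_model_config.
predictor.load_model_config("uci_housing_model", use_gpu=False, thread_num=1)

feed = {"x": np.random.rand(1, 13).astype("float32")}
fetch_map = predictor.predict(feed=feed, fetch=["price"], batch=True)
print(fetch_map)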
cclauss/darts
|
[
"77a461b62edb232406891028645b2331a24a8b4d"
] |
[
"rnn/train_search.py"
] |
[
"import argparse\nimport os, sys, glob\nimport time\nimport math\nimport numpy as np\nimport torch\nimport logging\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nfrom architect import Architect\n\nimport gc\n\nimport data\nimport model_search as model\n\nfrom utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint\n\nparser = argparse.ArgumentParser(description='PyTorch PennTreeBank/WikiText2 Language Model')\nparser.add_argument('--data', type=str, default='../data/penn/',\n help='location of the data corpus')\nparser.add_argument('--emsize', type=int, default=300,\n help='size of word embeddings')\nparser.add_argument('--nhid', type=int, default=300,\n help='number of hidden units per layer')\nparser.add_argument('--nhidlast', type=int, default=300,\n help='number of hidden units for the last rnn layer')\nparser.add_argument('--lr', type=float, default=20,\n help='initial learning rate')\nparser.add_argument('--clip', type=float, default=0.25,\n help='gradient clipping')\nparser.add_argument('--epochs', type=int, default=50,\n help='upper epoch limit')\nparser.add_argument('--batch_size', type=int, default=256, metavar='N',\n help='batch size')\nparser.add_argument('--bptt', type=int, default=35,\n help='sequence length')\nparser.add_argument('--dropout', type=float, default=0.75,\n help='dropout applied to layers (0 = no dropout)')\nparser.add_argument('--dropouth', type=float, default=0.25,\n help='dropout for hidden nodes in rnn layers (0 = no dropout)')\nparser.add_argument('--dropoutx', type=float, default=0.75,\n help='dropout for input nodes in rnn layers (0 = no dropout)')\nparser.add_argument('--dropouti', type=float, default=0.2,\n help='dropout for input embedding layers (0 = no dropout)')\nparser.add_argument('--dropoute', type=float, default=0,\n help='dropout to remove words from embedding layer (0 = no dropout)')\nparser.add_argument('--seed', type=int, default=2,\n help='random seed')\nparser.add_argument('--nonmono', type=int, default=5,\n help='random seed')\nparser.add_argument('--cuda', action='store_false',\n help='use CUDA')\nparser.add_argument('--log-interval', type=int, default=50, metavar='N',\n help='report interval')\nparser.add_argument('--save', type=str, default='EXP',\n help='path to save the final model')\nparser.add_argument('--alpha', type=float, default=0,\n help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')\nparser.add_argument('--beta', type=float, default=1e-3,\n help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')\nparser.add_argument('--wdecay', type=float, default=5e-7,\n help='weight decay applied to all weights')\nparser.add_argument('--continue_train', action='store_true',\n help='continue train from a checkpoint')\nparser.add_argument('--small_batch_size', type=int, default=-1,\n help='the batch size for computation. batch_size should be divisible by small_batch_size.\\\n In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients\\\n until batch_size is reached. 
An update step is then performed.')\nparser.add_argument('--max_seq_len_delta', type=int, default=20,\n help='max sequence length')\nparser.add_argument('--single_gpu', default=True, action='store_false', \n help='use single GPU')\nparser.add_argument('--gpu', type=int, default=0, help='GPU device to use')\nparser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')\nparser.add_argument('--arch_wdecay', type=float, default=1e-3,\n help='weight decay for the architecture encoding alpha')\nparser.add_argument('--arch_lr', type=float, default=3e-3,\n help='learning rate for the architecture encoding alpha')\nargs = parser.parse_args()\n\nif args.nhidlast < 0:\n args.nhidlast = args.emsize\nif args.small_batch_size < 0:\n args.small_batch_size = args.batch_size\n\nif not args.continue_train:\n args.save = 'search-{}-{}'.format(args.save, time.strftime(\"%Y%m%d-%H%M%S\"))\n create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))\n\nlog_format = '%(asctime)s %(message)s'\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO,\n format=log_format, datefmt='%m/%d %I:%M:%S %p')\nfh = logging.FileHandler(os.path.join(args.save, 'log.txt'))\nfh.setFormatter(logging.Formatter(log_format))\nlogging.getLogger().addHandler(fh)\n\n# Set the random seed manually for reproducibility.\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n else:\n torch.cuda.set_device(args.gpu)\n cudnn.benchmark = True\n cudnn.enabled=True\n torch.cuda.manual_seed_all(args.seed)\n\ncorpus = data.Corpus(args.data)\n\neval_batch_size = 10\ntest_batch_size = 1\n\ntrain_data = batchify(corpus.train, args.batch_size, args)\nsearch_data = batchify(corpus.valid, args.batch_size, args)\nval_data = batchify(corpus.valid, eval_batch_size, args)\ntest_data = batchify(corpus.test, test_batch_size, args)\n\n\nntokens = len(corpus.dictionary)\nif args.continue_train:\n model = torch.load(os.path.join(args.save, 'model.pt'))\nelse:\n model = model.RNNModelSearch(ntokens, args.emsize, args.nhid, args.nhidlast, \n args.dropout, args.dropouth, args.dropoutx, args.dropouti, args.dropoute)\n\nsize = 0\nfor p in model.parameters():\n size += p.nelement()\nlogging.info('param size: {}'.format(size))\nlogging.info('initial genotype:')\nlogging.info(model.genotype())\n\nif args.cuda:\n if args.single_gpu:\n parallel_model = model.cuda()\n else:\n parallel_model = nn.DataParallel(model, dim=1).cuda()\nelse:\n parallel_model = model\narchitect = Architect(parallel_model, args)\n\ntotal_params = sum(x.data.nelement() for x in model.parameters())\nlogging.info('Args: {}'.format(args))\nlogging.info('Model total parameters: {}'.format(total_params))\n\n\ndef evaluate(data_source, batch_size=10):\n # Turn on evaluation mode which disables dropout.\n model.eval()\n total_loss = 0\n ntokens = len(corpus.dictionary)\n hidden = model.init_hidden(batch_size)\n for i in range(0, data_source.size(0) - 1, args.bptt):\n data, targets = get_batch(data_source, i, args, evaluation=True)\n targets = targets.view(-1)\n\n log_prob, hidden = parallel_model(data, hidden)\n loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data\n\n total_loss += loss * len(data)\n\n hidden = repackage_hidden(hidden)\n return total_loss[0] / len(data_source)\n\n\ndef train():\n assert args.batch_size % args.small_batch_size == 0, 'batch_size must be divisible by 
small_batch_size'\n\n # Turn on training mode which enables dropout.\n total_loss = 0\n start_time = time.time()\n ntokens = len(corpus.dictionary)\n hidden = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]\n hidden_valid = [model.init_hidden(args.small_batch_size) for _ in range(args.batch_size // args.small_batch_size)]\n batch, i = 0, 0\n while i < train_data.size(0) - 1 - 1:\n bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.\n # Prevent excessively small or negative sequence lengths\n # seq_len = max(5, int(np.random.normal(bptt, 5)))\n # # There's a very small chance that it could select a very long sequence length resulting in OOM\n # seq_len = min(seq_len, args.bptt + args.max_seq_len_delta)\n seq_len = int(bptt)\n\n lr2 = optimizer.param_groups[0]['lr']\n optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt\n model.train()\n\n data_valid, targets_valid = get_batch(search_data, i % (search_data.size(0) - 1), args)\n data, targets = get_batch(train_data, i, args, seq_len=seq_len)\n\n optimizer.zero_grad()\n\n start, end, s_id = 0, args.small_batch_size, 0\n while start < args.batch_size:\n cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)\n cur_data_valid, cur_targets_valid = data_valid[:, start: end], targets_valid[:, start: end].contiguous().view(-1)\n\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n hidden[s_id] = repackage_hidden(hidden[s_id])\n hidden_valid[s_id] = repackage_hidden(hidden_valid[s_id])\n\n hidden_valid[s_id], grad_norm = architect.step(\n hidden[s_id], cur_data, cur_targets,\n hidden_valid[s_id], cur_data_valid, cur_targets_valid,\n optimizer,\n args.unrolled)\n\n # assuming small_batch_size = batch_size so we don't accumulate gradients\n optimizer.zero_grad()\n hidden[s_id] = repackage_hidden(hidden[s_id])\n\n log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = parallel_model(cur_data, hidden[s_id], return_h=True)\n raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)\n\n loss = raw_loss\n # Activiation Regularization\n if args.alpha > 0:\n loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])\n # Temporal Activation Regularization (slowness)\n loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])\n loss *= args.small_batch_size / args.batch_size\n total_loss += raw_loss.data * args.small_batch_size / args.batch_size\n loss.backward()\n\n s_id += 1\n start = end\n end = start + args.small_batch_size\n\n gc.collect()\n\n # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs.\n torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)\n optimizer.step()\n\n # total_loss += raw_loss.data\n optimizer.param_groups[0]['lr'] = lr2\n if batch % args.log_interval == 0 and batch > 0:\n logging.info(parallel_model.genotype())\n print(F.softmax(parallel_model.weights, dim=-1))\n cur_loss = total_loss[0] / args.log_interval\n elapsed = time.time() - start_time\n logging.info('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '\n 'loss {:5.2f} | ppl {:8.2f}'.format(\n epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],\n elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))\n total_loss = 0\n start_time = time.time()\n batch += 1\n i += 
seq_len\n\n# Loop over epochs.\nlr = args.lr\nbest_val_loss = []\nstored_loss = 100000000\n\nif args.continue_train:\n optimizer_state = torch.load(os.path.join(args.save, 'optimizer.pt'))\n if 't0' in optimizer_state['param_groups'][0]:\n optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)\n else:\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)\n optimizer.load_state_dict(optimizer_state)\nelse:\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wdecay)\n\nfor epoch in range(1, args.epochs+1):\n epoch_start_time = time.time()\n train()\n\n val_loss = evaluate(val_data, eval_batch_size)\n logging.info('-' * 89)\n logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '\n 'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),\n val_loss, math.exp(val_loss)))\n logging.info('-' * 89)\n\n if val_loss < stored_loss:\n save_checkpoint(model, optimizer, epoch, args.save)\n logging.info('Saving Normal!')\n stored_loss = val_loss\n\n best_val_loss.append(val_loss)\n"
] |
[
[
"torch.nn.functional.softmax",
"numpy.random.random",
"torch.cuda.set_device",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.nn.DataParallel"
]
] |
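The training loop above temporarily rescales the learning rate in proportion to the sampled sequence length (`lr2 * seq_len / args.bptt`) and restores it after the update; the sketch below isolates that pattern with a toy parameter and a plain SGD optimizer (the parameter and loss are stand-ins, not the architecture-search model).

import torch

# Toy parameter and optimizer standing in for the search model (names are illustrative).
param = torch.nn.Parameter(torch.zeros(10))
optimizer = torch.optim.SGD([param], lr=20.0, weight_decay=5e-7)

base_bptt = 35
seq_len = 17                                                  # e.g. a shorter sampled sequence

lr2 = optimizer.param_groups[0]['lr']
optimizer.param_groups[0]['lr'] = lr2 * seq_len / base_bptt   # scale lr with sequence length

loss = (param ** 2).sum()
optimizer.zero_grad()
loss.backward()
optimizer.step()

optimizer.param_groups[0]['lr'] = lr2                         # restore the base lr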
qiuqiangkong/dcase2019_task2
|
[
"62575c8cdd4723cfdf497b290b6dddcce316c60b"
] |
[
"utils/data_generator.py"
] |
[
"import numpy as np\nimport h5py\nimport csv\nimport time\nimport logging\nimport os\nimport glob\nimport matplotlib.pyplot as plt\nimport logging\nimport pandas as pd\n\nfrom utilities import scale\nimport config\n\n\nclass Base(object):\n def __init__(self):\n '''Base class for train, validate and test data generator. \n '''\n pass\n \n def load_hdf5(self, hdf5_path, cross_validation_path):\n '''Load hdf5 file. \n \n Args:\n hdf5_path: string, path of hdf5 file\n cross_validation_path, string | 'none', path of cross validation csv \n file\n \n Returns:\n data_dict: {'audio_name': (audios_num,), \n 'feature': (dataset_total_frames, mel_bins), \n 'begin_index': (audios_num,), \n 'end_index': (audios_num,), \n (if exist) 'target': (audios_num, classes_num), \n (if exist) 'fold': (audios_num,)}\n '''\n \n data_dict = {}\n \n with h5py.File(hdf5_path, 'r') as hf:\n data_dict['audio_name'] = np.array(\n [audio_name.decode() for audio_name in hf['audio_name'][:]])\n\n data_dict['feature'] = hf['feature'][:].astype(np.float32)\n data_dict['begin_index'] = hf['begin_index'][:].astype(np.int32)\n data_dict['end_index'] = hf['end_index'][:].astype(np.int32)\n \n if 'target' in hf.keys():\n data_dict['target'] = hf['target'][:].astype(np.float32)\n \n if cross_validation_path:\n df = pd.read_csv(cross_validation_path, sep=',') \n folds = []\n \n for n, audio_name in enumerate(data_dict['audio_name']):\n index = df.index[df['fname'] == audio_name][0]\n folds.append(df['fold'][index])\n \n data_dict['fold'] = np.array(folds)\n\n return data_dict\n \n def get_segment_metadata_dict(self, data_dict, audio_indexes, \n segment_frames, hop_frames, source):\n '''Get segments metadata for training or inference. Long audio \n recordings are split to segments with the same duration. Each segment \n inherit the label of the audio recording. 
\n \n Args:\n data_dict: {'audio_name': (audios_num,), \n 'feature': (dataset_total_frames, mel_bins), \n 'begin_index': (audios_num,), \n 'end_index': (audios_num,), \n (if exist) 'target': (audios_num, classes_num), \n (if exist) 'fold': (audios_num,)}\n audio_indexes: (audios_num,)\n segment_frames: int, frames number of a segment\n hop_frames: int, hop frames between segments\n source: 'curated' | 'noisy' | None\n \n Returns:\n segment_metadata_dict: {'audio_name': (segments_num,), \n 'begin_index': (segments_num,), \n 'end_index': (segments_num,), \n (if exist) 'target': (segments_num, classes_num), \n (if exist) 'source': (segments_num)}\n '''\n \n segment_metadata_dict = {'audio_name': [], 'begin_index': [], \n 'end_index': []}\n \n has_target = 'target' in data_dict.keys()\n\n if has_target:\n segment_metadata_dict['target'] = []\n \n if source:\n segment_metadata_dict['source'] = []\n \n for audio_index in audio_indexes:\n audio_name = data_dict['audio_name'][audio_index]\n begin_index = data_dict['begin_index'][audio_index]\n end_index = data_dict['end_index'][audio_index]\n \n if has_target:\n target = data_dict['target'][audio_index]\n else:\n target = None\n \n # If audio recording shorter than a segment\n if end_index - begin_index < segment_frames:\n segment_metadata_dict['begin_index'].append(begin_index)\n segment_metadata_dict['end_index'].append(end_index)\n \n self._append_to_meta_data(segment_metadata_dict, audio_name, \n target, source)\n \n # If audio recording longer than a segment then split\n else:\n shift = 0\n while end_index - (begin_index + shift) > segment_frames:\n segment_metadata_dict['begin_index'].append(\n begin_index + shift)\n \n segment_metadata_dict['end_index'].append(\n begin_index + shift + segment_frames)\n \n self._append_to_meta_data(segment_metadata_dict, \n audio_name, target, source)\n \n shift += hop_frames\n \n # Append the last segment\n segment_metadata_dict['begin_index'].append(\n end_index - segment_frames)\n \n segment_metadata_dict['end_index'].append(end_index)\n \n self._append_to_meta_data(segment_metadata_dict, audio_name, \n target, source)\n \n for key in segment_metadata_dict.keys():\n segment_metadata_dict[key] = np.array(segment_metadata_dict[key])\n \n return segment_metadata_dict\n \n \n def _append_to_meta_data(self, segment_metadata_dict, audio_name, target, \n source):\n '''Append audio_name, target, source to segment_metadata_dict. \n '''\n segment_metadata_dict['audio_name'].append(audio_name)\n \n if target is not None:\n segment_metadata_dict['target'].append(target)\n \n if source is not None:\n segment_metadata_dict['source'].append(source)\n \n def get_feature_mask(self, data_dict, begin_index, end_index, \n segment_frames, pad_type, logmel_eps):\n '''Get logmel feature and mask of one segment. \n \n Args:\n data_dict: {'audio_name': (audios_num,), \n 'feature': (dataset_total_frames, mel_bins), \n 'begin_index': (audios_num,), \n 'end_index': (audios_num,), \n (if exist) 'target': (audios_num, classes_num), \n (if exist) 'fold': (audios_num,)}\n begin_index: int, begin index of a segment\n end_index: int, end index of a segment\n segment_frames: int, frames number of a segment\n pad_type: string, 'constant' | 'repeat'\n logmel_eps: constant value to pad if pad_type == 'constant'\n '''\n \n this_segment_frames = end_index - begin_index\n \n # If segment frames of this audio is fewer than the designed segment \n # frames, then pad. 
\n if this_segment_frames < segment_frames:\n if pad_type == 'constant':\n this_feature = self.pad_constant(\n data_dict['feature'][begin_index : end_index], \n segment_frames, logmel_eps)\n \n elif pad_type == 'repeat':\n this_feature = self.pad_repeat(\n data_dict['feature'][begin_index : end_index], \n segment_frames)\n \n this_mask = np.zeros(segment_frames)\n this_mask[0 : this_segment_frames] = 1\n \n # If segment frames is equal to the designed segment frames, then load\n # data without padding. \n else:\n this_feature = data_dict['feature'][begin_index : end_index]\n this_mask = np.ones(self.segment_frames)\n \n return this_feature, this_mask\n \n def pad_constant(self, x, max_len, constant):\n '''Pad matrix with constant. \n \n Args:\n x: (frames, mel_bins)\n max_len: int, legnth to be padded\n constant: float, value used for padding\n '''\n pad = constant * np.ones((max_len - x.shape[0], x.shape[1]))\n padded_x = np.concatenate((x, pad), axis=0)\n \n return padded_x\n \n def pad_repeat(self, x, max_len):\n '''Repeat matrix to a legnth. \n \n Args:\n x: (frames, mel_bins)\n max_len: int, length to be padded\n '''\n repeat_num = int(max_len / x.shape[0]) + 1\n repeated_x = np.tile(x, (repeat_num, 1))\n repeated_x = repeated_x[0 : max_len]\n \n return repeated_x\n \n def transform(self, x):\n '''Transform data. \n '''\n return scale(x, self.scalar['mean'], self.scalar['std'])\n\n\nclass DataGenerator(Base):\n \n def __init__(self, curated_feature_hdf5_path, noisy_feature_hdf5_path, \n curated_cross_validation_path, noisy_cross_validation_path, train_source, \n holdout_fold, segment_seconds, hop_seconds, pad_type, scalar, batch_size, \n seed=1234):\n '''Data generator for training and validation. \n \n Args:\n curated_feature_hdf5_path: string, path of hdf5 file\n noisy_feature_hdf5_path: string, path of hdf5 file\n curated_cross_validation_path: path of cross validation csv file\n noisy_cross_validation_path: path of cross validation csv file\n train_source: 'curated' | 'noisy' | 'curated_and_noisy'\n holdout_fold: '1', '2', '3', '4' | 'none', set `none` for training \n on all data without validation\n segment_seconds: float, duration of audio recordings to be padded or split\n hop_seconds: float, hop seconds between segments\n pad_type: 'constant' | 'repeat'\n scalar: object, containing mean and std value\n batch_size: int\n seed: int\n '''\n\n self.scalar = scalar\n self.batch_size = batch_size\n self.random_state = np.random.RandomState(seed)\n self.segment_frames = int(segment_seconds * config.frames_per_second)\n self.hop_frames = int(hop_seconds * config.frames_per_second)\n self.pad_type = pad_type\n self.logmel_eps = config.logmel_eps\n \n # Load training data\n load_time = time.time()\n \n self.curated_data_dict = self.load_hdf5(\n curated_feature_hdf5_path, curated_cross_validation_path)\n \n self.noisy_data_dict = self.load_hdf5(\n noisy_feature_hdf5_path, noisy_cross_validation_path)\n \n # Get train and validate audio indexes\n (train_curated_audio_indexes, validate_curated_audio_indexes) = \\\n self.get_train_validate_audio_indexes(\n self.curated_data_dict, holdout_fold)\n \n (train_noisy_audio_indexes, validate_noisy_audio_indexes) = \\\n self.get_train_validate_audio_indexes(\n self.noisy_data_dict, holdout_fold)\n \n logging.info('Train curated audio num: {}'.format(\n len(train_curated_audio_indexes)))\n logging.info('Train noisy audio num: {}'.format(\n len(train_noisy_audio_indexes)))\n logging.info('Validate curated audio num: {}'.format(\n 
len(validate_curated_audio_indexes)))\n logging.info('Validate noisy audio num: {}'.format(\n len(validate_noisy_audio_indexes)))\n logging.info('Load data time: {:.3f} s'.format(time.time() - load_time))\n \n # Get segment metadata for training\n self.train_curated_segment_metadata_dict = \\\n self.get_segment_metadata_dict(\n self.curated_data_dict, train_curated_audio_indexes, \n self.segment_frames, self.hop_frames, 'curated')\n \n self.train_noisy_segment_metadata_dict = self.get_segment_metadata_dict(\n self.noisy_data_dict, train_noisy_audio_indexes, \n self.segment_frames, self.hop_frames, 'noisy')\n\n if train_source == 'curated':\n self.train_segment_metadata_dict = \\\n self.train_curated_segment_metadata_dict\n \n elif train_source == 'noisy':\n self.train_segment_metadata_dict = \\\n self.train_noisy_segment_metadata_dict\n \n elif train_source == 'curated_and_noisy': \n self.train_segment_metadata_dict = \\\n self.combine_curated_noisy_metadata_dict(\n self.train_curated_segment_metadata_dict, \n self.train_noisy_segment_metadata_dict)\n \n # Get segment metadata for validation\n self.validate_curated_segment_metadata_dict = \\\n self.get_segment_metadata_dict(\n self.curated_data_dict, validate_curated_audio_indexes, \n self.segment_frames, self.hop_frames, 'curated')\n \n self.validate_noisy_segment_metadata_dict = \\\n self.get_segment_metadata_dict(\n self.noisy_data_dict, validate_noisy_audio_indexes, \n self.segment_frames, self.hop_frames, 'noisy')\n \n # Print data statistics\n train_segments_num = len(self.train_segment_metadata_dict['audio_name'])\n \n validate_curated_segments_num = len(\n self.validate_curated_segment_metadata_dict['audio_name'])\n \n validate_noisy_segments_num = len(\n self.validate_noisy_segment_metadata_dict['audio_name'])\n \n logging.info('')\n logging.info('Total train segments num: {}'.format(train_segments_num))\n \n logging.info('Validate curated segments num: {}'.format(\n validate_curated_segments_num))\n \n logging.info('Validate noisy segments num: {}'.format(\n validate_noisy_segments_num))\n \n self.train_segments_indexes = np.arange(train_segments_num)\n self.random_state.shuffle(self.train_segments_indexes)\n self.pointer = 0\n \n def get_train_validate_audio_indexes(self, data_dict, holdout_fold): \n '''Get train and validate audio indexes. \n \n Args:\n data_dict: {'audio_name': (audios_num,), \n 'feature': (dataset_total_frames, mel_bins), \n 'target': (audios_num, classes_num), \n 'begin_index': (audios_num,), \n 'end_index': (audios_num,), \n (if exist) 'fold': (audios_num,)}\n holdout_fold: 'none' | int, if 'none' then validate indexes are empty\n \n Returns:\n train_audio_indexes: (train_audios_num,)\n validate_audio_indexes: (validate_audios_num)\n '''\n \n if holdout_fold == 'none':\n train_audio_indexes = np.arange(len(data_dict['audio_name']))\n validate_audio_indexes = np.array([])\n \n else:\n train_audio_indexes = np.where(\n data_dict['fold'] != int(holdout_fold))[0]\n \n validate_audio_indexes = np.where(\n data_dict['fold'] == int(holdout_fold))[0]\n \n return train_audio_indexes, validate_audio_indexes\n\n def combine_curated_noisy_metadata_dict(self, curated_metadata_dict, \n noisy_metadata_dict):\n '''Combine curated and noisy segment metadata dict. 
\n '''\n \n combined_metadata_dict = {}\n \n for key in curated_metadata_dict.keys():\n combined_metadata_dict[key] = np.concatenate(\n (curated_metadata_dict[key], noisy_metadata_dict[key]), axis=0)\n \n return combined_metadata_dict\n \n def generate_train(self):\n '''Generate mini-batch data for training. \n \n Returns:\n batch_data_dict: {'audio_name': (batch_size,), \n 'feature': (batch_size, segment_frames, mel_bins), \n 'mask': (batch_size, segment_frames), \n 'target': (batch_size, classes_num), \n 'source': (batch_size,)}\n '''\n \n while True:\n # Reset pointer\n if self.pointer >= len(self.train_segments_indexes):\n self.pointer = 0\n self.random_state.shuffle(self.train_segments_indexes)\n\n # Get batch segment indexes\n batch_segment_indexes = self.train_segments_indexes[\n self.pointer: self.pointer + self.batch_size]\n \n self.pointer += self.batch_size\n\n # Batch segment data\n batch_audio_name = self.train_segment_metadata_dict\\\n ['audio_name'][batch_segment_indexes]\n \n batch_begin_index = self.train_segment_metadata_dict\\\n ['begin_index'][batch_segment_indexes]\n \n batch_end_index = self.train_segment_metadata_dict\\\n ['end_index'][batch_segment_indexes]\n \n batch_target = self.train_segment_metadata_dict\\\n ['target'][batch_segment_indexes]\n \n batch_source = self.train_segment_metadata_dict\\\n ['source'][batch_segment_indexes]\n \n batch_feature = []\n batch_mask = []\n \n # Get logmel segments one by one, pad the short segments\n for n in range(len(batch_segment_indexes)):\n if batch_source[n] == 'curated':\n data_dict = self.curated_data_dict\n elif batch_source[n] == 'noisy':\n data_dict = self.noisy_data_dict\n else:\n raise Exception('Incorrect source type!')\n \n (this_feature, this_mask) = self.get_feature_mask(\n data_dict, batch_begin_index[n], batch_end_index[n], \n self.segment_frames, self.pad_type, self.logmel_eps)\n \n batch_feature.append(this_feature)\n batch_mask.append(this_mask)\n \n batch_feature = np.array(batch_feature)\n batch_feature = self.transform(batch_feature)\n \n batch_mask = np.array(batch_mask) \n \n batch_data_dict = {\n 'audio_name': batch_audio_name, \n 'feature': batch_feature, \n 'mask': batch_mask, \n 'target': batch_target, \n 'source': batch_source}\n \n yield batch_data_dict\n \n def generate_validate(self, data_type, target_source, max_iteration=None):\n '''Generate mini-batch data for validation. 
\n \n Returns:\n batch_data_dict: {'audio_name': (batch_size,), \n 'feature': (batch_size, segment_frames, mel_bins), \n 'mask': (batch_size, segment_frames), \n 'target': (batch_size, classes_num)}\n '''\n\n assert(data_type in ['train', 'validate'])\n assert(target_source in ['curated', 'noisy'])\n \n segment_metadata_dict = eval(\n 'self.{}_{}_segment_metadata_dict'.format(data_type, target_source))\n \n data_dict = eval('self.{}_data_dict'.format(target_source))\n \n segments_num = len(segment_metadata_dict['audio_name'])\n segment_indexes = np.arange(segments_num)\n \n iteration = 0\n pointer = 0\n \n while True:\n if iteration == max_iteration:\n break\n\n # Reset pointer\n if pointer >= segments_num:\n break\n\n # Get batch segment indexes\n batch_segment_indexes = segment_indexes[\n pointer: pointer + self.batch_size] \n \n pointer += self.batch_size\n iteration += 1\n \n # Batch segment data\n batch_audio_name = segment_metadata_dict\\\n ['audio_name'][batch_segment_indexes]\n \n batch_begin_index = segment_metadata_dict\\\n ['begin_index'][batch_segment_indexes]\n \n batch_end_index = segment_metadata_dict\\\n ['end_index'][batch_segment_indexes]\n \n batch_target = segment_metadata_dict\\\n ['target'][batch_segment_indexes]\n \n batch_feature = []\n batch_mask = []\n\n # Get logmel segments one by one, pad the short segments\n for n in range(len(batch_segment_indexes)):\n (this_feature, this_mask) = self.get_feature_mask(\n data_dict, batch_begin_index[n], batch_end_index[n], \n self.segment_frames, self.pad_type, self.logmel_eps)\n \n batch_feature.append(this_feature)\n batch_mask.append(this_mask)\n \n batch_feature = np.array(batch_feature)\n batch_feature = self.transform(batch_feature)\n \n batch_mask = np.array(batch_mask)\n \n batch_data_dict = {\n 'audio_name': batch_audio_name, \n 'feature': batch_feature, \n 'mask': batch_mask, \n 'target': batch_target}\n\n yield batch_data_dict\n \n\nclass TestDataGenerator(Base):\n def __init__(self, test_feature_hdf5_path, segment_seconds, hop_seconds, \n pad_type, scalar, batch_size, seed=1234):\n '''Data generator for testing. \n \n Args:\n test_feature_hdf5_path: string, path of hdf5 file\n segment_seconds: float, duration of audio recordings to be padded or split\n hop_seconds: float, hop seconds between segments\n pad_type: 'constant' | 'repeat'\n scalar: object, containing mean and std value\n batch_size: int\n seed: int\n '''\n \n self.scalar = scalar\n self.batch_size = batch_size\n self.random_state = np.random.RandomState(seed)\n self.segment_frames = int(segment_seconds * config.frames_per_second)\n self.hop_frames = int(hop_seconds * config.frames_per_second)\n self.pad_type = pad_type\n self.logmel_eps = config.logmel_eps\n \n # Load testing data\n self.test_data_dict = self.load_hdf5(\n test_feature_hdf5_path, cross_validation_path=None)\n \n audios_num = len(self.test_data_dict['audio_name'])\n test_audio_indexes = np.arange(audios_num)\n \n self.test_segment_metadata_dict = \\\n self.get_segment_metadata_dict(\n self.test_data_dict, test_audio_indexes, self.segment_frames, \n self.hop_frames, source=None)\n \n def generate_test(self):\n '''Generate mini-batch data for test. 
\n \n Returns:\n batch_data_dict: {'audio_name': (batch_size,), \n 'feature': (batch_size, segment_frames, mel_bins), \n 'mask': (batch_size, segment_frames)}\n '''\n \n segment_metadata_dict = self.test_segment_metadata_dict\n data_dict = self.test_data_dict\n \n segments_num = len(segment_metadata_dict['audio_name'])\n segment_indexes = np.arange(segments_num)\n \n iteration = 0\n pointer = 0\n \n while True:\n # Reset pointer\n if pointer >= segments_num:\n break\n\n # Get batch segment indexes\n batch_segment_indexes = segment_indexes[\n pointer: pointer + self.batch_size]\n \n pointer += self.batch_size\n iteration += 1\n \n # Batch segment data\n batch_audio_name = segment_metadata_dict\\\n ['audio_name'][batch_segment_indexes]\n \n batch_begin_index = segment_metadata_dict\\\n ['begin_index'][batch_segment_indexes]\n \n batch_end_index = segment_metadata_dict\\\n ['end_index'][batch_segment_indexes]\n\n batch_feature = []\n batch_mask = []\n\n # Get logmel segments one by one, pad the short segments\n for n in range(len(batch_segment_indexes)):\n (this_feature, this_mask) = self.get_feature_mask(\n data_dict, batch_begin_index[n], batch_end_index[n], \n self.segment_frames, self.pad_type, self.logmel_eps)\n \n batch_feature.append(this_feature)\n batch_mask.append(this_mask)\n \n batch_feature = np.array(batch_feature)\n batch_feature = self.transform(batch_feature)\n \n batch_mask = np.array(batch_mask) \n \n batch_data_dict = {\n 'audio_name': batch_audio_name, \n 'feature': batch_feature, \n 'mask': batch_mask} \n\n yield batch_data_dict"
] |
[
[
"pandas.read_csv",
"numpy.arange",
"numpy.tile",
"numpy.ones",
"numpy.concatenate",
"numpy.array",
"numpy.random.RandomState",
"numpy.zeros"
]
] |
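The `pad_constant` and `pad_repeat` helpers above pad a (frames, mel_bins) log-mel segment to a fixed number of frames; the standalone numpy sketch below mirrors their logic on a toy array so the two padding modes can be compared (shapes and values are made up for illustration).

import numpy as np

def pad_constant(x, max_len, constant):
    # Append rows filled with `constant` until the segment reaches max_len frames.
    pad = constant * np.ones((max_len - x.shape[0], x.shape[1]))
    return np.concatenate((x, pad), axis=0)

def pad_repeat(x, max_len):
    # Tile the segment along the frame axis and cut it back to exactly max_len frames.
    repeat_num = int(max_len / x.shape[0]) + 1
    return np.tile(x, (repeat_num, 1))[0:max_len]

segment = np.arange(6, dtype=np.float32).reshape(3, 2)   # 3 frames, 2 mel bins
print(pad_constant(segment, 5, constant=-10.0))          # rows of -10.0 appended
print(pad_repeat(segment, 5))                            # frames repeated cyclically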
dizcza/cdtw-python
|
[
"a83fffd6fc222a1691f07421fd4dbf46dc19e0aa"
] |
[
"tests/test_cdtw.py"
] |
[
"import unittest\nimport math\n\nimport numpy as np\nfrom cdtw.dtw import *\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\n\ntry:\n import dtaidistance\n DTAIDISTANCE_INSTALLED = True\nexcept ImportError:\n DTAIDISTANCE_INSTALLED = False\n\n\nclass TestCDTW(unittest.TestCase):\n\n def test_empty(self):\n self.assertRaises(ValueError, dtw_mat, [], [1.0, 2.0])\n self.assertRaises(ValueError, dtw_dist, [], [1.0, 2.0])\n\n def test_one_point(self):\n self.assertEqual(dtw_dist([1.0], [5.0]), 4.0)\n cost_mat = dtw_mat([1.0], [5.0])\n assert_array_equal(cost_mat, [[4.0]])\n assert_array_equal(dtw_path(cost_mat), [(0, 0)])\n\n def test_simple(self):\n x = [1, 2, 3, 4, 5]\n y = [2, 3, 4]\n\n cost_mat_expected = np.sqrt([\n [1, 5, 14],\n [1, 2, 6],\n [2, 1, 2],\n [6, 2, 1],\n [15, 6, 2]\n ])\n path_expected = [(0, 0), (1, 0), (2, 1), (3, 2), (4, 2)]\n\n cost_mat = dtw_mat(x, y)\n self.assertAlmostEqual(dtw_dist(x, y), math.sqrt(2.0), places=6)\n assert_array_almost_equal(cost_mat, cost_mat_expected)\n assert_array_equal(dtw_path(cost_mat), path_expected)\n\n def test_order_does_not_matter(self):\n np.random.seed(0)\n x = np.random.randn(100)\n y = np.random.randn(300)\n assert_array_almost_equal(dtw_mat(x, y), dtw_mat(y, x).T)\n self.assertAlmostEqual(dtw_dist(x, y), dtw_dist(y, x))\n\n def test_dtw_distance_path(self):\n np.random.seed(0)\n x = np.random.randn(10)\n y = np.random.randn(30)\n cost_mat = dtw_mat(x, y)\n self.assertAlmostEqual(cost_mat[-1, -1], dtw_dist(x, y), places=6)\n path = dtw_path(cost_mat)\n assert_array_equal(path[0], (0, 0))\n assert_array_equal(path[-1], (len(x) - 1, len(y) - 1))\n\n @unittest.skipUnless(DTAIDISTANCE_INSTALLED, \"dtaidistance not installed\")\n def test_dtaidistance(self):\n np.random.seed(0)\n x = np.random.randn(100).astype(np.float32)\n y = np.random.randn(30).astype(np.float32)\n self.assertAlmostEqual(dtw_dist(x, y),\n dtaidistance.dtw.distance(x, y),\n places=6)\n _, cost_mat_expected = dtaidistance.dtw.warping_paths(x, y)\n cost_mat = dtw_mat(x, y)\n assert_array_almost_equal(cost_mat, cost_mat_expected[1:, 1:],\n decimal=5)\n path_expected = dtaidistance.dtw.best_path(cost_mat_expected)\n assert_array_equal(dtw_path(cost_mat), path_expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.sqrt",
"numpy.random.seed",
"numpy.testing.assert_array_equal",
"numpy.random.randn",
"numpy.testing.assert_array_almost_equal"
]
] |
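A hedged usage sketch of the `cdtw.dtw` functions exercised by the tests above; the import names and the expected values come from the test file itself (`test_simple` and `test_one_point`), and the cdtw package must be installed for the snippet to run.

import numpy as np
from cdtw.dtw import dtw_dist, dtw_mat, dtw_path  # names as used in tests/test_cdtw.py

x = [1, 2, 3, 4, 5]
y = [2, 3, 4]

print(dtw_dist(x, y))          # scalar DTW distance (sqrt(2) for this pair, per test_simple)
cost = dtw_mat(x, y)           # accumulated-cost matrix, shape (len(x), len(y))
print(dtw_path(cost))          # warping path as a list of (i, j) index pairs

# Single-point series: the distance reduces to |1 - 5| = 4, as asserted in test_one_point.
assert np.isclose(dtw_dist([1.0], [5.0]), 4.0)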
huajianjiu/ANSMESC
|
[
"76323a46f638c717e23388cf529734081a70eeee"
] |
[
"attention.py"
] |
[
"# author - Richard Liao\n# Dec 26 2016\n# Attention GRU network\n\nfrom keras import backend as K\nfrom keras.engine.topology import Layer\nfrom keras import initializers, regularizers, constraints\n\n\nclass AttentionWithContext(Layer):\n \"\"\"\n Attention operation, with a context/query vector, for temporal data.\n Supports Masking.\n Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]\n \"Hierarchical Attention Networks for Document Classification\"\n by using a context vector to assist the attention\n # Input shape\n 3D tensor with shape: `(samples, steps, features)`.\n # Output shape\n 2D tensor with shape: `(samples, features)`.\n :param kwargs:\n Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.\n The dimensions are inferred based on the output shape of the RNN.\n Example:\n model.add(LSTM(64, return_sequences=True))\n model.add(AttentionWithContext())\n \"\"\"\n\n def __init__(self,\n W_regularizer=None, u_regularizer=None, b_regularizer=None,\n W_constraint=None, u_constraint=None, b_constraint=None,\n bias=True, **kwargs):\n\n self.supports_masking = True\n self.init = initializers.get('glorot_uniform')\n\n self.W_regularizer = regularizers.get(W_regularizer)\n self.u_regularizer = regularizers.get(u_regularizer)\n self.b_regularizer = regularizers.get(b_regularizer)\n\n self.W_constraint = constraints.get(W_constraint)\n self.u_constraint = constraints.get(u_constraint)\n self.b_constraint = constraints.get(b_constraint)\n\n self.bias = bias\n super(AttentionWithContext, self).__init__(**kwargs)\n\n def build(self, input_shape):\n assert len(input_shape) == 3\n\n self.W = self.add_weight(shape=(input_shape[-1], input_shape[-1],),\n initializer=self.init,\n name='{}_W'.format(self.name),\n regularizer=self.W_regularizer,\n constraint=self.W_constraint)\n if self.bias:\n self.b = self.add_weight(shape=(input_shape[-1],),\n initializer='zero',\n name='{}_b'.format(self.name),\n regularizer=self.b_regularizer,\n constraint=self.b_constraint)\n\n self.u = self.add_weight(shape=(input_shape[-1],),\n initializer=self.init,\n name='{}_u'.format(self.name),\n regularizer=self.u_regularizer,\n constraint=self.u_constraint)\n\n super(AttentionWithContext, self).build(input_shape)\n\n def compute_mask(self, input, input_mask=None):\n # do not pass the mask to the next layers\n return None\n\n def call(self, x, mask=None):\n uit = K.dot(x, self.W)\n\n if self.bias:\n uit += self.b\n\n uit = K.tanh(uit)\n # ait = K.dot(uit, self.u) # replace this\n mul_a = uit * self.u # with this\n ait = K.sum(mul_a, axis=2) # and this\n\n a = K.exp(ait)\n\n # apply mask after the exp. will be re-normalized next\n if mask is not None:\n # Cast the mask to floatX to avoid float64 upcasting in theano\n a *= K.cast(mask, K.floatx())\n\n # in some cases especially in the early stages of training the sum may be almost zero\n # and this results in NaN's. 
A workaround is to add a very small positive number ε to the sum.\n # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())\n a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n\n a = K.expand_dims(a)\n weighted_input = x * a\n return K.sum(weighted_input, axis=1)\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[-1]\n\n\nif __name__ == \"__main__\":\n from keras.models import Model\n from keras.layers import Input, Embedding, Bidirectional, TimeDistributed, GRU, Dense\n import numpy as np\n\n input_array = np.random.randint(25, size=(15, 100))\n embedding_layer = Embedding(25 + 1,\n 100,\n input_length=100,\n trainable=True)\n sentence_input = Input(shape=(100,), dtype='int32')\n embedded_sequences = embedding_layer(sentence_input)\n l_lstm = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences)\n l_dense = TimeDistributed(Dense(200))(l_lstm)\n l_att = AttentionWithContext()(l_dense)\n model = Model(sentence_input, l_att)\n # model = Model(sentence_input, l_dense)\n model.compile('rmsprop', 'mse')\n output_array = model.predict(input_array)\n print(output_array.shape)\n"
] |
[
[
"numpy.random.randint"
]
] |
AlexandrovLab/SigProfilerTopography
|
[
"34c7cf24392bc77953370038a520ffc8d0bdee50"
] |
[
"SigProfilerTopography/source/plotting/TranscriptionReplicationStrandBiasFigures.py"
] |
[
"# This source code file is a part of SigProfilerTopography\n# SigProfilerTopography is a tool included as part of the SigProfiler\n# computational framework for comprehensive analysis of mutational\n# signatures from next-generation sequencing of cancer genomes.\n# SigProfilerTopography provides the downstream data analysis of\n# mutations and extracted mutational signatures w.r.t.\n# nucleosome occupancy, replication time, strand bias and processivity.\n# Copyright (C) 2018-2020 Burcak Otlu\n\nimport os\nimport numpy as np\nimport statsmodels.stats.multitest\n\n# import matplotlib\n# BACKEND = 'Agg'\n# if matplotlib.get_backend().lower() != BACKEND.lower():\n# # If backend is not set properly a call to describe will hang\n# matplotlib.use(BACKEND)\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.lines import Line2D\nfrom matplotlib import gridspec\n\n\nimport pandas as pd\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import natural_key\nfrom SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_STRAND\nfrom SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_STRAND\nfrom SigProfilerTopography.source.commons.TopographyCommons import LAGGING\nfrom SigProfilerTopography.source.commons.TopographyCommons import LEADING\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import six_mutation_types\nfrom SigProfilerTopography.source.commons.TopographyCommons import STRANDBIAS\nfrom SigProfilerTopography.source.commons.TopographyCommons import DATA\nfrom SigProfilerTopography.source.commons.TopographyCommons import FIGURE\nfrom SigProfilerTopography.source.commons.TopographyCommons import SCATTER_PLOTS\nfrom SigProfilerTopography.source.commons.TopographyCommons import BAR_PLOTS\nfrom SigProfilerTopography.source.commons.TopographyCommons import CIRCLE_PLOTS\nfrom SigProfilerTopography.source.commons.TopographyCommons import CIRCLE_BAR_PLOTS\nfrom SigProfilerTopography.source.commons.TopographyCommons import SAMPLES\nfrom SigProfilerTopography.source.commons.TopographyCommons import TABLES\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIPTIONSTRANDBIAS\nfrom SigProfilerTopography.source.commons.TopographyCommons import REPLICATIONSTRANDBIAS\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING\nfrom SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED\nfrom SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING_P_VALUE\nfrom SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE\nfrom SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC_P_VALUE\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING_Q_VALUE\nfrom SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE\nfrom SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC_Q_VALUE\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_REAL_COUNT\nfrom SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_REAL_COUNT\n\nfrom SigProfilerTopography.source.commons.TopographyCommons 
import GENIC_REAL_COUNT\nfrom SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC_REAL_COUNT\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import LAGGING_REAL_COUNT\nfrom SigProfilerTopography.source.commons.TopographyCommons import LEADING_REAL_COUNT\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_SIMULATIONS_MEAN_COUNT\nfrom SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_SIMULATIONS_MEAN_COUNT\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import GENIC_SIMULATIONS_MEAN_COUNT\nfrom SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC_SIMULATIONS_MEAN_COUNT\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import LAGGING_SIMULATIONS_MEAN_COUNT\nfrom SigProfilerTopography.source.commons.TopographyCommons import LEADING_SIMULATIONS_MEAN_COUNT\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import GENIC\nfrom SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import percentage_numbers\nfrom SigProfilerTopography.source.commons.TopographyCommons import percentage_strings\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_10_PERCENT_DIFF\nfrom SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_20_PERCENT_DIFF\nfrom SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_30_PERCENT_DIFF\nfrom SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_50_PERCENT_DIFF\nfrom SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_75_PERCENT_DIFF\nfrom SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_100_PERCENT_DIFF\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import ID\nfrom SigProfilerTopography.source.commons.TopographyCommons import DBS\nfrom SigProfilerTopography.source.commons.TopographyCommons import SBS_CONTEXTS\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL\nfrom SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_MANUSCRIPT\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import EXCEL_FILES\nfrom SigProfilerTopography.source.commons.TopographyCommons import write_excel_file\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT\n\nSIGNATURE = 'signature'\nCANCER_TYPE = 'cancer_type'\nMUTATION_TYPE = 'mutation_type'\nTYPE = 'type'\nSIGNIFICANT_STRAND = 'significant_strand'\n\nSIGNIFICANCE_LEVEL = 0.05\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename\nfrom SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename\nfrom SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename\nfrom SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename\nfrom SigProfilerTopography.source.commons.TopographyCommons import 
Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import getSample2NumberofSubsDict\nfrom SigProfilerTopography.source.commons.TopographyCommons import getSample2NumberofIndelsDict\nfrom SigProfilerTopography.source.commons.TopographyCommons import Sample2NumberofDinucsDictFilename\n\nfrom SigProfilerTopography.source.commons.TopographyCommons import getSample2SubsSignature2NumberofMutationsDict\nfrom SigProfilerTopography.source.commons.TopographyCommons import getSample2IndelsSignature2NumberofMutationsDict\nfrom SigProfilerTopography.source.commons.TopographyCommons import Sample2DinucsSignature2NumberofMutationsDictFilename\n\ntranscriptionStrands = [TRANSCRIBED_STRAND, UNTRANSCRIBED_STRAND]\ngenicVersusIntergenicStrands=[GENIC, INTERGENIC]\nreplicationStrands = [LAGGING, LEADING]\n\n########################################################################\n#New way\n#For Mutation Types\ndef plot_mutation_types_transcription_log10_ratio_replication_log_10_ratio_using_dataframes(sample,numberofMutations,\n type_transcribed_versus_untranscribed_df,\n type_lagging_versus_leading_df,\n outputDir, jobname):\n\n\n fig = plt.figure(figsize=(8,8), facecolor=None)\n plt.style.use('ggplot')\n\n # build a rectangle in axes coords\n left, width = .0, 1.\n bottom, height = .0, 1.\n right = left + width\n top = bottom + height\n\n # This code makes the background white.\n # Always put these statements after plt.figure\n ax = plt.gca()\n ax.set_facecolor('white')\n for edge_i in ['bottom','top','left','right']:\n ax.spines[edge_i].set_edgecolor(\"black\")\n ax.spines[edge_i].set_linewidth(1)\n ax.spines[edge_i].set_bounds(-0.3, 0.3)\n\n plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)\n plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)\n plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)\n plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)\n\n if (sample is not None):\n plt.title(sample, fontsize=15, fontweight='bold')\n\n plt.xlabel('Lagging/leading replication strand\\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')\n plt.ylabel('Transcribed/untranscribed strand\\nratio(log10)',fontstyle='normal', fontsize=12, fontweight='bold')\n\n # Put some extra place by xlim if necessary\n plt.xlim(-0.3, 0.3)\n plt.ylim(-0.3, 0.3)\n\n plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)\n plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)\n plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)\n plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)\n\n yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]\n yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']\n plt.yticks(yticks, yticklabels)\n\n xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]\n xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']\n plt.xticks(xticks, xticklabels)\n\n # type_transcribed_versus_untranscribed_df=type_transcribed_versus_untranscribed_df[['cancer_type', 'type',\n # 'Transcribed_real_count', 'UnTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'transcribed_versus_untranscribed_p_value','transcribed_versus_untranscribed_q_value',\n # 
'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',\n # 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list' ]]\n #\n # type_lagging_versus_leading_df=type_lagging_versus_leading_df[['cancer_type', 'type',\n # 'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count', 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value',\n # 'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count', 'Lagging_max_sims_count', 'Lagging_sims_count_list',\n # 'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count', 'Leading_max_sims_count', 'Leading_sims_count_list' ]]\n\n ########################################################################\n transcriptionRatiosDict = {}\n replicationRatiosDict = {}\n for mutationType in six_mutation_types:\n\n ##################################################################\n transcribed_real_count=0\n untranscribed_real_count=0\n\n if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==mutationType]['Transcribed_real_count'].values.size>0):\n transcribed_real_count= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == mutationType]['Transcribed_real_count'].values[0]\n\n if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==mutationType]['UnTranscribed_real_count'].values.size>0):\n untranscribed_real_count= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == mutationType]['UnTranscribed_real_count'].values[0]\n\n if (transcribed_real_count>0 and untranscribed_real_count>0):\n transcriptionRatiosDict[mutationType] = np.log10(transcribed_real_count/untranscribed_real_count)\n ##################################################################\n\n ##################################################################\n lagging_real_count = 0\n leading_real_count = 0\n\n if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Lagging_real_count'].values.size > 0):\n lagging_real_count = type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Lagging_real_count'].values[0]\n\n if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Leading_real_count'].values.size > 0):\n leading_real_count = type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Leading_real_count'].values[0]\n\n if (lagging_real_count>0 and leading_real_count>0):\n replicationRatiosDict[mutationType] = np.log10(lagging_real_count/leading_real_count)\n ##################################################################\n\n ##################################################################\n if (mutationType in replicationRatiosDict) and (mutationType in transcriptionRatiosDict):\n plt.scatter(replicationRatiosDict[mutationType], transcriptionRatiosDict[mutationType], label=mutationType)\n ##################################################################\n\n ########################################################################\n\n legend = plt.legend(loc='upper left', frameon=True, fancybox =False,labels=six_mutation_types, bbox_to_anchor=(-0.0095, 1.0095))\n legend.get_frame().set_linewidth(1)\n\n frame = 
legend.get_frame()\n frame.set_facecolor('white')\n frame.set_edgecolor('black')\n\n plt.axvline(x=0.0, color='gray', linestyle='--')\n plt.axhline(y=0.0, color='gray', linestyle='--')\n\n if sample is None:\n figureName = 'all_mutation_types_%s_scatter_plot.png' %(STRANDBIAS)\n figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)\n\n else:\n figureName = 'all_mutation_types_%s_%d_%s_scatter_plot.png' %(sample,numberofMutations,STRANDBIAS)\n os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)\n figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)\n\n fig.savefig(figureFile)\n plt.cla()\n plt.close(fig)\n\n########################################################################\n\n########################################################################\n#Old way\n#For Mutation Types\ndef plot_ncomms11383_Supp_FigG_AllMutationTypes_TranscriptionLog10Ratio_ReplicationLog10Ratio(sample,numberofMutations,type2TranscriptionStrand2CountDict,type2ReplicationStrand2CountDict,outputDir,jobname):\n\n fig = plt.figure(figsize=(8,8), facecolor=None)\n plt.style.use('ggplot')\n\n # build a rectangle in axes coords\n left, width = .0, 1.\n bottom, height = .0, 1.\n right = left + width\n top = bottom + height\n\n # This code makes the background white.\n # Always put these statements after plt.figure\n ax = plt.gca()\n ax.set_facecolor('white')\n for edge_i in ['bottom','top','left','right']:\n ax.spines[edge_i].set_edgecolor(\"black\")\n ax.spines[edge_i].set_linewidth(1)\n ax.spines[edge_i].set_bounds(-0.3, 0.3)\n\n plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)\n plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)\n plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)\n plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)\n\n if (sample is not None):\n plt.title(sample, fontsize=15, fontweight='bold')\n\n plt.xlabel('Lagging/leading replication strand\\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')\n plt.ylabel('Transcribed/untranscribed strand\\nratio(log10)',fontstyle='normal', fontsize=12, fontweight='bold')\n\n # Put some extra place by xlim if necessary\n plt.xlim(-0.3, 0.3)\n plt.ylim(-0.3, 0.3)\n\n plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)\n plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)\n plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)\n plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)\n\n # plt.tick_params(\n # axis='y', # changes apply to the x-axis\n # which='both', # both major and minor ticks are affected\n # left='off' # ticks along the bottom edge are off\n # )\n\n yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]\n yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']\n plt.yticks(yticks, yticklabels)\n\n xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]\n xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']\n plt.xticks(xticks, xticklabels)\n\n ########################################################################\n transcriptionRatiosDict = {}\n replicationRatiosDict = {}\n for mutationType in six_mutation_types:\n if (mutationType in 
type2TranscriptionStrand2CountDict) and (mutationType in type2ReplicationStrand2CountDict):\n\n if ((TRANSCRIBED_STRAND in type2TranscriptionStrand2CountDict[mutationType]) and (UNTRANSCRIBED_STRAND in type2TranscriptionStrand2CountDict[mutationType])):\n transcriptionRatiosDict[mutationType]= np.log10(type2TranscriptionStrand2CountDict[mutationType][TRANSCRIBED_STRAND]/type2TranscriptionStrand2CountDict[mutationType][UNTRANSCRIBED_STRAND])\n\n if ((LAGGING in type2ReplicationStrand2CountDict[mutationType]) and (LEADING in type2ReplicationStrand2CountDict[mutationType])):\n replicationRatiosDict[mutationType] = np.log10(type2ReplicationStrand2CountDict[mutationType][LAGGING]/type2ReplicationStrand2CountDict[mutationType][LEADING])\n\n if (mutationType in replicationRatiosDict) and (mutationType in transcriptionRatiosDict):\n plt.scatter(replicationRatiosDict[mutationType],transcriptionRatiosDict[mutationType], label=mutationType)\n ########################################################################\n\n legend = plt.legend(loc='upper left', frameon=True, fancybox =False,labels=six_mutation_types, bbox_to_anchor=(-0.0095, 1.0095))\n legend.get_frame().set_linewidth(1)\n\n frame = legend.get_frame()\n frame.set_facecolor('white')\n frame.set_edgecolor('black')\n\n plt.axvline(x=0.0, color='gray', linestyle='--')\n plt.axhline(y=0.0, color='gray', linestyle='--')\n\n if sample is None:\n figureName = 'all_mutation_types_%s_scatter_plot.png' %(STRANDBIAS)\n figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)\n\n else:\n figureName = 'all_mutation_types_%s_%d_%s_scatter_plot.png' %(sample,numberofMutations,STRANDBIAS)\n os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)\n figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)\n\n fig.savefig(figureFile)\n plt.cla()\n plt.close(fig)\n########################################################################\n\n########################################################################\n#July 7, 2020\ndef plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes(signatureType,\n sample,\n numberofMutations,\n type_transcribed_versus_untranscribed_df,\n type_lagging_versus_leading_df,\n signature_cutoff_numberofmutations_averageprobability_df,\n outputDir,\n jobname):\n\n fig = plt.figure(figsize=(8,8), facecolor=None)\n plt.style.use('ggplot')\n\n # build a rectangle in axes coords\n left, width = .0, 1.\n bottom, height = .0, 1.\n right = left + width\n top = bottom + height\n\n # This code makes the background white.\n # Always put these statements after plt.figure\n ax = plt.gca()\n ax.set_facecolor('white')\n for edge_i in ['bottom','top','left','right']:\n ax.spines[edge_i].set_edgecolor(\"black\")\n ax.spines[edge_i].set_linewidth(1)\n ax.spines[edge_i].set_bounds(-0.3, 0.3)\n\n plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)\n plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)\n plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)\n plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)\n\n if (sample is not None):\n plt.title(sample, fontsize=15, fontweight='bold')\n\n plt.xlabel('Lagging/leading 
replication strand\\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')\n plt.ylabel('Transcribed/untranscribed strand\\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')\n\n # Put some extra place by xlim if necessary\n plt.xlim(-0.3, 0.3)\n plt.ylim(-0.3, 0.3)\n\n plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)\n plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)\n plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)\n plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)\n\n yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]\n yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']\n plt.yticks(yticks, yticklabels)\n\n xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]\n xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']\n plt.xticks(xticks, xticklabels)\n\n transcriptionRatiosDict = {}\n replicationRatiosDict = {}\n\n for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():\n #################################################################################################\n #First check whether we have this signature or not\n # type_transcribed_versus_untranscribed_df=type_transcribed_versus_untranscribed_df[['cancer_type', 'type',\n # 'Transcribed_real_count', 'UnTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'transcribed_versus_untranscribed_p_value','transcribed_versus_untranscribed_q_value',\n # 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',\n # 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list' ]]\n\n transcribed_real_count=0\n untranscribed_real_count=0\n\n if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==signature]['Transcribed_real_count'].values.size>0):\n transcribed_real_count=type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == signature]['Transcribed_real_count'].values[0]\n if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==signature]['UnTranscribed_real_count'].values.size>0):\n untranscribed_real_count=type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == signature]['UnTranscribed_real_count'].values[0]\n\n if (transcribed_real_count+untranscribed_real_count>=SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):\n transcriptionRatiosDict[signature] = np.log10(transcribed_real_count/untranscribed_real_count)\n #################################################################################################\n\n #################################################################################################\n # First check whether we have this signature or not\n # type_lagging_versus_leading_df=type_lagging_versus_leading_df[['cancer_type', 'type',\n # 'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count', 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value',\n # 'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count', 'Lagging_max_sims_count', 'Lagging_sims_count_list',\n # 'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count', 'Leading_max_sims_count', 'Leading_sims_count_list' ]]\n\n lagging_real_count=0\n 
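# A hedged refactoring sketch: the per-column lookups in this loop repeat the same\n        # "look the value up, default to 0" pattern four times per signature. A helper along these\n        # lines could factor that out (the name _real_count is hypothetical and the helper is not\n        # part of this module; kept as a comment so behaviour is unchanged):\n        #\n        # def _real_count(df, type_name, column):\n        #     values = df[df['type'] == type_name][column].values\n        #     return values[0] if values.size > 0 else 0\n        #\n        # lagging_real_count = _real_count(type_lagging_versus_leading_df, signature, 'Lagging_real_count')\n        #\n        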
leading_real_count = 0\n\n if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Lagging_real_count'].values.size>0):\n lagging_real_count=type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Lagging_real_count'].values[0]\n\n if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Leading_real_count'].values.size>0):\n leading_real_count=type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Leading_real_count'].values[0]\n\n if (lagging_real_count+leading_real_count>=SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):\n replicationRatiosDict[signature] = np.log10(lagging_real_count/leading_real_count)\n #################################################################################################\n\n if (transcriptionRatiosDict and replicationRatiosDict):\n signaturesShownInLegend = []\n\n for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():\n if ((signature in replicationRatiosDict.keys()) and (signature in transcriptionRatiosDict.keys())):\n signaturesShownInLegend.append(signature)\n plt.scatter(replicationRatiosDict[signature], transcriptionRatiosDict[signature], label=signature)\n\n legend = plt.legend(loc='upper left', frameon=True, fancybox=False, labels=signaturesShownInLegend,\n bbox_to_anchor=(-0.0095, 1.0095))\n legend.get_frame().set_linewidth(1)\n\n frame = legend.get_frame()\n frame.set_facecolor('white')\n frame.set_edgecolor('black')\n\n plt.axvline(x=0.0, color='gray', linestyle='--')\n plt.axhline(y=0.0, color='gray', linestyle='--')\n\n if sample is None:\n figureName = 'all_%s_signatures_%s_scatter_plot.png' % (signatureType, STRANDBIAS)\n figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)\n else:\n figureName = 'all_%s_signatures_%s_%d_%s_scatter_plot.png' % (signatureType, sample, numberofMutations, STRANDBIAS)\n os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)\n figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)\n\n fig.savefig(figureFile)\n plt.cla()\n plt.close(fig)\n########################################################################\n\n########################################################################\n#May 9, 2018 starts\n#For Signatures\ndef plot_ncomms11383_Supp_FigH_AllSignatures_TranscriptionLog10Ratio_ReplicationLog10Ratio(\n signatureType,\n sample,\n numberofMutations,\n signature2TranscriptionStrand2CountDict,\n signature2ReplicationStrand2CountDict,\n signature_cutoff_numberofmutations_averageprobability_df,\n outputDir,\n jobname):\n\n fig = plt.figure(figsize=(8,8), facecolor=None)\n plt.style.use('ggplot')\n\n # build a rectangle in axes coords\n left, width = .0, 1.\n bottom, height = .0, 1.\n right = left + width\n top = bottom + height\n\n # This code makes the background white.\n # Always put these statements after plt.figure\n ax = plt.gca()\n ax.set_facecolor('white')\n for edge_i in ['bottom','top','left','right']:\n ax.spines[edge_i].set_edgecolor(\"black\")\n ax.spines[edge_i].set_linewidth(1)\n ax.spines[edge_i].set_bounds(-0.3, 0.3)\n\n plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)\n plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)\n plt.text((right+0.02),(bottom+top-0.08), 
'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)\n plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)\n\n if (sample is not None):\n plt.title(sample, fontsize=15, fontweight='bold')\n\n plt.xlabel('Lagging/leading replication strand\\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')\n plt.ylabel('Transcribed/untranscribed strand\\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')\n\n # Put some extra place by xlim if necessary\n plt.xlim(-0.3, 0.3)\n plt.ylim(-0.3, 0.3)\n\n plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)\n plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)\n plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)\n plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)\n\n yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]\n yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']\n plt.yticks(yticks, yticklabels)\n\n xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]\n xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']\n plt.xticks(xticks, xticklabels)\n\n transcriptionRatiosDict = {}\n replicationRatiosDict = {}\n\n for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():\n #################################################################################################\n #First check whether we have this signature or not\n if ((signature in signature2TranscriptionStrand2CountDict) and (TRANSCRIBED_STRAND in (signature2TranscriptionStrand2CountDict[signature])) and\n (UNTRANSCRIBED_STRAND in (signature2TranscriptionStrand2CountDict[signature])) ):\n\n if ((signature2TranscriptionStrand2CountDict[signature][TRANSCRIBED_STRAND]+signature2TranscriptionStrand2CountDict[signature][UNTRANSCRIBED_STRAND]) >= SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):\n transcriptionRatiosDict[signature]= np.log10(signature2TranscriptionStrand2CountDict[signature][TRANSCRIBED_STRAND]/signature2TranscriptionStrand2CountDict[signature][UNTRANSCRIBED_STRAND])\n #################################################################################################\n\n #################################################################################################\n # First check whether we have this signature or not\n if ((signature in signature2ReplicationStrand2CountDict) and (LAGGING in (signature2ReplicationStrand2CountDict[signature])) and\n (LEADING in (signature2ReplicationStrand2CountDict[signature]))):\n\n if ((signature2ReplicationStrand2CountDict[signature][LAGGING]+signature2ReplicationStrand2CountDict[signature][LEADING])>= SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):\n replicationRatiosDict[signature] = np.log10(signature2ReplicationStrand2CountDict[signature][LAGGING]/signature2ReplicationStrand2CountDict[signature][LEADING])\n #################################################################################################\n\n if (transcriptionRatiosDict and replicationRatiosDict):\n signaturesShownInLegend = []\n\n for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():\n if ((signature in replicationRatiosDict.keys()) and (signature in transcriptionRatiosDict.keys())):\n signaturesShownInLegend.append(signature)\n plt.scatter(replicationRatiosDict[signature], transcriptionRatiosDict[signature], label=signature)\n\n legend = 
plt.legend(loc='upper left', frameon=True, fancybox=False, labels=signaturesShownInLegend,\n bbox_to_anchor=(-0.0095, 1.0095))\n legend.get_frame().set_linewidth(1)\n\n frame = legend.get_frame()\n frame.set_facecolor('white')\n frame.set_edgecolor('black')\n\n plt.axvline(x=0.0, color='gray', linestyle='--')\n plt.axhline(y=0.0, color='gray', linestyle='--')\n\n if sample is None:\n figureName = 'all_%s_signatures_%s_scatter_plot.png' % (signatureType, STRANDBIAS)\n figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)\n else:\n figureName = 'all_%s_signatures_%s_%d_%s_scatter_plot.png' % (\n signatureType, sample, numberofMutations, STRANDBIAS)\n os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)\n figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)\n\n fig.savefig(figureFile)\n plt.cla()\n plt.close(fig)\n########################################################################\n\n########################################################################\n#MutationTypeBased SampleBased Figures\ndef plot_ncomms11383_Supp_FigE_MutationTypeBased_AllSamples_TranscriptionLog10Ratio_ReplicationLog10Ratio(\n type2Sample2TranscriptionStrand2CountDict,\n type2Sample2ReplicationStrand2CountDict,\n outputDir,\n jobname,\n isFigureAugmentation):\n\n mutationType2ColorDict = {'C>A': 'blue', 'C>G':'black', 'C>T':'red', 'T>A':'gray', 'T>C':'green', 'T>G':'pink'}\n\n transcriptionRatiosDict = {}\n replicationRatiosDict = {}\n for mutationType in six_mutation_types:\n #initialization\n if mutationType not in transcriptionRatiosDict:\n transcriptionRatiosDict[mutationType] = {}\n if mutationType not in replicationRatiosDict:\n replicationRatiosDict[mutationType] = {}\n\n #Fill the dictionaries\n if mutationType in type2Sample2TranscriptionStrand2CountDict:\n for sample in type2Sample2TranscriptionStrand2CountDict[mutationType].keys():\n if ((TRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[mutationType][sample].keys()) and (UNTRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[mutationType][sample].keys())):\n transcriptionRatiosDict[mutationType][sample]= np.log10(type2Sample2TranscriptionStrand2CountDict[mutationType][sample][TRANSCRIBED_STRAND]/type2Sample2TranscriptionStrand2CountDict[mutationType][sample][UNTRANSCRIBED_STRAND])\n\n if mutationType in type2Sample2ReplicationStrand2CountDict:\n for sample in type2Sample2ReplicationStrand2CountDict[mutationType].keys():\n if ((LAGGING in type2Sample2ReplicationStrand2CountDict[mutationType][sample].keys()) and (LEADING in type2Sample2ReplicationStrand2CountDict[mutationType][sample].keys())):\n replicationRatiosDict[mutationType][sample] = np.log10(type2Sample2ReplicationStrand2CountDict[mutationType][sample][LAGGING]/type2Sample2ReplicationStrand2CountDict[mutationType][sample][LEADING])\n\n for mutationType in six_mutation_types:\n fig = plt.figure(figsize=(8, 8), facecolor=None)\n plt.style.use('ggplot')\n\n # build a rectangle in axes coords\n left, width = .0, 1.\n bottom, height = .0, 1.\n right = left + width\n top = bottom + height\n\n # This code makes the background white.\n # Always put these statements after plt.figure\n ax = plt.gca()\n ax.set_facecolor('white')\n for edge_i in ['bottom', 'top', 'left', 'right']:\n ax.spines[edge_i].set_edgecolor(\"black\")\n ax.spines[edge_i].set_linewidth(1)\n ax.spines[edge_i].set_bounds(-0.65, 0.65)\n\n 
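# A hedged, illustrative sketch: the white-background / black-spine styling above is repeated\n        # in every scatter-plot function of this module and could be factored into a shared helper\n        # (the name _style_square_axes is hypothetical; kept as a comment so behaviour is unchanged):\n        #\n        # def _style_square_axes(ax, bound):\n        #     ax.set_facecolor('white')\n        #     for edge in ['bottom', 'top', 'left', 'right']:\n        #         ax.spines[edge].set_edgecolor('black')\n        #         ax.spines[edge].set_linewidth(1)\n        #         ax.spines[edge].set_bounds(-bound, bound)\n        #\n        # _style_square_axes(plt.gca(), 0.65)\n\n        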
plt.title(mutationType, fontsize=15, fontweight='bold')\n\n plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)\n plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)\n plt.text((right + 0.02), (bottom + top - 0.08), 'Transcribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)\n plt.text((right + 0.02), (bottom + 0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)\n\n plt.xlabel('Lagging/leading replication strand\\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')\n plt.ylabel('Transcribed/untranscribed strand\\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')\n\n # Put some extra place by xlim if necessary\n plt.xlim(-0.65, 0.65)\n plt.ylim(-0.65, 0.65)\n\n plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)\n plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)\n plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)\n plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)\n\n yticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]\n yticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']\n plt.yticks(yticks, yticklabels)\n\n xticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]\n xticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']\n plt.xticks(xticks, xticklabels)\n\n if (mutationType in type2Sample2TranscriptionStrand2CountDict):\n for sample in type2Sample2TranscriptionStrand2CountDict[mutationType].keys():\n if ((sample in replicationRatiosDict[mutationType].keys()) and (sample in transcriptionRatiosDict[mutationType].keys())):\n plt.scatter(replicationRatiosDict[mutationType][sample],transcriptionRatiosDict[mutationType][sample], facecolor='none', color=mutationType2ColorDict[mutationType])\n\n plt.axvline(x=0.0, color='gray', linestyle='--')\n plt.axhline(y=0.0, color='gray', linestyle='--')\n\n if (isFigureAugmentation):\n plt.title(jobname + ' ' + mutationType)\n\n newMutationType = mutationType.replace('>', '2')\n\n figureName = newMutationType + '_MutationType_' + STRANDBIAS + '.png'\n figureFile = os.path.join(outputDir,jobname,FIGURE,STRANDBIAS,SCATTER_PLOTS,figureName)\n fig.savefig(figureFile)\n plt.cla()\n plt.close(fig)\n########################################################################\n\n\n########################################################################\n#SignatureBased SampleBased Figures\n#Sig26 is very different\ndef plot_ncomms11383_Supp_FigF_SignatureBased_AllSamples_TranscriptionLog10Ratio_ReplicationLog10Ratio(type2Sample2TranscriptionStrand2CountDict,type2Sample2ReplicationStrand2CountDict,signatures,outputDir,jobname,isFigureAugmentation):\n transcriptionRatiosDict = {}\n replicationRatiosDict = {}\n\n for signature in signatures:\n # initialization\n if signature not in transcriptionRatiosDict:\n transcriptionRatiosDict[signature] = {}\n if signature not in replicationRatiosDict:\n replicationRatiosDict[signature] = {}\n # Fill the dictionaries\n if signature in type2Sample2TranscriptionStrand2CountDict:\n for sample in type2Sample2TranscriptionStrand2CountDict[signature].keys():\n if (UNTRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[signature][sample]) and (TRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[signature][sample]):\n transcriptionRatiosDict[signature][sample] = 
np.log10(type2Sample2TranscriptionStrand2CountDict[signature][sample][TRANSCRIBED_STRAND] /type2Sample2TranscriptionStrand2CountDict[signature][sample][UNTRANSCRIBED_STRAND])\n # print(signature, sample)\n # print(signature2Sample2TranscriptionStrand2CountDict[signature][sample][TRANSCRIBED_STRAND])\n # print(signature2Sample2TranscriptionStrand2CountDict[signature][sample][UNTRANSCRIBED_STRAND])\n # print(signature,sample,transcriptionRatiosDict[signature][sample])\n\n if signature in type2Sample2ReplicationStrand2CountDict:\n for sample in type2Sample2ReplicationStrand2CountDict[signature].keys():\n if (LAGGING in type2Sample2ReplicationStrand2CountDict[signature][sample]) and (LEADING in type2Sample2ReplicationStrand2CountDict[signature][sample]):\n replicationRatiosDict[signature][sample] = np.log10(type2Sample2ReplicationStrand2CountDict[signature][sample][LAGGING] /type2Sample2ReplicationStrand2CountDict[signature][sample][LEADING])\n\n for signature in signatures:\n if (len(replicationRatiosDict[signature].keys())>0 and len(transcriptionRatiosDict[signature].keys())>0):\n fig = plt.figure(figsize=(8, 8), facecolor=None)\n plt.style.use('ggplot')\n\n # build a rectangle in axes coords\n left, width = .0, 1.\n bottom, height = .0, 1.\n right = left + width\n top = bottom + height\n\n # This code makes the background white.\n # Always put these statements after plt.figure\n ax = plt.gca()\n ax.set_facecolor('white')\n for edge_i in ['bottom', 'top', 'left', 'right']:\n ax.spines[edge_i].set_edgecolor(\"black\")\n ax.spines[edge_i].set_linewidth(1)\n ax.spines[edge_i].set_bounds(-0.65, 0.65)\n\n plt.title(signature, fontsize=15, fontweight='bold')\n\n plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)\n plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)\n plt.text((right + 0.02), (bottom + top - 0.08), 'Transcribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)\n plt.text((right + 0.02), (bottom + 0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)\n\n plt.xlabel('Lagging/leading replication strand\\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')\n plt.ylabel('Transcribed/untranscribed strand\\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')\n\n # Put some extra place by xlim if necessary\n plt.xlim(-0.65, 0.65)\n plt.ylim(-0.65, 0.65)\n\n plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)\n plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)\n plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)\n plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)\n\n yticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]\n yticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']\n plt.yticks(yticks, yticklabels)\n\n xticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]\n xticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']\n plt.xticks(xticks, xticklabels)\n\n for sample in type2Sample2TranscriptionStrand2CountDict[signature].keys():\n if (sample in replicationRatiosDict[signature]) and (sample in transcriptionRatiosDict[signature]):\n plt.scatter(replicationRatiosDict[signature][sample], transcriptionRatiosDict[signature][sample],facecolor='none',color='green')\n\n plt.axvline(x=0.0, color='gray', linestyle='--')\n plt.axhline(y=0.0, color='gray', linestyle='--')\n\n if 
(isFigureAugmentation):\n plt.title(jobname + ' ' + signature)\n\n figureName = signature.replace(' ','') + '_Signature_' + STRANDBIAS + '.png'\n figureFile = os.path.join(outputDir,jobname,FIGURE,STRANDBIAS,SCATTER_PLOTS,figureName)\n fig.savefig(figureFile)\n plt.cla()\n plt.close(fig)\n########################################################################\n\n\ndef is_there_at_least_10perc_diff(strand1_value, strand2_value):\n diff = abs(strand1_value - strand2_value)\n if (diff >= (strand1_value/10)) or (diff >= (strand2_value/10)):\n return True\n else:\n return False\n\n# Only this method supports simulations\n# key can be a sample or a signature\ndef plotStrandBiasFigureWithBarPlots(outputDir,\n jobname,\n numberofSimulations,\n key,\n isKeySample,\n numberofMutations,\n N,\n x_axis_labels,\n strand1_values,\n strand2_values,\n strand1_simulations_median_values,\n strand2_simulations_median_values,\n fdr_bh_adjusted_pvalues,\n strand1Name,\n strand2Name,\n mutationsOrSignatures,\n color1,\n color2,\n figureName,\n width,\n plot_mode):\n\n # Here we can take into difference between strand1_values and strand2_values while deciding on significance\n from matplotlib import rcParams\n rcParams.update({'figure.autolayout': True})\n\n # the x locations for the groups\n ind = np.arange(N)\n\n fig, ax = plt.subplots(figsize=(16,10),dpi=300)\n\n legend=None\n rects1=None\n rects2=None\n rects3=None\n rects4=None\n\n rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1)\n rects2 = ax.bar(ind + width, strand2_values, width=width, edgecolor='black', color=color2)\n\n if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):\n rects3 = ax.bar(ind+ 2*width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///')\n if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):\n rects4 = ax.bar(ind +3*width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///')\n\n # add some text for labels, title and axes ticks\n if plot_mode==PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL:\n ax.tick_params(axis='x', labelsize=35)\n ax.tick_params(axis='y', labelsize=35)\n\n locs, labels = plt.yticks()\n ax.set_ylim(0, locs[-1] + 5000)\n\n # To make the bar width not too wide\n if len(ind) < 6:\n maxn = 6\n ax.set_xlim(-0.5, maxn - 0.5)\n\n # Set title\n if key is not None:\n ax.set_title('%s %s vs. %s %s' %(key,strand1Name,strand2Name,mutationsOrSignatures), fontsize=20,fontweight='bold')\n else:\n ax.set_title('%s vs. 
%s %s' %(strand1Name,strand2Name,mutationsOrSignatures), fontsize=20,fontweight='bold')\n\n # Set x tick labels\n if len(x_axis_labels) > 6:\n ax.set_xticklabels(x_axis_labels, fontsize=35, rotation=90)\n else:\n ax.set_xticklabels(x_axis_labels, fontsize=35)\n\n # Set the ylabel\n plt.ylabel('Number of single base substitutions', fontsize=35, fontweight='normal')\n\n # set the x axis tick locations\n if (numberofSimulations > 0):\n ax.set_xticks(ind + (3 * width) / 2)\n realStrand1Name = 'Real %s' % (strand1Name)\n realStrand2Name = 'Real %s' % (strand2Name)\n simulationsStrand1Name = 'Simulated %s' % (strand1Name)\n simulationsStrand2Name = 'Simulated %s' % (strand2Name)\n if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)):\n if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)):\n legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),(realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 25}, ncol=1, loc='best')\n else:\n # Old way with no simulations\n ax.set_xticks(ind + width / 2)\n if ((rects1 is not None) and (rects2 is not None)):\n if ((len(rects1) > 0) and (len(rects2) > 0)):\n legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 25}, ncol=1, loc='upper right')\n\n elif plot_mode == PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_MANUSCRIPT:\n # set axis ticks\n # ax.tick_params(axis='both', which='both', length=0)\n ax.tick_params(axis='x', which='both', length=0)\n ax.tick_params(axis='y', which='both', length=0)\n # set axis labels\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n if (numberofSimulations > 0):\n realStrand1Name = 'Real %s' % (strand1Name)\n realStrand2Name = 'Real %s' % (strand2Name)\n simulationsStrand1Name = 'Simulated %s' % (strand1Name)\n simulationsStrand2Name = 'Simulated %s' % (strand2Name)\n\n if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)):\n if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)):\n legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),(realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 30}, ncol=1, loc='best')\n\n else:\n if ((rects1 is not None) and (rects2 is not None)):\n if ((len(rects1) > 0) and (len(rects2) > 0)):\n legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 35},loc='upper right')\n\n # To make the barplot background white\n ax.set_facecolor('white')\n # To makes spines black like a rectangle with black stroke\n ax.spines[\"bottom\"].set_color('black')\n ax.spines[\"left\"].set_color('black')\n ax.spines[\"top\"].set_color('black')\n ax.spines[\"right\"].set_color('black')\n\n if (legend is not None):\n frame = legend.get_frame()\n frame.set_facecolor('white')\n frame.set_edgecolor('black')\n\n # Add star above the bars for significant differences between the number of mutations on each strand starts\n # For each bar: Place a label\n if fdr_bh_adjusted_pvalues is not None:\n for fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):\n # Get X and Y placement of label from rect.\n y_value = max(rect1.get_height(),rect2.get_height())\n x_value = rect1.get_x() + rect1.get_width()\n\n # Number of points between bar and label. 
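(used as the vertical offset in xytext below.)\n            #\n            # The branches below attach '***' for an adjusted p-value <= 0.0001, '**' for <= 0.001 and\n            # '*' for <= SIGNIFICANCE_LEVEL, and only when the two strand counts differ by at least 10%\n            # (is_there_at_least_10perc_diff above). A condensed, hedged sketch of the same rule, not\n            # used by this function (the helper name _stars is hypothetical):\n            #\n            # def _stars(q_value, strand1_value, strand2_value):\n            #     if (q_value is None) or (not is_there_at_least_10perc_diff(strand1_value, strand2_value)):\n            #         return ''\n            #     if q_value <= 0.0001:\n            #         return '***'\n            #     elif q_value <= 0.001:\n            #         return '**'\n            #     elif q_value <= SIGNIFICANCE_LEVEL:\n            #         return '*'\n            #     return ''\n            #\n            # 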
Change to your liking.\n space = 3\n # Vertical alignment for positive values\n va = 'bottom'\n\n # If value of bar is negative: Place label below bar\n if y_value < 0:\n # Invert space to place label below\n space *= -1\n # Vertically align label at top\n va = 'top'\n\n # Use Y value as label and format number with one decimal place\n label = \"{:.1f}\".format(y_value)\n\n # Create annotation\n if ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.0001) and (is_there_at_least_10perc_diff(strand1_value, strand2_value))):\n plt.annotate(\n '***', # Use `label` as label\n (x_value, y_value), # Place label at end of the bar\n xytext=(0, space), # Vertically shift label by `space`\n textcoords=\"offset points\", # Interpret `xytext` as offset in points\n ha='center', # Horizontally center label\n va=va,\n fontsize=20) # Vertically align label differently for\n\n elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.001) and (is_there_at_least_10perc_diff(strand1_value, strand2_value))):\n plt.annotate(\n '**', # Use `label` as label\n (x_value, y_value), # Place label at end of the bar\n xytext=(0, space), # Vertically shift label by `space`\n textcoords=\"offset points\", # Interpret `xytext` as offset in points\n ha='center', # Horizontally center label\n va=va,\n fontsize=20) # Vertically align label differently for\n\n elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL) and (is_there_at_least_10perc_diff(strand1_value, strand2_value))):\n plt.annotate(\n '*', # Use `label` as label\n (x_value, y_value), # Place label at end of the bar\n xytext=(0, space), # Vertically shift label by `space`\n textcoords=\"offset points\", # Interpret `xytext` as offset in points\n ha='center', # Horizontally center label\n va=va,\n fontsize=20) # Vertically align label differently for\n\n # positive and negative values.\n # Add star above the bars for significant differences between the number of mutations on each strand ends\n #########################################################################################################\n\n if (key is None):\n figureName = '%s_bar_plot.png' %(figureName)\n figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, BAR_PLOTS, figureName)\n elif (not isKeySample):\n figureName = '%s_%s_bar_plot.png' %(key,figureName)\n figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, BAR_PLOTS, figureName)\n else:\n figureName = '%s_%s_%d_bar_plot.png' %(figureName,key,numberofMutations)\n os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, key, STRANDBIAS, BAR_PLOTS), exist_ok=True)\n figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, key, STRANDBIAS, BAR_PLOTS, figureName)\n\n fig.savefig(figureFile)\n plt.cla()\n plt.close(fig)\n\n\n# June 2, 2021\ndef plot_circle_plot_in_given_axis(ax,\n percentage_strings,\n sbs_signature,\n six_mutation_types,\n xticklabels_list,\n signature2mutation_type2strand2percentagedict):\n\n strand_bias_list=[LAGGING_VERSUS_LEADING, TRANSCRIBED_VERSUS_UNTRANSCRIBED, GENIC_VERSUS_INTERGENIC]\n\n # make aspect ratio square\n ax.set_aspect(1.0)\n\n # set title\n title = '%s Strand Bias' %(sbs_signature)\n ax.text(len(percentage_strings) * 3, len(strand_bias_list) + 2.5, title, horizontalalignment='center',fontsize=60, fontweight='bold', fontname='Arial')\n\n # Colors are from SigProfilerPlotting tool to be consistent\n colors = [[3 / 256, 189 / 256, 239 / 256],\n [1 / 256, 1 / 256, 1 / 256],\n [228 / 256, 41 / 256, 38 / 
256],\n [203 / 256, 202 / 256, 202 / 256],\n [162 / 256, 207 / 256, 99 / 256],\n [236 / 256, 199 / 256, 197 / 256]]\n\n # Put rectangles\n x = 0\n\n for i in range(0, len(six_mutation_types), 1):\n ax.text((x + (len(percentage_strings) / 2) - 0.75), len(strand_bias_list) + 1.5, six_mutation_types[i],fontsize=55, fontweight='bold', fontname='Arial')\n ax.add_patch(plt.Rectangle((x + .0415, len(strand_bias_list) + 0.75), len(percentage_strings) - (2 * .0415), .5,facecolor=colors[i], clip_on=False))\n ax.add_patch(plt.Rectangle((x, 0), len(percentage_strings), len(strand_bias_list), facecolor=colors[i], zorder=0,alpha=0.25, edgecolor='grey'))\n x += len(percentage_strings)\n\n # CODE GOES HERE TO CENTER X-AXIS LABELS...\n ax.set_xlim([0, len(six_mutation_types) * len(percentage_strings)])\n ax.set_xticklabels([])\n ax.tick_params(axis='x', which='minor', length=0, labelsize=35)\n\n # major ticks\n ax.set_xticks(np.arange(0, len(six_mutation_types) * len(percentage_strings), 1))\n # minor ticks\n ax.set_xticks(np.arange(0, len(six_mutation_types) * len(percentage_strings), 1) + 0.5, minor=True)\n\n ax.set_xticklabels(xticklabels_list, minor=True)\n ax.xaxis.set_label_position('top')\n ax.xaxis.set_ticks_position('top')\n\n ax.tick_params(\n axis='x', # changes apply to the x-axis\n which='major', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False) # labels along the bottom edge are off\n\n # CODE GOES HERE TO CENTER Y-AXIS LABELS...\n ax.set_ylim([0, len(strand_bias_list)])\n ax.set_yticklabels([])\n ax.tick_params(axis='y', which='minor', length=0, labelsize=40)\n\n # major ticks\n ax.set_yticks(np.arange(0, len(strand_bias_list), 1))\n # minor ticks\n ax.set_yticks(np.arange(0, len(strand_bias_list), 1) + 0.5, minor=True)\n ax.set_yticklabels(['', sbs_signature,''], minor=True) # fontsize\n\n ax.tick_params(\n axis='y', # changes apply to the x-axis\n which='major', # both major and minor ticks are affected\n left=False) # labels along the bottom edge are off\n\n # Gridlines based on major ticks\n ax.grid(which='major', color='black', zorder=3)\n\n # Put the legend\n legend_elements = [\n Line2D([0], [0], marker='o', color='white', label=GENIC, markerfacecolor='cyan', markersize=40),\n Line2D([0], [0], marker='o', color='white', label=INTERGENIC, markerfacecolor='gray', markersize=40),\n Line2D([0], [0], marker='o', color='white', label=TRANSCRIBED_STRAND, markerfacecolor='royalblue',markersize=40),\n Line2D([0], [0], marker='o', color='white', label=UNTRANSCRIBED_STRAND, markerfacecolor='yellowgreen',markersize=40),\n Line2D([0], [0], marker='o', color='white', label=LAGGING, markerfacecolor='indianred', markersize=40),\n Line2D([0], [0], marker='o', color='white', label=LEADING, markerfacecolor='goldenrod', markersize=40)]\n\n legend = ax.legend(handles=legend_elements, ncol=len(legend_elements), bbox_to_anchor=(0.5, 0), loc='upper center',fontsize=40)\n # legend.get_frame().set_linewidth(1)\n frame = legend.get_frame()\n frame.set_facecolor('white')\n frame.set_edgecolor('black')\n\n for percentage_diff_index, percentage_string in enumerate(percentage_strings):\n for mutation_type_index, mutation_type in enumerate(six_mutation_types):\n # for row_sbs_signature_index, row_sbs_signature in enumerate(rows_sbs_signatures):\n # strand_bias_list = [TRANSCRIBED_VERSUS_UNTRANSCRIBED, GENIC_VERSUS_INTERGENIC, LAGGING_VERSUS_LEADING]\n for strand_bias_index, strand_bias in enumerate(strand_bias_list):\n if (strand_bias == 
LAGGING_VERSUS_LEADING):\n if sbs_signature in signature2mutation_type2strand2percentagedict:\n if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]:\n lagging_percentage = None\n leading_percentage = None\n\n if (LAGGING in signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and (\n signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][LAGGING][percentage_string] == 1):\n lagging_percentage = 100\n if (LEADING in signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and (\n signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][LEADING][percentage_string] == 1):\n leading_percentage = 100\n\n if (lagging_percentage is not None) and (leading_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string))\n ax.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,strand_bias_index + 0.5), radius, color='indianred', fill=True))\n elif (leading_percentage is not None) and (lagging_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string))\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius,\n color='goldenrod', fill=True))\n\n elif (lagging_percentage is not None) and (leading_percentage is not None):\n radius_lagging = 0.49\n radius_leading = 0.49\n if (radius_lagging > radius_leading):\n # First lagging\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius_lagging,\n color='indianred', fill=True))\n # Second leading\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius_leading,\n color='goldenrod', fill=True))\n else:\n # First leading\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius_leading,\n color='goldenrod', fill=True))\n # Second lagging\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius_lagging,\n color='indianred', fill=True))\n\n elif (strand_bias == GENIC_VERSUS_INTERGENIC):\n if sbs_signature in signature2mutation_type2strand2percentagedict:\n if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]:\n genic_percentage = None\n intergenic_percentage = None\n\n if (GENIC in signature2mutation_type2strand2percentagedict[sbs_signature][\n mutation_type]) and (\n signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][\n GENIC][percentage_string] == 1):\n genic_percentage = 100\n if (INTERGENIC in signature2mutation_type2strand2percentagedict[sbs_signature][\n mutation_type]) and (\n signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][\n INTERGENIC][percentage_string] == 1):\n intergenic_percentage = 100\n\n if (genic_percentage is not None) and (intergenic_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot 
circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius, color='cyan',\n fill=True))\n\n elif (intergenic_percentage is not None) and (genic_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius, color='gray',\n fill=True))\n\n elif (genic_percentage is not None) and (intergenic_percentage is not None):\n radius_genic = 0.49\n radius_intergenic = 0.49\n if (radius_genic > radius_intergenic):\n # First genic\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius_genic,\n color='cyan', fill=True))\n # Second intergenic\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius_intergenic,\n color='gray', fill=True))\n\n else:\n # First intergenic\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius_intergenic,\n color='gray', fill=True))\n # Second genic\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius_genic,\n color='cyan', fill=True))\n\n\n elif (strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED):\n if sbs_signature in signature2mutation_type2strand2percentagedict:\n if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]:\n transcribed_percentage = None\n untranscribed_percentage = None\n\n if (TRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[sbs_signature][\n mutation_type]) and (\n signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][\n TRANSCRIBED_STRAND][percentage_string] == 1):\n transcribed_percentage = 100\n if (UNTRANSCRIBED_STRAND in\n signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and (\n signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][\n UNTRANSCRIBED_STRAND][percentage_string] == 1):\n untranscribed_percentage = 100\n\n if (transcribed_percentage is not None) and (untranscribed_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius,\n color='royalblue', fill=True))\n\n elif (untranscribed_percentage is not None) and (transcribed_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + 
percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius,\n color='yellowgreen', fill=True))\n\n elif (transcribed_percentage is not None) and (untranscribed_percentage is not None):\n radius_transcribed = 0.49\n radius_untranscribed = 0.49\n if (radius_transcribed > radius_untranscribed):\n # First transcribed\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius_transcribed,\n color='royalblue', fill=True))\n # Second untranscribed\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius_untranscribed,\n color='yellowgreen', fill=True))\n\n else:\n # First untranscribed\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius_untranscribed,\n color='yellowgreen', fill=True))\n # Second transcribed\n ax.add_patch(plt.Circle((mutation_type_index * len(\n percentage_strings) + percentage_diff_index + 0.5,\n strand_bias_index + 0.5), radius_transcribed,\n color='royalblue', fill=True))\n\n\n# June 2, 2021\ndef plot_strand_bias_figure_with_bar_plots(strand_bias,\n strandbias_figures_outputDir,\n numberofSimulations,\n signature,\n N,\n x_axis_tick_labels,\n y_axis_label,\n strand1_values,\n strand2_values,\n strand1_simulations_median_values,\n strand2_simulations_median_values,\n fdr_bh_adjusted_pvalues,\n strand1Name,\n strand2Name,\n color1,\n color2,\n width,\n axis_given=None):\n\n # Here we can take into difference between strand1_values and strand2_values while deciding on significance\n # the x locations for the groups\n ind = np.arange(N)\n if axis_given == None:\n fig, ax = plt.subplots(figsize=(16,10),dpi=100)\n else:\n ax = axis_given\n\n legend = None\n rects3 = None\n rects4 = None\n\n rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1)\n rects2 = ax.bar(ind + width, strand2_values, width=width, edgecolor='black', color=color2)\n\n if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):\n rects3 = ax.bar(ind+ 2*width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///')\n if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):\n rects4 = ax.bar(ind + 3*width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///')\n\n # add some text for labels, title and axes ticks\n ax.tick_params(axis='x', labelsize=35)\n ax.tick_params(axis='y', labelsize=35)\n\n ymax = np.nanmax([np.nanmax(strand1_values),\n np.nanmax(strand2_values),\n np.nanmax(strand1_simulations_median_values),\n np.nanmax(strand2_simulations_median_values)])\n y = ymax / 1.025\n ytick_offest = float(y / 3)\n ylabs = [0, ytick_offest, ytick_offest * 2, ytick_offest * 3, ytick_offest * 4]\n ylabels = [0, ytick_offest, ytick_offest * 2, ytick_offest * 3, ytick_offest * 4]\n\n ylabels = ['{:,}'.format(int(x)) for x in ylabels]\n if len(ylabels[-1]) > 3:\n ylabels_temp = []\n if len(ylabels[-1]) > 7:\n for label in ylabels:\n if len(label) > 7:\n ylabels_temp.append(label[0:-8] + \"m\")\n elif len(label) > 3:\n ylabels_temp.append(label[0:-4] + \"k\")\n else:\n ylabels_temp.append(label)\n else:\n for label in ylabels:\n if len(label) > 3:\n ylabels_temp.append(label[0:-4] + \"k\")\n else:\n ylabels_temp.append(label)\n ylabels = ylabels_temp\n\n ax.set_ylim([0, y])\n 
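# The block above comma-formats the y tick labels and then abbreviates them by string length:\n    # labels longer than 7 characters drop their last 8 characters and get an 'm' suffix\n    # (e.g. '1,250,000' -> '1m'), and labels longer than 3 characters drop their last 4 and get a\n    # 'k' suffix (e.g. '12,500' -> '12k'). A hedged per-label sketch of the same idea, kept as a\n    # comment:\n    #\n    # def _abbreviate(label):\n    #     if len(label) > 7:\n    #         return label[0:-8] + 'm'\n    #     if len(label) > 3:\n    #         return label[0:-4] + 'k'\n    #     return label\n    #\n    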
ax.set_yticks(ylabs)\n ax.set_yticklabels(ylabels, fontsize=35, fontweight='bold', fontname='Arial')\n\n # To make the bar width not too wide\n if len(ind) < 6:\n maxn = 6\n ax.set_xlim(-0.5, maxn - 0.5)\n\n # Set title\n ax.set_title('%s vs. %s' %(strand1Name,strand2Name), fontsize=40, fontweight='bold')\n\n # Set x tick labels\n if len(x_axis_tick_labels) > 6:\n ax.set_xticklabels(x_axis_tick_labels, fontsize=35, rotation=90)\n else:\n ax.set_xticklabels(x_axis_tick_labels, fontsize=35)\n\n # Set the ylabel\n if y_axis_label:\n ax.set_ylabel(y_axis_label, fontsize=35, fontweight='normal', labelpad=15)\n\n # Set the x axis tick locations\n if (numberofSimulations > 0):\n ax.set_xticks(ind + (3 * width) / 2)\n realStrand1Name = 'Real %s' % (strand1Name)\n realStrand2Name = 'Real %s' % (strand2Name)\n simulationsStrand1Name = 'Simulated %s' % (strand1Name)\n simulationsStrand2Name = 'Simulated %s' % (strand2Name)\n if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)):\n if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)):\n legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),\n (realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 25}, ncol=1, loc='best')\n else:\n # Old way with no simulations\n ax.set_xticks(ind + width / 2)\n if ((rects1 is not None) and (rects2 is not None)):\n if ((len(rects1) > 0) and (len(rects2) > 0)):\n legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 25}, ncol=1, loc='upper right')\n\n # To make the barplot background white\n ax.set_facecolor('white')\n # To makes spines black like a rectangle with black stroke\n ax.spines[\"bottom\"].set_color('black')\n ax.spines[\"left\"].set_color('black')\n ax.spines[\"top\"].set_color('black')\n ax.spines[\"right\"].set_color('black')\n\n if (legend is not None):\n frame = legend.get_frame()\n frame.set_facecolor('white')\n frame.set_edgecolor('black')\n\n # Add star above the bars for significant differences between the number of mutations on each strand starts\n # For each bar: Place a label\n if fdr_bh_adjusted_pvalues is not None:\n for fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):\n # Get X and Y placement of label from rect.\n y_value = max(rect1.get_height(),rect2.get_height())\n x_value = rect1.get_x() + rect1.get_width()\n\n # Number of points between bar and label. 
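(applied via xytext below.)\n            #\n            # Note: unlike plotStrandBiasFigureWithBarPlots above, which calls plt.annotate and therefore\n            # always draws on the current axes, this function annotates through ax.annotate, so the\n            # significance stars land on axis_given when an external axis is supplied (as done by\n            # plot_bar_plot_in_given_axis below). The star thresholds and the 10% difference gate are\n            # the same as in that older function.\n            # 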
Change to your liking.\n space = 3\n # Vertical alignment for positive values\n va = 'bottom'\n\n # If value of bar is negative: Place label below bar\n if y_value < 0:\n # Invert space to place label below\n space *= -1\n # Vertically align label at top\n va = 'top'\n\n # Use Y value as label and format number with one decimal place\n label = \"{:.1f}\".format(y_value)\n\n # Create annotation\n if ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.0001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):\n ax.annotate(\n '***', # Use `label` as label\n (x_value, y_value), # Place label at end of the bar\n xytext=(0, space), # Vertically shift label by `space`\n textcoords=\"offset points\", # Interpret `xytext` as offset in points\n ha='center', # Horizontally center label\n va=va,\n fontsize=25) # Vertically align label differently for\n\n elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):\n ax.annotate(\n '**', # Use `label` as label\n (x_value, y_value), # Place label at end of the bar\n xytext=(0, space), # Vertically shift label by `space`\n textcoords=\"offset points\", # Interpret `xytext` as offset in points\n ha='center', # Horizontally center label\n va=va,\n fontsize=25) # Vertically align label differently for\n\n elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL) and is_there_at_least_10perc_diff(strand1_value, strand2_value)) :\n ax.annotate(\n '*', # Use `label` as label\n (x_value, y_value), # Place label at end of the bar\n xytext=(0, space), # Vertically shift label by `space`\n textcoords=\"offset points\", # Interpret `xytext` as offset in points\n ha='center', # Horizontally center label\n va=va,\n fontsize=25) # Vertically align label differently for\n\n if axis_given == None:\n filename = '%s_%s_with_bars.png' %(signature,strand_bias)\n figFile = os.path.join(strandbias_figures_outputDir, filename)\n fig.savefig(figFile, dpi=100, bbox_inches=\"tight\")\n\n plt.cla()\n plt.close(fig)\n\n\n# June 2, 2021\ndef plot_bar_plot_in_given_axis(axis,\n sbs_signature,\n strand_bias,\n strands_list,\n signature_strand1_versus_strand2_df,\n y_axis_label = None):\n box = axis.get_position()\n axis.set_position([box.x0, box.y0 + 0.125, box.width * 1, box.height * 1], which='both')\n\n mutation_types = six_mutation_types\n numberofSimulations = 100\n width = 0.20\n\n if strand_bias == LAGGING_VERSUS_LEADING:\n strands = strands_list\n strand1 = \"Lagging_real_count\"\n strand2 = \"Leading_real_count\"\n strand1_sims = \"Lagging_mean_sims_count\"\n strand2_sims = \"Leading_mean_sims_count\"\n q_value_column_name = \"lagging_versus_leading_q_value\"\n color1 = 'indianred'\n color2 = 'goldenrod'\n elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:\n strands = strands_list\n strand1 = \"Transcribed_real_count\"\n strand2 = \"UnTranscribed_real_count\"\n strand1_sims = \"Transcribed_mean_sims_count\"\n strand2_sims = \"UnTranscribed_mean_sims_count\"\n q_value_column_name = \"transcribed_versus_untranscribed_q_value\"\n color1 = 'royalblue'\n color2 = 'yellowgreen'\n elif strand_bias == GENIC_VERSUS_INTERGENIC:\n strands = strands_list\n strand1 = \"genic_real_count\"\n strand2 = \"intergenic_real_count\"\n strand1_sims = \"genic_mean_sims_count\"\n strand2_sims = \"intergenic_mean_sims_count\"\n q_value_column_name = \"genic_versus_intergenic_q_value\"\n color1 = 'cyan'\n color2 = 'gray'\n\n groupby_df = 
signature_strand1_versus_strand2_df.groupby(['signature'])\n group_df = groupby_df.get_group(sbs_signature)\n\n mutationtype_strand1_real_list = []\n mutationtype_strand2_real_list = []\n mutationtype_strand1_sims_mean_list = []\n mutationtype_strand2_sims_mean_list = []\n mutationtype_FDR_BH_adjusted_pvalues_list = []\n\n for mutation_type in six_mutation_types:\n strand1_real_count=group_df[group_df['mutation_type'] == mutation_type][strand1].values[0]\n strand2_real_count=group_df[group_df['mutation_type'] == mutation_type][strand2].values[0]\n strand1_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand1_sims].values[0]\n strand2_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand2_sims].values[0]\n q_value=group_df[group_df['mutation_type'] == mutation_type][q_value_column_name].values[0]\n mutationtype_strand1_real_list.append(strand1_real_count)\n mutationtype_strand2_real_list.append(strand2_real_count)\n mutationtype_strand1_sims_mean_list.append(strand1_sims_count)\n mutationtype_strand2_sims_mean_list.append(strand2_sims_count)\n mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)\n\n plot_strand_bias_figure_with_bar_plots(strand_bias,\n None,\n numberofSimulations,\n sbs_signature,\n len(mutation_types),\n mutation_types,\n y_axis_label,\n mutationtype_strand1_real_list,\n mutationtype_strand2_real_list,\n mutationtype_strand1_sims_mean_list,\n mutationtype_strand2_sims_mean_list,\n mutationtype_FDR_BH_adjusted_pvalues_list,\n strands[0],\n strands[1],\n color1,\n color2,\n width,\n axis_given = axis)\n\n\n# June 2, 2021\ndef plot_strand_bias_figure_with_stacked_bar_plots(strand_bias,\n strandbias_figures_outputDir,\n numberofSimulations,\n signature,\n N,\n x_axis_tick_labels,\n y_axis_label,\n strand1_values,\n strand2_values,\n strand1_simulations_median_values,\n strand2_simulations_median_values,\n fdr_bh_adjusted_pvalues,\n strand1Name,\n strand2Name,\n color1,\n color2,\n width,\n axis_given=None):\n\n # Replace np.nans with 0\n strand1_values = [0 if np.isnan(x) else x for x in strand1_values]\n strand2_values = [0 if np.isnan(x) else x for x in strand2_values]\n strand1_simulations_median_values = [0 if np.isnan(x) else x for x in strand1_simulations_median_values]\n strand2_simulations_median_values = [0 if np.isnan(x) else x for x in strand2_simulations_median_values]\n\n # Fill odds_ratio_list\n odds_real_list = []\n odds_sims_list = []\n for a, b in zip(strand1_values, strand2_values):\n odds_real = np.nan\n if b>0:\n odds_real = a/b\n odds_real_list.append(odds_real)\n\n for x, y in zip(strand1_simulations_median_values, strand2_simulations_median_values):\n odds_sims = np.nan\n if y > 0:\n odds_sims = x/y\n odds_sims_list.append(odds_sims)\n\n odds_ratio_list = [odds_real/odds_sims if odds_sims>0 else np.nan for (odds_real, odds_sims) in zip(odds_real_list,odds_sims_list)]\n\n # Here we can take into difference between strand1_values and strand2_values while deciding on significance\n # the x locations for the groups\n ind = np.arange(N)\n if axis_given == None:\n fig, ax = plt.subplots(figsize=(16,10),dpi=100)\n else:\n ax = axis_given\n\n legend=None\n\n rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1)\n rects2 = ax.bar(ind, strand2_values, width=width, edgecolor='black', color=color2, bottom=strand1_values)\n\n if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):\n ax.bar(ind + width, strand1_simulations_median_values, width=width, 
edgecolor='black', color=color1, hatch = '///')\n if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):\n ax.bar(ind + width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///', bottom=strand1_simulations_median_values)\n\n # Add some text for labels, title and axes ticks\n ax.tick_params(axis='x', labelsize=35)\n ax.tick_params(axis='y', labelsize=35)\n\n ax.set_ylim(0, 1.1)\n ax.set_yticklabels([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=35)\n\n # To make the bar width not too wide\n if len(ind) < 6:\n maxn = 6\n ax.set_xlim(-0.5, maxn - 0.5)\n\n # Set title\n stacked_bar_title = 'Real vs. Simulated\\nOdds Ratio of %s vs. %s' %(strand1Name, strand2Name)\n ax.set_title(stacked_bar_title, fontsize=40, fontweight='bold')\n\n # Set x tick labels\n if len(x_axis_tick_labels) > 6:\n ax.set_xticklabels(x_axis_tick_labels, fontsize=35, rotation=90)\n else:\n ax.set_xticklabels(x_axis_tick_labels, fontsize=35)\n\n # Set the ylabel\n if y_axis_label:\n ax.set_ylabel(y_axis_label, fontsize=35, fontweight='normal', labelpad=15)\n\n # Set the x axis tick locations\n if (numberofSimulations > 0):\n ax.set_xticks(ind + (width/2))\n else:\n # Old way with no simulations\n ax.set_xticks(ind + width / 2)\n\n # To make the barplot background white\n ax.set_facecolor('white')\n # To makes spines black like a rectangle with black stroke\n ax.spines[\"bottom\"].set_color('black')\n ax.spines[\"left\"].set_color('black')\n ax.spines[\"top\"].set_color('black')\n ax.spines[\"right\"].set_color('black')\n\n if (legend is not None):\n frame = legend.get_frame()\n frame.set_facecolor('white')\n frame.set_edgecolor('black')\n\n # Add star above the bars for significant differences between the number of mutations on each strand starts\n # For each bar: Place a label\n if odds_ratio_list is not None:\n for odds_ratio, fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(odds_ratio_list, fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):\n # Get X and Y placement of label from rect.\n # y_value = max(rect1.get_height(),rect2.get_height())\n y_value = rect1.get_height() + rect2.get_height()\n x_value = rect1.get_x() + rect1.get_width()\n\n # Number of points between bar and label. 
Change to your liking.\n space = 3\n # Vertical alignment for positive values\n va = 'bottom'\n\n # If value of bar is negative: Place label below bar\n if y_value < 0:\n # Invert space to place label below\n space *= -1\n # Vertically align label at top\n va = 'top'\n\n # Use Y value as label and format number with one decimal place\n label = \"{:.1f}\".format(y_value)\n\n # Create annotation\n if not np.isnan(odds_ratio):\n if ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.0001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):\n ax.annotate(\n '%.2f ***' %(odds_ratio), # Use `label` as label\n (x_value, y_value), # Place label at end of the bar\n xytext=(0, space), # Vertically shift label by `space`\n textcoords=\"offset points\", # Interpret `xytext` as offset in points\n ha='center', # Horizontally center label\n va=va,\n fontsize=25) # Vertically align label differently for\n\n elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= 0.001) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):\n ax.annotate(\n '%.2f **' %(odds_ratio), # Use `label` as label\n (x_value, y_value), # Place label at end of the bar\n xytext=(0, space), # Vertically shift label by `space`\n textcoords=\"offset points\", # Interpret `xytext` as offset in points\n ha='center', # Horizontally center label\n va=va,\n fontsize=25) # Vertically align label differently for\n\n elif ((fdr_bh_adjusted_pvalue is not None) and (fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL) and is_there_at_least_10perc_diff(strand1_value, strand2_value)):\n ax.annotate(\n '%.2f *' %(odds_ratio), # Use `label` as label\n (x_value, y_value), # Place label at end of the bar\n xytext=(0, space), # Vertically shift label by `space`\n textcoords=\"offset points\", # Interpret `xytext` as offset in points\n ha='center', # Horizontally center label\n va=va,\n fontsize=25) # Vertically align label differently for\n else:\n ax.annotate(\n '%.2f' %(odds_ratio), # Use `label` as label\n (x_value, y_value), # Place label at end of the bar\n xytext=(0, space), # Vertically shift label by `space`\n textcoords=\"offset points\", # Interpret `xytext` as offset in points\n ha='center', # Horizontally center label\n va=va,\n fontsize=25) # Vertically align label differently for\n\n if axis_given==None:\n filename = '%s_%s_with_bars.png' %(signature,strand_bias)\n figFile = os.path.join(strandbias_figures_outputDir, filename)\n fig.savefig(figFile, dpi=100, bbox_inches=\"tight\")\n\n plt.cla()\n plt.close(fig)\n\n\n# June 2, 2021\ndef plot_stacked_bar_plot_in_given_axis(axis,\n sbs_signature,\n strand_bias,\n strands_list,\n signature_strand1_versus_strand2_df,\n y_axis_label = None):\n box = axis.get_position()\n axis.set_position([box.x0, box.y0+0.125, box.width * 1, box.height * 1], which='both')\n\n mutation_types = six_mutation_types\n numberofSimulations = 100\n width = 0.20\n\n if strand_bias == LAGGING_VERSUS_LEADING:\n strands = strands_list\n strand1 = \"Lagging_real_count\"\n strand2 = \"Leading_real_count\"\n strand1_sims = \"Lagging_mean_sims_count\"\n strand2_sims = \"Leading_mean_sims_count\"\n q_value_column_name = \"lagging_versus_leading_q_value\"\n color1 = 'indianred'\n color2 = 'goldenrod'\n elif strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED:\n strands = strands_list\n strand1 = \"Transcribed_real_count\"\n strand2 = \"UnTranscribed_real_count\"\n strand1_sims = \"Transcribed_mean_sims_count\"\n strand2_sims = \"UnTranscribed_mean_sims_count\"\n 
q_value_column_name = \"transcribed_versus_untranscribed_q_value\"\n color1 = 'royalblue'\n color2 = 'yellowgreen'\n elif strand_bias == GENIC_VERSUS_INTERGENIC:\n strands = strands_list\n strand1 = \"genic_real_count\"\n strand2 = \"intergenic_real_count\"\n strand1_sims = \"genic_mean_sims_count\"\n strand2_sims = \"intergenic_mean_sims_count\"\n q_value_column_name = \"genic_versus_intergenic_q_value\"\n color1 = 'cyan'\n color2 = 'gray'\n\n groupby_df = signature_strand1_versus_strand2_df.groupby(['signature'])\n group_df = groupby_df.get_group(sbs_signature)\n\n mutationtype_strand1_real_list = []\n mutationtype_strand2_real_list = []\n mutationtype_strand1_sims_mean_list = []\n mutationtype_strand2_sims_mean_list = []\n mutationtype_FDR_BH_adjusted_pvalues_list = []\n\n for mutation_type in six_mutation_types:\n strand1_real_count=group_df[group_df['mutation_type'] == mutation_type][strand1].values[0]\n strand2_real_count=group_df[group_df['mutation_type'] == mutation_type][strand2].values[0]\n strand1_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand1_sims].values[0]\n strand2_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand2_sims].values[0]\n q_value=group_df[group_df['mutation_type'] == mutation_type][q_value_column_name].values[0]\n mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)\n\n if (strand1_real_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT) or (strand2_real_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT):\n mutationtype_strand1_real_list.append(strand1_real_count/(strand1_real_count+strand2_real_count))\n mutationtype_strand2_real_list.append(strand2_real_count/(strand1_real_count+strand2_real_count))\n else:\n mutationtype_strand1_real_list.append(np.nan)\n mutationtype_strand2_real_list.append(np.nan)\n\n if (strand1_sims_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT) or (strand2_sims_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT):\n mutationtype_strand1_sims_mean_list.append(strand1_sims_count/(strand1_sims_count+strand2_sims_count))\n mutationtype_strand2_sims_mean_list.append(strand2_sims_count/(strand1_sims_count+strand2_sims_count))\n else:\n mutationtype_strand1_sims_mean_list.append(np.nan)\n mutationtype_strand2_sims_mean_list.append(np.nan)\n\n plot_strand_bias_figure_with_stacked_bar_plots(strand_bias,\n None,\n numberofSimulations,\n sbs_signature,\n len(mutation_types),\n mutation_types,\n y_axis_label,\n mutationtype_strand1_real_list,\n mutationtype_strand2_real_list,\n mutationtype_strand1_sims_mean_list,\n mutationtype_strand2_sims_mean_list,\n mutationtype_FDR_BH_adjusted_pvalues_list,\n strands[0],\n strands[1],\n color1,\n color2,\n width,\n axis_given=axis)\n\n\n\ndef plot_circle_bar_plots_together(outputDir,\n jobname,\n sbs_signature,\n six_mutation_types,\n signature2mutation_type2strand2percentagedict,\n signature_genic_versus_intergenic_df,\n signature_transcribed_versus_untranscribed_df,\n signature_lagging_versus_leading_df,\n genic_vs_intergenic_strands,\n transcription_strands,\n replication_strands):\n\n x_ticklabels_list = percentage_strings * 6\n fig = plt.figure(figsize=(5 + 1.5 * len(x_ticklabels_list), 30 + 1.5))\n plt.rc('axes', edgecolor='lightgray')\n\n width = 6\n height = 6\n width_ratios = [1] * width\n height_ratios = [1] * height\n gs = gridspec.GridSpec(height, width, height_ratios = height_ratios, width_ratios = width_ratios)\n fig.subplots_adjust(hspace=0, wspace=3)\n\n cirle_plot_axis = plt.subplot(gs[0:2, 
:])\n\n genic_vs_intergenic_bar_plot_axis = plt.subplot(gs[2:4, 0:2])\n transcribed_vs_untranscribed_bar_plot_axis = plt.subplot(gs[2:4, 2:4])\n lagging_vs_leading_bar_plot_axis = plt.subplot(gs[2:4, 4:6])\n\n genic_vs_intergenic_stacked_bar_plot_axis = plt.subplot(gs[4:, 0:2])\n transcribed_vs_untranscribed_stacked_bar_plot_axis = plt.subplot(gs[4:, 2:4])\n lagging_vs_leading_stacked_bar_plot_axis = plt.subplot(gs[4:, 4:6])\n\n # Circle plot with legends\n plot_circle_plot_in_given_axis(cirle_plot_axis,\n percentage_strings,\n sbs_signature,\n six_mutation_types,\n x_ticklabels_list,\n signature2mutation_type2strand2percentagedict)\n\n # 3 Bar plots side by side\n plot_bar_plot_in_given_axis(genic_vs_intergenic_bar_plot_axis,\n sbs_signature,\n GENIC_VERSUS_INTERGENIC,\n genic_vs_intergenic_strands,\n signature_genic_versus_intergenic_df,\n y_axis_label = 'Number of Single Base Substitutions')\n\n plot_bar_plot_in_given_axis(transcribed_vs_untranscribed_bar_plot_axis,\n sbs_signature,\n TRANSCRIBED_VERSUS_UNTRANSCRIBED,\n transcription_strands,\n signature_transcribed_versus_untranscribed_df)\n\n plot_bar_plot_in_given_axis(lagging_vs_leading_bar_plot_axis,\n sbs_signature,\n LAGGING_VERSUS_LEADING,\n replication_strands,\n signature_lagging_versus_leading_df)\n\n # 3 Stacked Bar plots side by side\n plot_stacked_bar_plot_in_given_axis(genic_vs_intergenic_stacked_bar_plot_axis,\n sbs_signature,\n GENIC_VERSUS_INTERGENIC,\n genic_vs_intergenic_strands,\n signature_genic_versus_intergenic_df,\n y_axis_label = 'Ratio of mutations on each strand')\n\n plot_stacked_bar_plot_in_given_axis(transcribed_vs_untranscribed_stacked_bar_plot_axis,\n sbs_signature,\n TRANSCRIBED_VERSUS_UNTRANSCRIBED,\n transcription_strands,\n signature_transcribed_versus_untranscribed_df)\n\n plot_stacked_bar_plot_in_given_axis(lagging_vs_leading_stacked_bar_plot_axis,\n sbs_signature,\n LAGGING_VERSUS_LEADING,\n replication_strands,\n signature_lagging_versus_leading_df)\n\n # filename = '%s_circle_bar_plot_together_%s.png' % (sbs_signature, str(significance_level).replace('.', '_'))\n filename = '%s_circle_bar_plots.png' % (sbs_signature)\n figurepath = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, CIRCLE_BAR_PLOTS, filename)\n fig.savefig(figurepath, dpi=100, bbox_inches=\"tight\")\n\n plt.cla()\n plt.close(fig)\n\n\n# Key can be signature or sample\ndef plotBarPlotsUsingDataframes(outputDir,\n jobname,\n numberofSimulations,\n signature_cutoff_numberofmutations_averageprobability_df,\n isKeySample,\n existingMutationTypesList,\n signature_strand1_versus_strand2_df,\n width,\n strand1_versus_strand2,\n strands,\n color1,\n color2,\n title,\n figureName,\n plot_mode):\n\n # signature_strand1_versus_strand2_df column names here\n # ['cancer_type', 'signature', 'mutation_type',\n # 'Transcribed_real_count', 'UnTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count',\n # 'transcribed_versus_untranscribed_p_value', 'transcribed_versus_untranscribed_q_value',\n # 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',\n # 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list']\n\n signatures = signature_strand1_versus_strand2_df['signature'].unique()\n\n x_axis_labels = existingMutationTypesList\n N = len(x_axis_labels)\n\n for signature in signatures:\n\n numberofMutations = 
int(signature_cutoff_numberofmutations_averageprobability_df[signature_cutoff_numberofmutations_averageprobability_df['signature'] == signature]['number_of_mutations'].values[0])\n\n mutationtype_strand1_real_list=[]\n mutationtype_strand2_real_list=[]\n mutationtype_strand1_sims_mean_list=[]\n mutationtype_strand2_sims_mean_list=[]\n mutationtype_FDR_BH_adjusted_pvalues_list=[]\n\n for mutation_type in existingMutationTypesList:\n if (strand1_versus_strand2==TRANSCRIBED_VERSUS_UNTRANSCRIBED):\n strand1_real_count_column_name=TRANSCRIBED_REAL_COUNT\n strand1_sims_mean_count_Column_name=TRANSCRIBED_SIMULATIONS_MEAN_COUNT\n strand2_real_count_column_name=UNTRANSCRIBED_REAL_COUNT\n strand2_sims_mean_count_Column_name=UNTRANSCRIBED_SIMULATIONS_MEAN_COUNT\n q_value_column_name = TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE\n elif (strand1_versus_strand2 == GENIC_VERSUS_INTERGENIC):\n strand1_real_count_column_name=GENIC_REAL_COUNT\n strand1_sims_mean_count_Column_name=GENIC_SIMULATIONS_MEAN_COUNT\n strand2_real_count_column_name=INTERGENIC_REAL_COUNT\n strand2_sims_mean_count_Column_name=INTERGENIC_SIMULATIONS_MEAN_COUNT\n q_value_column_name = GENIC_VERSUS_INTERGENIC_Q_VALUE\n elif (strand1_versus_strand2 == LAGGING_VERSUS_LEADING):\n strand1_real_count_column_name=LAGGING_REAL_COUNT\n strand1_sims_mean_count_Column_name=LAGGING_SIMULATIONS_MEAN_COUNT\n strand2_real_count_column_name=LEADING_REAL_COUNT\n strand2_sims_mean_count_Column_name=LEADING_SIMULATIONS_MEAN_COUNT\n q_value_column_name = LAGGING_VERSUS_LEADING_Q_VALUE\n\n strand1_real_count = 0\n strand1_sims_mean_count = 0\n strand2_real_count = 0\n strand2_sims_mean_count = 0\n q_value = None\n\n if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand1_real_count_column_name].values.size>0):\n strand1_real_count=signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand1_real_count_column_name].values[0]\n\n if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand1_sims_mean_count_Column_name].values.size>0):\n strand1_sims_mean_count = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand1_sims_mean_count_Column_name].values[0]\n\n if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand2_real_count_column_name].values.size>0):\n strand2_real_count=signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand2_real_count_column_name].values[0]\n\n if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand2_sims_mean_count_Column_name].values.size>0):\n strand2_sims_mean_count = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == 
mutation_type)][strand2_sims_mean_count_Column_name].values[0]\n\n if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][q_value_column_name].values.size>0):\n q_value = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][q_value_column_name].values[0]\n\n mutationtype_strand1_real_list.append(strand1_real_count)\n mutationtype_strand1_sims_mean_list.append(strand1_sims_mean_count)\n mutationtype_strand2_real_list.append(strand2_real_count)\n mutationtype_strand2_sims_mean_list.append(strand2_sims_mean_count)\n mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)\n\n plotStrandBiasFigureWithBarPlots(outputDir,\n jobname,\n numberofSimulations,\n signature,\n isKeySample,\n numberofMutations,\n N,\n x_axis_labels,\n mutationtype_strand1_real_list,\n mutationtype_strand2_real_list,\n mutationtype_strand1_sims_mean_list,\n mutationtype_strand2_sims_mean_list,\n mutationtype_FDR_BH_adjusted_pvalues_list,\n strands[0],\n strands[1],\n title,\n color1,\n color2,\n figureName,\n width,\n plot_mode)\n\n\n\n\n###################################################################\n# April 20, 2020\n# July 4, 2020 starts\n# Using dataframes\ndef transcriptionReplicationStrandBiasFiguresUsingDataframes(outputDir, jobname, numberofSimulations, mutation_types_contexts, strand_bias_list, is_discreet, plot_mode):\n\n # Initialize these dataframes as empty dataframe\n # We will read these dataframes if there is the corresponding data\n subsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()\n dinucsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()\n indelsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()\n\n sbs_df = pd.DataFrame()\n dbs_df = pd.DataFrame()\n id_df = pd.DataFrame()\n\n subsSignatures = np.array([])\n dinucsSignatures = np.array([])\n indelsSignatures = np.array([])\n\n os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)\n os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,BAR_PLOTS), exist_ok=True)\n os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,CIRCLE_PLOTS), exist_ok=True)\n os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,CIRCLE_BAR_PLOTS), exist_ok=True)\n os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,TABLES), exist_ok=True)\n os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,EXCEL_FILES), exist_ok=True)\n\n strandbias_figures_outputDir = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS)\n strandbias_figures_tables_outputDir = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, TABLES)\n strandbias_figures_excel_files_outputDir = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, EXCEL_FILES)\n\n\n ##########################################################################################\n ######################### Read dictionaries related with ################################\n ######################### signatures and samples starts ################################\n ##########################################################################################\n for mutation_type_context in mutation_types_contexts:\n if (mutation_type_context in SBS_CONTEXTS):\n subsSignature_cutoff_numberofmutations_averageprobability_df = 
pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\\t', header=0,dtype={'cutoff': np.float32,'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})\n subsSignatures = subsSignature_cutoff_numberofmutations_averageprobability_df['signature'].unique()\n if (DBS in mutation_types_contexts):\n dinucsSignature_cutoff_numberofmutations_averageprobability_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\\t', header=0,dtype={'cutoff': np.float32,'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})\n dinucsSignatures = dinucsSignature_cutoff_numberofmutations_averageprobability_df['signature'].unique()\n if (ID in mutation_types_contexts):\n indelsSignature_cutoff_numberofmutations_averageprobability_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\\t', header=0,dtype={'cutoff': np.float32,'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})\n indelsSignatures = indelsSignature_cutoff_numberofmutations_averageprobability_df['signature'].unique()\n ##########################################################################################\n ######################### Read dictionaries related with ################################\n ######################### signatures and samples ends ##################################\n ##########################################################################################\n\n if is_discreet:\n sbs_df = subsSignature_cutoff_numberofmutations_averageprobability_df\n dbs_df = dinucsSignature_cutoff_numberofmutations_averageprobability_df\n id_df = indelsSignature_cutoff_numberofmutations_averageprobability_df\n else:\n if os.path.exists(os.path.join(outputDir, jobname, DATA, Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename)):\n sbs_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename), sep='\\t', header=0, dtype={'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})\n subsSignatures = sbs_df['signature'].unique()\n if os.path.exists(os.path.join(outputDir, jobname, DATA, Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename)):\n dbs_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename), sep='\\t', header=0, dtype={'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})\n dinucsSignatures = dbs_df['signature'].unique()\n if os.path.exists(os.path.join(outputDir, jobname, DATA, Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename)):\n id_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename), sep='\\t', header=0, dtype={'signature': str,'number_of_mutations': np.int32,'average_probability': np.float32})\n indelsSignatures = id_df['signature'].unique()\n\n\n #######################################################################\n # Step1 Read p_value\n if LAGGING_VERSUS_LEADING in strand_bias_list:\n # Replication Strand Bias\n 
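# The strand tables read in Step1 are tab-separated outputs of the replication strand bias analysis:\n        # the signature table holds one row per (cancer_type, signature, mutation_type) and the type table one row\n        # per (cancer_type, type), each carrying real counts, simulation means and a p-value. The p-values collected\n        # here are pooled with the other requested comparisons for a single BH correction in Step2.\n        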
signature_mutation_type_lagging_versus_leading_table_file_name = 'Signature_Mutation_Type_%s_Strand_Table.txt' % (LAGGING_VERSUS_LEADING)\n signature_mutation_type_lagging_versus_leading_table_filepath = os.path.join(outputDir, jobname, DATA, REPLICATIONSTRANDBIAS,signature_mutation_type_lagging_versus_leading_table_file_name)\n signature_lagging_versus_leading_df = pd.read_csv(signature_mutation_type_lagging_versus_leading_table_filepath, header=0, sep='\\t')\n\n type_lagging_versus_leading_table_file_name = 'Type_%s_Strand_Table.txt' % (LAGGING_VERSUS_LEADING)\n type_lagging_versus_leading_table_filepath = os.path.join(outputDir, jobname, DATA, REPLICATIONSTRANDBIAS,type_lagging_versus_leading_table_file_name)\n type_lagging_versus_leading_df = pd.read_csv(type_lagging_versus_leading_table_filepath, header=0, sep='\\t')\n\n if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:\n # Transcription Strand Bias\n signature_mutation_type_transcribed_versus_untranscribed_table_file_name = 'Signature_Mutation_Type_%s_Strand_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)\n signature_mutation_type_transcribed_versus_untranscribed_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, signature_mutation_type_transcribed_versus_untranscribed_table_file_name)\n signature_transcribed_versus_untranscribed_df = pd.read_csv(signature_mutation_type_transcribed_versus_untranscribed_table_filepath, header=0, sep='\\t')\n\n type_transcribed_versus_untranscribed_table_file_name = 'Type_%s_Strand_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)\n type_transcribed_versus_untranscribed_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, type_transcribed_versus_untranscribed_table_file_name)\n type_transcribed_versus_untranscribed_df = pd.read_csv(type_transcribed_versus_untranscribed_table_filepath, header=0, sep='\\t')\n\n if GENIC_VERSUS_INTERGENIC in strand_bias_list:\n # Transcription Strand Bias\n signature_mutation_type_genic_versus_intergenic_table_file_name = 'Signature_Mutation_Type_%s_Strand_Table.txt' % (GENIC_VERSUS_INTERGENIC)\n signature_mutation_type_genic_versus_intergenic_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, signature_mutation_type_genic_versus_intergenic_table_file_name)\n signature_genic_versus_intergenic_df = pd.read_csv(signature_mutation_type_genic_versus_intergenic_table_filepath, header=0, sep='\\t')\n\n type_genic_versus_intergenic_table_file_name = 'Type_%s_Strand_Table.txt' % (GENIC_VERSUS_INTERGENIC)\n type_genic_versus_intergenic_table_filepath = os.path.join(outputDir, jobname, DATA, TRANSCRIPTIONSTRANDBIAS, type_genic_versus_intergenic_table_file_name)\n type_genic_versus_intergenic_df = pd.read_csv(type_genic_versus_intergenic_table_filepath, header=0, sep='\\t')\n #######################################################################\n\n #######################################################################\n # Step2 Compute q_value\n p_values_list=[]\n element_names=[]\n\n # Fill p_values_list\n if LAGGING_VERSUS_LEADING in strand_bias_list:\n for index, row in signature_lagging_versus_leading_df.iterrows():\n element_name = (row[CANCER_TYPE], row[SIGNATURE], row[MUTATION_TYPE], LAGGING_VERSUS_LEADING)\n element_names.append(element_name)\n p_values_list.append(row[LAGGING_VERSUS_LEADING_P_VALUE])\n\n for index, row in type_lagging_versus_leading_df.iterrows():\n element_name=(row[CANCER_TYPE], None, row[TYPE], LAGGING_VERSUS_LEADING)\n element_names.append(element_name)\n 
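# element_names and p_values_list are filled in parallel; in Step2 multipletests (called with is_sorted=False\n            # and returnsorted=False) returns the BH-adjusted p-values in the same order as the input, so the q-value at\n            # index i is written back to the table row identified by element_names[i].\n            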
p_values_list.append(row[LAGGING_VERSUS_LEADING_P_VALUE])\n\n if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:\n for index, row in signature_transcribed_versus_untranscribed_df.iterrows():\n element_name=(row[CANCER_TYPE], row[SIGNATURE], row[MUTATION_TYPE], TRANSCRIBED_VERSUS_UNTRANSCRIBED)\n element_names.append(element_name)\n p_values_list.append(row[TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE])\n\n for index, row in type_transcribed_versus_untranscribed_df.iterrows():\n element_name=(row[CANCER_TYPE], None, row[TYPE], TRANSCRIBED_VERSUS_UNTRANSCRIBED)\n element_names.append(element_name)\n p_values_list.append(row[TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE])\n\n if GENIC_VERSUS_INTERGENIC in strand_bias_list:\n for index, row in signature_genic_versus_intergenic_df.iterrows():\n element_name = (row[CANCER_TYPE], row[SIGNATURE], row[MUTATION_TYPE], GENIC_VERSUS_INTERGENIC)\n element_names.append(element_name)\n p_values_list.append(row[GENIC_VERSUS_INTERGENIC_P_VALUE])\n\n for index, row in type_genic_versus_intergenic_df.iterrows():\n element_name=(row[CANCER_TYPE], None, row[TYPE], GENIC_VERSUS_INTERGENIC)\n element_names.append(element_name)\n p_values_list.append(row[GENIC_VERSUS_INTERGENIC_P_VALUE])\n\n # print('len(p_values_list): %d' %(len(p_values_list)))\n #######################################################################\n\n #######################################################################\n if ((p_values_list is not None) and p_values_list):\n rejected, all_FDR_BH_adjusted_p_values, alphacSidak, alphacBonf = statsmodels.stats.multitest.multipletests(p_values_list, alpha=0.05, method='fdr_bh', is_sorted=False, returnsorted=False)\n\n # Add None q_values\n if LAGGING_VERSUS_LEADING in strand_bias_list:\n signature_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] = np.nan\n type_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] = np.nan\n\n if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:\n signature_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE] = np.nan\n type_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE]= np.nan\n\n if GENIC_VERSUS_INTERGENIC in strand_bias_list:\n signature_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE]= np.nan\n type_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE]= np.nan\n\n # Update q_value\n for element_index, element_name in enumerate(element_names,0):\n (cancer_type, signature, mutation_type, versus_type)=element_name\n q_value=all_FDR_BH_adjusted_p_values[element_index]\n\n if (signature is not None) and (versus_type == TRANSCRIBED_VERSUS_UNTRANSCRIBED):\n signature_transcribed_versus_untranscribed_df.loc[(signature_transcribed_versus_untranscribed_df[CANCER_TYPE]==cancer_type) &\n (signature_transcribed_versus_untranscribed_df[SIGNATURE]==signature) &\n (signature_transcribed_versus_untranscribed_df[MUTATION_TYPE]==mutation_type),TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE]=q_value\n\n elif (signature is not None) and (versus_type == GENIC_VERSUS_INTERGENIC):\n signature_genic_versus_intergenic_df.loc[(signature_genic_versus_intergenic_df[CANCER_TYPE]==cancer_type) &\n (signature_genic_versus_intergenic_df[SIGNATURE]==signature) &\n (signature_genic_versus_intergenic_df[MUTATION_TYPE]==mutation_type),GENIC_VERSUS_INTERGENIC_Q_VALUE]=q_value\n\n elif (signature is not None) and (versus_type==LAGGING_VERSUS_LEADING):\n signature_lagging_versus_leading_df.loc[(signature_lagging_versus_leading_df[CANCER_TYPE]==cancer_type) &\n 
(signature_lagging_versus_leading_df[SIGNATURE]==signature) &\n (signature_lagging_versus_leading_df[MUTATION_TYPE]==mutation_type),LAGGING_VERSUS_LEADING_Q_VALUE]=q_value\n\n\n elif (signature is None) and (versus_type == TRANSCRIBED_VERSUS_UNTRANSCRIBED):\n type_transcribed_versus_untranscribed_df.loc[(type_transcribed_versus_untranscribed_df[CANCER_TYPE] == cancer_type) & (type_transcribed_versus_untranscribed_df[TYPE] == mutation_type),TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE] = q_value\n elif (signature is None) and (versus_type == GENIC_VERSUS_INTERGENIC):\n type_genic_versus_intergenic_df.loc[(type_genic_versus_intergenic_df[CANCER_TYPE] == cancer_type) & (type_genic_versus_intergenic_df[TYPE] == mutation_type),GENIC_VERSUS_INTERGENIC_Q_VALUE] = q_value\n elif (signature is None) and (versus_type == LAGGING_VERSUS_LEADING):\n type_lagging_versus_leading_df.loc[(type_lagging_versus_leading_df[CANCER_TYPE] == cancer_type) & (type_lagging_versus_leading_df[TYPE] == mutation_type),LAGGING_VERSUS_LEADING_Q_VALUE] = q_value\n\n\n # Reorder columns\n # Write dataframes\n if LAGGING_VERSUS_LEADING in strand_bias_list:\n signature_lagging_versus_leading_df = signature_lagging_versus_leading_df[\n ['cancer_type', 'signature', 'mutation_type',\n 'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count',\n 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value',\n 'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count',\n 'Lagging_max_sims_count', 'Lagging_sims_count_list',\n 'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count',\n 'Leading_max_sims_count', 'Leading_sims_count_list']]\n\n type_lagging_versus_leading_df=type_lagging_versus_leading_df[['cancer_type', 'type',\n 'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count', 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value',\n 'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count', 'Lagging_max_sims_count', 'Lagging_sims_count_list',\n 'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count', 'Leading_max_sims_count', 'Leading_sims_count_list' ]]\n\n signature_filename = 'Signature_Mutation_Type_%s_Q_Value_Table.txt' % (LAGGING_VERSUS_LEADING)\n signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)\n signature_lagging_versus_leading_df.to_csv(signature_filepath, sep='\\t', header=True, index=False)\n\n type_filename = 'Type_%s_Q_Value_Table.txt' % (LAGGING_VERSUS_LEADING)\n type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)\n type_lagging_versus_leading_df.to_csv(type_filepath, sep='\\t', header=True, index=False)\n\n if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:\n signature_transcribed_versus_untranscribed_df=signature_transcribed_versus_untranscribed_df[['cancer_type', 'signature', 'mutation_type',\n 'Transcribed_real_count', 'UnTranscribed_real_count', 'NonTranscribed_real_count',\n 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'NonTranscribed_mean_sims_count',\n 'transcribed_versus_untranscribed_p_value', 'transcribed_versus_untranscribed_q_value',\n 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',\n 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 
'UnTranscribed_sims_count_list',\n 'NonTranscribed_real_count.1', 'NonTranscribed_mean_sims_count.1', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list']]\n\n type_transcribed_versus_untranscribed_df=type_transcribed_versus_untranscribed_df[['cancer_type', 'type',\n 'Transcribed_real_count', 'UnTranscribed_real_count', 'NonTranscribed_real_count',\n 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'NonTranscribed_mean_sims_count',\n 'transcribed_versus_untranscribed_p_value', 'transcribed_versus_untranscribed_q_value',\n 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',\n 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list',\n 'NonTranscribed_real_count.1', 'NonTranscribed_mean_sims_count.1', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list']]\n\n signature_filename = 'Signature_Mutation_Type_%s_Q_Value_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)\n signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)\n signature_transcribed_versus_untranscribed_df.to_csv(signature_filepath, sep='\\t', header=True, index=False)\n\n type_filename = 'Type_%s_Q_Value_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)\n type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)\n type_transcribed_versus_untranscribed_df.to_csv(type_filepath, sep='\\t', header=True, index=False)\n\n if GENIC_VERSUS_INTERGENIC in strand_bias_list:\n signature_genic_versus_intergenic_df=signature_genic_versus_intergenic_df[['cancer_type', 'signature', 'mutation_type',\n 'genic_real_count', 'intergenic_real_count', 'genic_mean_sims_count', 'intergenic_mean_sims_count', 'genic_versus_intergenic_p_value', 'genic_versus_intergenic_q_value',\n 'Transcribed_real_count', 'Transcribed_mean_sims_count', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',\n 'UnTranscribed_real_count', 'UnTranscribed_mean_sims_count', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list',\n 'NonTranscribed_real_count', 'NonTranscribed_mean_sims_count', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list' ]]\n\n type_genic_versus_intergenic_df=type_genic_versus_intergenic_df[['cancer_type', 'type',\n 'genic_real_count', 'intergenic_real_count', 'genic_mean_sims_count', 'intergenic_mean_sims_count', 'genic_versus_intergenic_p_value', 'genic_versus_intergenic_q_value',\n 'Transcribed_real_count', 'Transcribed_mean_sims_count', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',\n 'UnTranscribed_real_count', 'UnTranscribed_mean_sims_count', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list',\n 'NonTranscribed_real_count', 'NonTranscribed_mean_sims_count', 'NonTranscribed_min_sims_count', 'NonTranscribed_max_sims_count', 'NonTranscribed_sims_count_list' ]]\n\n signature_filename = 'Signature_Mutation_Type_%s_Q_Value_Table.txt' % (GENIC_VERSUS_INTERGENIC)\n signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)\n signature_genic_versus_intergenic_df.to_csv(signature_filepath, sep='\\t', header=True, index=False)\n\n type_filename = 
'Type_%s_Q_Value_Table.txt' % (GENIC_VERSUS_INTERGENIC)\n type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)\n type_genic_versus_intergenic_df.to_csv(type_filepath, sep='\\t', header=True, index=False)\n #######################################################################\n\n #######################################################################\n # Step3 Filter q-values, Decide significant strand and set 10,20,30,50,75, 100 percent\n # Add Significant Strand\n # Set significant strands\n # Set percentages\n # Write Filtered Q Values dataframes with percentages\n ##################################################################################################################################\n if LAGGING_VERSUS_LEADING in strand_bias_list:\n signature_lagging_versus_leading_filtered_q_value_df = signature_lagging_versus_leading_df[signature_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy()\n type_lagging_versus_leading_filtered_q_value_df= type_lagging_versus_leading_df[type_lagging_versus_leading_df[LAGGING_VERSUS_LEADING_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy()\n\n signature_lagging_versus_leading_filtered_q_value_df[SIGNIFICANT_STRAND] = None\n type_lagging_versus_leading_filtered_q_value_df[SIGNIFICANT_STRAND] = None\n\n for percentage_string in percentage_strings:\n signature_lagging_versus_leading_filtered_q_value_df[percentage_string] = None\n type_lagging_versus_leading_filtered_q_value_df[percentage_string] = None\n\n signature_lagging_versus_leading_filtered_q_value_df.loc[(signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] > signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]), SIGNIFICANT_STRAND] = LAGGING\n signature_lagging_versus_leading_filtered_q_value_df.loc[(signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] > signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]), SIGNIFICANT_STRAND] = LEADING\n type_lagging_versus_leading_filtered_q_value_df.loc[(type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] > type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]), SIGNIFICANT_STRAND]=LAGGING\n type_lagging_versus_leading_filtered_q_value_df.loc[(type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] > type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]),SIGNIFICANT_STRAND]=LEADING\n\n for percentage_index, percentage_number in enumerate(percentage_numbers, 0):\n percentage_string = percentage_strings[percentage_index]\n # Set percentages for signature mutation_type\n signature_lagging_versus_leading_filtered_q_value_df.loc[((signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] - signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]) >= (signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1\n signature_lagging_versus_leading_filtered_q_value_df.loc[((signature_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] - signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]) >= (signature_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1\n # Set percentages for type\n type_lagging_versus_leading_filtered_q_value_df.loc[((type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] - type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT]) >= 
(type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1\n type_lagging_versus_leading_filtered_q_value_df.loc[((type_lagging_versus_leading_filtered_q_value_df[LEADING_REAL_COUNT] - type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT]) >= (type_lagging_versus_leading_filtered_q_value_df[LAGGING_REAL_COUNT] * percentage_number / 100)), percentage_string] = 1\n\n signature_filename = 'Signature_Mutation_Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (LAGGING_VERSUS_LEADING)\n signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)\n signature_lagging_versus_leading_filtered_q_value_df.to_csv(signature_filepath, sep='\\t', header=True,index=False)\n\n type_filename = 'Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (LAGGING_VERSUS_LEADING)\n type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)\n type_lagging_versus_leading_filtered_q_value_df.to_csv(type_filepath, sep='\\t', header=True, index=False)\n ##################################################################################################################################\n\n ##################################################################################################################################\n if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:\n signature_transcribed_versus_untranscribed_filtered_q_value_df = signature_transcribed_versus_untranscribed_df[signature_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy()\n type_transcribed_versus_untranscribed_filtered_q_value_df= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df[TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE]<= SIGNIFICANCE_LEVEL].copy()\n\n signature_transcribed_versus_untranscribed_filtered_q_value_df[SIGNIFICANT_STRAND] = None\n type_transcribed_versus_untranscribed_filtered_q_value_df[SIGNIFICANT_STRAND]=None\n\n for percentage_string in percentage_strings:\n signature_transcribed_versus_untranscribed_filtered_q_value_df[percentage_string]=None\n type_transcribed_versus_untranscribed_filtered_q_value_df[percentage_string] = None\n\n signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[(signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT] > signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = TRANSCRIBED_STRAND\n signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[(signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT] > signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = UNTRANSCRIBED_STRAND\n type_transcribed_versus_untranscribed_filtered_q_value_df.loc[(type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT] > type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = TRANSCRIBED_STRAND\n type_transcribed_versus_untranscribed_filtered_q_value_df.loc[(type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT] > type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]), SIGNIFICANT_STRAND] = UNTRANSCRIBED_STRAND\n\n for percentage_index, percentage_number in enumerate(percentage_numbers,0):\n percentage_string=percentage_strings[percentage_index]\n # Set percentages for signature 
mutation_type\n signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[((signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]-signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]) >= (signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1\n signature_transcribed_versus_untranscribed_filtered_q_value_df.loc[((signature_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]-signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]) >= (signature_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1\n # Set percentages for type\n type_transcribed_versus_untranscribed_filtered_q_value_df.loc[((type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]-type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]) >= (type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1\n type_transcribed_versus_untranscribed_filtered_q_value_df.loc[((type_transcribed_versus_untranscribed_filtered_q_value_df[UNTRANSCRIBED_REAL_COUNT]-type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]) >= (type_transcribed_versus_untranscribed_filtered_q_value_df[TRANSCRIBED_REAL_COUNT]*percentage_number/100)), percentage_string] = 1\n\n signature_filename = 'Signature_Mutation_Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)\n signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)\n signature_transcribed_versus_untranscribed_filtered_q_value_df.to_csv(signature_filepath, sep='\\t', header=True, index=False)\n\n type_filename = 'Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (TRANSCRIBED_VERSUS_UNTRANSCRIBED)\n type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)\n type_transcribed_versus_untranscribed_filtered_q_value_df.to_csv(type_filepath, sep='\\t', header=True,index=False)\n ##################################################################################################################################\n\n\n ##################################################################################################################################\n if GENIC_VERSUS_INTERGENIC in strand_bias_list:\n signature_genic_versus_intergenic_filtered_q_value_df = signature_genic_versus_intergenic_df[signature_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE] <= SIGNIFICANCE_LEVEL].copy()\n type_genic_versus_intergenic_filtered_q_value_df= type_genic_versus_intergenic_df[type_genic_versus_intergenic_df[GENIC_VERSUS_INTERGENIC_Q_VALUE]<= SIGNIFICANCE_LEVEL].copy()\n\n signature_genic_versus_intergenic_filtered_q_value_df[SIGNIFICANT_STRAND] = None\n type_genic_versus_intergenic_filtered_q_value_df[SIGNIFICANT_STRAND] = None\n\n for percentage_string in percentage_strings:\n signature_genic_versus_intergenic_filtered_q_value_df[percentage_string] = None\n type_genic_versus_intergenic_filtered_q_value_df[percentage_string] = None\n\n signature_genic_versus_intergenic_filtered_q_value_df.loc[(signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT] > signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]), SIGNIFICANT_STRAND] = GENIC\n 
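# In the percentage loop below, AT_LEAST_X_PERCENT_DIFF is set to 1 when the dominant strand count exceeds\n        # the other strand by at least X percent of the other strand's count, e.g. genic=130 vs intergenic=100\n        # satisfies the 10/20/30 percent thresholds (130-100 >= 100*30/100) but not the 50 percent threshold.\n        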
signature_genic_versus_intergenic_filtered_q_value_df.loc[(signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT] > signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]),SIGNIFICANT_STRAND] = INTERGENIC\n type_genic_versus_intergenic_filtered_q_value_df.loc[(type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT] > type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]), SIGNIFICANT_STRAND] = GENIC\n type_genic_versus_intergenic_filtered_q_value_df.loc[(type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT] > type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]), SIGNIFICANT_STRAND] = INTERGENIC\n\n # Set percentages\n for percentage_index, percentage_number in enumerate(percentage_numbers,0):\n percentage_string=percentage_strings[percentage_index]\n # Set percentages for signature mutation_type\n signature_genic_versus_intergenic_filtered_q_value_df.loc[((signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]-signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]) >= (signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1\n signature_genic_versus_intergenic_filtered_q_value_df.loc[((signature_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]-signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]) >= (signature_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1\n # Set percentages for type\n type_genic_versus_intergenic_filtered_q_value_df.loc[((type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]-type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]) >= (type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1\n type_genic_versus_intergenic_filtered_q_value_df.loc[((type_genic_versus_intergenic_filtered_q_value_df[INTERGENIC_REAL_COUNT]-type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]) >= (type_genic_versus_intergenic_filtered_q_value_df[GENIC_REAL_COUNT]*percentage_number/100)), percentage_string] = 1\n\n signature_filename = 'Signature_Mutation_Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (GENIC_VERSUS_INTERGENIC)\n signature_filepath = os.path.join(strandbias_figures_tables_outputDir, signature_filename)\n signature_genic_versus_intergenic_filtered_q_value_df.to_csv(signature_filepath, sep='\\t', header=True,index=False)\n\n type_filename = 'Type_%s_Filtered_Q_Value_Percentages_Table.txt' % (GENIC_VERSUS_INTERGENIC)\n type_filepath = os.path.join(strandbias_figures_tables_outputDir, type_filename)\n type_genic_versus_intergenic_filtered_q_value_df.to_csv(type_filepath, sep='\\t', header=True, index=False)\n ##################################################################################################################################\n\n #######################################################################\n # Write Excel Files\n sheet_list = ['corrected_p_value', 'percentages']\n for strand1_versus_strand2 in strand_bias_list:\n if strand1_versus_strand2==LAGGING_VERSUS_LEADING:\n signatures_df_list=[signature_lagging_versus_leading_df,signature_lagging_versus_leading_filtered_q_value_df]\n types_df_list = [type_lagging_versus_leading_df, type_lagging_versus_leading_filtered_q_value_df]\n elif strand1_versus_strand2==TRANSCRIBED_VERSUS_UNTRANSCRIBED:\n 
signatures_df_list = [signature_transcribed_versus_untranscribed_df,signature_transcribed_versus_untranscribed_filtered_q_value_df]\n types_df_list = [type_transcribed_versus_untranscribed_df, type_transcribed_versus_untranscribed_filtered_q_value_df]\n elif strand1_versus_strand2==GENIC_VERSUS_INTERGENIC:\n signatures_df_list = [signature_genic_versus_intergenic_df,signature_genic_versus_intergenic_filtered_q_value_df]\n types_df_list = [type_genic_versus_intergenic_df, type_genic_versus_intergenic_filtered_q_value_df]\n\n signatures_filename=\"Signatures_Mutation_Types_%s.xlsx\" %(strand1_versus_strand2)\n file_name_with_path=os.path.join(strandbias_figures_excel_files_outputDir, signatures_filename)\n write_excel_file(signatures_df_list, sheet_list, file_name_with_path)\n\n types_filename=\"Types_%s.xlsx\" %(strand1_versus_strand2)\n file_name_with_path=os.path.join(strandbias_figures_excel_files_outputDir, types_filename)\n write_excel_file(types_df_list, sheet_list, file_name_with_path)\n #######################################################################\n\n\n #######################################################################\n #Circle plots starts\n\n #######################################################################\n #Step4 Fill this dictionary\n signature2mutation_type2strand2percentagedict={}\n\n df_list=[]\n if LAGGING_VERSUS_LEADING in strand_bias_list:\n df_list.append(signature_lagging_versus_leading_filtered_q_value_df)\n if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:\n df_list.append(signature_transcribed_versus_untranscribed_filtered_q_value_df)\n if GENIC_VERSUS_INTERGENIC in strand_bias_list:\n df_list.append(signature_genic_versus_intergenic_filtered_q_value_df)\n\n for df in df_list:\n for index, row in df.iterrows():\n cancer_type = row[CANCER_TYPE]\n signature = row[SIGNATURE]\n mutation_type = row[MUTATION_TYPE]\n significant_strand=row[SIGNIFICANT_STRAND]\n percent_10 = row[AT_LEAST_10_PERCENT_DIFF]\n percent_20 = row[AT_LEAST_20_PERCENT_DIFF]\n percent_30 = row[AT_LEAST_30_PERCENT_DIFF]\n percent_50 = row[AT_LEAST_50_PERCENT_DIFF]\n percent_75 = row[AT_LEAST_75_PERCENT_DIFF]\n percent_100 = row[AT_LEAST_100_PERCENT_DIFF]\n\n if signature in signature2mutation_type2strand2percentagedict:\n if mutation_type in signature2mutation_type2strand2percentagedict[signature]:\n if significant_strand in signature2mutation_type2strand2percentagedict[signature][mutation_type]:\n if (percent_10 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1\n if (percent_20 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1\n if (percent_30 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1\n if (percent_50 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1\n if (percent_75 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1\n if (percent_100 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1\n\n else:\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand]={}\n 
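# New strand for an existing (signature, mutation_type): initialise every percentage flag to 0 first,\n                        # then switch on only the thresholds observed for this row.\n                        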
signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0\n\n if (percent_10 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 1\n if (percent_20 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 1\n if (percent_30 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 1\n if (percent_50 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 1\n if (percent_75 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 1\n if (percent_100 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 1\n else:\n signature2mutation_type2strand2percentagedict[signature][mutation_type] = {}\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand] = {}\n\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0\n\n if (percent_10 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 1\n if (percent_20 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 1\n if (percent_30 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 1\n if (percent_50 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 1\n if (percent_75 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 1\n if (percent_100 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 1\n\n else:\n signature2mutation_type2strand2percentagedict[signature] = {}\n 
signature2mutation_type2strand2percentagedict[signature][mutation_type] = {}\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand] = {}\n\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0\n\n if (percent_10 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 1\n if (percent_20 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 1\n if (percent_30 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 1\n if (percent_50 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 1\n if (percent_75 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 1\n if (percent_100 == 1):\n signature2mutation_type2strand2percentagedict[signature][mutation_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 1\n #######################################################################\n\n #######################################################################\n # Step4 Fill this dictionary\n type2strand2percentagedict={}\n\n df_list=[]\n if LAGGING_VERSUS_LEADING in strand_bias_list:\n df_list.append(type_lagging_versus_leading_filtered_q_value_df)\n if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:\n df_list.append(type_transcribed_versus_untranscribed_filtered_q_value_df)\n if GENIC_VERSUS_INTERGENIC in strand_bias_list:\n df_list.append(type_genic_versus_intergenic_filtered_q_value_df)\n\n for df in df_list:\n for index, row in df.iterrows():\n cancer_type = row[CANCER_TYPE]\n my_type = row[TYPE]\n significant_strand=row[SIGNIFICANT_STRAND]\n percent_10 = row[AT_LEAST_10_PERCENT_DIFF]\n percent_20 = row[AT_LEAST_20_PERCENT_DIFF]\n percent_30 = row[AT_LEAST_30_PERCENT_DIFF]\n percent_50 = row[AT_LEAST_50_PERCENT_DIFF]\n percent_75 = row[AT_LEAST_75_PERCENT_DIFF]\n percent_100 = row[AT_LEAST_100_PERCENT_DIFF]\n\n if my_type in type2strand2percentagedict:\n if significant_strand in type2strand2percentagedict[my_type]:\n if (percent_10 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1\n if (percent_20 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1\n if (percent_30 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1\n if (percent_50 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1\n if (percent_75 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1\n if (percent_100 == 1):\n 
type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1\n\n else:\n type2strand2percentagedict[my_type][significant_strand]={}\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0\n\n if (percent_10 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1\n if (percent_20 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1\n if (percent_30 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1\n if (percent_50 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1\n if (percent_75 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1\n if (percent_100 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1\n else:\n type2strand2percentagedict[my_type] = {}\n type2strand2percentagedict[my_type][significant_strand] = {}\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF] = 0\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF] = 0\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF] = 0\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF] = 0\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF] = 0\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF] = 0\n\n if (percent_10 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_10_PERCENT_DIFF]=1\n if (percent_20 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_20_PERCENT_DIFF]=1\n if (percent_30 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_30_PERCENT_DIFF]=1\n if (percent_50 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_50_PERCENT_DIFF]=1\n if (percent_75 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_75_PERCENT_DIFF]=1\n if (percent_100 == 1):\n type2strand2percentagedict[my_type][significant_strand][AT_LEAST_100_PERCENT_DIFF]=1\n #######################################################################\n\n\n #######################################################################\n # Step5 Plot figures\n plot_legend(strandbias_figures_outputDir)\n\n for strand_bias in strand_bias_list:\n if np.any(subsSignatures):\n plot_six_mutations_sbs_signatures_circle_figures(subsSignatures,\n strand_bias,\n strandbias_figures_outputDir,\n SIGNIFICANCE_LEVEL,\n signature2mutation_type2strand2percentagedict,\n percentage_strings)\n if np.any(dinucsSignatures):\n plot_dbs_and_id_signatures_circle_figures(DBS,\n dinucsSignatures,\n strand_bias,\n strandbias_figures_outputDir,\n SIGNIFICANCE_LEVEL,\n type2strand2percentagedict,\n percentage_strings)\n if np.any(indelsSignatures):\n plot_dbs_and_id_signatures_circle_figures(ID,\n indelsSignatures,\n strand_bias,\n strandbias_figures_outputDir,\n SIGNIFICANCE_LEVEL,\n 
type2strand2percentagedict,\n percentage_strings)\n\n # Circle plots ends\n #######################################################################\n\n ########################################################################\n ########################## Part 2 starts ##############################\n ############## Mutation Types Scatter Plots starts #####################\n ############## Signatures Scatter Plots starts #########################\n ########################################################################\n if (TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list) and (LAGGING_VERSUS_LEADING in strand_bias_list):\n if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty)):\n plot_mutation_types_transcription_log10_ratio_replication_log_10_ratio_using_dataframes(None,None,\n type_transcribed_versus_untranscribed_df,\n type_lagging_versus_leading_df,\n outputDir, jobname)\n\n if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty) and (not sbs_df.empty)):\n plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes('subs', None, None,\n type_transcribed_versus_untranscribed_df,\n type_lagging_versus_leading_df,\n sbs_df,\n outputDir, jobname)\n\n if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty) and (not dbs_df.empty)):\n plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes('dinucs', None, None,\n type_transcribed_versus_untranscribed_df,\n type_lagging_versus_leading_df,\n dbs_df,\n outputDir, jobname)\n\n if ((not type_transcribed_versus_untranscribed_df.empty) and (not type_lagging_versus_leading_df.empty) and (not id_df.empty)):\n plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes('indels', None, None,\n type_transcribed_versus_untranscribed_df,\n type_lagging_versus_leading_df,\n id_df,\n outputDir, jobname)\n ########################################################################\n ############## Mutation Types Scatter Plots ends #######################\n ############## Signatures Scatter Plots ends ###########################\n ########################## Part 2 ends ################################\n ########################################################################\n\n ########################################################################\n ########################## Part 4 starts ##############################\n ######## Bar plot starts includes sample based bar plots ###############\n ########################################################################\n isKeySample = False\n width = 0.20\n\n #######################################################\n ################# Plot types starts ###################\n #######################################################\n types_list= [('All Mutations', 'mutationtypes', six_mutation_types),\n ('All Signatures', 'subs_signatures', subsSignatures),\n ('All Signatures', 'indels_signatures', indelsSignatures),\n ('All Signatures', 'dinucs_signatures', dinucsSignatures)]\n\n for mutationsOrSignatures, sub_figure_name, x_axis_labels in types_list:\n x_axis_labels = sorted(x_axis_labels, key=natural_key)\n N = len(x_axis_labels)\n\n for strand_bias in strand_bias_list:\n if (strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED):\n type_strand1_versus_strand2_df = type_transcribed_versus_untranscribed_df\n strand1 = transcriptionStrands[0]\n strand2 = transcriptionStrands[1]\n strand1_real_count_column_name = 
'Transcribed_real_count'\n strand2_real_count_column_name = 'UnTranscribed_real_count'\n strand1_sims_mean_count_column_name = 'Transcribed_mean_sims_count'\n strand2_sims_mean_count_column_name = 'UnTranscribed_mean_sims_count'\n q_value_column_name = 'transcribed_versus_untranscribed_q_value'\n color1 = 'royalblue'\n color2 = 'yellowgreen'\n figureName = '%s_transcription_strand_bias' %(sub_figure_name)\n\n elif (strand_bias == GENIC_VERSUS_INTERGENIC):\n type_strand1_versus_strand2_df = type_genic_versus_intergenic_df\n strand1 = genicVersusIntergenicStrands[0]\n strand2 = genicVersusIntergenicStrands[1]\n strand1_real_count_column_name = 'genic_real_count'\n strand2_real_count_column_name = 'intergenic_real_count'\n strand1_sims_mean_count_column_name = 'genic_mean_sims_count'\n strand2_sims_mean_count_column_name = 'intergenic_mean_sims_count'\n q_value_column_name = 'genic_versus_intergenic_q_value'\n color1 = 'cyan'\n color2 = 'gray'\n figureName = '%s_genic_versus_intergenic_strand_bias' %(sub_figure_name)\n\n elif (strand_bias == LAGGING_VERSUS_LEADING):\n type_strand1_versus_strand2_df = type_lagging_versus_leading_df\n strand1 = replicationStrands[0]\n strand2 = replicationStrands[1]\n strand1_real_count_column_name = 'Lagging_real_count'\n strand2_real_count_column_name = 'Leading_real_count'\n strand1_sims_mean_count_column_name = 'Lagging_mean_sims_count'\n strand2_sims_mean_count_column_name = 'Leading_mean_sims_count'\n q_value_column_name = 'lagging_versus_leading_q_value'\n color1 = 'indianred'\n color2 = 'goldenrod'\n figureName = '%s_replication_strand_bias' %(sub_figure_name)\n\n types_strand1_real_count_list = []\n types_strand2_real_count_list = []\n types_strand1_sims_mean_count_list = []\n types_strand2_sims_mean_count_list = []\n types_strand1_versus_strand2_FDR_BH_adjusted_pvalues = []\n\n for my_type in x_axis_labels:\n strand1_real_count = 0\n strand2_real_count = 0\n strand1_sims_mean_count = 0\n strand2_sims_mean_count = 0\n q_value = None\n\n if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_real_count_column_name].values.size>0:\n strand1_real_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_real_count_column_name].values[0]\n if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_real_count_column_name].values.size>0:\n strand2_real_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_real_count_column_name].values[0]\n if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_sims_mean_count_column_name].values.size>0:\n strand1_sims_mean_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand1_sims_mean_count_column_name].values[0]\n if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_sims_mean_count_column_name].values.size>0:\n strand2_sims_mean_count= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][strand2_sims_mean_count_column_name].values[0]\n if type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][q_value_column_name].values.size>0:\n q_value= type_strand1_versus_strand2_df[(type_strand1_versus_strand2_df['type'] == my_type)][q_value_column_name].values[0]\n\n types_strand1_real_count_list.append(strand1_real_count)\n types_strand2_real_count_list.append(strand2_real_count)\n 
types_strand1_sims_mean_count_list.append(strand1_sims_mean_count)\n types_strand2_sims_mean_count_list.append(strand2_sims_mean_count)\n types_strand1_versus_strand2_FDR_BH_adjusted_pvalues.append(q_value)\n\n if ((len(x_axis_labels) > 0) and types_strand1_real_count_list and types_strand2_real_count_list and types_strand1_sims_mean_count_list and types_strand2_sims_mean_count_list and (len(types_strand1_versus_strand2_FDR_BH_adjusted_pvalues)>0)):\n\n if (types_strand1_real_count_list and types_strand2_real_count_list):\n plotStrandBiasFigureWithBarPlots(outputDir,\n jobname,\n numberofSimulations,\n None,\n isKeySample,\n None,\n N,\n x_axis_labels,\n types_strand1_real_count_list,\n types_strand2_real_count_list,\n types_strand1_sims_mean_count_list,\n types_strand2_sims_mean_count_list,\n types_strand1_versus_strand2_FDR_BH_adjusted_pvalues,\n strand1,strand2,\n mutationsOrSignatures,\n color1, color2,\n figureName,\n width,\n plot_mode)\n\n #######################################################\n ################# Plot types ends #####################\n #######################################################\n\n #################################################################\n ########### Plot sub signatures mutation types starts ###########\n #################################################################\n if not sbs_df.empty:\n if TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list:\n plotBarPlotsUsingDataframes(outputDir,\n jobname,\n numberofSimulations,\n sbs_df,\n isKeySample,\n six_mutation_types,\n signature_transcribed_versus_untranscribed_df,\n width,\n TRANSCRIBED_VERSUS_UNTRANSCRIBED,\n transcriptionStrands,\n 'royalblue',\n 'yellowgreen',\n 'All Mutations',\n 'mutationtypes_transcription_strand_bias',\n plot_mode)\n\n if GENIC_VERSUS_INTERGENIC in strand_bias_list:\n plotBarPlotsUsingDataframes(outputDir,\n jobname,\n numberofSimulations,\n sbs_df,\n isKeySample,\n six_mutation_types,\n signature_genic_versus_intergenic_df,\n width,\n GENIC_VERSUS_INTERGENIC,\n genicVersusIntergenicStrands,\n 'cyan',\n 'gray',\n 'All Mutations',\n 'mutationtypes_genic_versus_intergenic_strand_bias',\n plot_mode)\n\n if LAGGING_VERSUS_LEADING in strand_bias_list:\n plotBarPlotsUsingDataframes(outputDir,\n jobname,\n numberofSimulations,\n sbs_df,\n isKeySample,\n six_mutation_types,\n signature_lagging_versus_leading_df,\n width,\n LAGGING_VERSUS_LEADING,\n replicationStrands,\n 'indianred',\n 'goldenrod',\n 'All Mutations',\n 'mutationtypes_replication_strand_bias',\n plot_mode)\n #################################################################\n ########### Plot sub signatures mutation types ends #############\n #################################################################\n\n ########################################################################\n ######## Bar plot starts includes sample based bar plots ###############\n ########################## Part 4 ends ################################\n ########################################################################\n\n # Circle Bar Plots\n # Plot circle plots and bar plots all together\n # At top ax, circle plots with 3 rows: for genic vs. intergenic, transcribed vs. untranscribed, lagging vs. leading\n # At middle ax, 3 bar plots: for genic vs. intergenic, transcribed vs. untranscribed, lagging vs. leading\n # At below ax, 3 normalized bar plots: for genic vs. intergenic, transcribed vs. untranscribed, lagging vs. 
leading\n if (TRANSCRIBED_VERSUS_UNTRANSCRIBED in strand_bias_list) and (LAGGING_VERSUS_LEADING in strand_bias_list):\n sbs_signatures = sbs_df['signature'].unique()\n for sbs_signature in sbs_signatures:\n plot_circle_bar_plots_together(outputDir,\n jobname,\n sbs_signature,\n six_mutation_types,\n signature2mutation_type2strand2percentagedict,\n signature_genic_versus_intergenic_df,\n signature_transcribed_versus_untranscribed_df,\n signature_lagging_versus_leading_df,\n genicVersusIntergenicStrands,\n transcriptionStrands,\n replicationStrands)\n\n###################################################################\n\n\n############################################################################################################################\ndef plot_dbs_and_id_signatures_circle_figures(signature_type,\n signatures,\n strand_bias,\n strandbias_figures_outputDir,\n SIGNIFICANCE_LEVEL,\n type2strand2percentagedict,\n percentage_strings):\n\n rows_signatures=[]\n\n #####################################################################\n if strand_bias==LAGGING_VERSUS_LEADING:\n strands=replicationStrands\n elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:\n strands=transcriptionStrands\n elif strand_bias==GENIC_VERSUS_INTERGENIC:\n strands=genicVersusIntergenicStrands\n #####################################################################\n\n #####################################################################\n #Fill rows_DBS_signatures\n #Fill rows_ID_signatures\n for signature in signatures:\n if signature in type2strand2percentagedict:\n for strand in strands:\n if strand in type2strand2percentagedict[signature]:\n for percentage_string in percentage_strings:\n if percentage_string in type2strand2percentagedict[signature][strand]:\n print('signature:%s strand:%s percentage_string:%s' %(signature,strand,percentage_string))\n if signature not in rows_signatures:\n rows_signatures.append(signature)\n #####################################################################\n\n #####################################################################\n rows_signatures=sorted(rows_signatures,key=natural_key,reverse=True)\n #####################################################################\n\n\n if (len(rows_signatures)>0):\n #####################################################################\n #New plot (width,height)\n fig, ax = plt.subplots(figsize=(5+1.5*len(percentage_strings), 10+1.5*len(rows_signatures)))\n\n #make aspect ratio square\n ax.set_aspect(1.0)\n #####################################################################\n\n ######################################################################################################################################\n for percentage_diff_index, percentage_string in enumerate(percentage_strings):\n for row_signature_index, row_signature in enumerate(rows_signatures):\n if (strand_bias==LAGGING_VERSUS_LEADING):\n if row_signature in type2strand2percentagedict:\n lagging_percentage=None\n leading_percentage=None\n\n if LAGGING in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][LAGGING][percentage_string]==1:\n lagging_percentage = 100\n if LEADING in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][LEADING][percentage_string]==1:\n leading_percentage = 100\n\n if (lagging_percentage is not None) and (leading_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, 
row_signature,percentage_string))\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='indianred', fill=True)\n ax.add_artist(circle)\n\n elif (leading_percentage is not None) and (lagging_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string))\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='goldenrod', fill=True)\n ax.add_artist(circle)\n\n elif (lagging_percentage is not None) and (leading_percentage is not None):\n radius_lagging = 0.49\n radius_leading = 0.49\n if (radius_lagging>radius_leading):\n #First lagging\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_lagging, color='goldenrod', fill=True)\n ax.add_artist(circle)\n #Second leading\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_leading, color='goldenrod', fill=True)\n ax.add_artist(circle)\n else:\n #First leading\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_leading, color='goldenrod', fill=True)\n ax.add_artist(circle)\n #Second lagging\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_lagging, color='goldenrod', fill=True)\n ax.add_artist(circle)\n\n elif (strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED):\n if row_signature in type2strand2percentagedict:\n transcribed_percentage=None\n untranscribed_percentage=None\n\n if TRANSCRIBED_STRAND in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][TRANSCRIBED_STRAND][percentage_string]==1:\n transcribed_percentage = 100\n if UNTRANSCRIBED_STRAND in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][UNTRANSCRIBED_STRAND][percentage_string]==1:\n untranscribed_percentage = 100\n\n if (transcribed_percentage is not None) and (untranscribed_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string))\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='royalblue', fill=True)\n ax.add_artist(circle)\n elif (untranscribed_percentage is not None) and (transcribed_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string))\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='yellowgreen', fill=True)\n ax.add_artist(circle)\n elif (transcribed_percentage is not None) and (untranscribed_percentage is not None):\n radius_transcribed = 0.49\n radius_untranscribed = 0.49\n if (radius_transcribed>radius_untranscribed):\n #First transcribed\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_transcribed, color='royalblue', fill=True)\n ax.add_artist(circle)\n #Second untranscribed\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_untranscribed, color='yellowgreen', fill=True)\n ax.add_artist(circle)\n else:\n #First untranscribed\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_untranscribed, color='yellowgreen', fill=True)\n ax.add_artist(circle)\n #Second transcribed\n circle = plt.Circle((percentage_diff_index + 0.5, 
row_signature_index + 0.5), radius_transcribed, color='royalblue', fill=True)\n ax.add_artist(circle)\n\n\n elif (strand_bias==GENIC_VERSUS_INTERGENIC):\n if row_signature in type2strand2percentagedict:\n genic_percentage=None\n intergenic_percentage=None\n if GENIC in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][GENIC][percentage_string]==1:\n genic_percentage = 100\n if INTERGENIC in type2strand2percentagedict[row_signature] and type2strand2percentagedict[row_signature][INTERGENIC][percentage_string]==1:\n intergenic_percentage = 100\n\n if (genic_percentage is not None) and (intergenic_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string))\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='cyan', fill=True)\n ax.add_artist(circle)\n elif (intergenic_percentage is not None) and (genic_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s' % (percentage_diff_index, row_signature_index, row_signature,percentage_string))\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius, color='gray', fill=True)\n ax.add_artist(circle)\n elif (genic_percentage is not None) and (intergenic_percentage is not None):\n radius_genic = 0.49\n radius_intergenic = 0.49\n if (radius_genic>radius_intergenic):\n #First genic\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_genic, color='cyan', fill=True)\n ax.add_artist(circle)\n #Second intergenic\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_intergenic, color='gray', fill=True)\n ax.add_artist(circle)\n else:\n #First untranscribed\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_intergenic, color='gray', fill=True)\n ax.add_artist(circle)\n #Second transcribed\n circle = plt.Circle((percentage_diff_index + 0.5, row_signature_index + 0.5), radius_genic, color='cyan', fill=True)\n ax.add_artist(circle)\n ######################################################################################################################################\n\n\n ##################################################################################\n # CODE GOES HERE TO CENTER X-AXIS LABELS...\n ax.set_xlim([0,len(percentage_strings)])\n ax.set_xticklabels([])\n\n ax.tick_params(axis='x', which='minor', length=0, labelsize=20)\n\n #major ticks\n ax.set_xticks(np.arange(0, len(percentage_strings), 1))\n #minor ticks\n ax.set_xticks(np.arange(0, len(percentage_strings), 1)+0.5,minor=True)\n ax.set_xticklabels(percentage_strings,minor=True)\n\n #Jul 7, 2020\n if strand_bias==LAGGING_VERSUS_LEADING:\n fig.suptitle('Lagging versus Leading Strand Bias', fontsize=30)\n elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:\n fig.suptitle('Transcribed versus Untranscribed Strand Bias', fontsize=30)\n elif strand_bias==GENIC_VERSUS_INTERGENIC:\n fig.suptitle('Genic versus Intergenic Strand Bias', fontsize=30)\n\n ax.xaxis.set_ticks_position('top')\n\n plt.tick_params(\n axis='x', # changes apply to the x-axis\n which='major', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False) # labels along the bottom edge are off\n ##################################################################################\n\n\n 
##################################################################################\n # CODE GOES HERE TO CENTER Y-AXIS LABELS...\n ax.set_ylim([0,len(rows_signatures)])\n ax.set_yticklabels([])\n\n ax.tick_params(axis='y', which='minor', length=0, labelsize=30)\n\n #major ticks\n ax.set_yticks(np.arange(0, len(rows_signatures), 1))\n #minor ticks\n ax.set_yticks(np.arange(0, len(rows_signatures), 1)+0.5,minor=True)\n ax.set_yticklabels(rows_signatures, minor=True) # fontsize\n\n plt.tick_params(\n axis='y', # changes apply to the x-axis\n which='major', # both major and minor ticks are affected\n left=False) # labels along the bottom edge are off\n ##################################################################################\n\n ##################################################################################\n # Gridlines based on major ticks\n ax.grid(which='major', color='black')\n ##################################################################################\n\n ##################################################################################\n # create the directory if it does not exists\n filename = '%s_Signatures_%s_with_circles_%s.png' % (signature_type,strand_bias,str(SIGNIFICANCE_LEVEL).replace('.','_'))\n figFile = os.path.join(strandbias_figures_outputDir, CIRCLE_PLOTS, filename)\n fig.savefig(figFile)\n fig.tight_layout()\n\n plt.cla()\n plt.close(fig)\n ##################################################################################\n\n############################################################################################################################\n\n\n\n############################################################################################################################\n#Plot Legend only\ndef plot_legend(strandbias_figures_outputDir):\n\n strand_biases=[TRANSCRIBED_VERSUS_UNTRANSCRIBED, GENIC_VERSUS_INTERGENIC, LAGGING_VERSUS_LEADING]\n\n for strandbias in strand_biases:\n ##################################################################################\n fig = plt.figure(figsize=(4,1), dpi=300)\n ax = plt.gca()\n plt.axis('off')\n ##################################################################################\n\n ##################################################################################\n if strandbias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:\n legend_elements = [\n Line2D([0], [0], marker='o', color='white', label=TRANSCRIBED_STRAND, markerfacecolor='royalblue' ,markersize=20),\n Line2D([0], [0], marker='o', color='white', label=UNTRANSCRIBED_STRAND, markerfacecolor='yellowgreen',markersize=20)]\n elif strandbias == GENIC_VERSUS_INTERGENIC:\n legend_elements = [\n Line2D([0], [0], marker='o', color='white', label=GENIC, markerfacecolor='cyan',markersize=20),\n Line2D([0], [0], marker='o', color='white', label=INTERGENIC, markerfacecolor='gray',markersize=20)]\n elif (strandbias==LAGGING_VERSUS_LEADING):\n legend_elements = [\n Line2D([0], [0], marker='o', color='white', label=LAGGING, markerfacecolor='indianred', markersize=20),\n Line2D([0], [0], marker='o', color='white', label=LEADING, markerfacecolor='goldenrod', markersize=20)]\n\n ax.legend(handles=legend_elements, bbox_to_anchor=(0, 0.5), loc='center left' ,fontsize = 20)\n ##################################################################################\n\n ##################################################################################\n # create the directory if it does not exists\n filename = 'Legend_%s.png' % (strandbias)\n figFile = 
os.path.join(strandbias_figures_outputDir, CIRCLE_PLOTS, filename)\n fig.savefig(figFile)\n fig.tight_layout()\n\n plt.cla()\n plt.close(fig)\n ##################################################################################\n\n############################################################################################################################\n\n\n############################################################################################################################\n#Sep 19, 2020\ndef plot_six_mutations_sbs_signatures_circle_figures(sbs_signatures,\n strand_bias,\n strandbias_figures_outputDir,\n significance_level,\n signature2mutation_type2strand2percentagedict,\n percentage_strings):\n\n mutation_types=six_mutation_types\n\n #####################################################################\n if strand_bias==LAGGING_VERSUS_LEADING:\n strands=replicationStrands\n elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:\n strands=transcriptionStrands\n elif strand_bias==GENIC_VERSUS_INTERGENIC:\n strands=genicVersusIntergenicStrands\n #####################################################################\n\n #####################################################################\n rows_sbs_signatures=[]\n\n #Fill rows_sbs_signatures\n for signature in sbs_signatures:\n if signature in signature2mutation_type2strand2percentagedict:\n for mutation_type in signature2mutation_type2strand2percentagedict[signature]:\n for strand in strands:\n if strand in signature2mutation_type2strand2percentagedict[signature][mutation_type]:\n for percentage_string in percentage_strings:\n if (percentage_string in signature2mutation_type2strand2percentagedict[signature][mutation_type][strand]) and (signature2mutation_type2strand2percentagedict[signature][mutation_type][strand][percentage_string]==1):\n if signature not in rows_sbs_signatures:\n rows_sbs_signatures.append(signature)\n #####################################################################\n\n #####################################################################\n rows_sbs_signatures=sorted(rows_sbs_signatures,key=natural_key,reverse=True)\n #####################################################################\n\n #####################################################################\n xticklabels_list = percentage_strings * len(mutation_types)\n #####################################################################\n\n if (len(rows_sbs_signatures)>0):\n\n #####################################################################\n plot1, panel1 = plt.subplots(figsize=(5+1.5*len(xticklabels_list), 10+1.5*len(rows_sbs_signatures)))\n # plot1, panel1 = plt.subplots(figsize=(5+1.4*len(xticklabels_list), 10+len(rows_sbs_signatures))) Title and mutation texts are not seen.\n plt.rc('axes', edgecolor='lightgray')\n # panel1 = plt.axes([0.04, 0.09, 0.95, 0.75])\n\n #make aspect ratio square\n panel1.set_aspect(1.0)\n #####################################################################\n\n\n ##################################################################################\n #set title\n if strand_bias==LAGGING_VERSUS_LEADING:\n title='Lagging versus Leading Strand Bias'\n elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:\n title='Transcribed versus Untranscribed Strand Bias'\n elif strand_bias==GENIC_VERSUS_INTERGENIC:\n title='Genic versus Intergenic Strand Bias'\n panel1.text(len(percentage_strings)*3, len(rows_sbs_signatures)+2.5, title, horizontalalignment='center', fontsize=60, fontweight='bold', fontname='Arial')\n 
##################################################################################\n\n ##################################################################################\n #Colors from SigProfilerPlotting tool to be consistent\n colors = [[3 / 256, 189 / 256, 239 / 256],\n [1 / 256, 1 / 256, 1 / 256],\n [228 / 256, 41 / 256, 38 / 256],\n [203 / 256, 202 / 256, 202 / 256],\n [162 / 256, 207 / 256, 99 / 256],\n [236 / 256, 199 / 256, 197 / 256]]\n\n #Put rectangles\n x = 0\n\n for i in range(0, len(mutation_types), 1):\n panel1.text((x+(len(percentage_strings)/2)-0.75), len(rows_sbs_signatures)+1.5, mutation_types[i], fontsize=55, fontweight='bold', fontname='Arial')\n panel1.add_patch(plt.Rectangle((x+.0415, len(rows_sbs_signatures)+0.75), len(percentage_strings)-(2*.0415), .5, facecolor=colors[i], clip_on=False))\n panel1.add_patch(plt.Rectangle((x, 0), len(percentage_strings), len(rows_sbs_signatures), facecolor=colors[i], zorder=0, alpha=0.25,edgecolor='grey'))\n x += len(percentage_strings)\n ##################################################################################\n\n ##################################################################################\n # CODE GOES HERE TO CENTER X-AXIS LABELS...\n panel1.set_xlim([0,len(mutation_types)*len(percentage_strings)])\n panel1.set_xticklabels([])\n\n panel1.tick_params(axis='x', which='minor', length=0, labelsize=35)\n\n #major ticks\n panel1.set_xticks(np.arange(0, len(mutation_types)*len(percentage_strings), 1))\n #minor ticks\n panel1.set_xticks(np.arange(0, len(mutation_types)*len(percentage_strings), 1)+0.5,minor=True)\n\n panel1.set_xticklabels(xticklabels_list,minor=True)\n\n panel1.xaxis.set_label_position('top')\n panel1.xaxis.set_ticks_position('top')\n\n plt.tick_params(\n axis='x', # changes apply to the x-axis\n which='major', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False) # labels along the bottom edge are off\n ##################################################################################\n\n\n ##################################################################################\n # CODE GOES HERE TO CENTER Y-AXIS LABELS...\n panel1.set_ylim([0,len(rows_sbs_signatures)])\n panel1.set_yticklabels([])\n\n panel1.tick_params(axis='y', which='minor', length=0, labelsize=40)\n\n #major ticks\n panel1.set_yticks(np.arange(0, len(rows_sbs_signatures), 1))\n #minor ticks\n panel1.set_yticks(np.arange(0, len(rows_sbs_signatures), 1)+0.5,minor=True)\n\n panel1.set_yticklabels(rows_sbs_signatures, minor=True) # fontsize\n\n plt.tick_params(\n axis='y', # changes apply to the x-axis\n which='major', # both major and minor ticks are affected\n left=False) # labels along the bottom edge are off\n ##################################################################################\n\n ##################################################################################\n # Gridlines based on major ticks\n panel1.grid(which='major', color='black', zorder=3)\n ##################################################################################\n\n ##################################################################################\n #Put the legend\n if strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:\n legend_elements = [\n Line2D([0], [0], marker='o', color='white', label=TRANSCRIBED_STRAND, markerfacecolor='royalblue' ,markersize=40),\n Line2D([0], [0], marker='o', color='white', label=UNTRANSCRIBED_STRAND, markerfacecolor='yellowgreen',markersize=40)]\n elif strand_bias == 
GENIC_VERSUS_INTERGENIC:\n legend_elements = [\n Line2D([0], [0], marker='o', color='white', label=GENIC, markerfacecolor='cyan',markersize=40),\n Line2D([0], [0], marker='o', color='white', label=INTERGENIC, markerfacecolor='gray',markersize=40)]\n elif (strand_bias==LAGGING_VERSUS_LEADING):\n legend_elements = [\n Line2D([0], [0], marker='o', color='white', label=LAGGING, markerfacecolor='indianred', markersize=40),\n Line2D([0], [0], marker='o', color='white', label=LEADING, markerfacecolor='goldenrod', markersize=40)]\n\n panel1.legend(handles=legend_elements,ncol=len(legend_elements), bbox_to_anchor=(1, -0.1),loc='upper right', fontsize=40)\n ##################################################################################\n\n\n ######################################################################################################################################\n for percentage_diff_index, percentage_string in enumerate(percentage_strings):\n for mutation_type_index, mutation_type in enumerate(mutation_types):\n for row_sbs_signature_index, row_sbs_signature in enumerate(rows_sbs_signatures):\n if (strand_bias==LAGGING_VERSUS_LEADING):\n if row_sbs_signature in signature2mutation_type2strand2percentagedict:\n if mutation_type in signature2mutation_type2strand2percentagedict[row_sbs_signature]:\n lagging_percentage = None\n leading_percentage = None\n\n if (LAGGING in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][LAGGING][percentage_string]==1):\n lagging_percentage = 100\n if (LEADING in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][LEADING][percentage_string]==1):\n leading_percentage = 100\n\n if (lagging_percentage is not None) and (leading_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string))\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius, color='indianred', fill=True))\n elif (leading_percentage is not None) and (lagging_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string))\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius, color='goldenrod', fill=True))\n\n elif (lagging_percentage is not None) and (leading_percentage is not None):\n radius_lagging = 0.49\n radius_leading = 0.49\n if (radius_lagging > radius_leading):\n # First lagging\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_lagging, color='indianred', fill=True))\n # Second leading\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_leading, color='goldenrod', fill=True))\n else:\n # First leading\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, 
row_sbs_signature_index + 0.5),radius_leading, color='goldenrod', fill=True))\n # Second lagging\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5, row_sbs_signature_index + 0.5),radius_lagging, color='indianred', fill=True))\n\n elif (strand_bias == GENIC_VERSUS_INTERGENIC):\n if row_sbs_signature in signature2mutation_type2strand2percentagedict:\n if mutation_type in signature2mutation_type2strand2percentagedict[row_sbs_signature]:\n genic_percentage = None\n intergenic_percentage = None\n\n if (GENIC in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][GENIC][percentage_string]==1):\n genic_percentage = 100\n if (INTERGENIC in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][INTERGENIC][percentage_string]==1):\n intergenic_percentage = 100\n\n if (genic_percentage is not None) and (intergenic_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='cyan',fill=True))\n\n elif (intergenic_percentage is not None) and (genic_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='gray',fill=True))\n\n elif (genic_percentage is not None) and (intergenic_percentage is not None):\n radius_genic = 0.49\n radius_intergenic = 0.49\n if (radius_genic > radius_intergenic):\n # First genic\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_genic,color='cyan', fill=True))\n # Second intergenic\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_intergenic,color='gray', fill=True))\n\n else:\n # First intergenic\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_intergenic, color='gray', fill=True))\n # Second genic\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_genic,color='cyan', fill=True))\n\n\n elif (strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED):\n if row_sbs_signature in signature2mutation_type2strand2percentagedict:\n if mutation_type in signature2mutation_type2strand2percentagedict[row_sbs_signature]:\n transcribed_percentage = None\n untranscribed_percentage = None\n\n if (TRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][TRANSCRIBED_STRAND][percentage_string]==1):\n transcribed_percentage = 100\n 
if (UNTRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type]) and (signature2mutation_type2strand2percentagedict[row_sbs_signature][mutation_type][UNTRANSCRIBED_STRAND][percentage_string]==1):\n untranscribed_percentage = 100\n\n if (transcribed_percentage is not None) and (untranscribed_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='royalblue',fill=True))\n\n elif (untranscribed_percentage is not None) and (transcribed_percentage is None):\n radius = 0.49\n if (radius > 0):\n # print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius, color='yellowgreen',fill=True))\n\n elif (transcribed_percentage is not None) and (untranscribed_percentage is not None):\n radius_transcribed = 0.49\n radius_untranscribed = 0.49\n if (radius_transcribed > radius_untranscribed):\n # First transcribed\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_transcribed,color='royalblue', fill=True))\n # Second untranscribed\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_untranscribed,color='yellowgreen', fill=True))\n\n else:\n # First untranscribed\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_untranscribed,color='yellowgreen', fill=True))\n # Second transcribed\n panel1.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,row_sbs_signature_index + 0.5), radius_transcribed,color='royalblue', fill=True))\n ######################################################################################################################################\n\n\n ##################################################################################\n # create the directory if it does not exists\n filename = 'SBS_Signatures_%s_with_circle_plot_%s.png' % (strand_bias,str(significance_level).replace('.','_'))\n figFile = os.path.join(strandbias_figures_outputDir,CIRCLE_PLOTS, filename)\n plot1.savefig(figFile,bbox_inches='tight')\n plot1.tight_layout()\n\n plt.cla()\n plt.close(plot1)\n ##################################################################################\n\n############################################################################################################################\n"
] |
[
[
"numpy.nanmax",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.rc",
"pandas.DataFrame",
"numpy.any",
"matplotlib.pyplot.gca",
"pandas.read_csv",
"numpy.arange",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.text",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.isnan",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.annotate",
"numpy.log10",
"matplotlib.rcParams.update",
"numpy.array",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.cla",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tick_params"
]
] |
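(Editor's illustrative note, not part of the dataset row above.) The apis column just closed condenses the third-party calls that appear in that row's code column. As a rough, hedged illustration only — the dataset's actual extraction method is not shown anywhere in this dump, and the helper name extract_called_apis is hypothetical — the sketch below walks a source string with Python's ast module and collects dotted call targets; mapping import aliases such as plt or np back to matplotlib.pyplot and numpy would still be needed before the results match the fully qualified names stored in apis.

    # Minimal sketch (assumption: the "apis" column lists dotted call targets found in
    # the "code" column; extract_called_apis is a hypothetical helper, not from any row).
    import ast

    def extract_called_apis(source: str) -> set:
        """Collect dotted names that appear as call targets, e.g. 'np.arange' or 'plt.Circle'."""
        tree = ast.parse(source)
        found = set()
        for node in ast.walk(tree):
            if isinstance(node, ast.Call):
                parts = []
                target = node.func
                # Unwind attribute chains like plt.cm.bone -> ['bone', 'cm', 'plt']
                while isinstance(target, ast.Attribute):
                    parts.append(target.attr)
                    target = target.value
                if isinstance(target, ast.Name):
                    parts.append(target.id)
                    found.add(".".join(reversed(parts)))
        return found

    # Usage: extract_called_apis(code_string) on a row's code column yields alias-based
    # names such as {'plt.Circle', 'np.arange', ...}; resolving the file's imports
    # (plt -> matplotlib.pyplot, np -> numpy) is still required to reproduce the
    # fully qualified entries seen in the apis column.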
yourtrading-ai/py_yourtrading_ai
|
[
"b69424f2afc40fe258c7ddae2fb47acc383ecbe5"
] |
[
"src/data_upload/batch.py"
] |
[
"import asyncio\nimport io\nimport ssl\n\nimport aiohttp\nimport aleph_client.asynchronous\nimport certifi\nimport pandas as pd\n\nfrom data_upload.data_utils import clean_time_duplicates\n\n\ndef get_download_url(symbol, interval=\"hourly\"):\n if interval == \"daily\":\n interval = \"d\"\n elif interval == \"hourly\":\n interval = \"1h\"\n elif interval == \"minutely\":\n interval = \"minute\"\n return f\"https://www.cryptodatadownload.com/cdd/Binance_{symbol}USDT_{interval}.csv\"\n\n\n# Code for all async\n# responses = asyncio.get_event_loop().run_until_complete(post_all_to_aleph_async(currencies))\n# hashes = [resp['item_hash'] for resp in responses]\nasync def post_to_aleph_async(account, client, symbol, interval=\"hourly\"):\n url = get_download_url(symbol, interval)\n sslcontext = ssl.create_default_context(cafile=certifi.where())\n async with client.get(url, ssl=sslcontext) as response:\n with io.StringIO(await response.text()) as text_io:\n df = pd.read_csv(text_io, header=1)\n clean_time_duplicates(df)\n print(df.describe())\n return await aleph_client.asynchronous.create_post(account=account,\n post_content=df.to_dict(),\n post_type=\"ohlcv_timeseries\",\n channel=\"TEST-CRYPTODATADOWNLOAD\")\n\n\nasync def post_all_to_aleph_async(account, symbols: list, interval=\"hourly\"):\n async with aiohttp.ClientSession(trust_env=True, connector=aiohttp.TCPConnector(limit_per_host=4)) as client:\n futures = [post_to_aleph_async(account, client, symbol, interval) for symbol in symbols]\n return await asyncio.gather(*futures)\n\n\ndef post_to_aleph(account, url, amend_hash=None):\n df = pd.read_csv(url, header=1)\n print(df.describe())\n post_type = 'ohlcv_timeseries' if amend_hash is None else 'amend'\n return aleph_client.create_post(account=account,\n post_content=df.describe().to_dict(),\n post_type=post_type,\n channel=\"TEST-CRYPTODATADOWNLOAD\",\n ref=amend_hash)\n\n\ndef post_all_to_aleph(account, symbols: list, amend_hashes=None, interval=\"hourly\"):\n hashes = {}\n for symbol in symbols:\n url = get_download_url(symbol, interval)\n if amend_hashes:\n resp = post_to_aleph(account, url, amend_hashes[symbol])\n print(f\"Amended {symbol}: {amend_hashes[symbol]}\")\n else:\n resp = post_to_aleph(account, url)\n print(f\"Posted {symbol}: {resp['item_hash']}\")\n hashes[symbol] = resp['item_hash']\n return hashes\n\n"
] |
[
[
"pandas.read_csv"
]
] |
otsubo/CIFAR-ConvolutionalAutoEncoder-Chainer
|
[
"bbda81dc7b52f42e07e9daaff38ce7453b24e008"
] |
[
"generate_cloth_img.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 21 08:51:18 2018\n\n@author: user\n\"\"\"\n\nimport argparse\n\nimport os\nimport os.path as osp\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nimport chainer\nfrom chainer import cuda\nfrom chainer.datasets import get_cifar10\nfrom chainer import dataset\nfrom chainer import Variable\nfrom chainer import serializers\nimport chainer.functions as F\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom skimage.io import imread\n\nimport network\n\n\n# Load data\nclass LoadDataset(dataset.DatasetMixin):\n def __init__(self, split, return_image=False):\n assert split in ('train', 'val')\n ids = self._get_ids()\n iter_train, iter_val = train_test_split(\n ids, test_size=0.2, random_state=np.random.RandomState(1234))\n self.ids = iter_train if split == 'train' else iter_val\n self._return_image = return_image\n\n def __len__(self):\n return len(self.ids)\n\n def _get_ids(self):\n ids = []\n dataset_dir = chainer.dataset.get_dataset_directory(\n '2019_11_28_pr2')\n for data_id in os.listdir(dataset_dir):\n ids.append(osp.join(dataset_dir , data_id))\n return ids\n\n def img_to_datum(self, img):\n img = img.copy()\n datum = img.astype(np.float32)\n datum = datum[:, :, ::-1] #RGB -> BGR\n datum = datum.transpose((2, 0, 1))\n return datum\n\n def get_example(self, i):\n id = self.ids[i]\n image_file = osp.join(id , \"image.png\")\n img = imread(image_file)\n datum = self.img_to_datum(img)\n if self._return_image:\n return img\n else:\n return datum, datum\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', '-g', type=int, default=-1)\n parser.add_argument('--model', '-m', type=str, default=\"./results/cloth/model\")\n parser.add_argument('--begin', '-b', type=int, default=0)\n args = parser.parse_args()\n\n # Set up a neural network to train.\n test = LoadDataset(split='val')\n\n model = network.CAE(3,3, return_out=True)\n \n if args.model != None:\n print( \"loading model from \" + args.model )\n serializers.load_npz(args.model, model)\n \n # Show 64 images\n fig = plt.figure(figsize=(6,6))\n plt.title(\"Original images: first rows,\\n Predicted images: second rows\")\n plt.axis('off')\n plt.tight_layout()\n \n pbar = tqdm(total=8)\n #import ipdb; ipdb.set_trace()\n for i in range(2):\n for j in range(2):\n ax = fig.add_subplot(4, 2, i*4+j+1, xticks=[], yticks=[])\n x, t = test[i*2+j]\n xT = x.transpose(1, 2, 0)\n xT = xT.astype(np.uint8)\n ax.imshow(xT, cmap=plt.cm.bone, interpolation='nearest')\n \n x = np.expand_dims(x, 0)\n t = np.expand_dims(t, 0)\n \n if args.gpu >= 0:\n cuda.get_device_from_id(0).use()\n model.to_gpu()\n x = cuda.cupy.array(x)\n t = cuda.cupy.array(t)\n \n predicted, loss = model(Variable(x), Variable(t))\n #print(predicted.shape)\n #print(loss) \n \n predicted = F.transpose(predicted[0], (1, 2, 0))\n predicted = cuda.to_cpu(predicted.data) #Variable to numpy\n predicted = predicted * 255\n predicted = predicted.astype(np.uint8) \n ax = fig.add_subplot(4, 2, i*4+j+3, xticks=[], yticks=[])\n ax.imshow(predicted, cmap=plt.cm.bone, interpolation='nearest')\n\n pbar.update(1)\n \n pbar.close()\n \n plt.savefig(\"result.png\")\n plt.show()\n plt.close()\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"numpy.expand_dims",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"numpy.random.RandomState",
"matplotlib.pyplot.figure"
]
] |
nataliepopescu/osdi21-artifact
|
[
"6a268c90a8ce449256b5c290caeb7e0e3b9d7e5c"
] |
[
"scripts/table4_run.py"
] |
[
"import os\nimport subprocess\nimport re\nimport time\nfrom numpy import average \nfrom ExpStats import runExpWithName\n\nROOT_PATH = os.path.dirname(os.path.realpath(__file__))\n\ndef parseThroughput(out):\n try:\n m = re.search(r'Requests/sec: ([0-9,.]+)', out)\n # m = re.search(r'([0-9,]+) ns/iter', out)\n s = m.group(1)\n result = float(s.strip())\n #s = s.replace(',', '')\n #result = int(s)\n except Exception:\n print(out)\n print(\"Run experiment failed\")\n return None\n\n return result\n\ndef parseZola(out):\n try:\n m = re.search(r'Done in ([0-9]+)ms.', out)\n # m = re.search(r'([0-9,]+) ns/iter', out)\n s = m.group(1)\n result = float(s.strip())\n #s = s.replace(',', '')\n #result = int(s)\n except Exception:\n print(out)\n print(\"Run experiment failed\")\n return None\n\n return result\n\ndef test_swc():\n print(\"Testing swc\")\n os.chdir(ROOT_PATH + \"/../benchmarks/swc\")\n safe_time, _, _ = runExpWithName(\"./test_bc-safe\", None, 20, False)\n unsafe_time, _, _ = runExpWithName(\"./test_bc-unsafe\", None, 20, False)\n perf_diff = (safe_time - unsafe_time) / unsafe_time\n print(\"Performance difference of swc is: {:2.2%}\".format(perf_diff))\n\ndef test_warp():\n print(\"Testing warp\")\n os.chdir(ROOT_PATH + \"/../benchmarks/warp\")\n out = subprocess.Popen([ROOT_PATH + '/runWarp.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n out, _ = out.communicate()\n out = out.decode(\"utf-8\") # convert to string from bytes\n safe_throughput = parseThroughput(out)\n\n out = subprocess.Popen([ROOT_PATH + '/runWarp.sh', 'unsafe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n out, _ = out.communicate()\n out = out.decode(\"utf-8\") # convert to string from bytes\n unsafe_throughput = parseThroughput(out)\n\n if safe_throughput and unsafe_throughput:\n perf_diff = (unsafe_throughput - safe_throughput) / unsafe_throughput \n print(\"Performance difference of warp is: {:2.2%}\".format(perf_diff))\n\n\ndef test_iron():\n print(\"Testing iron\")\n os.chdir(ROOT_PATH + \"/../benchmarks/iron\")\n out = subprocess.Popen([ROOT_PATH + '/runIron.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n out, _ = out.communicate()\n out = out.decode(\"utf-8\") # convert to string from bytes\n safe_throughput = parseThroughput(out)\n\n out = subprocess.Popen([ROOT_PATH + '/runIron.sh', 'unsafe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n out, _ = out.communicate()\n out = out.decode(\"utf-8\") # convert to string from bytes\n unsafe_throughput = parseThroughput(out)\n\n if safe_throughput and unsafe_throughput:\n perf_diff = (unsafe_throughput - safe_throughput) / unsafe_throughput \n print(\"Performance difference of iron is: {:2.2%}\".format(perf_diff))\n\n\ndef test_zola():\n print(\"Testing zola\")\n os.chdir(ROOT_PATH + \"/../benchmarks/zola\")\n\n time_list = []\n for _ in range(100):\n out = subprocess.Popen([ROOT_PATH + '/runZola.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n out, _ = out.communicate()\n out = out.decode(\"utf-8\") # convert to string from bytes\n time = parseZola(out)\n time_list.append(time)\n\n unsafe_time = average(time_list)\n\n time_list = []\n for _ in range(100):\n out = subprocess.Popen([ROOT_PATH + '/runZola.sh', 'safe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n out, _ = out.communicate()\n out = out.decode(\"utf-8\") # convert to string from bytes\n time = parseZola(out)\n time_list.append(time)\n safe_time = average(time_list)\n perf_diff = (safe_time - unsafe_time) / unsafe_time\n 
print(\"Performance difference of zola is: {:2.2%}\".format(perf_diff))\n \n\ndef test_rustpython():\n print(\"Testing RustPython\")\n os.chdir(ROOT_PATH + \"/../benchmarks/RustPython\")\n arg = ROOT_PATH + \"/../benchmarks/RustPython/benches/benchmarks/pystone.py\"\n safe_time, _, _ = runExpWithName(\"./test_bc-safe\", arg, 10, False)\n unsafe_time, _, _ = runExpWithName(\"./test_bc-unsafe\", arg, 10, False)\n perf_diff = (safe_time - unsafe_time) / unsafe_time\n print(\"Performance difference of RustPython is: {:2.2%}\".format(perf_diff))\n\nos.chdir(ROOT_PATH + \"/../benchmarks\")\ntest_iron()\ntest_swc()\ntest_warp()\ntest_zola()\ntest_rustpython()\n"
] |
[
[
"numpy.average"
]
] |
noemiefedon/RELAY
|
[
"1bf9c27ee1bcf1be0a7652fcca0ea38dd47b14b8"
] |
[
"src/one_stack.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nFunctions to check a design manufacturability\n\n- check_ss_manufacturability\n checks the manufacturability of a stacking sequence list\n\"\"\"\n__version__ = '1.0'\n__author__ = 'Noemie Fedon'\n\nimport sys\nimport numpy as np\nsys.path.append(r'C:\\RELAY')\nfrom src.contiguity import is_contig\nfrom src.disorientation import is_diso_ss\nfrom src.balance import is_balanced\nfrom src.dam_tol import is_dam_tol\nfrom src.ten_percent_rule import is_ten_percent_rule\nfrom src.lp_functions_2 import calc_lampamA\nfrom src.constraints import Constraints\nfrom src.pretty_print import print_ss\n\ndef check_ss_manufacturability(\n ss, constraints, no_ipo_check=False, no_bal_check=False,\n equality_45_135=False, equality_0_90=False, n_plies=None):\n \"\"\"\n checks the manufacturability of a stacking sequence list\n \"\"\"\n if n_plies is not None and ss.size != n_plies:\n raise Exception(\"Wrong number of plies\")\n\n if constraints.dam_tol:\n if not is_dam_tol(ss, constraints):\n print_ss(ss)\n raise Exception(\"Damage tolerance constraint not satisfied\")\n\n if not no_bal_check and constraints.bal:\n if not is_balanced(ss, constraints):\n raise Exception(\"Balance constraint not satisfied\")\n\n if not no_ipo_check and constraints.ipo:\n lampamA = calc_lampamA(ss, constraints)\n if (abs(lampamA[2:4]) > 1e-10).any():\n print_ss(ss)\n print('lampamA', lampamA)\n# print('ipo')\n raise Exception(\"In plane orthotropy constraint not satisfied\")\n\n\n if constraints.diso:\n if hasattr(constraints, 'dam_tol_rule'):\n if not is_diso_ss(ss, constraints.delta_angle,\n constraints.dam_tol, constraints.dam_tol_rule):\n raise Exception(\"Disorientation constraint not satisfied\")\n else:\n if not is_diso_ss(ss, constraints.delta_angle,\n constraints.dam_tol, constraints.n_plies_dam_tol):\n raise Exception(\"Disorientation constraint not satisfied\")\n\n if constraints.contig:\n if not is_contig(ss, constraints.n_contig):\n raise Exception(\"Contiguity constraint not satisfied\")\n\n if constraints.rule_10_percent:\n if not is_ten_percent_rule(\n constraints, stack=ss,\n equality_45_135=equality_45_135,\n equality_0_90=equality_0_90):\n raise Exception(\"10% rule not satisfied\")\n\n return 0\n\n\n\nif __name__ == \"__main__\":\n\n print('\\n*** Test for the function check_ss_manufacturability ***')\n constraints = Constraints(\n sym=True,\n bal=True,\n ipo=True,\n oopo=False,\n dam_tol=False,\n rule_10_percent=True,\n percent_0=10,\n percent_45=0,\n percent_90=10,\n percent_135=0,\n percent_45_135=10,\n diso=True,\n contig=True,\n n_contig=5,\n delta_angle=45,\n set_of_angles=np.array([0, 45, -45, 90]))\n ss = np.array([ 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 0, 0, 0, 45, 0, 0, 0, 0, 45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 45, 0, 0, 0, 0, 0, 45, 0, 0, 0, 0, -45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, 45, 0, 0, 45, 90, 90, 90, 90, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 90, 90, 90, -45, 0, 0, 45, 45, 0, 0, -45, 90, 90, 90, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, 45, 90, 90, 90, 90, 45, 0, 0, 45, 90, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, -45, 0, 0, 0, 0, 45, 0, 0, 0, 0, 0, 45, 90, 90, 90, 90, -45, 90, 90, 90, 90, 90, 45, 0, 0, 0, 0, 45, 0, 0, 0, 45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0, -45, 0, 0, 0, 0, 0], int)\n check_ss_manufacturability(ss, constraints)\n"
] |
[
[
"numpy.array"
]
] |
postpascal/py-futu-api
|
[
"cb274d5ab5387dca190b739d161f2bc8eabe073d"
] |
[
"futu/quote/open_quote_context.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n Market quote and trade context setting\n\"\"\"\n\nimport datetime\nimport math\nfrom time import sleep\n\nimport pandas as pd\nfrom futu.common.open_context_base import OpenContextBase, ContextStatus\nfrom futu.quote.quote_query import *\n\n\nclass OpenQuoteContext(OpenContextBase):\n \"\"\"行情上下文对象类\"\"\"\n\n def __init__(self, host='127.0.0.1', port=11111):\n \"\"\"\n 初始化Context对象\n :param host: host地址\n :param port: 端口\n \"\"\"\n self._ctx_subscribe = {}\n super(OpenQuoteContext, self).__init__(host, port, True)\n\n def close(self):\n \"\"\"\n 关闭上下文对象。\n\n .. code:: python\n\n from futu import *\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)\n quote_ctx.close()\n \"\"\"\n super(OpenQuoteContext, self).close()\n\n def on_api_socket_reconnected(self):\n \"\"\"for API socket reconnected\"\"\"\n # auto subscriber\n resub_count = 0\n subtype_list = []\n code_list = []\n\n resub_dict = copy(self._ctx_subscribe)\n subtype_all_cnt = len(resub_dict.keys())\n subtype_cur_cnt = 0\n\n ret_code = RET_OK\n ret_msg = ''\n\n for subtype in resub_dict.keys():\n subtype_cur_cnt += 1\n code_set = resub_dict[subtype]\n code_list_new = [code for code in code_set]\n if len(code_list_new) == 0:\n continue\n\n if len(code_list) == 0:\n code_list = code_list_new\n subtype_list = [subtype]\n\n is_need_sub = False\n if code_list == code_list_new:\n if subtype not in subtype_list:\n subtype_list.append(subtype) # 合并subtype请求\n else:\n ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)\n logger.debug(\"reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}\".format(\n len(code_list), ret_code, ret_msg, subtype_list, code_list))\n if ret_code != RET_OK:\n break\n\n resub_count += len(code_list)\n code_list = code_list_new\n subtype_list = [subtype]\n\n # 循环即将结束\n if subtype_cur_cnt == subtype_all_cnt and len(code_list):\n ret_code, ret_msg = self._reconnect_subscribe(code_list, subtype_list)\n logger.debug(\"reconnect subscribe code_count={} ret_code={} ret_msg={} subtype_list={} code_list={}\".format(len(code_list), ret_code, ret_msg, subtype_list, code_list))\n if ret_code != RET_OK:\n break\n resub_count += len(code_list)\n code_list = []\n subtype_list = []\n\n logger.debug(\"reconnect subscribe all code_count={} ret_code={} ret_msg={}\".format(resub_count, ret_code, ret_msg))\n\n # 重定阅失败,重连\n if ret_code != RET_OK:\n logger.error(\"reconnect subscribe error, close connect and retry!!\")\n self._status = ContextStatus.Start\n self._wait_reconnect()\n return ret_code, ret_msg\n\n\n\n def get_trading_days(self, market, start=None, end=None):\n \"\"\"获取交易日\n :param market: 市场类型,Market_\n :param start: 起始日期。例如'2018-01-01'。\n :param end: 结束日期。例如'2018-01-01'。\n start和end的组合如下:\n ========== ========== ========================================\n start类型 end类型 说明\n ========== ========== ========================================\n str str start和end分别为指定的日期\n None str start为end往前365天\n str None end为start往后365天\n None None end为当前日期,start为end往前365天\n ========== ========== ========================================\n :return: 成功时返回(RET_OK, data),data是[{'trade_date_type': 0, 'time': '2018-01-05'}]数组;失败时返回(RET_ERROR, data),其中data是错误描述字符串\n \"\"\"\n if market is None or is_str(market) is False:\n error_str = ERROR_STR_PREFIX + \"the type of market param is wrong\"\n return RET_ERROR, error_str\n\n ret, msg, start, end = normalize_start_end_date(start, end, 365)\n if ret != RET_OK:\n return ret, msg\n\n query_processor = 
self._get_sync_query_processor(\n TradeDayQuery.pack_req, TradeDayQuery.unpack_rsp)\n\n # the keys of kargs should be corresponding to the actual function arguments\n kargs = {\n 'market': market,\n 'start_date': start,\n 'end_date': end,\n 'conn_id': self.get_sync_conn_id()\n }\n ret_code, msg, trade_day_list = query_processor(**kargs)\n\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n\n return RET_OK, trade_day_list\n\n def get_stock_basicinfo(self, market, stock_type=SecurityType.STOCK, code_list=None):\n \"\"\"\n 获取指定市场中特定类型的股票基本信息\n :param market: 市场类型,futu.common.constant.Market\n :param stock_type: 股票类型, futu.common.constant.SecurityType\n :param code_list: 如果不为None,应该是股票code的iterable类型,将只返回指定的股票信息\n :return: (ret_code, content)\n ret_code 等于RET_OK时, content为Pandas.DataFrame数据, 否则为错误原因字符串, 数据列格式如下\n ================= =========== ==============================================================================\n 参数 类型 说明\n ================= =========== ==============================================================================\n code str 股票代码\n name str 名字\n lot_size int 每手数量\n stock_type str 股票类型,参见SecurityType\n stock_child_type str 涡轮子类型,参见WrtType\n stock_owner str 所属正股的代码\n option_type str 期权类型,Qot_Common.OptionType\n strike_time str 行权日\n strike_price float 行权价\n suspension bool 是否停牌(True表示停牌)\n listing_date str 上市时间\n stock_id int 股票id\n delisting bool 是否退市\n ================= =========== ==============================================================================\n\n :example:\n\n .. code-block:: python\n\n from futu import *\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)\n print(quote_ctx.get_stock_basicinfo(Market.HK, SecurityType.WARRANT))\n print(quote_ctx.get_stock_basicinfo(Market.US, SecurityType.DRVT, 'US.AAPL210115C185000'))\n quote_ctx.close()\n \"\"\"\n param_table = {'market': market, 'stock_type': stock_type}\n for x in param_table:\n param = param_table[x]\n if param is None or is_str(param) is False:\n error_str = ERROR_STR_PREFIX + \"the type of %s param is wrong\" % x\n return RET_ERROR, error_str\n\n if code_list is not None:\n if is_str(code_list):\n code_list = code_list.split(',')\n elif isinstance(code_list, list):\n pass\n else:\n return RET_ERROR, \"code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'\"\n\n query_processor = self._get_sync_query_processor(\n StockBasicInfoQuery.pack_req, StockBasicInfoQuery.unpack_rsp)\n kargs = {\n \"market\": market,\n 'stock_type': stock_type,\n 'code_list': code_list,\n 'conn_id': self.get_sync_conn_id()\n }\n\n ret_code, msg, basic_info_list = query_processor(**kargs)\n if ret_code != RET_OK:\n return ret_code, msg\n\n col_list = [\n 'code', 'name', 'lot_size', 'stock_type', 'stock_child_type', 'stock_owner',\n 'option_type', 'strike_time', 'strike_price', 'suspension',\n 'listing_date', 'stock_id', 'delisting'\n ]\n\n basic_info_table = pd.DataFrame(basic_info_list, columns=col_list)\n\n return RET_OK, basic_info_table\n\n def get_multiple_history_kline(self,\n codelist,\n start=None,\n end=None,\n ktype=KLType.K_DAY,\n autype=AuType.QFQ):\n \"\"\"\n 获取多只股票的本地历史k线数据\n\n :param codelist: 股票代码列表,list或str。例如:['HK.00700', 'HK.00001'],'HK.00700,HK.00001'\n :param start: 起始时间,例如'2017-06-20'\n :param end: 结束时间, 例如'2017-07-20',start与end组合关系参见 get_history_kline_\n :param ktype: k线类型,参见KLType\n :param autype: 复权类型,参见AuType\n :return: 成功时返回(RET_OK, [data]),data是DataFrame数据, 数据列格式如下\n\n ================= =========== 
==============================================================================\n 参数 类型 说明\n ================= =========== ==============================================================================\n code str 股票代码\n time_key str k线时间\n open float 开盘价\n close float 收盘价\n high float 最高价\n low float 最低价\n pe_ratio float 市盈率(该字段为比例字段,默认不展示%)\n turnover_rate float 换手率\n volume int 成交量\n turnover float 成交额\n change_rate float 涨跌幅\n last_close float 昨收价\n ================= =========== ==============================================================================\n\n 失败时返回(RET_ERROR, data),其中data是错误描述字符串\n\n \"\"\"\n if is_str(codelist):\n codelist = codelist.split(',')\n elif isinstance(codelist, list):\n pass\n else:\n return RET_ERROR, \"code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'\"\n result = []\n for code in codelist:\n ret, data = self.get_history_kline(code, start, end, ktype, autype)\n if ret != RET_OK:\n return RET_ERROR, 'get history kline error: {}, {},{},{},{}'.format(data, code, start, end, ktype)\n result.append(data)\n return 0, result\n\n def _get_history_kline_impl(self,\n query_cls,\n code,\n start=None,\n end=None,\n ktype=KLType.K_DAY,\n autype=AuType.QFQ,\n fields=[KL_FIELD.ALL]\n ):\n\n ret, msg, req_start, end = normalize_start_end_date(start, end, 365)\n if ret != RET_OK:\n return ret, msg\n\n req_fields = unique_and_normalize_list(fields)\n if not fields:\n req_fields = copy(KL_FIELD.ALL_REAL)\n req_fields = KL_FIELD.normalize_field_list(req_fields)\n if not req_fields:\n error_str = ERROR_STR_PREFIX + \"the type of fields param is wrong\"\n return RET_ERROR, error_str\n\n if autype is None:\n autype = 'None'\n\n param_table = {'code': code, 'ktype': ktype, 'autype': autype}\n for x in param_table:\n param = param_table[x]\n if param is None or is_str(param) is False:\n error_str = ERROR_STR_PREFIX + \"the type of %s param is wrong\" % x\n return RET_ERROR, error_str\n\n\n max_kl_num = 1000\n data_finish = False\n list_ret = []\n # 循环请求数据,避免一次性取太多超时\n while not data_finish:\n kargs = {\n \"code\": code,\n \"start_date\": req_start,\n \"end_date\": end,\n \"ktype\": ktype,\n \"autype\": autype,\n \"fields\": copy(req_fields),\n \"max_num\": max_kl_num,\n \"conn_id\": self.get_sync_conn_id()\n }\n query_processor = self._get_sync_query_processor(query_cls.pack_req, query_cls.unpack_rsp)\n ret_code, msg, content = query_processor(**kargs)\n if ret_code != RET_OK:\n return ret_code, msg\n\n list_kline, has_next, next_time = content\n data_finish = (not has_next) or (not next_time)\n req_start = next_time\n for dict_item in list_kline:\n list_ret.append(dict_item)\n\n # 表头列\n col_list = ['code']\n for field in req_fields:\n str_field = KL_FIELD.DICT_KL_FIELD_STR[field]\n if str_field not in col_list:\n col_list.append(str_field)\n\n kline_frame_table = pd.DataFrame(list_ret, columns=col_list)\n\n return RET_OK, kline_frame_table\n\n def get_history_kline(self,\n code,\n start=None,\n end=None,\n ktype=KLType.K_DAY,\n autype=AuType.QFQ,\n fields=[KL_FIELD.ALL]):\n \"\"\"\n 得到本地历史k线,需先参照帮助文档下载k线\n\n :param code: 股票代码\n :param start: 开始时间,例如'2017-06-20'\n :param end: 结束时间,例如'2017-06-30'\n start和end的组合如下:\n ========== ========== ========================================\n start类型 end类型 说明\n ========== ========== ========================================\n str str start和end分别为指定的日期\n None str start为end往前365天\n str None end为start往后365天\n None None end为当前日期,start为end往前365天\n ========== ========== ========================================\n :param ktype: 
k线类型, 参见 KLType 定义\n :param autype: 复权类型, 参见 AuType 定义\n :param fields: 需返回的字段列表,参见 KL_FIELD 定义 KL_FIELD.ALL KL_FIELD.OPEN ....\n :return: (ret, data)\n\n ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下\n\n ret != RET_OK 返回错误字符串\n\n ================= =========== ==============================================================================\n 参数 类型 说明\n ================= =========== ==============================================================================\n code str 股票代码\n time_key str k线时间\n open float 开盘价\n close float 收盘价\n high float 最高价\n low float 最低价\n pe_ratio float 市盈率(该字段为比例字段,默认不展示%)\n turnover_rate float 换手率\n volume int 成交量\n turnover float 成交额\n change_rate float 涨跌幅\n last_close float 昨收价\n ================= =========== ==============================================================================\n\n :example:\n\n .. code:: python\n\n from futu import *\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)\n print(quote_ctx.get_history_kline('HK.00700', start='2017-06-20', end='2017-06-22'))\n quote_ctx.close()\n \"\"\"\n return self._get_history_kline_impl(GetHistoryKlineQuery, code, start=start, end=end,\n ktype=ktype, autype=autype, fields=fields)\n\n def request_history_kline(self,\n code,\n start=None,\n end=None,\n ktype=KLType.K_DAY,\n autype=AuType.QFQ,\n fields=[KL_FIELD.ALL],\n max_count=1000,\n page_req_key=None):\n \"\"\"\n 拉取历史k线,不需要先下载历史数据。\n\n :param code: 股票代码\n :param start: 开始时间,例如'2017-06-20'\n :param end: 结束时间,例如'2017-07-20'。\n start和end的组合如下:\n ========== ========== ========================================\n start类型 end类型 说明\n ========== ========== ========================================\n str str start和end分别为指定的日期\n None str start为end往前365天\n str None end为start往后365天\n None None end为当前日期,start为end往前365天\n ========== ========== ========================================\n :param ktype: k线类型, 参见 KLType 定义\n :param autype: 复权类型, 参见 AuType 定义\n :param fields: 需返回的字段列表,参见 KL_FIELD 定义 KL_FIELD.ALL KL_FIELD.OPEN ....\n :param max_count: 本次请求最大返回的数据点个数,传None表示返回start和end之间所有的数据。\n :param page_req_key: 分页请求的key。如果start和end之间的数据点多于max_count,那么后续请求时,要传入上次调用返回的page_req_key。初始请求时应该传None。\n :return: (ret, data, page_req_key)\n\n ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下。page_req_key在分页请求时(即max_count>0)\n 可能返回,并且需要在后续的请求中传入。如果没有更多数据,page_req_key返回None。\n\n ret != RET_OK 返回错误字符串\n\n ================= =========== ==============================================================================\n 参数 类型 说明\n ================= =========== ==============================================================================\n code str 股票代码\n time_key str k线时间\n open float 开盘价\n close float 收盘价\n high float 最高价\n low float 最低价\n pe_ratio float 市盈率(该字段为比例字段,默认不展示%)\n turnover_rate float 换手率\n volume int 成交量\n turnover float 成交额\n change_rate float 涨跌幅\n last_close float 昨收价\n ================= =========== ==============================================================================\n\n :note\n\n :example:\n\n .. 
code:: python\n\n from futu import *\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)\n ret, data, page_req_key = quote_ctx.request_history_kline('HK.00700', start='2017-06-20', end='2018-06-22', max_count=50)\n print(ret, data)\n ret, data, page_req_key = quote_ctx.request_history_kline('HK.00700', start='2017-06-20', end='2018-06-22', max_count=50, page_req_key=page_req_key)\n print(ret, data)\n quote_ctx.close()\n \"\"\"\n next_page_req_key = None\n ret, msg, req_start, end = normalize_start_end_date(start, end, 365)\n if ret != RET_OK:\n return ret, msg, next_page_req_key\n\n req_fields = unique_and_normalize_list(fields)\n if not fields:\n req_fields = copy(KL_FIELD.ALL_REAL)\n req_fields = KL_FIELD.normalize_field_list(req_fields)\n if not req_fields:\n error_str = ERROR_STR_PREFIX + \"the type of fields param is wrong\"\n return RET_ERROR, error_str, next_page_req_key\n\n if autype is None:\n autype = 'None'\n\n param_table = {'code': code, 'ktype': ktype, 'autype': autype}\n for x in param_table:\n param = param_table[x]\n if param is None or is_str(param) is False:\n error_str = ERROR_STR_PREFIX + \"the type of %s param is wrong\" % x\n return RET_ERROR, error_str, next_page_req_key\n\n max_kl_num = min(1000, max_count) if max_count is not None else 1000\n data_finish = False\n list_ret = []\n # 循环请求数据,避免一次性取太多超时\n while not data_finish:\n kargs = {\n \"code\": code,\n \"start_date\": req_start,\n \"end_date\": end,\n \"ktype\": ktype,\n \"autype\": autype,\n \"fields\": copy(req_fields),\n \"max_num\": max_kl_num,\n \"conn_id\": self.get_sync_conn_id(),\n \"next_req_key\": page_req_key\n }\n query_processor = self._get_sync_query_processor(RequestHistoryKlineQuery.pack_req,\n RequestHistoryKlineQuery.unpack_rsp)\n ret_code, msg, content = query_processor(**kargs)\n if ret_code != RET_OK:\n return ret_code, msg, next_page_req_key\n\n list_kline, has_next, page_req_key = content\n list_ret.extend(list_kline)\n next_page_req_key = page_req_key\n if max_count is not None:\n if max_count > len(list_ret) and has_next:\n data_finish = False\n max_kl_num = min(max_count - len(list_ret), 1000)\n else:\n data_finish = True\n else:\n data_finish = not has_next\n\n # 表头列\n col_list = ['code']\n for field in req_fields:\n str_field = KL_FIELD.DICT_KL_FIELD_STR[field]\n if str_field not in col_list:\n col_list.append(str_field)\n\n kline_frame_table = pd.DataFrame(list_ret, columns=col_list)\n\n return RET_OK, kline_frame_table, next_page_req_key\n\n def get_autype_list(self, code_list):\n \"\"\"\n 获取给定股票列表的复权因子\n\n :param code_list: 股票列表,例如['HK.00700']\n :return: (ret, data)\n\n ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下\n\n ret != RET_OK 返回错误字符串\n\n ===================== =========== =================================================================================\n 参数 类型 说明\n ===================== =========== =================================================================================\n code str 股票代码\n ex_div_date str 除权除息日\n split_ratio float 拆合股比例(该字段为比例字段,默认不展示%),例如,对于5股合1股为1/5,对于1股拆5股为5/1\n per_cash_div float 每股派现\n per_share_div_ratio float 每股送股比例(该字段为比例字段,默认不展示%)\n per_share_trans_ratio float 每股转增股比例(该字段为比例字段,默认不展示%)\n allotment_ratio float 每股配股比例(该字段为比例字段,默认不展示%)\n allotment_price float 配股价\n stk_spo_ratio float 增发比例(该字段为比例字段,默认不展示%)\n stk_spo_price float 增发价格\n forward_adj_factorA float 前复权因子A\n forward_adj_factorB float 前复权因子B\n backward_adj_factorA float 后复权因子A\n backward_adj_factorB float 后复权因子B\n ===================== =========== 
=================================================================================\n\n \"\"\"\n code_list = unique_and_normalize_list(code_list)\n\n for code in code_list:\n if code is None or is_str(code) is False:\n error_str = ERROR_STR_PREFIX + \"the type of param in code_list is wrong\"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(\n ExrightQuery.pack_req, ExrightQuery.unpack_rsp)\n kargs = {\n \"stock_list\": code_list,\n \"conn_id\": self.get_sync_conn_id()\n }\n ret_code, msg, exr_record = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n col_list = [\n 'code', 'ex_div_date', 'split_ratio', 'per_cash_div',\n 'per_share_div_ratio', 'per_share_trans_ratio', 'allotment_ratio',\n 'allotment_price', 'stk_spo_ratio', 'stk_spo_price',\n 'forward_adj_factorA', 'forward_adj_factorB',\n 'backward_adj_factorA', 'backward_adj_factorB'\n ]\n\n exr_frame_table = pd.DataFrame(exr_record, columns=col_list)\n\n return RET_OK, exr_frame_table\n\n def get_market_snapshot(self, code_list):\n \"\"\"\n 获取市场快照\n\n :param code_list: 股票列表\n :return: (ret, data)\n\n ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下\n\n ret != RET_OK 返回错误字符串\n\n ======================= ============= ==============================================================================\n 参数 类型 说明\n ======================= ============= ==============================================================================\n code str 股票代码\n update_time str 更新时间(yyyy-MM-dd HH:mm:ss),(美股默认是美东时间,港股A股默认是北京时间)\n last_price float 最新价格\n open_price float 今日开盘价\n high_price float 最高价格\n low_price float 最低价格\n prev_close_price float 昨收盘价格\n volume int 成交数量\n turnover float 成交金额\n turnover_rate float 换手率\n suspension bool 是否停牌(True表示停牌)\n listing_date str 上市日期 (yyyy-MM-dd)\n equity_valid bool 是否正股(为true时以下正股相关字段才有合法数值)\n issued_shares int 发行股本\n total_market_val float 总市值\n net_asset int 资产净值\n net_profit int 净利润\n earning_per_share float 每股盈利\n outstanding_shares int 流通股本\n net_asset_per_share float 每股净资产\n circular_market_val float 流通市值\n ey_ratio float 收益率(该字段为比例字段,默认不展示%)\n pe_ratio float 市盈率(该字段为比例字段,默认不展示%)\n pb_ratio float 市净率(该字段为比例字段,默认不展示%)\n pe_ttm_ratio float 市盈率TTM(该字段为比例字段,默认不展示%)\n stock_owner str 涡轮所属正股的代码或期权的标的股代码\n wrt_valid bool 是否是窝轮(为true时以下涡轮相关的字段才有合法数据)\n wrt_conversion_ratio float 换股比率(该字段为比例字段,默认不展示%)\n wrt_type str 窝轮类型,参见WrtType\n wrt_strike_price float 行使价格\n wrt_maturity_date str 格式化窝轮到期时间\n wrt_end_trade str 格式化窝轮最后交易时间\n wrt_code str 窝轮对应的正股(此字段已废除,修改为stock_owner)\n wrt_recovery_price float 窝轮回收价\n wrt_street_vol float 窝轮街货量\n wrt_issue_vol float 窝轮发行量\n wrt_street_ratio float 窝轮街货占比(该字段为比例字段,默认不展示%)\n wrt_delta float 窝轮对冲值\n wrt_implied_volatility float 窝轮引伸波幅\n wrt_premium float 窝轮溢价\n lot_size int 每手股数\n price_spread float 当前摆盘价差亦即摆盘数据的买档或卖档的相邻档位的报价差\n option_valid bool 是否是期权(为true时以下期权相关的字段才有合法数值)\n option_type str 期权类型,参见OptionType\n strike_time str 行权日(美股默认是美东时间,港股A股默认是北京时间)\n option_strike_price float 行权价\n option_contract_size int 每份合约数\n option_open_interest int 未平仓合约数\n option_implied_volatility float 隐含波动率\n option_premium float 溢价\n option_delta float 希腊值 Delta\n option_gamma float 希腊值 Gamma\n option_vega float 希腊值 Vega\n option_theta float 希腊值 Theta\n option_rho float 希腊值 Rho\n ======================= ============= ==============================================================================\n \"\"\"\n code_list = unique_and_normalize_list(code_list)\n if not code_list:\n error_str = ERROR_STR_PREFIX + \"the type of code param is 
wrong\"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(\n MarketSnapshotQuery.pack_req, MarketSnapshotQuery.unpack_rsp)\n kargs = {\n \"stock_list\": code_list,\n \"conn_id\": self.get_sync_conn_id()\n }\n\n ret_code, msg, snapshot_list = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n equity_col_list = ['issued_shares',\n 'total_market_val',\n 'net_asset',\n 'net_profit',\n 'earning_per_share',\n 'outstanding_shares',\n 'circular_market_val',\n 'net_asset_per_share',\n 'ey_ratio',\n 'pe_ratio',\n 'pb_ratio',\n 'pe_ttm_ratio'\n ]\n wrt_col_list = ['wrt_conversion_ratio',\n 'wrt_type',\n 'wrt_strike_price',\n 'wrt_maturity_date',\n 'wrt_end_trade',\n 'wrt_recovery_price',\n 'wrt_street_vol',\n 'wrt_issue_vol',\n 'wrt_street_ratio',\n 'wrt_delta',\n 'wrt_implied_volatility',\n 'wrt_premium'\n ]\n option_col_list = ['option_type',\n 'strike_time',\n 'option_strike_price',\n 'option_contract_size',\n 'option_open_interest',\n 'option_implied_volatility',\n 'option_premium',\n 'option_delta',\n 'option_gamma',\n 'option_vega',\n 'option_theta',\n 'option_rho'\n ]\n col_list = [\n 'code',\n 'update_time',\n 'last_price',\n 'open_price',\n 'high_price',\n 'low_price',\n 'prev_close_price',\n 'volume',\n 'turnover',\n 'turnover_rate',\n 'suspension',\n 'listing_date',\n 'lot_size',\n 'price_spread',\n 'stock_owner',\n 'ask_price',\n 'bid_price',\n 'ask_vol',\n 'bid_vol'\n ]\n\n col_list.append('equity_valid')\n col_list.extend(equity_col_list)\n col_list.append('wrt_valid')\n col_list.extend(wrt_col_list)\n col_list.append('option_valid')\n col_list.extend(option_col_list)\n\n snapshot_frame_table = pd.DataFrame(snapshot_list, columns=col_list)\n\n return RET_OK, snapshot_frame_table\n\n def get_rt_data(self, code):\n \"\"\"\n 获取指定股票的分时数据\n\n :param code: 股票代码,例如,HK.00700,US.APPL\n :return: (ret, data)\n\n ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下\n\n ret != RET_OK 返回错误字符串\n\n ===================== =========== ==========================================================================\n 参数 类型 说明\n ===================== =========== ==========================================================================\n code str 股票代码\n time str 时间(yyyy-MM-dd HH:mm:ss)(美股默认是美东时间,港股A股默认是北京时间)\n is_blank bool 数据状态;正常数据为False,伪造数据为True\n opened_mins int 零点到当前多少分钟\n cur_price float 当前价格\n last_close float 昨天收盘的价格\n avg_price float 平均价格\n volume float 成交量\n turnover float 成交金额\n ===================== =========== ==========================================================================\n \"\"\"\n if code is None or is_str(code) is False:\n error_str = ERROR_STR_PREFIX + \"the type of param in code is wrong\"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(\n RtDataQuery.pack_req, RtDataQuery.unpack_rsp)\n kargs = {\n \"code\": code,\n \"conn_id\": self.get_sync_conn_id()\n }\n\n ret_code, msg, rt_data_list = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n for x in rt_data_list:\n x['code'] = code\n\n col_list = [\n 'code', 'time', 'is_blank', 'opened_mins', 'cur_price',\n 'last_close', 'avg_price', 'volume', 'turnover'\n ]\n\n rt_data_table = pd.DataFrame(rt_data_list, columns=col_list)\n\n return RET_OK, rt_data_table\n\n def get_plate_list(self, market, plate_class):\n \"\"\"\n 获取板块集合下的子板块列表\n\n :param market: 市场标识,注意这里不区分沪,深,输入沪或者深都会返回沪深市场的子板块(这个是和客户端保持一致的)参见Market\n :param plate_class: 板块分类,参见Plate\n :return: ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 
数据列格式如下\n\n ret != RET_OK 返回错误字符串\n\n ===================== =========== ==============================================================\n 参数 类型 说明\n ===================== =========== ==============================================================\n code str 股票代码\n plate_name str 板块名字\n plate_id str 板块id\n ===================== =========== ==============================================================\n \"\"\"\n param_table = {'market': market, 'plate_class': plate_class}\n for x in param_table:\n param = param_table[x]\n if param is None or is_str(market) is False:\n error_str = ERROR_STR_PREFIX + \"the type of market param is wrong\"\n return RET_ERROR, error_str\n\n if market not in MKT_MAP:\n error_str = ERROR_STR_PREFIX + \"the value of market param is wrong \"\n return RET_ERROR, error_str\n\n if plate_class not in PLATE_CLASS_MAP:\n error_str = ERROR_STR_PREFIX + \"the class of plate is wrong\"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(\n SubplateQuery.pack_req, SubplateQuery.unpack_rsp)\n kargs = {\n 'market': market,\n 'plate_class': plate_class,\n 'conn_id': self.get_sync_conn_id()\n }\n\n ret_code, msg, subplate_list = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n col_list = ['code', 'plate_name', 'plate_id']\n\n subplate_frame_table = pd.DataFrame(subplate_list, columns=col_list)\n\n return RET_OK, subplate_frame_table\n\n def get_plate_stock(self, plate_code):\n \"\"\"\n 获取特定板块下的股票列表\n\n :param plate_code: 板块代码, string, 例如,”SH.BK0001”,”SH.BK0002”,先利用获取子版块列表函数获取子版块代码\n :return: (ret, data)\n\n ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下\n\n ret != RET_OK 返回错误字符串\n\n ===================== =========== ==============================================================\n 参数 类型 说明\n ===================== =========== ==============================================================\n code str 股票代码\n lot_size int 每手股数\n stock_name str 股票名称\n stock_owner str 所属正股的代码\n stock_child_type str 股票子类型,参见WrtType\n stock_type str 股票类型,参见SecurityType\n list_time str 上市时间(美股默认是美东时间,港股A股默认是北京时间)\n stock_id int 股票id\n ===================== =========== ==============================================================\n \"\"\"\n if plate_code is None or is_str(plate_code) is False:\n error_str = ERROR_STR_PREFIX + \"the type of code is wrong\"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(\n PlateStockQuery.pack_req, PlateStockQuery.unpack_rsp)\n kargs = {\n \"plate_code\": plate_code,\n \"conn_id\": self.get_sync_conn_id()\n }\n\n ret_code, msg, plate_stock_list = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n col_list = [\n 'code', 'lot_size', 'stock_name', 'stock_owner',\n 'stock_child_type', 'stock_type', 'list_time', 'stock_id',\n ]\n plate_stock_table = pd.DataFrame(plate_stock_list, columns=col_list)\n\n return RET_OK, plate_stock_table\n\n def get_broker_queue(self, code):\n \"\"\"\n 获取股票的经纪队列\n\n :param code: 股票代码\n :return: (ret, bid_frame_table, ask_frame_table)或(ret, err_message)\n\n ret == RET_OK 返回pd dataframe数据,数据列格式如下\n\n ret != RET_OK 后面两项为错误字符串\n\n bid_frame_table 经纪买盘数据\n\n ===================== =========== ==============================================================\n 参数 类型 说明\n ===================== =========== ==============================================================\n code str 股票代码\n bid_broker_id int 经纪买盘id\n bid_broker_name str 经纪买盘名称\n bid_broker_pos int 经纪档位\n ===================== =========== 
==============================================================\n\n ask_frame_table 经纪卖盘数据\n\n ===================== =========== ==============================================================\n 参数 类型 说明\n ===================== =========== ==============================================================\n code str 股票代码\n ask_broker_id int 经纪卖盘id\n ask_broker_name str 经纪卖盘名称\n ask_broker_pos int 经纪档位\n ===================== =========== ==============================================================\n \"\"\"\n if code is None or is_str(code) is False:\n error_str = ERROR_STR_PREFIX + \"the type of param in code is wrong\"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(\n BrokerQueueQuery.pack_req, BrokerQueueQuery.unpack_rsp)\n kargs = {\n \"code\": code,\n \"conn_id\": self.get_sync_conn_id()\n }\n\n ret_code, ret_msg, content = query_processor(**kargs)\n if ret_code != RET_OK:\n return ret_code, ret_msg, ret_msg\n\n (_, bid_list, ask_list) = content\n col_bid_list = [\n 'code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos'\n ]\n col_ask_list = [\n 'code', 'ask_broker_id', 'ask_broker_name', 'ask_broker_pos'\n ]\n\n bid_frame_table = pd.DataFrame(bid_list, columns=col_bid_list)\n ask_frame_table = pd.DataFrame(ask_list, columns=col_ask_list)\n return RET_OK, bid_frame_table, ask_frame_table\n\n def _check_subscribe_param(self, code_list, subtype_list):\n\n code_list = unique_and_normalize_list(code_list)\n subtype_list = unique_and_normalize_list(subtype_list)\n\n if len(code_list) == 0:\n msg = ERROR_STR_PREFIX + 'code_list is null'\n return RET_ERROR, msg, code_list, subtype_list\n\n if len(subtype_list) == 0:\n msg = ERROR_STR_PREFIX + 'subtype_list is null'\n return RET_ERROR, msg, code_list, subtype_list\n\n for subtype in subtype_list:\n if subtype not in SUBTYPE_MAP:\n subtype_str = ','.join([x for x in SUBTYPE_MAP])\n msg = ERROR_STR_PREFIX + 'subtype is %s , which is wrong. (%s)' % (\n subtype, subtype_str)\n return RET_ERROR, msg, code_list, subtype_list\n\n for code in code_list:\n ret, msg = split_stock_str(code)\n if ret != RET_OK:\n return RET_ERROR, msg, code_list, subtype_list\n\n return RET_OK, \"\", code_list, subtype_list\n\n def subscribe(self, code_list, subtype_list, is_first_push=True, subscribe_push=True):\n \"\"\"\n 订阅注册需要的实时信息,指定股票和订阅的数据类型即可\n\n 注意:len(code_list) * 订阅的K线类型的数量 <= 100\n\n :param code_list: 需要订阅的股票代码列表\n :param subtype_list: 需要订阅的数据类型列表,参见SubType\n :param is_first_push: 订阅成功后是否马上推送一次数据\n :param subscribe_push: 订阅后不推送\n :return: (ret, err_message)\n\n ret == RET_OK err_message为None\n\n ret != RET_OK err_message为错误描述字符串\n :example:\n\n .. 
code:: python\n\n from futu import *\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)\n print(quote_ctx.subscribe(['HK.00700'], [SubType.QUOTE)])\n quote_ctx.close()\n \"\"\"\n return self._subscribe_impl(code_list, subtype_list, is_first_push, subscribe_push)\n\n def _subscribe_impl(self, code_list, subtype_list, is_first_push, subscribe_push=True):\n\n ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list)\n if ret != RET_OK:\n return ret, msg\n\n kline_sub_count = 0\n for sub_type in subtype_list:\n if sub_type in KLINE_SUBTYPE_LIST:\n kline_sub_count += 1\n\n # if kline_sub_count * len(code_list) > MAX_KLINE_SUB_COUNT:\n # return RET_ERROR, 'Too many subscription'\n\n query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_subscribe_req,\n SubscriptionQuery.unpack_subscribe_rsp)\n\n kargs = {\n 'code_list': code_list,\n 'subtype_list': subtype_list,\n 'conn_id': self.get_sync_conn_id(),\n 'is_first_push': is_first_push,\n 'subscribe_push': subscribe_push\n }\n ret_code, msg, _ = query_processor(**kargs)\n\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n for subtype in subtype_list:\n if subtype not in self._ctx_subscribe:\n self._ctx_subscribe[subtype] = set()\n code_set = self._ctx_subscribe[subtype]\n code_set.update(code_list)\n #\n # ret_code, msg, push_req_str = SubscriptionQuery.pack_push_req(\n # code_list, subtype_list, self.get_async_conn_id(), is_first_push)\n #\n # if ret_code != RET_OK:\n # return RET_ERROR, msg\n #\n # ret_code, msg = self._send_async_req(push_req_str)\n # if ret_code != RET_OK:\n # return RET_ERROR, msg\n\n return RET_OK, None\n\n def _reconnect_subscribe(self, code_list, subtype_list):\n\n # 将k线定阅和其它定阅区分开来\n kline_sub_list = []\n other_sub_list = []\n for sub in subtype_list:\n if sub in KLINE_SUBTYPE_LIST:\n kline_sub_list.append(sub)\n else:\n other_sub_list.append(sub)\n\n # 连接断开时,可能会有大批股票需要重定阅,分次定阅,提高成功率\n kline_sub_one_size = 1\n if len(kline_sub_list) > 0:\n kline_sub_one_size = math.floor(100 / len(kline_sub_list))\n\n sub_info_list = [\n {\"sub_list\": kline_sub_list, \"one_size\": kline_sub_one_size},\n {\"sub_list\": other_sub_list, \"one_size\": 100},\n ]\n\n ret_code = RET_OK\n ret_data = None\n\n for info in sub_info_list:\n sub_list = info[\"sub_list\"]\n one_size = info[\"one_size\"]\n all_count = len(code_list)\n start_idx = 0\n\n while start_idx < all_count and len(sub_list):\n sub_count = one_size if start_idx + one_size <= all_count else (all_count - start_idx)\n sub_codes = code_list[start_idx: start_idx + sub_count]\n start_idx += sub_count\n\n ret_code, ret_data = self._subscribe_impl(sub_codes, sub_list, False)\n if ret_code != RET_OK:\n break\n if ret_code != RET_OK:\n break\n\n return ret_code, ret_data\n\n def unsubscribe(self, code_list, subtype_list):\n \"\"\"\n 取消订阅\n :param code_list: 取消订阅的股票代码列表\n :param subtype_list: 取消订阅的类型,参见SubType\n :return: (ret, err_message)\n\n ret == RET_OK err_message为None\n\n ret != RET_OK err_message为错误描述字符串\n \"\"\"\n\n ret, msg, code_list, subtype_list = self._check_subscribe_param(code_list, subtype_list)\n if ret != RET_OK:\n return ret, msg\n\n query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_unsubscribe_req,\n SubscriptionQuery.unpack_unsubscribe_rsp)\n\n kargs = {\n 'code_list': code_list,\n 'subtype_list': subtype_list,\n \"conn_id\": self.get_sync_conn_id()\n }\n\n for subtype in subtype_list:\n if subtype not in self._ctx_subscribe:\n continue\n code_set = self._ctx_subscribe[subtype]\n for code in 
code_list:\n if code not in code_set:\n continue\n code_set.remove(code)\n\n ret_code, msg, _ = query_processor(**kargs)\n\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n ret_code, msg, unpush_req_str = SubscriptionQuery.pack_unpush_req(code_list, subtype_list, self.get_async_conn_id())\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n ret_code, msg = self._send_async_req(unpush_req_str)\n if ret_code != RET_OK:\n return RET_ERROR, msg\n\n return RET_OK, None\n\n def query_subscription(self, is_all_conn=True):\n \"\"\"\n 查询已订阅的实时信息\n\n :param is_all_conn: 是否返回所有连接的订阅状态,不传或者传False只返回当前连接数据\n :return: (ret, data)\n\n ret != RET_OK 返回错误字符串\n\n ret == RET_OK 返回 定阅信息的字典数据 ,格式如下:\n\n {\n 'total_used': 4, # 所有连接已使用的定阅额度\n\n 'own_used': 0, # 当前连接已使用的定阅额度\n\n 'remain': 496, # 剩余的定阅额度\n\n 'sub_list': # 每种定阅类型对应的股票列表\n\n {\n 'BROKER': ['HK.00700', 'HK.02318'],\n\n 'RT_DATA': ['HK.00700', 'HK.02318']\n }\n }\n \"\"\"\n is_all_conn = bool(is_all_conn)\n query_processor = self._get_sync_query_processor(\n SubscriptionQuery.pack_subscription_query_req,\n SubscriptionQuery.unpack_subscription_query_rsp)\n kargs = {\n \"is_all_conn\": is_all_conn,\n \"conn_id\": self.get_sync_conn_id()\n }\n\n ret_code, msg, sub_table = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n ret_dict = {}\n ret_dict['total_used'] = sub_table['total_used']\n ret_dict['remain'] = sub_table['remain']\n ret_dict['own_used'] = 0\n ret_dict['sub_list'] = {}\n for conn_sub in sub_table['conn_sub_list']:\n\n is_own_conn = conn_sub['is_own_conn']\n if is_own_conn:\n ret_dict['own_used'] = conn_sub['used']\n if not is_all_conn and not is_own_conn:\n continue\n\n for sub_info in conn_sub['sub_list']:\n subtype = sub_info['subtype']\n\n if subtype not in ret_dict['sub_list']:\n ret_dict['sub_list'][subtype] = []\n code_list = ret_dict['sub_list'][subtype]\n\n for code in sub_info['code_list']:\n if code not in code_list:\n code_list.append(code)\n\n return RET_OK, ret_dict\n\n def get_stock_quote(self, code_list):\n \"\"\"\n 获取订阅股票报价的实时数据,有订阅要求限制。\n\n 对于异步推送,参见StockQuoteHandlerBase\n\n :param code_list: 股票代码列表,必须确保code_list中的股票均订阅成功后才能够执行\n :return: (ret, data)\n\n ret == RET_OK 返回pd dataframe数据,数据列格式如下\n\n ret != RET_OK 返回错误字符串\n\n ===================== =========== ==============================================================\n 参数 类型 说明\n ===================== =========== ==============================================================\n code str 股票代码\n data_date str 日期\n data_time str 时间(美股默认是美东时间,港股A股默认是北京时间)\n last_price float 最新价格\n open_price float 今日开盘价\n high_price float 最高价格\n low_price float 最低价格\n prev_close_price float 昨收盘价格\n volume int 成交数量\n turnover float 成交金额\n turnover_rate float 换手率\n amplitude int 振幅\n suspension bool 是否停牌(True表示停牌)\n listing_date str 上市日期 (yyyy-MM-dd)\n price_spread float 当前价差,亦即摆盘数据的买档或卖档的相邻档位的报价差\n dark_status str 暗盘交易状态,见DarkStatus\n strike_price float 行权价\n contract_size int 每份合约数\n open_interest int 未平仓合约数\n implied_volatility float 隐含波动率\n premium float 溢价\n delta float 希腊值 Delta\n gamma float 希腊值 Gamma\n vega float 希腊值 Vega\n theta float 希腊值 Theta\n rho float 希腊值 Rho\n ===================== =========== ==============================================================\n\n \"\"\"\n code_list = unique_and_normalize_list(code_list)\n if not code_list:\n error_str = ERROR_STR_PREFIX + \"the type of code_list param is wrong\"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(\n StockQuoteQuery.pack_req,\n StockQuoteQuery.unpack_rsp,\n 
)\n kargs = {\n \"stock_list\": code_list,\n \"conn_id\": self.get_sync_conn_id()\n }\n\n ret_code, msg, quote_list = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n col_list = [\n 'code', 'data_date', 'data_time', 'last_price', 'open_price',\n 'high_price', 'low_price', 'prev_close_price', 'volume',\n 'turnover', 'turnover_rate', 'amplitude', 'suspension',\n 'listing_date', 'price_spread', 'dark_status', 'strike_price',\n 'contract_size', 'open_interest', 'implied_volatility',\n 'premium', 'delta', 'gamma', 'vega', 'theta', 'rho'\n ]\n\n quote_frame_table = pd.DataFrame(quote_list, columns=col_list)\n\n return RET_OK, quote_frame_table\n\n def get_rt_ticker(self, code, num=500):\n \"\"\"\n 获取指定股票的实时逐笔。取最近num个逐笔\n\n :param code: 股票代码\n :param num: 最近ticker个数(有最大个数限制,最近1000个)\n :return: (ret, data)\n\n ret == RET_OK 返回pd dataframe数据,数据列格式如下\n\n ret != RET_OK 返回错误字符串\n\n ===================== =========== ==============================================================\n 参数 类型 说明\n ===================== =========== ==============================================================\n stock_code str 股票代码\n sequence int 逐笔序号\n time str 成交时间(美股默认是美东时间,港股A股默认是北京时间)\n price float 成交价格\n volume int 成交数量(股数)\n turnover float 成交金额\n ticker_direction str 逐笔方向\n type str 逐笔类型,参见TickerType\n ===================== =========== ==============================================================\n \"\"\"\n\n if code is None or is_str(code) is False:\n error_str = ERROR_STR_PREFIX + \"the type of code param is wrong\"\n return RET_ERROR, error_str\n\n if num is None or isinstance(num, int) is False:\n error_str = ERROR_STR_PREFIX + \"the type of num param is wrong\"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(\n TickerQuery.pack_req,\n TickerQuery.unpack_rsp,\n )\n kargs = {\n \"code\": code,\n \"num\": num,\n \"conn_id\": self.get_sync_conn_id()\n }\n ret_code, msg, ticker_list = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n col_list = [\n 'code', 'time', 'price', 'volume', 'turnover', \"ticker_direction\",\n 'sequence', 'type'\n ]\n ticker_frame_table = pd.DataFrame(ticker_list, columns=col_list)\n\n return RET_OK, ticker_frame_table\n\n def get_cur_kline(self, code, num, ktype=SubType.K_DAY, autype=AuType.QFQ):\n \"\"\"\n 实时获取指定股票最近num个K线数据,最多1000根\n\n :param code: 股票代码\n :param num: k线数据个数\n :param ktype: k线类型,参见KLType\n :param autype: 复权类型,参见AuType\n :return: (ret, data)\n\n ret == RET_OK 返回pd dataframe数据,数据列格式如下\n\n ret != RET_OK 返回错误字符串\n\n ===================== =========== ==============================================================\n 参数 类型 说明\n ===================== =========== ==============================================================\n code str 股票代码\n time_key str 时间(美股默认是美东时间,港股A股默认是北京时间)\n open float 开盘价\n close float 收盘价\n high float 最高价\n low float 最低价\n volume int 成交量\n turnover float 成交额\n pe_ratio float 市盈率(该字段为比例字段,默认不展示%)\n turnover_rate float 换手率\n ===================== =========== ==============================================================\n \"\"\"\n param_table = {'code': code, 'ktype': ktype}\n for x in param_table:\n param = param_table[x]\n if param is None or is_str(param) is False:\n error_str = ERROR_STR_PREFIX + \"the type of %s param is wrong\" % x\n return RET_ERROR, error_str\n\n if num is None or isinstance(num, int) is False:\n error_str = ERROR_STR_PREFIX + \"the type of num param is wrong\"\n return RET_ERROR, error_str\n\n if autype is not None and is_str(autype) is 
False:\n error_str = ERROR_STR_PREFIX + \"the type of autype param is wrong\"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(\n CurKlineQuery.pack_req,\n CurKlineQuery.unpack_rsp,\n )\n\n kargs = {\n \"code\": code,\n \"num\": num,\n \"ktype\": ktype,\n \"autype\": autype,\n \"conn_id\": self.get_sync_conn_id()\n }\n ret_code, msg, kline_list = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n col_list = [\n 'code', 'time_key', 'open', 'close', 'high', 'low', 'volume',\n 'turnover', 'pe_ratio', 'turnover_rate'\n ]\n kline_frame_table = pd.DataFrame(kline_list, columns=col_list)\n\n return RET_OK, kline_frame_table\n\n def get_order_book(self, code):\n \"\"\"\n 获取实时摆盘数据\n\n :param code: 股票代码\n :return: (ret, data)\n\n ret == RET_OK 返回字典,数据格式如下\n\n ret != RET_OK 返回错误字符串\n\n {‘code’: 股票代码\n ‘Ask’:[ (ask_price1, ask_volume1,order_num), (ask_price2, ask_volume2, order_num),…]\n ‘Bid’: [ (bid_price1, bid_volume1, order_num), (bid_price2, bid_volume2, order_num),…]\n }\n\n 'Ask':卖盘, 'Bid'买盘。每个元组的含义是(委托价格,委托数量,委托订单数)\n \"\"\"\n if code is None or is_str(code) is False:\n error_str = ERROR_STR_PREFIX + \"the type of code param is wrong\"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(\n OrderBookQuery.pack_req,\n OrderBookQuery.unpack_rsp,\n )\n\n kargs = {\n \"code\": code,\n \"conn_id\": self.get_sync_conn_id()\n }\n ret_code, msg, orderbook = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n return RET_OK, orderbook\n\n def get_multi_points_history_kline(self,\n code_list,\n dates,\n fields,\n ktype=KLType.K_DAY,\n autype=AuType.QFQ,\n no_data_mode=KLNoDataMode.FORWARD):\n '''\n 从本地历史K线中获取多支股票多个时间点的指定数据列\n\n :param code_list: 单个或多个股票 'HK.00700' or ['HK.00700', 'HK.00001']\n :param dates: 单个或多个日期 '2017-01-01' or ['2017-01-01', '2017-01-02']\n :param fields: 单个或多个数据列 KL_FIELD.ALL or [KL_FIELD.DATE_TIME, KL_FIELD.OPEN]\n :param ktype: K线类型\n :param autype: 复权类型\n :param no_data_mode: 指定时间为非交易日时,对应的k线数据取值模式,参见KLNoDataMode\n :return: (ret, data)\n\n ret == RET_OK 返回pd dataframe数据,固定表头包括'code'(代码) 'time_point'(指定的日期) 'data_status' (KLDataStatus)。数据列格式如下\n\n ret != RET_OK 返回错误字符串\n\n ================= =========== ==============================================================================\n 参数 类型 说明\n ================= =========== ==============================================================================\n code str 股票代码\n time_point str 请求的时间\n data_status str 数据点是否有效,参见KLDataStatus\n time_key str k线时间(美股默认是美东时间,港股A股默认是北京时间)\n open float 开盘价\n close float 收盘价\n high float 最高价\n low float 最低价\n pe_ratio float 市盈率(该字段为比例字段,默认不展示%)\n turnover_rate float 换手率\n volume int 成交量\n turnover float 成交额\n change_rate float 涨跌幅\n last_close float 昨收价\n ================= =========== ==============================================================================\n '''\n req_codes = unique_and_normalize_list(code_list)\n if not code_list:\n error_str = ERROR_STR_PREFIX + \"the type of code param is wrong\"\n return RET_ERROR, error_str\n\n req_dates = unique_and_normalize_list(dates)\n if not dates:\n error_str = ERROR_STR_PREFIX + \"the type of dates param is wrong\"\n return RET_ERROR, error_str\n\n req_fields = unique_and_normalize_list(fields)\n if not fields:\n req_fields = copy(KL_FIELD.ALL_REAL)\n req_fields = KL_FIELD.normalize_field_list(req_fields)\n if not req_fields:\n error_str = ERROR_STR_PREFIX + \"the type of fields param is wrong\"\n return RET_ERROR, error_str\n\n 
query_processor = self._get_sync_query_processor(\n MultiPointsHisKLine.pack_req, MultiPointsHisKLine.unpack_rsp)\n\n # 一次性最多取100支股票的数据\n max_req_code_num = 50\n\n data_finish = False\n list_ret = []\n # 循环请求数据,避免一次性取太多超时\n while not data_finish:\n logger.debug('get_multi_points_history_kline - wait ... %s' % datetime.now())\n kargs = {\n \"code_list\": req_codes,\n \"dates\": req_dates,\n \"fields\": copy(req_fields),\n \"ktype\": ktype,\n \"autype\": autype,\n \"max_req\": max_req_code_num,\n \"no_data_mode\": int(no_data_mode),\n \"conn_id\": self.get_sync_conn_id()\n }\n ret_code, msg, content = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n list_kline, has_next = content\n data_finish = (not has_next)\n\n for dict_item in list_kline:\n item_code = dict_item['code']\n list_ret.append(dict_item)\n if item_code in req_codes:\n req_codes.remove(item_code)\n\n if 0 == len(req_codes):\n data_finish = True\n\n # 表头列\n col_list = ['code', 'time_point', 'data_status']\n for field in req_fields:\n str_field = KL_FIELD.DICT_KL_FIELD_STR[field]\n if str_field not in col_list:\n col_list.append(str_field)\n\n pd_frame = pd.DataFrame(list_ret, columns=col_list)\n\n return RET_OK, pd_frame\n\n def get_referencestock_list(self, code, reference_type):\n \"\"\"\n 获取证券的关联数据\n :param code: 证券id,str,例如HK.00700\n :param reference_type: 要获得的相关数据,参见SecurityReferenceType。例如WARRANT,表示获取正股相关的涡轮\n :return: (ret, data)\n\n ret == RET_OK 返回pd dataframe数据,数据列格式如下\n\n ret != RET_OK 返回错误字符串\n ================= =========== ==============================================================================\n 参数 类型 说明\n ================= =========== ==============================================================================\n code str 证券代码\n lot_size int 每手数量\n stock_type str 证券类型,参见SecurityType\n stock_name str 证券名字\n list_time str 上市时间(美股默认是美东时间,港股A股默认是北京时间)\n wrt_valid bool 是否是涡轮,如果为True,下面wrt开头的字段有效\n wrt_type str 涡轮类型,参见WrtType\n wrt_code str 所属正股\n ================= =========== ==============================================================================\n\n \"\"\"\n if code is None or is_str(code) is False:\n error_str = ERROR_STR_PREFIX + \"the type of code param is wrong\"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(\n StockReferenceList.pack_req,\n StockReferenceList.unpack_rsp,\n )\n\n kargs = {\n \"code\": code,\n 'ref_type': reference_type,\n \"conn_id\": self.get_sync_conn_id()\n }\n ret_code, msg, data_list = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n col_list = [\n 'code', 'lot_size', 'stock_type', 'stock_name', 'list_time', 'wrt_valid', 'wrt_type', 'wrt_code'\n ]\n\n pd_frame = pd.DataFrame(data_list, columns=col_list)\n return RET_OK, pd_frame\n\n def get_owner_plate(self, code_list):\n \"\"\"\n 获取单支或多支股票的所属板块信息列表\n\n :param code_list: 股票代码列表,仅支持正股、指数。list或str。例如:['HK.00700', 'HK.00001']或者'HK.00700,HK.00001'。\n :return: (ret, data)\n\n ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下\n\n ret != RET_OK 返回错误字符串\n\n ===================== =========== ==============================================================\n 参数 类型 说明\n ===================== =========== ==============================================================\n code str 证券代码\n plate_code str 板块代码\n plate_name str 板块名字\n plate_type str 板块类型(行业板块或概念板块),futu.common.constant.Plate\n ===================== =========== ==============================================================\n \"\"\"\n if is_str(code_list):\n code_list = 
code_list.split(',')\n elif isinstance(code_list, list):\n pass\n else:\n return RET_ERROR, \"code list must be like ['HK.00001', 'HK.00700'] or 'HK.00001,HK.00700'\"\n\n code_list = unique_and_normalize_list(code_list)\n for code in code_list:\n if code is None or is_str(code) is False:\n error_str = ERROR_STR_PREFIX + \"the type of param in code_list is wrong\"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(\n OwnerPlateQuery.pack_req, OwnerPlateQuery.unpack_rsp)\n kargs = {\n \"code_list\": code_list,\n \"conn_id\": self.get_sync_conn_id()\n }\n\n ret_code, msg, owner_plate_list = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n col_list = [\n 'code', 'plate_code', 'plate_name', 'plate_type'\n ]\n\n owner_plate_table = pd.DataFrame(owner_plate_list, columns=col_list)\n\n return RET_OK, owner_plate_table\n\n def get_holding_change_list(self, code, holder_type, start=None, end=None):\n \"\"\"\n 获取大股东持股变动列表,只提供美股数据\n\n :param code: 股票代码. 例如:'US.AAPL'\n :param holder_type: 持有者类别,StockHolder_\n :param start: 开始时间. 例如:'2016-10-01'\n :param end: 结束时间,例如:'2017-10-01'。\n start与end的组合如下:\n ========== ========== ========================================\n start类型 end类型 说明\n ========== ========== ========================================\n str str start和end分别为指定的日期\n None str start为end往前365天\n str None end为start往后365天\n None None end为当前日期,start为end往前365天\n ========== ========== ========================================\n\n :return: (ret, data)\n\n ret == RET_OK 返回pd dataframe数据,data.DataFrame数据, 数据列格式如下\n\n ret != RET_OK 返回错误字符串\n\n ===================== =========== ==============================================================\n 参数 类型 说明\n ===================== =========== ==============================================================\n holder_name str 高管名称\n holding_qty float 持股数\n holding_ratio float 持股比例(该字段为比例字段,默认不展示%)\n change_qty float 变动数\n change_ratio float 变动比例(该字段为比例字段,默认不展示%)\n time str 发布时间(美股的时间默认是美东)\n ===================== =========== ==============================================================\n \"\"\"\n holder_type = STOCK_HOLDER_CLASS_MAP[holder_type]\n if code is None or is_str(code) is False:\n msg = ERROR_STR_PREFIX + \"the type of code param is wrong\"\n return RET_ERROR, msg\n\n if holder_type < 1 or holder_type > len(STOCK_HOLDER_CLASS_MAP):\n msg = ERROR_STR_PREFIX + \"the type {0} is wrong, total number of types is {1}\".format(holder_type, len(STOCK_HOLDER_CLASS_MAP))\n return RET_ERROR, msg\n\n ret_code, msg, start, end = normalize_start_end_date(start, end, delta_days=365)\n if ret_code != RET_OK:\n return ret_code, msg\n\n query_processor = self._get_sync_query_processor(\n HoldingChangeList.pack_req, HoldingChangeList.unpack_rsp)\n kargs = {\n \"code\": code,\n \"holder_type\": holder_type,\n \"conn_id\": self.get_sync_conn_id(),\n \"start_date\": start,\n \"end_date\": end\n }\n\n ret_code, msg, owner_plate_list = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n col_list = [\n 'holder_name', 'holding_qty', 'holding_ratio', 'change_qty', 'change_ratio', 'time'\n ]\n\n holding_change_list = pd.DataFrame(owner_plate_list, columns=col_list)\n\n return RET_OK, holding_change_list\n\n def get_option_chain(self, code, start=None, end=None, option_type=OptionType.ALL, option_cond_type=OptionCondType.ALL):\n \"\"\"\n 通过标的股查询期权\n\n :param code: 股票代码,例如:'HK.02318'\n :param start: 开始日期,该日期指到期日,例如'2017-08-01'\n :param end: 结束日期(包括这一天),该日期指到期日,例如'2017-08-30'。 注意,时间范围最多30天\n 
start和end的组合如下:\n ========== ========== ========================================\n start类型 end类型 说明\n ========== ========== ========================================\n str str start和end分别为指定的日期\n None str start为end往前30天\n str None end为start往后30天\n None None start为当前日期,end往后30天\n ========== ========== ========================================\n :param option_type: 期权类型,默认全部,全部/看涨/看跌,futu.common.constant.OptionType\n :param option_cond_type: 默认全部,全部/价内/价外,futu.common.constant.OptionCondType\n :return: (ret, data)\n\n ret == RET_OK 返回pd dataframe数据,数据列格式如下\n\n ret != RET_OK 返回错误字符串\n\n ================== =========== ==============================================================\n 参数 类型 说明\n ================== =========== ==============================================================\n code str 股票代码\n name str 名字\n lot_size int 每手数量\n stock_type str 股票类型,参见SecurityType\n option_type str 期权类型,Qot_Common.OptionType\n stock_owner str 标的股\n strike_time str 行权日(美股默认是美东时间,港股A股默认是北京时间)\n strike_price float 行权价\n suspension bool 是否停牌(True表示停牌)\n stock_id int 股票id\n ================== =========== ==============================================================\n\n \"\"\"\n\n if code is None or is_str(code) is False:\n error_str = ERROR_STR_PREFIX + \"the type of code param is wrong\"\n return RET_ERROR, error_str\n\n ret_code, msg, start, end = normalize_start_end_date(start, end, delta_days=29, default_time_end='00:00:00', prefer_end_now=False)\n if ret_code != RET_OK:\n return ret_code, msg\n\n query_processor = self._get_sync_query_processor(\n OptionChain.pack_req, OptionChain.unpack_rsp)\n kargs = {\n \"code\": code,\n \"conn_id\": self.get_sync_conn_id(),\n \"start_date\": start,\n \"end_date\": end,\n \"option_cond_type\": option_cond_type,\n \"option_type\": option_type\n }\n\n ret_code, msg, option_chain_list = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n col_list = [\n 'code', 'name', 'lot_size', 'stock_type',\n 'option_type', 'stock_owner', 'strike_time', 'strike_price', 'suspension',\n 'stock_id'\n ]\n\n option_chain = pd.DataFrame(option_chain_list, columns=col_list)\n\n option_chain.sort_values(by=[\"strike_time\", \"strike_price\"], axis=0, ascending=True, inplace=True)\n option_chain.index = range(len(option_chain))\n\n return RET_OK, option_chain\n\n def get_order_detail(self, code):\n return RET_ERROR, \"this service has been cancelled\"\n\n \"\"\"\n 查询A股Level 2权限下提供的委托明细\n\n :param code: 股票代码,例如:'HK.02318'\n :return: (ret, data)\n\n ret == RET_OK data为1个dict,包含以下数据\n\n ret != RET_OK data为错误字符串\n\n {‘code’: 股票代码\n ‘Ask’:[ order_num, [order_volume1, order_volume2] ]\n ‘Bid’: [ order_num, [order_volume1, order_volume2] ]\n }\n\n 'Ask':卖盘, 'Bid'买盘。order_num指委托订单数量,order_volume是每笔委托的委托量,当前最多返回前50笔委托的委托数量。即order_num有可能多于后面的order_volume\n \"\"\"\n\n if code is None or is_str(code) is False:\n error_str = ERROR_STR_PREFIX + \"the type of code param is wrong\"\n return RET_ERROR, error_str\n\n query_processor = self._get_sync_query_processor(\n OrderDetail.pack_req, OrderDetail.unpack_rsp)\n kargs = {\n \"code\": code,\n \"conn_id\": self.get_sync_conn_id()\n }\n\n ret_code, msg, order_detail = query_processor(**kargs)\n if ret_code == RET_ERROR:\n return ret_code, msg\n\n return RET_OK, order_detail\n\n\n def get_warrant(self, stock_owner='', req=None):\n \"\"\"\n :param stock_owner:所属正股\n :param req:futu.quote.quote_get_warrant.Request\n \"\"\"\n from futu.quote.quote_get_warrant import Request\n\n if (req is None) or (not isinstance(req, Request)):\n req = 
Request()\n\n if stock_owner is not None:\n req.stock_owner = stock_owner\n\n\n query_processor = self._get_sync_query_processor(QuoteWarrant.pack_req, QuoteWarrant.unpack_rsp)\n kargs = {\n \"req\": req,\n \"conn_id\": self.get_sync_conn_id()\n }\n ret_code, msg, content = query_processor(**kargs)\n if ret_code != RET_OK:\n return ret_code, msg\n else:\n warrant_data_list, last_page, all_count = content\n col_list = ['stock', 'name', 'stock_owner', 'type', 'issuer', 'maturity_time',\n 'list_time', 'last_trade_time', 'recovery_price', 'conversion_ratio',\n 'lot_size', 'strike_price', 'last_close_price', 'cur_price', 'price_change_val', 'change_rate',\n 'status', 'bid_price', 'ask_price', 'bid_vol', 'ask_vol', 'volume', 'turnover', 'score',\n 'premium', 'break_even_point', 'leverage', 'ipop', 'price_recovery_ratio', 'conversion_price',\n 'street_rate', 'street_vol', 'amplitude', 'issue_size', 'high_price', 'low_price',\n 'implied_volatility', 'delta', 'effective_leverage', 'list_timestamp', 'last_trade_timestamp',\n 'maturity_timestamp']\n warrant_data_frame = pd.DataFrame(warrant_data_list, columns=col_list)\n #1120400921001028854\n return ret_code, (warrant_data_frame, last_page, all_count)\n\n\n\n\n"
] |
[
[
"pandas.DataFrame"
]
] |
david8862/keras-CenterNet
|
[
"e74b933f6dd5ffac04f2de3eb0d887742be8490f"
] |
[
"utils/setup.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport setuptools\nfrom setuptools.extension import Extension\nfrom distutils.command.build_ext import build_ext as DistUtilsBuildExt\n\n\nclass BuildExtension(setuptools.Command):\n description = DistUtilsBuildExt.description\n user_options = DistUtilsBuildExt.user_options\n boolean_options = DistUtilsBuildExt.boolean_options\n help_options = DistUtilsBuildExt.help_options\n\n def __init__(self, *args, **kwargs):\n from setuptools.command.build_ext import build_ext as SetupToolsBuildExt\n\n # Bypass __setatrr__ to avoid infinite recursion.\n self.__dict__['_command'] = SetupToolsBuildExt(*args, **kwargs)\n\n def __getattr__(self, name):\n return getattr(self._command, name)\n\n def __setattr__(self, name, value):\n setattr(self._command, name, value)\n\n def initialize_options(self, *args, **kwargs):\n return self._command.initialize_options(*args, **kwargs)\n\n def finalize_options(self, *args, **kwargs):\n ret = self._command.finalize_options(*args, **kwargs)\n import numpy\n self.include_dirs.append(numpy.get_include())\n return ret\n\n def run(self, *args, **kwargs):\n return self._command.run(*args, **kwargs)\n\n\nextensions = [\n Extension(\n 'compute_overlap',\n ['compute_overlap.pyx']\n ),\n]\n\n\nsetuptools.setup(\n name = 'keras-CenterNet',\n version = '0.0.1',\n description = 'Keras implementation of CenterNet object detection.',\n url = 'https://github.com/david8862/keras-CenterNet',\n author = 'david8862',\n author_email = '[email protected]',\n maintainer = 'david8862',\n maintainer_email = '[email protected]',\n cmdclass = {'build_ext': BuildExtension},\n packages = setuptools.find_packages(),\n ext_modules = extensions,\n setup_requires = [\"cython>=0.28\", \"numpy>=1.14.0\"]\n)\n"
] |
[
[
"numpy.get_include"
]
] |
qianqianjun/DCGAN
|
[
"4e2d37f1d785e592e59334b91d197ef0475c1c99"
] |
[
"main.py"
] |
[
"\"\"\"\nwrite by qianqianjun\n2019.12.20\n运行GAN进行训练的入口文件。\n\"\"\"\nimport os\nimport tensorflow as tf\nfrom train_argparse import hps\nfrom dataset_loader import train_images\nfrom data_provider import MnistData\nfrom DCGAN import DCGAN\nfrom utils import combine_imgs\n\n# 创建生成结果目录\noutput_dir='./out'\nif not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n# 创建DCGAN\ndcgan=DCGAN(hps)\n# 加载Mnist 数据集\nmnist_data=MnistData(train_images,hps.z_dim,hps.img_size)\n# 构建计算图模型\nz_placeholder,img_placeholder,generated_imgs,losses=dcgan.build()\n\n# 构建训练过程模型\ntrain_op=dcgan.build_train_op(losses,hps.learning_rate,hps.beta1)\n\n# 开始进行训练~ :\ninit_op=tf.global_variables_initializer()\n# 定义训练多少步\ntrain_steps=hps.train_steps\nwith tf.Session() as sess:\n sess.run(init_op)\n for step in range(train_steps):\n batch_imgs,batch_z=mnist_data.next_batch(hps.batch_size)\n fetches=[train_op,losses['g'],losses['d']]\n should_sample=(step+1) %100 ==0\n # 如果到了该保存中间结果的步骤,则run 的时候在 fetches 中加上生成的图像\n if should_sample:\n fetches+= [generated_imgs]\n output_values=sess.run(\n fetches,feed_dict={\n z_placeholder:batch_z,\n img_placeholder:batch_imgs,\n }\n )\n _,g_loss_val,d_loss_val=output_values[0:3]\n # 打印训练过程的损失情况\n if (step+1) %200==0:\n print('step: %4d , g_loss: %4.3f , d_loss: %4.3f' % (step, g_loss_val, d_loss_val))\n\n # 保存中间过程图片结果:\n if should_sample:\n gen_imgs_val=output_values[3]\n gen_img_path=os.path.join(output_dir,'%05d-gen.jpg' % (step+1))\n gt_img_path=os.path.join(output_dir,'%05d-gt.jpg' % (step+1))\n gen_img=combine_imgs(gen_imgs_val,hps.img_size)\n gt_img=combine_imgs(batch_imgs,hps.img_size)\n gen_img.save(gen_img_path)\n gt_img.save(gt_img_path)"
] |
[
[
"tensorflow.global_variables_initializer",
"tensorflow.Session"
]
] |
StevenTang1998/TextBox
|
[
"acd8298c7e6618384d585146f799d02cc475520c"
] |
[
"textbox/model/Seq2Seq/t5.py"
] |
[
"# @Time : 2021/3/15\n# @Author : Zhuohao Yu\n# @Email : [email protected]\n\nr\"\"\"\nT5\n################################################\nReference:\n Colin et al. \"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer\" at JMLR 2020.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.functional as F\n\nfrom textbox.model.abstract_generator import Seq2SeqGenerator\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config\n\n\nclass T5(Seq2SeqGenerator):\n\n def __init__(self, config, dataset):\n super(T5, self).__init__(config, dataset)\n\n self.pretrained_model_path = config['pretrained_model_path']\n self.tokenizer = T5Tokenizer.from_pretrained(self.pretrained_model_path)\n self.configuration = T5Config.from_pretrained(self.pretrained_model_path)\n\n self.model = T5ForConditionalGeneration.from_pretrained(self.pretrained_model_path, config=self.configuration)\n\n self.padding_token_idx = self.tokenizer.pad_token_id\n self.loss = nn.CrossEntropyLoss(ignore_index=self.padding_token_idx, reduction='none')\n if config['task_type'] == \"summarization\":\n self.t5_task_text = \"summarize: \"\n elif config['task_type'] == \"translation\":\n self.t5_task_text = \"translate German to English: \"\n else:\n raise NotImplementedError(\"Only summarization and translation are supported.\")\n\n def generate(self, batch_data, eval_data):\n source_text = batch_data['source_text']\n input_ids, attn_masks = self.tokenize_text(source_text)\n\n sample_outputs = self.model.generate(\n input_ids, attention_mask=attn_masks, num_beams=5, max_length=self.target_max_length, early_stopping=True\n )\n generated_text = self.tokenizer.batch_decode(sample_outputs, skip_special_tokens=True)\n generate_corpus = [text.lower().split() for text in generated_text]\n return generate_corpus\n\n def tokenize_text(self, text, is_target=False):\n input_ids = []\n attn_masks = []\n texts = [(self.t5_task_text if not is_target else '') + ' '.join(t) for t in text]\n encoding_dict = self.tokenizer(\n texts, max_length=self.source_max_length, padding=True, truncation=True, return_tensors=\"pt\"\n )\n\n input_ids = encoding_dict['input_ids'].to(self.device)\n attn_masks = encoding_dict['attention_mask'].to(self.device)\n\n return input_ids, attn_masks\n\n def forward(self, corpus, epoch_idx=-1):\n source_text = corpus['source_text']\n target_text = corpus['target_text']\n\n input_ids, attn_masks = self.tokenize_text(source_text)\n target_ids, decoder_attn_masks = self.tokenize_text(target_text, is_target=True)\n\n decoder_input_ids = target_ids[:, :-1].contiguous()\n decoder_attn_masks = decoder_attn_masks[:, :-1].contiguous()\n decoder_target_ids = target_ids[:, 1:].contiguous()\n\n outputs = self.model(\n input_ids,\n attention_mask=attn_masks,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attn_masks,\n use_cache=False\n )\n\n token_logits = outputs.logits\n loss = self.loss(token_logits.view(-1, token_logits.size(-1)), decoder_target_ids.view(-1))\n loss = loss.reshape_as(decoder_target_ids)\n\n length = (decoder_target_ids != self.padding_token_idx).sum(dim=1).float()\n loss = loss.sum(dim=1) / length\n\n return loss.mean()\n"
] |
[
[
"torch.nn.CrossEntropyLoss"
]
] |
comydream/OpenNMT-py
|
[
"bdca05a3fac8f864b21c86a8ad03c09895212e70"
] |
[
"onmt/translate/greedy_search.py"
] |
[
"import torch\nimport torch.nn.functional as F\n\nfrom onmt.translate.decode_strategy import DecodeStrategy\n\n\ndef sample_topp(logits, keep_topp):\n sorted_logits, sorted_indices = torch.sort(logits,\n descending=True,\n dim=1)\n\n cumulative_probs = torch.cumsum(F.softmax(sorted_logits,\n dim=-1), dim=-1)\n sorted_indices_to_keep = cumulative_probs.lt(keep_topp)\n\n # keep indices until overflowing p\n cumsum_mask = sorted_indices_to_keep.cumsum(dim=1)\n last_included = cumsum_mask[:, -1:]\n last_included.clamp_(0, sorted_indices_to_keep.size()[1] - 1)\n sorted_indices_to_keep = sorted_indices_to_keep.scatter_(\n 1, last_included, 1)\n\n # Set all logits that are not in the top-p to -10000.\n # This puts the probabilities close to 0.\n keep_indices = sorted_indices_to_keep.scatter(\n 1,\n sorted_indices,\n sorted_indices_to_keep,\n )\n return logits.masked_fill(~keep_indices, -10000)\n\n\ndef sample_topk(logits, keep_topk):\n top_values, _ = torch.topk(logits, keep_topk, dim=1)\n kth_best = top_values[:, -1].view([-1, 1])\n kth_best = kth_best.repeat([1, logits.shape[1]]).float()\n\n # Set all logits that are not in the top-k to -10000.\n # This puts the probabilities close to 0.\n ignore = torch.lt(logits, kth_best)\n return logits.masked_fill(ignore, -10000)\n\n\ndef sample_with_temperature(logits, sampling_temp, keep_topk, keep_topp):\n \"\"\"Select next tokens randomly from the top k possible next tokens.\n\n Samples from a categorical distribution over the ``keep_topk`` words using\n the category probabilities ``logits / sampling_temp``.\n\n Args:\n logits (FloatTensor): Shaped ``(batch_size, vocab_size)``.\n These can be logits (``(-inf, inf)``) or log-probs (``(-inf, 0]``).\n (The distribution actually uses the log-probabilities\n ``logits - logits.logsumexp(-1)``, which equals the logits if\n they are log-probabilities summing to 1.)\n sampling_temp (float): Used to scale down logits. The higher the\n value, the more likely it is that a non-max word will be\n sampled.\n keep_topk (int): This many words could potentially be chosen. The\n other logits are set to have probability 0.\n keep_topp (float): Keep most likely words until the cumulated\n probability is greater than p. If used with keep_topk: both\n conditions will be applied\n\n Returns:\n (LongTensor, FloatTensor):\n\n * topk_ids: Shaped ``(batch_size, 1)``. These are\n the sampled word indices in the output vocab.\n * topk_scores: Shaped ``(batch_size, 1)``. 
These\n are essentially ``(logits / sampling_temp)[topk_ids]``.\n \"\"\"\n\n if sampling_temp == 0.0 or keep_topk == 1:\n # For temp=0.0, take the argmax to avoid divide-by-zero errors.\n # keep_topk=1 is also equivalent to argmax.\n topk_scores, topk_ids = logits.topk(1, dim=-1)\n if sampling_temp > 0:\n topk_scores /= sampling_temp\n else:\n logits = torch.div(logits, sampling_temp)\n if keep_topp > 0:\n logits = sample_topp(logits, keep_topp)\n if keep_topk > 0:\n logits = sample_topk(logits, keep_topk)\n dist = torch.distributions.Categorical(logits=logits)\n topk_ids = dist.sample().view(-1, 1)\n topk_scores = logits.gather(dim=1, index=topk_ids)\n return topk_ids, topk_scores\n\n\nclass GreedySearch(DecodeStrategy):\n \"\"\"Select next tokens randomly from the top k possible next tokens.\n\n The ``scores`` attribute's lists are the score, after applying temperature,\n of the final prediction (either EOS or the final token in the event\n that ``max_length`` is reached)\n\n Args:\n pad (int): See base.\n bos (int): See base.\n eos (int): See base.\n unk (int): See base.\n batch_size (int): See base.\n global_scorer (onmt.translate.GNMTGlobalScorer): Scorer instance.\n min_length (int): See base.\n max_length (int): See base.\n ban_unk_token (Boolean): See base.\n block_ngram_repeat (int): See base.\n exclusion_tokens (set[int]): See base.\n return_attention (bool): See base.\n max_length (int): See base.\n sampling_temp (float): See\n :func:`~onmt.translate.greedy_search.sample_with_temperature()`.\n keep_topk (int): See\n :func:`~onmt.translate.greedy_search.sample_with_temperature()`.\n keep_topp (float): See\n :func:`~onmt.translate.greedy_search.sample_with_temperature()`.\n beam_size (int): Number of beams to use.\n \"\"\"\n\n def __init__(self, pad, bos, eos, unk, batch_size, global_scorer,\n min_length, block_ngram_repeat, exclusion_tokens,\n return_attention, max_length, sampling_temp, keep_topk,\n keep_topp, beam_size, ban_unk_token):\n super(GreedySearch, self).__init__(\n pad, bos, eos, unk, batch_size, beam_size, global_scorer,\n min_length, block_ngram_repeat, exclusion_tokens,\n return_attention, max_length, ban_unk_token)\n self.sampling_temp = sampling_temp\n self.keep_topk = keep_topk\n self.keep_topp = keep_topp\n self.topk_scores = None\n self.beam_size = beam_size\n\n def initialize(self, memory_bank, src_lengths, src_map=None, device=None,\n target_prefix=None):\n \"\"\"Initialize for decoding.\"\"\"\n (fn_map_state, memory_bank,\n src_map, target_prefix) = self.initialize_tile(\n memory_bank, src_lengths, src_map, target_prefix)\n if device is None:\n device = self.get_device_from_memory_bank(memory_bank)\n\n super(GreedySearch, self).initialize(\n memory_bank, src_lengths, src_map, device, target_prefix)\n self.select_indices = torch.arange(\n self.batch_size*self.beam_size, dtype=torch.long, device=device)\n self.original_batch_idx = fn_map_state(torch.arange(\n self.batch_size, dtype=torch.long, device=device), dim=0)\n self.beams_scores = torch.zeros((self.batch_size*self.beam_size, 1),\n dtype=torch.float, device=device)\n return fn_map_state, memory_bank, self.memory_lengths, src_map\n\n @property\n def current_predictions(self):\n return self.alive_seq[:, -1]\n\n @property\n def batch_offset(self):\n return self.select_indices\n\n def _pick(self, log_probs):\n \"\"\"Function used to pick next tokens.\n\n Args:\n log_probs (FloatTensor): ``(batch_size, vocab_size)``.\n \"\"\"\n # maybe fix some prediction at this step by modifying log_probs\n log_probs = 
self.target_prefixing(log_probs)\n topk_ids, topk_scores = sample_with_temperature(\n log_probs, self.sampling_temp, self.keep_topk, self.keep_topp)\n return topk_ids, topk_scores\n\n def align_select_indices(self):\n nb_finished_beams = (self.is_finished.view(-1).size(0) -\n self.select_indices.size(0))\n if nb_finished_beams:\n self.select_indices = torch.arange(\n self.select_indices.size(0), dtype=torch.long,\n device=self.select_indices.device)\n\n def advance(self, log_probs, attn):\n \"\"\"Select next tokens randomly from the top k possible next tokens.\n\n Args:\n log_probs (FloatTensor): Shaped ``(batch_size, vocab_size)``.\n These can be logits (``(-inf, inf)``) or log-probs\n (``(-inf, 0]``). (The distribution actually uses the\n log-probabilities ``logits - logits.logsumexp(-1)``,\n which equals the logits if they are log-probabilities summing\n to 1.)\n attn (FloatTensor): Shaped ``(1, B, inp_seq_len)``.\n \"\"\"\n\n self.align_select_indices()\n\n self.ensure_min_length(log_probs)\n self.ensure_unk_removed(log_probs)\n self.block_ngram_repeats(log_probs)\n\n topk_ids, self.topk_scores = self._pick(log_probs)\n self.beams_scores += self.topk_scores\n\n self.is_finished = topk_ids.eq(self.eos)\n\n self.alive_seq = torch.cat([self.alive_seq, topk_ids], -1)\n if self.return_attention:\n if self.alive_attn is None:\n self.alive_attn = attn\n else:\n self.alive_attn = torch.cat([self.alive_attn, attn], 0)\n self.ensure_max_length()\n\n def update_finished(self):\n \"\"\"Finalize scores and predictions.\"\"\"\n # shape: (sum(~ self.is_finished), 1)\n finished_batches = self.is_finished.view(-1).nonzero(as_tuple=False)\n step = len(self)\n length_penalty = self.global_scorer.length_penalty(\n step, alpha=self.global_scorer.alpha)\n\n for b in finished_batches.view(-1):\n b_orig = self.original_batch_idx[b]\n score = self.beams_scores[b, 0]/length_penalty\n pred = self.alive_seq[b, 1:]\n attention = (\n self.alive_attn[:, b, :self.memory_lengths[b]]\n if self.alive_attn is not None else [])\n self.hypotheses[b_orig].append((score, pred, attention))\n self.done = self.is_finished.all()\n if self.done:\n for b in range(self.batch_size):\n best_hyp = sorted(\n self.hypotheses[b], key=lambda x: x[0], reverse=True)\n for score, pred, attn in best_hyp:\n self.scores[b].append(score)\n self.predictions[b].append(pred)\n self.attention[b].append(attn)\n return\n is_alive = ~self.is_finished.view(-1)\n self.alive_seq = self.alive_seq[is_alive]\n self.beams_scores = self.beams_scores[is_alive]\n if self.alive_attn is not None:\n self.alive_attn = self.alive_attn[:, is_alive]\n self.select_indices = is_alive.nonzero(as_tuple=False).view(-1)\n self.original_batch_idx = self.original_batch_idx[is_alive]\n self.maybe_update_target_prefix(self.select_indices)\n\n\nclass GreedySearchLM(GreedySearch):\n def update_finished(self):\n super(GreedySearchLM, self).update_finished()\n self.update_memory_lengths()\n\n def update_memory_lengths(self):\n is_alive = ~self.is_finished.view(-1)\n self.memory_lengths = self.memory_lengths[is_alive]\n\n def advance(self, log_probs, attn):\n super(GreedySearchLM, self).advance(log_probs, attn)\n\n # in LM task memory_lengths is associated with currently generated src\n # and therefore needs to follow the generation\n self.memory_lengths += 1\n\n def initialize(self, src, src_lengths, src_map=None, device=None,\n target_prefix=None):\n \"\"\"Initialize for decoding.\"\"\"\n\n if device is None:\n device = src.device\n\n (fn_map_state, _, self.memory_lengths,\n 
src_map) = super(GreedySearchLM, self).initialize(\n None, src_lengths, src_map, device, target_prefix)\n src = fn_map_state(src, dim=1)\n\n return fn_map_state, src, self.memory_lengths, src_map\n"
] |
[
[
"torch.div",
"torch.nn.functional.softmax",
"torch.zeros",
"torch.cat",
"torch.lt",
"torch.distributions.Categorical",
"torch.sort",
"torch.arange",
"torch.topk"
]
] |
Lsplastic/Tensorflow_ssd
|
[
"f2935079fb8d2cd2288ef5f7a415749243f34542"
] |
[
"dataset/dataset_inspect.py"
] |
[
"# Copyright 2018 Changan Wang\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# =============================================================================\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\n\r\nimport tensorflow as tf\r\n\r\ndef count_split_examples(split_path, file_prefix='.tfrecord'):\r\n # Count the total number of examples in all of these shard\r\n num_samples = 0\r\n tfrecords_to_count = tf.gfile.Glob(os.path.join(split_path, file_prefix))\r\n opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB)\r\n for tfrecord_file in tfrecords_to_count:\r\n for record in tf.python_io.tf_record_iterator(tfrecord_file):#, options = opts):\r\n num_samples += 1\r\n return num_samples\r\n\r\nif __name__ == '__main__':\r\n print('train:', count_split_examples('/media/rs/7A0EE8880EE83EAF/Detections/SSD/dataset/tfrecords', 'train-?????-of-?????'))\r\n print('val:', count_split_examples('/media/rs/7A0EE8880EE83EAF/Detections/SSD/dataset/tfrecords', 'val-?????-of-?????'))\r\n"
] |
[
[
"tensorflow.python_io.tf_record_iterator",
"tensorflow.python_io.TFRecordOptions"
]
] |
dkkim1005/Neural_Network_Quantum_State
|
[
"7e94929c5ef65ce87f63bf20c81acaa524adca82"
] |
[
"python/meas_smag.py"
] |
[
"#!/usr/bin/env python3\nimport numpy as np\nfrom pynqs import sampler\n\nfloatType = 'float32'\nsymmType = 'tr'\n# hyper parameter sets of rbm and MCMC sampler\nkwargs = {\n 'nInputs' : 16,\n 'nHiddens' : 4,\n 'nChains' : 1000,\n 'seedNumber' : 0,\n 'seedDistance' : 123456789,\n 'init_mcmc_steps' : 300\n}\n# transverse-field strengthes\nhfield = '-1.1'\n# functor to locate a path of the file\nfilepath = './temp/build/RBMTrSymmCH-N%dA%dH%sV1'\\\n %(kwargs['nInputs'], kwargs['nHiddens'], hfield)\nkwargs['path_to_load'] = filepath\n# total number of measurements\nnmeas = 1000\n# number of Monte-Carlo steps\nnms = 20\n# range of the error bar (95% confidence)\nZ = 2\n\nrbm = sampler.RBM(floatType = floatType, symmType = symmType)\nrbm.init(**kwargs)\n\nmag = np.zeros([nmeas], dtype = floatType)\nfor i in range(nmeas):\n print ('# of measurements: %d'%i, end = '\\r')\n rbm.do_mcmc_steps(nms)\n spinStates = rbm.get_spinStates()\n mag[i] = np.mean(np.abs(np.mean(spinStates, axis = 1)))\nmag_mean = np.mean(mag)\nmag_err = Z*np.sqrt(np.sum((mag - mag_mean)**2)/(nmeas*(nmeas-1)))\nprint ('<|m|> : %.5E'%mag_mean, ' +/- %.3E'%mag_err)\n"
] |
[
[
"numpy.sum",
"numpy.zeros",
"numpy.mean"
]
] |
salesforce/DialFact
|
[
"d400b250147e45c106b18e52254b1060f7c1575d"
] |
[
"scripts/run_fever_scoring.py"
] |
[
"import argparse\nimport sys\nimport jsonlines\nfrom tqdm import tqdm\nimport logging\nimport json\nimport torch\nimport torch.nn.functional as F\nimport jsonlines\nimport random\nimport os\nimport numpy as np\nfrom scipy.special import softmax\n# os.environ[\"NCCL_SHM_DISABLE\"] = \"1\"\nfrom tqdm import tqdm\nfrom typing import List\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nfrom datasets import Dataset\nfrom torch.utils.data import Dataset, DataLoader\nfrom transformers import AutoConfig, AutoTokenizer, AutoModelForSequenceClassification, get_cosine_schedule_with_warmup\nfrom transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer, EvalPrediction, default_data_collator, set_seed\nfrom transformers import InputExample, PreTrainedTokenizer, InputFeatures\nimport os\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n\nLABELS = [\"SUPPORTS\", \"REFUTES\", \"NOT ENOUGH INFO\"]\n\ndef get_json_lines(inp_file):\n lines = []\n with jsonlines.open(inp_file) as reader:\n for obj in reader:\n lines.append(obj)\n \n return lines\n\ndef write_json_lines(output_file_name, list_data, output_folder):\n with jsonlines.open(output_folder+ output_file_name, mode='w') as writer:\n for dataline in list_data:\n writer.write(dataline)\n\n\nclass ClassificationModel():\n def __init__(self, num_labels=2, max_length=256, model_name_or_path='albert-large-v2', config_name=None, tokenizer_name=None):\n NUM_LABELS = num_labels\n self.max_seq_length = 256\n self.model_name_or_path = model_name_or_path\n self.config_name = config_name\n self.tokenizer_name = tokenizer_name\n self.max_length = max_length\n\n config = AutoConfig.from_pretrained(\n self.config_name if self.config_name else self.model_name_or_path,\n num_labels=NUM_LABELS,\n # cache_dir='.cache/',\n )\n add_prefix_space = False\n if 'roberta' in self.model_name_or_path:\n add_prefix_space = True\n self.tokenizer = AutoTokenizer.from_pretrained(\n self.tokenizer_name if self.tokenizer_name else self.model_name_or_path,\n # cache_dir=model_args.cache_dir,\n add_prefix_space=True,\n # use_fast=True,\n )\n self.model = AutoModelForSequenceClassification.from_pretrained(\n self.model_name_or_path,\n from_tf=bool(\".ckpt\" in self.model_name_or_path),\n config=config,\n # cache_dir=args.cache_dir,\n )\n\n def get_string_text(self, tokens_a, tokens_b):\n max_num_tokens = self.max_seq_length - 3\n total_length = len(tokens_a) + len(tokens_b)\n if total_length > max_num_tokens:\n len_b = len(tokens_b)\n a_begin = max_num_tokens - len_b\n tokens_a = tokens_a[-a_begin:]\n try:\n assert len(tokens_a) + len(tokens_b) <= max_num_tokens\n assert len(tokens_a) >= 1\n except:\n import pdb;\n pdb.set_trace()\n print('some problem with preproc')\n # assert len(tokens_b) >= 1\n\n tokens = []\n segment_ids = []\n tokens.append(self.tokenizer.cls_token)\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(self.tokenizer.sep_token)\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(self.tokenizer.sep_token)\n segment_ids.append(1)\n\n return tokens, segment_ids\n\n def tokenize_function_test(self, examples):\n # Remove empty lines\n # examples[\"text\"] = [line for line in examples[\"text\"] if len(line) > 0 and not line.isspace()]\n # examples = [line for line in examples if len(line) > 0 and not line.isspace()]\n all_texts = []\n all_segment_ids = []\n all_labels = []\n # import 
pdb;pdb.set_trace()\n processed = []\n items = []\n # keys = list(examples.keys())\n # for i in range(len(examples[keys[0]])):\n # ex = {}\n # for k in keys:\n # ex[k] = examples[k][i]\n # items.append(ex)\n # import pdb;pdb.set_trace()\n items = examples\n max_seq_length = 216\n for example in items:\n first_tokens = self.tokenizer.tokenize(example['actual'])\n for sent2 in example['prediction']:\n sec_tokens = self.tokenizer.tokenize(sent2)\n tokens = [\"[CLS]\"] + first_tokens + [\"[SEP]\"] + sec_tokens\n if len(sec_tokens) + len(first_tokens) > max_seq_length - 1:\n tokens = tokens[:(max_seq_length - 1)]\n tokens = tokens + [\"[SEP]\"]\n\n segment_ids = [0] * (len(first_tokens) + 2)\n segment_ids += [1] * (len(sec_tokens) + 1)\n all_texts.append(tokens)\n all_segment_ids.append(segment_ids)\n\n tokenized = self.tokenizer.batch_encode_plus(\n all_texts,\n padding='max_length',\n truncation=True,\n max_length=max_seq_length,\n is_split_into_words=True,\n return_special_tokens_mask=True,\n add_special_tokens=False,\n )\n\n # print(len(tokenized['input_ids']))\n padded_length = len(tokenized['input_ids'][0])\n all_segment_ids = [x + [0] * (padded_length - len(x)) for x in all_segment_ids]\n tokenized['token_type_ids'] = all_segment_ids\n # tokenized['label'] = all_labels\n\n return tokenized\n\n def tokenize_function(self, examples, sent2_type='evidence_touse', sent1_type='prediction'):\n all_texts = []\n all_segment_ids = []\n all_labels = []\n processed = []\n items = []\n max_seq_length = 216\n for example in examples:\n evidence_data = example[sent2_type]\n sent2 = evidence_data\n for p, sent1 in enumerate(example[sent1_type]):\n if type(evidence_data) is list:\n sent2 = example[sent2_type][p]\n items.append([sent2, sent1])\n # import pdb;pdb.set_trace()\n try:\n batch_encoding = self.tokenizer(\n [(example[0], example[1])\n for example in items],\n max_length=self.max_length,\n padding=\"max_length\",\n truncation=True,\n )\n except:\n import pdb;pdb.set_trace()\n # import pdb;pdb.set_trace()\n\n features = []\n input1 = list(batch_encoding.keys())[0]\n num_inputs = len(batch_encoding[input1])\n for i in range(num_inputs):\n inputs = {k: batch_encoding[k][i] for k in batch_encoding}\n feature = InputFeatures(**inputs)\n features.append(feature)\n\n return features\n\n def tokenize_function_data(self, examples, sent2_type='evidence_touse', sent1_type='response'):\n all_texts = []\n all_segment_ids = []\n all_labels = []\n processed = []\n items = []\n max_seq_length = 216\n for example in examples:\n evidence_data = example[sent2_type]\n sent2 = evidence_data\n sent1 = example[sent1_type]\n items.append([sent2, sent1])\n # import pdb;pdb.set_trace()\n try:\n batch_encoding = self.tokenizer(\n [(ex[0], ex[1])\n for ex in items],\n max_length=self.max_length,\n padding=\"max_length\",\n truncation=True,\n )\n except:\n import pdb;pdb.set_trace()\n # import pdb;pdb.set_trace()\n\n features = []\n input1 = list(batch_encoding.keys())[0]\n num_inputs = len(batch_encoding[input1])\n for i in range(num_inputs):\n inputs = {k: batch_encoding[k][i] for k in batch_encoding}\n feature = InputFeatures(**inputs)\n features.append(feature)\n\n return features\n\ndef create_data_loader(tokenized_eval_dataset, batch_size):\n\n return DataLoader(\n tokenized_eval_dataset,\n batch_size=batch_size,\n num_workers=4,\n collate_fn=default_data_collator\n \n )\n\ndef score_testdata(args, classification_model_dnli, testdata):\n tokenized_eval_dataset = 
classification_model_dnli.tokenize_function_data(testdata, sent1_type=args.response_tag)\n# import pdb;pdb.set_trace()\n# tdataset = Dataset.from_dict(tokenized_eval_dataset)\n# test_data_loader = create_data_loader(tdataset, args.batch_size)\n test_data_loader = create_data_loader(tokenized_eval_dataset, args.batch_size)\n all_scores = []\n parsed = 0\n for idx, d in enumerate(tqdm(test_data_loader)):\n input_ids = d[\"input_ids\"].to(device)\n attention_mask = d[\"attention_mask\"].to(device)\n token_type_ids = d[\"token_type_ids\"].to(device)\n outputs = classification_model_dnli.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids\n )\n\n outputs = softmax(outputs['logits'].tolist(),axis=1)\n for oidx, out in enumerate(outputs):\n softmax_l1 = out.tolist()\n # dnli_score = [x[0] for x in softmax_l1]\n # print(softmax_l1)s\n # all_scores+=softmax_l1\n testdata[parsed][args.typeprefix+'fever_score'] = softmax_l1\n parsed+=1\n \ndef score_data(args, classification_model_dnli, max_evidences=5):\n testdata = get_json_lines(args.input_file)\n for i, datapoint in enumerate(tqdm(testdata)):\n# lines = datapoint[args.response_tag]\n if 'evidence_list' in datapoint:\n all_evidences = datapoint['evidence_list'][:max_evidences]\n # for e, evilist in enumerate(datapoint['evidence_list'][:max_evidences]):\n # all_evidences = evilist#datapoint['evidence_list']\n # print(all_evidences)\n # print(['title: ' + x[0] + ' content: ' + x[2] for x in all_evidences])\n all_evidence_texts = ['title: ' + x[0] + ' content: ' + x[2] for x in all_evidences]\n # evidence_text = ' ### '.join(all_evidence_texts)\n evidence_text = ' '.join(all_evidence_texts)\n datapoint['evidence_touse'] = evidence_text\n\n if args.claim_only:\n datapoint['evidence_touse'] = ''\n # import pdb;pdb.set_trace()\n if len(datapoint[args.response_tag])==0:\n continue\n\n\n score_testdata(args, classification_model_dnli, testdata)\n # scores = lm_scores(lines, model, tokenizer, device)\n# datapoint['dnli_score'] = scores\n \n write_json_lines(args.preds_file, testdata, args.output_folder)\n \n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--cuda_device', type=int, help='id of GPU to use', default=0)\n parser.add_argument('-m', '--model', type=str, help='model name to use', default='colloquial_bert_large/')\n parser.add_argument('-i', '--input_file', type=str, help='path to the file containing the evaluation data', required=True)\n parser.add_argument('-o', '--preds_file', type=str, help='output file to save the results')\n parser.add_argument('--output_folder', type=str, help='output file to save the results', default='colloquialfeverscores/')\n parser.add_argument('--response_tag', type=str, help='tag', default='response')\n parser.add_argument('--batch_size', type=int, help='batch size', default=20)\n parser.add_argument('--claim_only', action='store_true', default=False, help='Disables evidence')\n parser.add_argument('--max_seq_length', type=int, help='batch size', default=256)\n parser.add_argument('--knowledgeformat', type=str, help='tag', default='') # wikijoin\n parser.add_argument('--typeprefix', type=str, help='tag', default='')\n parser.add_argument('--outputprefix', type=str, help='tag', default='')\n \n# parser.add_argument('-append', action='store_true', help='allow append to previous run', default=False)\n\n args = parser.parse_args()\n if args.preds_file is None:\n args.preds_file = args.input_file.split('/')[-1]\n \n args.preds_file 
= args.outputprefix + args.preds_file\n# assert(not os.path.exists(args.preds_file))\n if args.cuda_device>=0:\n device = 'cuda:'+str(args.cuda_device)\n else:\n device = 'cpu'\n \n args.device = device\n classification_model_dnli = ClassificationModel(num_labels=3,model_name_or_path=args.model)\n classification_model_dnli.model = classification_model_dnli.model.to(device)\n print('model loaded')\n classification_model_dnli.model.eval()\n score_data(args, classification_model_dnli)\n \n# python fever_scoring.py -i ../post_generation/contextagg_maskfill_mix1_wow_test_tsc_200_t1.5.jsonl --output_folder vitamincscores/ -m tals/albert-xlarge-vitaminc\n# python fever_scoring.py -i ../post_generation/contextagg_maskfill_mix1_wow_test_tsc_200_t1.5.jsonl --knowledgeformat wikijoin --typeprefix colloq_ --output_folder colloquialfeverscores/ -m colloquial_bert_large\n"
] |
[
[
"torch.utils.data.DataLoader"
]
] |
praeclarumjj3/CuML
|
[
"1c812d3b07a11c3a69a284d9960058a874d97bfa"
] |
[
"CuSVD/testcases/gen_testcase.py"
] |
[
"#!/usr/bin/python3\n\n#########################################################################\n# Generate M x N matrix of real numbers and store #\n# the the matrix in file named 'testcase_<M>_<N>' #\n# Parameters: #\n# M :no of rows (samples) in matrix #\n# N :no of coulmns (features) in matrix #\n# lrange, urange :range of matrix elements ie #\n# forall 0<=i<M, 0<=j<N #\n# lrange <= matrix[i][j] <= urange #\n# Format of output file: #\n# ----------------------------------------------------------------- #\n# | M N #\n#\t| D[0][0] D[0][1] ... D[0][N-1] D[1][0] ... D[M-1][N-1] #\n# ----------------------------------------------------------------- #\n#########################################################################\n\n\nfrom random import uniform\nfrom sklearn.preprocessing import StandardScaler\n\nM = 1000 # number of rows (samples) in input matrix D\nN = 300 # number of columns (features) in input matrix\nlrange = -100000 # lrange <= element of matrix\nurange = 100000 # element of matrix <= urange\n\n# generate the matrix\nD = []\nfor i in range(M):\n temp = []\n for j in range(N):\n temp.append(uniform(lrange, urange))\n D.append(temp)\n\n# standardize\nX_std = StandardScaler().fit_transform(D)\n\nfilename = 'testcase_' + str(M) + '_' + str(N) #output filename\nfile = open(filename, 'w')\n\n# write size of matrix in first line of file\nfile.write(str(M) + ' ' +str(N) + '\\n')\n\n# write space separated matrix elements\nfor i in range(M):\n for j in range(N):\n file.write('%.7f ' %(X_std[i][j]))\n\nfile.close()\n"
] |
[
[
"sklearn.preprocessing.StandardScaler"
]
] |
masayoshi-nakamura/CognitiveArchitectureLecture
|
[
"5e036b48e92f266062eb7be8a366e754dee24f2c"
] |
[
"examples/brainsimulator_agent/components/visual_area_component.py"
] |
[
"\nimport brica1\nimport numpy as np\nimport pygazebo.msg.poses_stamped_pb2\nimport pickle\n\nclass VisualAreaComponent(brica1.Component):\n def __init__(self):\n super(VisualAreaComponent, self).__init__()\n self.last_position = np.array((0, 0))\n\n def __position_to_area_id(self, pos2d):\n x = pos2d[0]\n y = pos2d[1]\n radius = 1\n maze_width = 1\n\n if x*x + y*y < radius*radius:\n return (0, 0)\n\n areaIdX = 0\n if x < maze_width*0.5:\n areaIdX = -1\n if x > maze_width*0.5:\n areaIdX = 1\n\n areaIdY = 0\n if y < maze_width*0.5:\n areaIdY = -1\n if y > maze_width*0.5:\n areaIdY = 1\n return (areaIdX, areaIdY)\n \n def get_server_response(self):\n return self.server_response\n \n def callback(self, data):\n pose = pygazebo.msg.poses_stamped_pb2.PosesStamped()\n message = pose.FromString(data)\n\n turtlebot_id = 0\n if message.pose[turtlebot_id].name != \"turtlebot\":\n raise Exception(\"message.pose[0].name is not turtlbot\")\n\n position = np.array((\n message.pose[turtlebot_id].position.x,\n message.pose[turtlebot_id].position.y))\n orientation = np.array((\n message.pose[turtlebot_id].orientation.x,\n message.pose[turtlebot_id].orientation.y,\n message.pose[turtlebot_id].orientation.z,\n message.pose[turtlebot_id].orientation.w))\n\n vel = self.last_position - position\n self.last_position = position\n\n self.set_state(\"out_body_velocity\",\n np.array((vel[0], vel[1])).astype(np.float32))\n self.set_state(\"out_body_position\",\n position.astype(np.float32))\n self.set_state(\"out_body_orientation\",\n orientation.astype(np.float32))\n \n self.server_response = {\"out_body_velocity\":vel.tolist(),\n \"out_body_position\":position.tolist(),\n \"out_body_orientation\":orientation.tolist()}\n #print self.server_response\n\n def fire(self):\n for key in self.states.keys():\n self.results[key] = self.states[key]\n"
] |
[
[
"numpy.array"
]
] |
vtekur/gnn_pathplanning
|
[
"150ca315c214134eda8f5c5b55ce71da9360bcce"
] |
[
"utils/visualize.py"
] |
[
"#!/usr/bin/env python3\nimport yaml\nimport matplotlib\n# matplotlib.use(\"Agg\")\nfrom matplotlib.patches import Circle, Rectangle, Arrow\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import ConnectionPatch\nfrom matplotlib.patches import FancyArrowPatch\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nfrom matplotlib import animation\nfrom matplotlib import lines\nimport matplotlib.animation as manimation\nimport argparse\nimport math\nimport gc\nimport seaborn as sns\nimport time\nimport scipy.io as sio\nimport sys\nnp.set_printoptions(threshold=np.inf)\nclass Animation:\n def __init__(self, config):\n\n self.config = config\n with open(config.map) as map_file:\n self.data_map = yaml.load(map_file)\n\n with open(config.schedule) as states_file:\n self.schedule = yaml.load(states_file)\n\n self.num_agents = len(self.data_map[\"agents\"])\n self.K = self.config.nGraphFilterTaps\n self.ID_agent = self.config.id_chosenAgent\n data_contents = sio.loadmat(config.GSO)\n self.GSO = np.transpose(data_contents[\"gso\"], (2, 3, 0, 1)).squeeze(3)\n self.commRadius = data_contents[\"commRadius\"]\n self.maxLink = 500\n\n aspect = self.data_map[\"map\"][\"dimensions\"][0] / self.data_map[\"map\"][\"dimensions\"][1]\n\n self.fig = plt.figure(frameon=False, figsize=(4 * aspect, 4))\n self.ax = self.fig.add_subplot(111, aspect='equal')\n self.fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=None, hspace=None)\n # self.ax.set_frame_on(False)\n\n self.patches = []\n self.artists = []\n self.agents = dict()\n self.commLink = dict()\n self.agent_names = dict()\n\n # self.list_color = self.get_cmap(self.num_agents)\n self.list_color = sns.color_palette(\"hls\", self.num_agents)\n self.list_color_commLink = sns.color_palette(\"hls\", 8) # self.K)\n\n self.list_commLinkStyle = list(lines.lineStyles.keys())\n\n # create boundary patch\n xmin = -0.5\n ymin = -0.5\n xmax = self.data_map[\"map\"][\"dimensions\"][0] - 0.5\n ymax = self.data_map[\"map\"][\"dimensions\"][1] - 0.5\n\n # self.ax.relim()\n plt.xlim(xmin, xmax)\n plt.ylim(ymin, ymax)\n # self.ax.set_xticks([])\n # self.ax.set_yticks([])\n # plt.axis('off')\n # self.ax.axis('tight')\n # self.ax.axis('off')\n\n self.patches.append(Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, facecolor='none', edgecolor='black'))\n for o in self.data_map[\"map\"][\"obstacles\"]:\n x, y = o[0], o[1]\n self.patches.append(Rectangle((x - 0.5, y - 0.5), 1, 1, facecolor='black', edgecolor='black'))\n\n # initialize communication Link\n for id_link in range(self.maxLink):\n #https://matplotlib.org/api/artist_api.html#module-matplotlib.lines\n name_link = \"{}\".format(id_link)\n # self.commLink[name_link] = FancyArrowPatch((0,0), (0,0),linewidth=2)\n self.commLink[name_link] = plt.Line2D((0, 0), (0, 0), linewidth=2)\n self.artists.append(self.commLink[name_link])\n\n # print(self.schedule[\"schedule\"])\n # create agents:\n self.T = 0\n # draw goals first\n for d, i in zip(self.data_map[\"agents\"], range(0, self.num_agents)):\n self.patches.append(\n Rectangle((d[\"goal\"][0] - 0.25, d[\"goal\"][1] - 0.25), 0.6, 0.6, facecolor=self.list_color[i],\n edgecolor=self.list_color[i], alpha=0.5))\n\n for d, i in zip(self.data_map[\"agents\"], range(0, self.num_agents)):\n #https://matplotlib.org/api/artist_api.html#module-matplotlib.lines\n name = d[\"name\"]\n self.agents[name] = Circle((d[\"start\"][0], d[\"start\"][1]), 0.4, facecolor=self.list_color[i],\n edgecolor=self.list_color[i])\n self.agents[name].original_face_color = 
self.list_color[i]\n self.patches.append(self.agents[name])\n self.T = max(self.T, self.schedule[\"schedule\"][name][-1][\"t\"])\n\n # set floating ID\n self.agent_names[name] = self.ax.text(d[\"start\"][0], d[\"start\"][1], name.replace('agent', ''))\n\n\n\n self.agent_names[name].set_horizontalalignment('center')\n self.agent_names[name].set_verticalalignment('center')\n self.artists.append(self.agent_names[name])\n\n\n # self.ax.add_line(dotted_line)\n # self.ax.set_axis_off()\n # self.fig.axes[0].set_visible(False)\n # self.fig.axes.get_yaxis().set_visible(False)\n\n # self.fig.tight_layout()\n\n self.anim = animation.FuncAnimation(self.fig, self.animate_func,\n init_func=self.init_func,\n frames=int(self.T + 1) * 10,\n interval=100,\n blit=True)\n\n def get_cmap(self, n, name='hsv'):\n '''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct\n RGB color; the keyword argument name must be a standard mpl colormap name.'''\n return plt.cm.get_cmap(name, n)\n\n def save(self, file_name, speed):\n\n \n self.anim.save(\n file_name,\n \"ffmpeg\",\n fps=10 * speed,\n dpi=200),8\n\n # savefig_kwargs={\"pad_inches\": 0, \"bbox_inches\": \"tight\"})\n\n def show(self):\n plt.show()\n\n def init_func(self):\n for p in self.patches:\n self.ax.add_patch(p)\n for a in self.artists:\n\n self.ax.add_artist(a)\n return self.patches + self.artists\n\n # def find_neighours(self, ID_selected_agent, step, level, max_level=self.K):\n def get_currentGSO(self, step):\n # module to get GSO\n # print(self.GSO.shape)\n GSO_current = self.GSO[:, :, step]\n # print(GSO_current.shape)\n gso_up_diag = np.triu(GSO_current)\n # print(gso_up)\n # return gso_up_diag\n return GSO_current\n\n def update_gso(self, gso_tmp, id_chosenAgent, id_neighborAgent):\n gso_tmp[id_chosenAgent, id_neighborAgent] = 0\n gso_tmp[id_neighborAgent, id_chosenAgent] = 0\n\n return gso_tmp\n\n def find_neighours(self, gso, id_chosenAgent):\n # print(id_chosenAgent)\n # print(gso)\n ID_neighbor_robot = gso[id_chosenAgent,:].nonzero()[0]\n # print(gso_up[ID_selected_agent,:])\n # print(ID_neighbor_robot)\n\n return ID_neighbor_robot, ID_neighbor_robot.shape[0]\n\n\n def build_comm_link(self, store_list_line, gso, id_chosenAgent, index_hop):\n\n if index_hop >= self.K:\n # print('\\n {}\\n'.format(store_list_line))\n return store_list_line\n\n else:\n # status_agent_currentHop = agents_array[id_chosenAgent]\n id_neighbor_robot, num_neighbor = self.find_neighours(gso, id_chosenAgent)\n\n # pos_agent_currentHop_array = np.array(status_agent_currentHop.center)\n\n # repeat until K\n for index in range(num_neighbor):\n id_neighbor = id_neighbor_robot[index]\n # status_agent_nextHop = agents_array[id_neighbor]\n # pos_agent_nextHop_array = np.array(status_agent_nextHop.center)\n\n # draw line (pos1,pos2)\n # print('#### current hop {} / {}'.format(index_hop+1,self.K))\n # print('\\t {} <- \\t{}'.format(id_chosenAgent, id_neighbor))\n # print('\\t {} <- \\t{}'.format(status_agent_currentHop, status_agent_nextHop))\n\n # posX_agent = (pos_agent_currentHop_array[0], pos_agent_nextHop_array[0])\n # posY_agent = (pos_agent_currentHop_array[1], pos_agent_nextHop_array[1])\n\n\n line = (index_hop + 1,index_hop-1, (id_chosenAgent, id_neighbor))\n\n name_line = '{}-{}'.format(id_chosenAgent, id_neighbor)\n store_list_line.update({name_line:line})\n gso_new = self.update_gso(gso,id_chosenAgent,id_neighbor)\n store_list_line = self.build_comm_link(store_list_line, gso_new, id_neighbor, index_hop+1)\n return store_list_line\n\n def 
get_linkPos(self,agents_array,id_chosenAgent,id_neighbor):\n status_agent_currentHop = agents_array[id_chosenAgent]\n pos_agent_currentHop_array = np.array(status_agent_currentHop.center)\n status_agent_nextHop = agents_array[id_neighbor]\n pos_agent_nextHop_array = np.array(status_agent_nextHop.center)\n posX_agent = (pos_agent_currentHop_array[0], pos_agent_nextHop_array[0])\n posY_agent = (pos_agent_currentHop_array[1], pos_agent_nextHop_array[1])\n\n return (posX_agent, posY_agent)\n\n\n def animate_func(self, i):\n currentStep = i//10\n\n if i%10 == 0:\n gso_current = self.get_currentGSO(currentStep)\n self.list_line = self.build_comm_link({}, gso_current, self.ID_agent, 1)\n # print(self.list_line)\n\n # print(\"time-frame:{}/{} - step:{}\".format(i,int(self.T + 1) * 10, currentStep))\n for agent_name in self.schedule[\"schedule\"]:\n agent = self.schedule[\"schedule\"][agent_name]\n # print(agent)\n pos = self.getState(i / 10, agent)\n p = (pos[0], pos[1])\n self.agents[agent_name].center = p\n self.agent_names[agent_name].set_position(p)\n\n # reset all colors\n for _, agent in self.agents.items():\n agent.set_facecolor(agent.original_face_color)\n\n # build communcation link\n agents_array = [agent for _, agent in self.agents.items()]\n\n id_link = 0\n for key_link, line_info in self.list_line.items():\n name_link = \"{}\".format(id_link)\n index_hop, index_style, (id_chosenAgent, id_neighbor) = line_info\n pos = self.get_linkPos(agents_array, id_chosenAgent, id_neighbor)\n self.commLink[name_link].set_data(pos)\n self.commLink[name_link].set_color(self.list_color_commLink[index_style])\n self.commLink[name_link].set_linestyle(self.list_commLinkStyle[index_style])\n # print(self.list_commLinkStyle[index_hop-2])\n # print(\"{}/{}- {} - {}\".format(index_hop, self.K, key_link, self.commLink[name_link]._posA_posB))\n id_link += 1\n\n id_link_reset = id_link\n for id_link_rest in range(id_link_reset, self.maxLink):\n name_link = \"{}\".format(id_link_rest)\n self.commLink[name_link].set_data((0, 0), (0, 0))\n\n # check drive-drive collisions\n\n for id_m in range(0, len(agents_array)):\n for id_n in range(id_m + 1, len(agents_array)):\n # print(i,j)\n d1 = agents_array[id_m]\n d2 = agents_array[id_n]\n pos1 = np.array(d1.center)\n pos2 = np.array(d2.center)\n # plt.plot(pos1, pos2, 'ro-')\n if np.linalg.norm(pos1 - pos2) < 0.7:\n d1.set_facecolor('red')\n d2.set_facecolor('red')\n print(\"COLLISION! 
(agent-agent) ({}, {})\".format(id_m, id_n))\n\n return self.patches + self.artists\n\n def getState(self, t, d):\n idx = 0\n while idx < len(d) and d[idx][\"t\"] < t:\n idx += 1\n if idx == 0:\n return np.array([float(d[0][\"x\"]), float(d[0][\"y\"])])\n elif idx < len(d):\n posLast = np.array([float(d[idx - 1][\"x\"]), float(d[idx - 1][\"y\"])])\n posNext = np.array([float(d[idx][\"x\"]), float(d[idx][\"y\"])])\n else:\n return np.array([float(d[-1][\"x\"]), float(d[-1][\"y\"])])\n dt = d[idx][\"t\"] - d[idx - 1][\"t\"]\n t = (t - d[idx - 1][\"t\"]) / dt\n pos = (posNext - posLast) * t + posLast\n # print(pos)\n return pos\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--local_dir', default='/Users/vtek/gnn_pathplanning/')\n parser.add_argument('--nGraphFilterTaps', type=int, default=3)\n parser.add_argument('--type')\n parser.add_argument('--caseId', default='00000')\n parser.add_argument(\"--speed\", type=int, default=2, help=\"speedup-factor\")\n parser.add_argument('--log_time_trained', type=str, default='0')\n parser.add_argument('--id_chosenAgent', type=int, default=0)\n parser.add_argument('--failure_case', type=bool, default=False)\n parser.add_argument('--name', default=None)\n args = parser.parse_args()\n if args.failure_case:\n case_type = 'failure'\n else:\n case_type = 'success'\n base_dir = args.local_dir + 'Results_best/AnimeDemo/{}/map20x20_rho1_10Agent/K{}_HS0/TR_M20p1_10Agent/{}/commR_6/'.format(args.type, args.nGraphFilterTaps,args.log_time_trained)\n args.map = base_dir + 'input/{}Cases_ID{}.yaml'.format(case_type, args.caseId)\n args.schedule = base_dir+'predict_{}/{}Cases_ID{}.yaml'.format(case_type,case_type,args.caseId)\n args.GSO = base_dir+'GSO/{}Cases_ID{}.mat'.format(case_type,args.caseId)\n if args.name:\n args.video = args.local_dir + 'Results_best/' + '/video_K{}_{}_{}.mp4'.format(args.nGraphFilterTaps, args.type, args.name)\n else:\n args.video = args.local_dir + 'Results_best/' + '/video_K{}_{}.mp4'.format(args.nGraphFilterTaps, args.type)\n animation = Animation(args)\n if args.video:\n print(\"Starting!\")\n animation.save(args.video, args.speed)\n print(\"Ending!\")\n else:\n animation.show()\n\n\n"
] |
[
[
"numpy.array",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.ylim",
"numpy.set_printoptions",
"matplotlib.patches.Rectangle",
"scipy.io.loadmat",
"matplotlib.pyplot.Line2D",
"matplotlib.patches.Circle",
"numpy.linalg.norm",
"matplotlib.pyplot.xlim",
"matplotlib.lines.lineStyles.keys",
"numpy.transpose",
"numpy.triu",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
zyyhhxx/convNet.pytorch
|
[
"85f65f80b6d75810077c54bd3a8c9094cc2a26f9"
] |
[
"models/resnet.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport math\nfrom .modules.se import SEBlock\nfrom .modules.checkpoint import CheckpointModule\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\nfrom utils.mixup import MixUp\n\n__all__ = ['resnet', 'resnet_se']\n\n\ndef init_model(model):\n for m in model.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n for m in model.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n model.fc.weight.data.normal_(0, 0.01)\n model.fc.bias.data.zero_()\n\n\ndef weight_decay_config(value=1e-4, log=False):\n return {'name': 'WeightDecay',\n 'value': value,\n 'log': log,\n 'filter': {'parameter_name': lambda n: not n.endswith('bias'),\n 'module': lambda m: not isinstance(m, nn.BatchNorm2d)}\n }\n\n\ndef mixsize_config(sz, base_size, base_batch, base_duplicates, adapt_batch, adapt_duplicates):\n assert adapt_batch or adapt_duplicates or sz == base_size\n batch_size = base_batch\n duplicates = base_duplicates\n if adapt_batch and adapt_duplicates:\n scale = base_size/sz\n else:\n scale = (base_size/sz)**2\n\n if scale * duplicates < 0.5:\n adapt_duplicates = False\n adapt_batch = True\n\n if adapt_batch:\n batch_size = int(round(scale * base_batch))\n\n if adapt_duplicates:\n duplicates = int(round(scale * duplicates))\n\n duplicates = max(1, duplicates)\n return {\n 'input_size': sz,\n 'batch_size': batch_size,\n 'duplicates': duplicates\n }\n\n\ndef ramp_up_fn(lr0, lrT, T):\n rate = (lrT - lr0) / T\n return \"lambda t: {'lr': %s + t * %s}\" % (lr0, rate)\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, bias=False):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, groups=groups, bias=bias)\n\n\nclass BasicBlock(nn.Module):\n\n def __init__(self, inplanes, planes, stride=1, expansion=1,\n downsample=None, groups=1, residual_block=None, dropout=0.):\n super(BasicBlock, self).__init__()\n dropout = 0 if dropout is None else dropout\n self.conv1 = conv3x3(inplanes, planes, stride, groups=groups)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, expansion * planes, groups=groups)\n self.bn2 = nn.BatchNorm2d(expansion * planes)\n self.downsample = downsample\n self.residual_block = residual_block\n self.stride = stride\n self.expansion = expansion\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.dropout(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(residual)\n\n if self.residual_block is not None:\n residual = self.residual_block(residual)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n\n def __init__(self, inplanes, planes, stride=1, expansion=4, downsample=None, groups=1, residual_block=None, dropout=0.):\n super(Bottleneck, self).__init__()\n dropout = 0 if dropout is None else dropout\n self.conv1 = nn.Conv2d(\n inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = conv3x3(planes, planes, 
stride=stride, groups=groups)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(\n planes, planes * expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * expansion)\n self.relu = nn.ReLU(inplace=True)\n self.dropout = nn.Dropout(dropout)\n self.downsample = downsample\n self.residual_block = residual_block\n self.stride = stride\n self.expansion = expansion\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.dropout(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.dropout(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(residual)\n\n if self.residual_block is not None:\n residual = self.residual_block(residual)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self):\n super(ResNet, self).__init__()\n\n def _make_layer(self, block, planes, blocks, expansion=1, stride=1, groups=1, residual_block=None, dropout=None, mixup=False):\n downsample = None\n out_planes = planes * expansion\n if stride != 1 or self.inplanes != out_planes:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, out_planes,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * expansion),\n )\n if residual_block is not None:\n residual_block = residual_block(out_planes)\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, expansion=expansion,\n downsample=downsample, groups=groups, residual_block=residual_block, dropout=dropout))\n self.inplanes = planes * expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, expansion=expansion, groups=groups,\n residual_block=residual_block, dropout=dropout))\n if mixup:\n layers.append(MixUp())\n return nn.Sequential(*layers)\n\n def features(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n return x.view(x.size(0), -1)\n\n def forward(self, x):\n x = self.features(x)\n x = self.fc(x)\n return x\n\n\nclass ResNet_imagenet(ResNet):\n num_train_images = 1281167\n\n def __init__(self, num_classes=1000, inplanes=64,\n block=Bottleneck, residual_block=None, layers=[3, 4, 23, 3],\n width=[64, 128, 256, 512], expansion=4, groups=[1, 1, 1, 1],\n regime='normal', scale_lr=1, ramp_up_lr=True, checkpoint_segments=0, mixup=False,\n base_devices=4, base_device_batch=64, base_duplicates=1, base_image_size=224, mix_size_regime='D+'):\n super(ResNet_imagenet, self).__init__()\n self.inplanes = inplanes\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n for i in range(len(layers)):\n layer = self._make_layer(block=block, planes=width[i], blocks=layers[i], expansion=expansion,\n stride=1 if i == 0 else 2, residual_block=residual_block, groups=groups[i],\n mixup=mixup)\n if checkpoint_segments > 0:\n layer_checkpoint_segments = min(checkpoint_segments, layers[i])\n layer = CheckpointModule(layer, layer_checkpoint_segments)\n setattr(self, 'layer%s' % str(i + 1), layer)\n\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(width[-1] * expansion, num_classes)\n\n init_model(self)\n batch_size = base_devices * base_device_batch\n 
num_steps_epoch = math.floor(self.num_train_images / batch_size)\n\n # base regime\n self.regime = [\n {'epoch': 0, 'optimizer': 'SGD', 'lr': scale_lr * 1e-1,\n 'momentum': 0.9, 'regularizer': weight_decay_config(1e-4)},\n {'epoch': 30, 'lr': scale_lr * 1e-2},\n {'epoch': 60, 'lr': scale_lr * 1e-3},\n {'epoch': 80, 'lr': scale_lr * 1e-4}\n ]\n\n if 'cutmix' in regime:\n self.regime = [\n {'epoch': 0, 'optimizer': 'SGD', 'lr': scale_lr * 1e-1,\n 'momentum': 0.9, 'regularizer': weight_decay_config(1e-4)},\n {'epoch': 75, 'lr': scale_lr * 1e-2},\n {'epoch': 150, 'lr': scale_lr * 1e-3},\n {'epoch': 225, 'lr': scale_lr * 1e-4}\n ]\n\n # Sampled regimes from \"Mix & Match: training convnets with mixed image sizes for improved accuracy, speed and scale resiliency\"\n if 'sampled' in regime:\n # add gradient smoothing\n self.regime[0]['regularizer'] = [{'name': 'GradSmooth', 'momentum': 0.9, 'log': False},\n weight_decay_config(1e-4)]\n ramp_up_lr = False\n self.data_regime = None\n\n def size_config(size): return mixsize_config(size, base_size=base_image_size, base_batch=base_device_batch, base_duplicates=base_duplicates,\n adapt_batch=mix_size_regime == 'B+', adapt_duplicates=mix_size_regime == 'D+')\n increment = int(base_image_size / 7)\n\n if '144' in regime:\n self.sampled_data_regime = [\n (0.1, size_config(base_image_size+increment)),\n (0.1, size_config(base_image_size)),\n (0.6, size_config(base_image_size - 3*increment)),\n (0.2, size_config(base_image_size - 4*increment)),\n ]\n else: # sampled-224\n self.sampled_data_regime = [\n (0.8/6, size_config(base_image_size - 3*increment)),\n (0.8/6, size_config(base_image_size - 2*increment)),\n (0.8/6, size_config(base_image_size - increment)),\n (0.2, size_config(base_image_size)),\n (0.8/6, size_config(base_image_size + increment)),\n (0.8/6, size_config(base_image_size + 2*increment)),\n (0.8/6, size_config(base_image_size + 3*increment)),\n ]\n\n self.data_eval_regime = [\n {'epoch': 0, 'input_size': base_image_size}\n ]\n\n if ramp_up_lr and scale_lr > 1: # add learning rate ramp-up\n self.regime[0]['step_lambda'] = ramp_up_fn(0.1, 0.1 * scale_lr,\n num_steps_epoch * 5)\n self.regime.insert(1, {'epoch': 5, 'lr': scale_lr * 1e-1})\n\n\nclass ResNet_cifar(ResNet):\n\n def __init__(self, num_classes=10, inplanes=16,\n block=BasicBlock, depth=18, width=[16, 32, 64],\n groups=[1, 1, 1], residual_block=None, regime='normal', dropout=None, mixup=False):\n super(ResNet_cifar, self).__init__()\n self.inplanes = inplanes\n n = int((depth - 2) / 6)\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1,\n bias=False)\n self.bn1 = nn.BatchNorm2d(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = lambda x: x\n\n self.layer1 = self._make_layer(block, width[0], n, groups=groups[0],\n residual_block=residual_block, dropout=dropout, mixup=mixup)\n self.layer2 = self._make_layer(block, width[1], n, stride=2, groups=groups[1],\n residual_block=residual_block, dropout=dropout, mixup=mixup)\n self.layer3 = self._make_layer(block, width[2], n, stride=2, groups=groups[2],\n residual_block=residual_block, dropout=dropout, mixup=mixup)\n self.layer4 = lambda x: x\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(width[-1], num_classes)\n\n init_model(self)\n self.regime = [\n {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1, 'momentum': 0.9,\n 'regularizer': weight_decay_config(1e-4)},\n {'epoch': 81, 'lr': 1e-2},\n {'epoch': 122, 'lr': 1e-3},\n {'epoch': 164, 'lr': 1e-4}\n ]\n\n if 'wide-resnet' in regime:\n 
self.regime = [\n {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1, 'momentum': 0.9,\n 'regularizer': weight_decay_config(5e-4)},\n {'epoch': 60, 'lr': 2e-2},\n {'epoch': 120, 'lr': 4e-3},\n {'epoch': 160, 'lr': 8e-4}\n ]\n\n # Sampled regimes from \"Mix & Match: training convnets with mixed image sizes for improved accuracy, speed and scale resiliency\"\n if 'sampled' in regime:\n adapt_batch = True if 'B+' in regime else False\n adapt_duplicates = True if ('D+' in regime or not adapt_batch) \\\n else False\n\n def size_config(size): return mixsize_config(size, base_size=32, base_batch=64, base_duplicates=1,\n adapt_batch=adapt_batch, adapt_duplicates=adapt_duplicates)\n # add gradient smoothing\n self.regime[0]['regularizer'] = [{'name': 'GradSmooth', 'momentum': 0.9, 'log': False},\n weight_decay_config(1e-4)]\n self.data_regime = None\n self.sampled_data_regime = [\n (0.3, size_config(32)),\n (0.2, size_config(48)),\n (0.3, size_config(24)),\n (0.2, size_config(16)),\n ]\n self.data_eval_regime = [\n {'epoch': 0, 'input_size': 32, 'scale_size': 32}\n ]\n\n\ndef resnet(**config):\n dataset = config.pop('dataset', 'imagenet')\n if config.pop('quantize', False):\n from .modules.quantize import QConv2d, QLinear, RangeBN\n torch.nn.Linear = QLinear\n torch.nn.Conv2d = QConv2d\n torch.nn.BatchNorm2d = RangeBN\n\n bn_norm = config.pop('bn_norm', None)\n if bn_norm is not None:\n from .modules.lp_norm import L1BatchNorm2d, TopkBatchNorm2d\n if bn_norm == 'L1':\n torch.nn.BatchNorm2d = L1BatchNorm2d\n if bn_norm == 'TopK':\n torch.nn.BatchNorm2d = TopkBatchNorm2d\n\n if 'imagenet' in dataset:\n config.setdefault('num_classes', 1000)\n depth = config.pop('depth', 50)\n if depth == 18:\n config.update(dict(block=BasicBlock,\n layers=[2, 2, 2, 2],\n expansion=1))\n if depth == 34:\n config.update(dict(block=BasicBlock,\n layers=[3, 4, 6, 3],\n expansion=1))\n if depth == 50:\n config.update(dict(block=Bottleneck, layers=[3, 4, 6, 3]))\n if depth == 101:\n config.update(dict(block=Bottleneck, layers=[3, 4, 23, 3]))\n if depth == 152:\n config.update(dict(block=Bottleneck, layers=[3, 8, 36, 3]))\n if depth == 200:\n config.update(dict(block=Bottleneck, layers=[3, 24, 36, 3]))\n\n return ResNet_imagenet(**config)\n\n elif dataset == 'cifar10':\n config.setdefault('num_classes', 10)\n config.setdefault('depth', 44)\n return ResNet_cifar(block=BasicBlock, **config)\n\n elif dataset == 'cifar100':\n config.setdefault('num_classes', 100)\n config.setdefault('depth', 44)\n return ResNet_cifar(block=BasicBlock, **config)\n\n\ndef resnet_se(**config):\n config['residual_block'] = SEBlock\n return resnet(**config)\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
sahiljuneja/kaggle-ctds
|
[
"caac226f2c5d33b6d324c5cf33a777758b9163d1"
] |
[
"utils/modify_ravdess.py"
] |
[
"import re\nimport os\nimport argparse\nimport librosa\nimport librosa.display\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef modify_data(input_path, save_path, dir_dict):\n \n path = os.listdir(input_path)\n for folders in path:\n \n folders = os.path.sep.join([input_path, folders])\n \n for file in os.listdir(folders):\n \n num = re.findall('\\d+', file)\n emotion = dir_dict[num[2]]\n \n file_save_path = save_path + str(emotion)\n if not os.path.isdir(file_save_path):\n os.makedirs(file_save_path)\n\n load_file_path = '{0}/{1}'.format(folders, file)\n \n file_name = \"/{}.jpeg\".format(file[:-4])\n if not os.path.isfile(file_save_path + file_name):\n \n y, sr = librosa.load(load_file_path)\n yt, _ = librosa.effects.trim(y)\n y = yt\n\n mel_spect = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=1024, hop_length=100)\n mel_spect = librosa.power_to_db(mel_spect, ref=np.max)\n\n librosa.display.specshow(mel_spect, y_axis='mel', fmax=20000, x_axis='time');\n\n\n plt.savefig(file_save_path + file_name)\n\n #print(\"File saved to: {}\".format(file_save_path + file_name))\n\n\nif __name__ == \"__main__\":\n\n # sample call\n # python modify_ravdess.py -p /notebooks/storage/ravdess/ -s /notebooks/storage/ravdess_mod/\n \n # arguments parser\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-p\", \"--path\", type=str, help=\"path to raw data\")\n ap.add_argument(\"-s\", \"--save\", type=str, help=\"path to save data after processing\")\n args = vars(ap.parse_args())\n \n # directory structure dict\n dir_dict = {'01' : 'neutral', '02' : 'calm', '03' : 'happy', '04' : 'sad', \n '05' : 'angry', '06' : 'fearful', '07' : 'disgust', '08' : 'surprised'}\n \n ip_path = args[\"path\"]\n save_path = args[\"save\"]\n \n if not os.path.isdir(save_path):\n os.makedirs(save_path)\n \n modify_data(ip_path, save_path, dir_dict)\n print(\"Data converted from .wav to .jpeg\")\n "
] |
[
[
"matplotlib.pyplot.savefig"
]
] |
akhambhati/dyne2
|
[
"d2f050b3d14ef429fc9c52821e87f1c9a52a521d"
] |
[
"dyne/adjacency/coherence.py"
] |
[
"\"\"\"\nCoherence pipes for quantifying signal similarity (i.e. connectivity)\n\nCreated by: Ankit Khambhati\n\nChange Log\n----------\n2016/03/06 - Implemented WelchCoh and MTCoh pipes\n\"\"\"\n\nfrom __future__ import division\nimport numpy as np\nfrom mtspec import mt_coherence, mtspec\nfrom scipy.signal import coherence\nimport matplotlib.pyplot as plt\n\nfrom ..errors import check_type\nfrom ..base import AdjacencyPipe\n\n\nclass WelchCoh(AdjacencyPipe):\n \"\"\"\n WelchCoh pipe for spectral coherence estimation using Welch's method\n\n Parameters\n ----------\n window: str\n Desired window to use. See Scipy get_window for a list of windows.\n\n secperseg: float\n Length of each segment in seconds. Recommended half of window length.\n\n pctoverlap: float (0<x<1)\n Percent overlap between segments. Recommended values of 50 pct.\n\n cf: list\n Frequency range over which to compute coherence [-NW+C, C+NW]\n \"\"\"\n\n def __init__(self, window, secperseg, pctoverlap, cf):\n # Standard param checks\n check_type(window, str)\n check_type(secperseg, float)\n check_type(pctoverlap, float)\n check_type(cf, list)\n if not len(cf) == 2:\n raise Exception('Must give a frequency range in list of length 2')\n if (pctoverlap > 1) or (pctoverlap < 0):\n raise Exception('Percent overlap must be a positive fraction')\n\n # Assign to instance\n self.window = window\n self.secperseg = secperseg\n self.pctoverlap = pctoverlap\n self.cf = cf\n\n def _pipe_as_flow(self, signal_packet):\n # Get signal_packet details\n hkey = signal_packet.keys()[0]\n ax_0_ix = signal_packet[hkey]['meta']['ax_0']['index']\n ax_1_ix = signal_packet[hkey]['meta']['ax_1']['index']\n signal = signal_packet[hkey]['data']\n fs = np.int(np.mean(1./np.diff(ax_0_ix)))\n\n # Assume undirected connectivity\n triu_ix, triu_iy = np.triu_indices(len(ax_1_ix), k=1)\n\n # Initialize association matrix\n adj = np.zeros((len(ax_1_ix), len(ax_1_ix)))\n\n # Derive signal segmenting for coherence estimation\n nperseg = int(self.secperseg*fs)\n noverlap = int(self.secperseg*fs*self.pctoverlap)\n\n freq, Cxy = coherence(signal[:, triu_ix],\n signal[:, triu_iy],\n fs=fs, window=self.window,\n nperseg=nperseg, noverlap=noverlap,\n axis=0)\n\n # Find closest frequency to the desired center frequency\n cf_idx = np.flatnonzero((freq >= self.cf[0]) &\n (freq <= self.cf[1]))\n\n # Store coherence in association matrix\n adj[triu_ix, triu_iy] = np.mean(Cxy[cf_idx, :], axis=0)\n adj += adj.T\n\n new_packet = {}\n new_packet[hkey] = {\n 'data': adj,\n 'meta': {\n 'ax_0': signal_packet[hkey]['meta']['ax_1'],\n 'ax_1': signal_packet[hkey]['meta']['ax_1'],\n 'time': {\n 'label': 'Time (sec)',\n 'index': np.float(ax_0_ix[-1])\n }\n }\n }\n\n return new_packet\n\n\nclass MTCoh(AdjacencyPipe):\n \"\"\"\n MTCoh pipe for spectral coherence estimation using\n multitaper methods\n\n Parameters\n ----------\n time_band: float\n The time half bandwidth resolution of the estimate [-NW, NW];\n such that resolution is 2*NW\n\n n_taper: int\n Number of Slepian sequences to use (Usually < 2*NW-1)\n\n cf: list\n Frequency range over which to compute coherence [-NW+C, C+NW]\n \"\"\"\n\n def __init__(self, time_band, n_taper, cf):\n # Standard param checks\n check_type(time_band, float)\n check_type(n_taper, int)\n check_type(cf, list)\n if n_taper >= 2*time_band:\n raise Exception('Number of tapers must be less than 2*time_band')\n if not len(cf) == 2:\n raise Exception('Must give a frequency range in list of length 2')\n\n # Assign instance parameters\n self.time_band = 
time_band\n self.n_taper = n_taper\n self.cf = cf\n\n def _pipe_as_flow(self, signal_packet):\n # Get signal_packet details\n hkey = signal_packet.keys()[0]\n ax_0_ix = signal_packet[hkey]['meta']['ax_0']['index']\n ax_1_ix = signal_packet[hkey]['meta']['ax_1']['index']\n signal = signal_packet[hkey]['data']\n fs = np.int(np.mean(1./np.diff(ax_0_ix)))\n\n # Assume undirected connectivity\n triu_ix, triu_iy = np.triu_indices(len(ax_1_ix), k=1)\n\n # Initialize association matrix\n adj = np.zeros((len(ax_1_ix), len(ax_1_ix)))\n\n # Compute all coherences\n for n1, n2 in zip(triu_ix, triu_iy):\n out = mt_coherence(1.0/fs,\n signal[:, n1],\n signal[:, n2],\n self.time_band,\n self.n_taper,\n int(len(ax_0_ix)/2.), 0.95,\n iadapt=1,\n cohe=True, freq=True)\n\n # Find closest frequency to the desired center frequency\n #cf_idx = np.argmin(np.abs(out['freq'] - self.cf))\n cf_idx = np.flatnonzero((out['freq'] >= self.cf[0]) &\n (out['freq'] <= self.cf[1]))\n\n # Store coherence in association matrix\n adj[n1, n2] = np.mean(out['cohe'][cf_idx])\n adj += adj.T\n\n new_packet = {}\n new_packet[hkey] = {\n 'data': adj,\n 'meta': {\n 'ax_0': signal_packet[hkey]['meta']['ax_1'],\n 'ax_1': signal_packet[hkey]['meta']['ax_1'],\n 'time': {\n 'label': 'Time (sec)',\n 'index': np.float(ax_0_ix[-1])\n }\n }\n }\n\n return new_packet\n"
] |
[
[
"scipy.signal.coherence",
"numpy.flatnonzero",
"numpy.mean",
"numpy.diff",
"numpy.float"
]
] |
jwillis0720/seaborn
|
[
"0dc93d01c78370e91ebdf72c888719fbbc6d1085"
] |
[
"seaborn/algorithms.py"
] |
[
"\"\"\"Algorithms to support fitting routines in seaborn plotting functions.\"\"\"\nimport numbers\nimport numpy as np\nimport warnings\nfrom math import sqrt\n\n\ndef wls_confidence_interval(data, z=1.96):\n \"\"\"Calculate the Wilson score confidence interval for a data set.\n\n data : array of 1-dimensional data, 1's or 0's\n z : float, z-score default=1.96 for a 95% confidence interval\n \"\"\"\n n = len(data)\n\n # counts the number of 1 or Trues over false or 0\n p = len([i for i in data if i]) / n\n\n denominator = 1 + z ** 2 / n\n centre_adjusted_probability = p + z * z / (2 * n)\n adjusted_standard_deviation = sqrt((p * (1 - p) + z * z / (4 * n)) / n)\n\n lower_bound = (centre_adjusted_probability - z * adjusted_standard_deviation) / denominator\n upper_bound = (centre_adjusted_probability + z * adjusted_standard_deviation) / denominator\n return (lower_bound, upper_bound)\n\n\ndef bootstrap(*args, **kwargs):\n \"\"\"Resample one or more arrays with replacement and store aggregate values.\n\n Positional arguments are a sequence of arrays to bootstrap along the first\n axis and pass to a summary function.\n\n Keyword arguments:\n n_boot : int, default 10000\n Number of iterations\n axis : int, default None\n Will pass axis to ``func`` as a keyword argument.\n units : array, default None\n Array of sampling unit IDs. When used the bootstrap resamples units\n and then observations within units instead of individual\n datapoints.\n func : string or callable, default np.mean\n Function to call on the args that are passed in. If string, tries\n to use as named method on numpy array.\n seed : Generator | SeedSequence | RandomState | int | None\n Seed for the random number generator; useful if you want\n reproducible resamples.\n\n Returns\n -------\n boot_dist: array\n array of bootstrapped statistic values\n\n \"\"\"\n # Ensure list of arrays are same length\n if len(np.unique(list(map(len, args)))) > 1:\n raise ValueError(\"All input arrays must have the same length\")\n n = len(args[0])\n\n # Default keyword arguments\n n_boot = kwargs.get(\"n_boot\", 10000)\n func = kwargs.get(\"func\", np.mean)\n axis = kwargs.get(\"axis\", None)\n units = kwargs.get(\"units\", None)\n random_seed = kwargs.get(\"random_seed\", None)\n if random_seed is not None:\n msg = \"`random_seed` has been renamed to `seed` and will be removed\"\n warnings.warn(msg)\n seed = kwargs.get(\"seed\", random_seed)\n if axis is None:\n func_kwargs = dict()\n else:\n func_kwargs = dict(axis=axis)\n\n # Initialize the resampler\n rng = _handle_random_seed(seed)\n\n # Coerce to arrays\n args = list(map(np.asarray, args))\n if units is not None:\n units = np.asarray(units)\n\n # Allow for a function that is the name of a method on an array\n if isinstance(func, str):\n\n def f(x):\n return getattr(x, func)()\n\n else:\n f = func\n\n # Handle numpy changes\n try:\n integers = rng.integers\n except AttributeError:\n integers = rng.randint\n\n # Do the bootstrap\n if units is not None:\n return _structured_bootstrap(args, n_boot, units, f, func_kwargs, integers)\n\n boot_dist = []\n for i in range(int(n_boot)):\n resampler = integers(0, n, n, dtype=np.intp) # intp is indexing dtype\n sample = [a.take(resampler, axis=0) for a in args]\n boot_dist.append(f(*sample, **func_kwargs))\n return np.array(boot_dist)\n\n\ndef _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):\n \"\"\"Resample units instead of datapoints.\"\"\"\n unique_units = np.unique(units)\n n_units = len(unique_units)\n\n args = [[a[units == 
unit] for unit in unique_units] for a in args]\n\n boot_dist = []\n for i in range(int(n_boot)):\n resampler = integers(0, n_units, n_units, dtype=np.intp)\n sample = [[a[i] for i in resampler] for a in args]\n lengths = map(len, sample[0])\n resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]\n sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)] for a in sample]\n sample = list(map(np.concatenate, sample))\n boot_dist.append(func(*sample, **func_kwargs))\n return np.array(boot_dist)\n\n\ndef _handle_random_seed(seed=None):\n \"\"\"Given a seed in one of many formats, return a random number generator.\n\n Generalizes across the numpy 1.17 changes, preferring newer functionality.\n\n \"\"\"\n if isinstance(seed, np.random.RandomState):\n rng = seed\n else:\n try:\n # General interface for seeding on numpy >= 1.17\n rng = np.random.default_rng(seed)\n except AttributeError:\n # We are on numpy < 1.17, handle options ourselves\n if isinstance(seed, (numbers.Integral, np.integer)):\n rng = np.random.RandomState(seed)\n elif seed is None:\n rng = np.random.RandomState()\n else:\n err = \"{} cannot be used to seed the randomn number generator\"\n raise ValueError(err.format(seed))\n return rng\n"
] |
[
[
"numpy.unique",
"numpy.asarray",
"numpy.array",
"numpy.random.RandomState",
"numpy.random.default_rng"
]
] |
lluo5779/Robo-Adviser
|
[
"43aa4b73bfc96e55ed664328330a930975596124"
] |
[
"server/models/portfolio/risk.py"
] |
[
"import numpy as np\nimport pandas as pd\n\n\ndef risk_prefs(horizon, aversion, cardinal, return_target, l, mu_bl1, mu_bl2, cov_bl1):\n\n if horizon is None:\n horizon = 10\n\n alpha = 0.05\n\n safe_target = float(((mu_bl1 + mu_bl2) / 2).mean())\n\n # set the variances for the first period estimates\n vars = pd.DataFrame(np.diag(cov_bl1), index=cov_bl1.index)\n\n risk_mul, turn_mul = l, 1\n\n if horizon <= 1:\n # select the 12 assets with the lowest variances\n risk_mul *= 2\n turn_mul *= 0.25\n alpha = 0.20\n\n elif horizon <= 5:\n risk_mul *= 0.75\n turn_mul *= 1\n alpha = 0.10\n\n else:\n risk_mul *= 0.25\n turn_mul *= 2\n\n\n print(\"RISK PREFERENCES\\n\\n\\n\")\n if return_target > safe_target:\n risk_mul *= 0.5\n\n if aversion == 1:\n cardinality = list(np.where(mu_bl1.rank() > len(mu_bl1) - cardinal, 1, 0).ravel())\n exposures = (0.02, 0.30)\n elif aversion == 2:\n cardinality = list(np.where(pd.DataFrame(np.divide(mu_bl1.values, vars.values).ravel()).rank() > len(mu_bl1) - cardinal, 1, 0).ravel())\n exposures = (0.04, 0.20)\n else:\n # NO SINGLE NAME STOCKS\n vars = pd.DataFrame(np.diag(cov_bl1.iloc[:-10, :-10]), index=mu_bl1[:-10].index)\n cardinality = list(np.where(vars.rank(ascending=True) > (len(mu_bl1[:-10])- cardinal), 1, 0).ravel()) + [0]*10\n exposures = (0.05, 0.15)\n\n risk_mul *= aversion\n\n return (alpha, alpha*1.02), (risk_mul, turn_mul), exposures, cardinality\n"
] |
[
[
"numpy.diag",
"numpy.divide"
]
] |
haziq9978/PythonChatbot
|
[
"8eb77140b32a4c6770dab20d4e26be03504ac5ee"
] |
[
"train.py"
] |
[
"import numpy as np\nimport random\nimport json\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom nltk_utils import bag_of_words, tokenize, stem\nfrom model import NeuralNet\n\nwith open('dataCombine.json', 'r') as f:\n intents = json.load(f)\n\nall_words = []\ntags = []\nxy = []\n# loop through each sentence in our intents patterns\nfor intent in intents['intents']:\n tag = intent['tag']\n # add to tag list\n tags.append(tag)\n for pattern in intent['patterns']:\n # tokenize each word in the sentence\n w = tokenize(pattern)\n # add to our words list\n all_words.extend(w)\n # add to xy pair\n xy.append((w, tag))\n\n# stem and lower each word\nignore_words = ['?', '.', '!']\nall_words = [stem(w) for w in all_words if w not in ignore_words]\n# remove duplicates and sort\nall_words = sorted(set(all_words))\ntags = sorted(set(tags))\n\nprint(len(xy), \"patterns\")\nprint(len(tags), \"tags:\", tags)\nprint(len(all_words), \"unique stemmed words:\", all_words)\n\n# create training data\nX_train = []\ny_train = []\nfor (pattern_sentence, tag) in xy:\n # X: bag of words for each pattern_sentence\n bag = bag_of_words(pattern_sentence, all_words)\n X_train.append(bag)\n # y: PyTorch CrossEntropyLoss needs only class labels, not one-hot\n label = tags.index(tag)\n y_train.append(label)\n\nX_train = np.array(X_train)\ny_train = np.array(y_train)\n\n# Hyper-parameters \nnum_epochs = 1000\nbatch_size = 8\nlearning_rate = 0.001\ninput_size = len(X_train[0])\nhidden_size = 8\noutput_size = len(tags)\nprint(input_size, output_size)\n\nclass ChatDataset(Dataset):\n\n def __init__(self):\n self.n_samples = len(X_train)\n self.x_data = X_train\n self.y_data = y_train\n\n # support indexing such that dataset[i] can be used to get i-th sample\n def __getitem__(self, index):\n return self.x_data[index], self.y_data[index]\n\n # we can call len(dataset) to return the size\n def __len__(self):\n return self.n_samples\n\ndataset = ChatDataset()\ntrain_loader = DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=0)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nmodel = NeuralNet(input_size, hidden_size, output_size).to(device)\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n# Train the model\nfor epoch in range(num_epochs):\n for (words, labels) in train_loader:\n words = words.to(device)\n labels = labels.to(dtype=torch.long).to(device)\n \n # Forward pass\n outputs = model(words)\n # if y would be one-hot, we must apply\n # labels = torch.max(labels, 1)[1]\n loss = criterion(outputs, labels)\n \n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if (epoch+1) % 10 == 0:\n print (f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.7f}')\n\n\nprint(f'final loss: {loss.item():.4f}')\n\ndata = {\n\"model_state\": model.state_dict(),\n\"input_size\": input_size,\n\"hidden_size\": hidden_size,\n\"output_size\": output_size,\n\"all_words\": all_words,\n\"tags\": tags\n}\n\nFILE = \"data.pth\"\ntorch.save(data, FILE)\n\nprint(f'training complete. file saved to {FILE}')\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader",
"torch.cuda.is_available",
"numpy.array",
"torch.save"
]
] |
whxf/nlp_api
|
[
"a63b67287e9a90381cac14bb1c5b723ccbeb14a3"
] |
[
"tools/similarity.py"
] |
[
"\"\"\"\n@author: Li Xi\n@file: similarity.py\n@time: 2019/10/30 15:37\n@desc:\n计算文本相似度:\n1. WordMoverDistance 基于词移距离的文本相似度计算 【比较文档的相似度】\n2. WordVectorSimilarity word-vector的句子相似度计算 【比较句子的相似度】\n注意事项:\n* 两种方法都需要输入句子分词之后的结果,类型需要时list\n* 为提升效率/效果,可对分词结果进行处理,如去除停用词等\n* 具体使用方法见文件的最下\n* 可自定义加载词向量文件\n\"\"\"\nimport os\n\nimport gensim\nimport numpy as np\n\nfrom tools.segment import LtpSegment\n\n\nclass WordMoverDistance(object):\n \"\"\"词移距离 Word Mover's Distance\"\"\"\n __vector_path = os.path.join(\"source\", \"sgns.renmin.word.bz2\")\n word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(__vector_path)\n word2vec_model.init_sims(replace=True) # normalizes vectors\n\n def distance(self, tokens1, tokens2):\n \"\"\"\n 计算词移距离\n !!!: 这里需要输入句子的分词后结果\n :param tokens1: [list]\n :param tokens2: [list]\n :return: score 值\n \"\"\"\n distance = self.word2vec_model.wmdistance(tokens1, tokens2)\n return distance\n\n\nclass WordVectorSimilarity(object):\n \"\"\"\n 基于word-vector的句子相似度计算(余弦相似度)\n !!!: 不仅可以使用词向量也可使用字向量\n \"\"\"\n __vector_path = os.path.join(\"source\", \"sgns.renmin.word.bz2\")\n word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(__vector_path)\n\n def __init__(self, vector_dim=300):\n \"\"\"\n\n :param vector_dim: 词向量的维度\n \"\"\"\n self.vector_dim = vector_dim\n\n def get_word_vector(self, word):\n \"\"\"\n 获取词的词向量,如果没有找到,返回全零的embedding\n :param word:\n :return:\n \"\"\"\n try:\n return self.word2vec_model[word]\n except:\n return np.zeros(self.vector_dim)\n\n def similarity_cosine(self, tokens1, tokens2):\n \"\"\"\n 计算句子的余弦相似度,其中句子向量等于字符向量求平均\n !!!: 这里需要输入句子的分词后结果\n :param tokens1:\n :param tokens2:\n :return:\n \"\"\"\n # 求 sentence1 的向量表示\n sentence1 = np.zeros(self.vector_dim)\n for _token in tokens1:\n sentence1 += self.get_word_vector(_token)\n sentence1 = sentence1 / len(tokens1)\n\n # 求 sentence2 的向量表示\n sentence2 = np.zeros(self.vector_dim)\n for _token in tokens2:\n sentence2 += self.get_word_vector(_token)\n sentence2 = sentence2 / len(tokens2)\n\n # 余弦相似度计算公式 sim = sum(a*b) / { sum[ sqrt(a^2) ] * sum[ sqrt(b^2) ] }\n cos1 = np.sum(sentence1 * sentence2)\n cos21 = np.sqrt(sum(sentence1 ** 2))\n cos22 = np.sqrt(sum(sentence2 ** 2))\n similarity = cos1 / float(cos21 * cos22)\n return similarity\n\n def distance(self, tokens1, tokens2):\n \"\"\"\n 计算 WordVectorSimilarity\n !!!: 这里需要输入句子的分词后结果\n :param tokens1:\n :param tokens2:\n :return:\n \"\"\"\n return self.similarity_cosine(tokens1, tokens2)\n\n\nif __name__ == \"__main__\":\n # -------- Begin WordMoverDistance Test --------\n # 初始化 WordMoverDistance\n sim = WordMoverDistance()\n # 初始化 LTP 用于分词\n ltp = LtpSegment()\n\n str1 = ltp.segment(\"我是中国人,我深爱着我的祖国\") # 分词结果为list\n str2 = ltp.segment(\"中国是我的母亲,我热爱她\")\n print(\"相似度:{}\".format(sim.distance(str1, str2)))\n # 相似度:0.5040331478972442\n\n str1 = ltp.segment(\"小勇硕士毕业于北京语言大学,目前在中科院软件所工作\")\n str2 = ltp.segment(\"大方博士就读于首都师范大学,未来不知道会在哪里上班\")\n print(\"相似度:{}\".format(sim.distance(str1, str2)))\n # 相似度:0.8857186341563674\n # -------- End WordMoverDistance Test --------\n\n # -------- Begin WordVectorSimilarity Test --------\n # 初始化 WordVectorSimilarity\n sim = WordVectorSimilarity()\n # 初始化 LTP 用于分词\n ltp = LtpSegment()\n\n str1 = ltp.segment(\"我是中国人,我深爱着我的祖国\") # 分词结果为list\n str2 = ltp.segment(\"中国是我的母亲,我热爱她\")\n print(\"相似度:{}\".format(sim.distance(str1, str2)))\n # 相似度:0.9048935250581785\n\n str1 = ltp.segment(\"小勇硕士毕业于北京语言大学,目前在中科院软件所工作\")\n str2 = ltp.segment(\"大方博士就读于首都师范大学,未来不知道会在哪里上班\")\n print(\"相似度:{}\".format(sim.distance(str1, str2)))\n # 
相似度:0.812708497722071\n # -------- End WordVectorSimilarity Test --------\n"
] |
[
[
"numpy.zeros",
"numpy.sum"
]
] |
AnonymousExplorer/Conditional-GANs-Pytorch
|
[
"6c15ec67217156d6f041e34efe29ab62f9ef7c7d"
] |
[
"train_InfoGAN1.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport json\n\nimport model\nimport numpy as np\nimport pylib\nimport PIL.Image as Image\nimport tensorboardX\nimport torch\nimport torchvision\nimport torchvision.datasets as dsets\nimport torchvision.transforms as tforms\nimport torchlib\n\n\n# ==============================================================================\n# = param =\n# ==============================================================================\n\n# command line arguments\nparser = argparse.ArgumentParser()\n# model\nparser.add_argument('--z_dim', dest='z_dim', type=int, default=100)\n# training\nparser.add_argument('--epoch', dest='epoch', type=int, default=50)\nparser.add_argument('--batch_size', dest='batch_size', type=int, default=64)\nparser.add_argument('--d_learning_rate', dest='d_learning_rate', type=float, default=0.0002)\nparser.add_argument('--g_learning_rate', dest='g_learning_rate', type=float, default=0.001)\nparser.add_argument('--n_d', dest='n_d', type=int, help='# of d updates per g update', default=1)\nparser.add_argument('--loss_mode', dest='loss_mode', choices=['gan', 'lsgan', 'wgan', 'hinge_v1', 'hinge_v2'], default='hinge_v2')\nparser.add_argument('--gp_mode', dest='gp_mode', choices=['none', 'dragan', 'wgan-gp'], default='none')\nparser.add_argument('--gp_coef', dest='gp_coef', type=float, default=1.0)\nparser.add_argument('--norm', dest='norm', choices=['none', 'batch_norm', 'instance_norm'], default='none')\nparser.add_argument('--weight_norm', dest='weight_norm', choices=['none', 'spectral_norm', 'weight_norm'], default='spectral_norm')\n# others\nparser.add_argument('--experiment_name', dest='experiment_name', default='InfoGAN1_default')\n\n# parse arguments\nargs = parser.parse_args()\n# model\nz_dim = args.z_dim\n# training\nepoch = args.epoch\nbatch_size = args.batch_size\nd_learning_rate = args.d_learning_rate\ng_learning_rate = args.g_learning_rate\nn_d = args.n_d\nloss_mode = args.loss_mode\ngp_mode = args.gp_mode\ngp_coef = args.gp_coef\nnorm = args.norm\nweight_norm = args.weight_norm\n# ohters\nexperiment_name = args.experiment_name\n\n# save settings\npylib.mkdir('./output/%s' % experiment_name)\nwith open('./output/%s/setting.txt' % experiment_name, 'w') as f:\n f.write(json.dumps(vars(args), indent=4, separators=(',', ':')))\n\n# others\nuse_gpu = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_gpu else \"cpu\")\nc_dim = 10\n\n\n# ==============================================================================\n# = setting =\n# ==============================================================================\n\n# data\ntransform = tforms.Compose(\n [tforms.Scale(size=(32, 32), interpolation=Image.BICUBIC),\n tforms.ToTensor(),\n tforms.Lambda(lambda x: torch.cat((x, x, x), dim=0)),\n tforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3)]\n)\ntrain_loader = torch.utils.data.DataLoader(\n dataset=dsets.FashionMNIST('data/FashionMNIST', train=True, download=True, transform=transform),\n batch_size=batch_size,\n shuffle=True,\n num_workers=4,\n pin_memory=use_gpu,\n drop_last=True\n)\n\n# model\nD = model.DiscriminatorInfoGAN1(x_dim=3, c_dim=c_dim, norm=norm, weight_norm=weight_norm).to(device)\nG = model.GeneratorInfoGAN1(z_dim=z_dim, c_dim=c_dim).to(device)\n\n# gan loss function\nd_loss_fn, g_loss_fn = model.get_losses_fn(loss_mode)\n\n# optimizer\nd_optimizer = torch.optim.Adam(D.parameters(), lr=d_learning_rate, betas=(0.5, 
0.999))\ng_optimizer = torch.optim.Adam(G.parameters(), lr=g_learning_rate, betas=(0.5, 0.999))\n\n\n# ==============================================================================\n# = train =\n# ==============================================================================\n\n# load checkpoint\nckpt_dir = './output/%s/checkpoints' % experiment_name\npylib.mkdir(ckpt_dir)\ntry:\n ckpt = torchlib.load_checkpoint(ckpt_dir)\n start_ep = ckpt['epoch']\n D.load_state_dict(ckpt['D'])\n G.load_state_dict(ckpt['G'])\n d_optimizer.load_state_dict(ckpt['d_optimizer'])\n g_optimizer.load_state_dict(ckpt['g_optimizer'])\nexcept:\n print(' [*] No checkpoint!')\n start_ep = 0\n\n# writer\nwriter = tensorboardX.SummaryWriter('./output/%s/summaries' % experiment_name)\n\n# run\nz_sample = torch.randn(c_dim * 10, z_dim).to(device)\nc_sample = torch.tensor(np.concatenate([np.eye(c_dim)] * 10), dtype=z_sample.dtype).to(device)\nfor ep in range(start_ep, epoch):\n for i, (x, _) in enumerate(train_loader):\n step = ep * len(train_loader) + i + 1\n D.train()\n G.train()\n\n # train D and Q\n x = x.to(device)\n c_dense = torch.tensor(np.random.randint(c_dim, size=[batch_size])).to(device)\n z = torch.randn(batch_size, z_dim).to(device)\n c = torch.tensor(np.eye(c_dim)[c_dense.cpu().numpy()], dtype=z.dtype).to(device)\n\n x_f = G(z, c).detach()\n x_gan_logit, _ = D(x)\n x_f_gan_logit, x_f_c_logit = D(x_f)\n\n d_x_gan_loss, d_x_f_gan_loss = d_loss_fn(x_gan_logit, x_f_gan_logit)\n d_x_f_c_logit = torch.nn.functional.cross_entropy(x_f_c_logit, c_dense)\n gp = model.gradient_penalty(D, x, x_f, mode=gp_mode)\n d_loss = d_x_gan_loss + d_x_f_gan_loss + gp * gp_coef + d_x_f_c_logit\n\n D.zero_grad()\n d_loss.backward()\n d_optimizer.step()\n\n writer.add_scalar('D/d_gan_loss', (d_x_gan_loss + d_x_f_gan_loss).data.cpu().numpy(), global_step=step)\n writer.add_scalar('D/d_q_loss', d_x_f_c_logit.data.cpu().numpy(), global_step=step)\n writer.add_scalar('D/gp', gp.data.cpu().numpy(), global_step=step)\n\n # train G\n if step % n_d == 0:\n c_dense = torch.tensor(np.random.randint(c_dim, size=[batch_size])).to(device)\n c = torch.tensor(np.eye(c_dim)[c_dense.cpu().numpy()], dtype=z.dtype).to(device)\n z = torch.randn(batch_size, z_dim).to(device)\n\n x_f = G(z, c)\n x_f_gan_logit, x_f_c_logit = D(x_f)\n\n g_gan_loss = g_loss_fn(x_f_gan_logit)\n d_x_f_c_logit = torch.nn.functional.cross_entropy(x_f_c_logit, c_dense)\n g_loss = g_gan_loss + d_x_f_c_logit\n\n G.zero_grad()\n g_loss.backward()\n g_optimizer.step()\n\n writer.add_scalar('G/g_gan_loss', g_gan_loss.data.cpu().numpy(), global_step=step)\n writer.add_scalar('G/g_q_loss', d_x_f_c_logit.data.cpu().numpy(), global_step=step)\n\n # display\n if step % 1 == 0:\n print(\"Epoch: (%3d) (%5d/%5d)\" % (ep, i + 1, len(train_loader)))\n\n # sample\n if step % 100 == 0:\n G.eval()\n x_f_sample = (G(z_sample, c_sample) + 1) / 2.0\n\n save_dir = './output/%s/sample_training' % experiment_name\n pylib.mkdir(save_dir)\n torchvision.utils.save_image(x_f_sample, '%s/Epoch_(%d)_(%dof%d).jpg' % (save_dir, ep, i + 1, len(train_loader)), nrow=10)\n\n torchlib.save_checkpoint({'epoch': ep + 1,\n 'D': D.state_dict(),\n 'G': G.state_dict(),\n 'd_optimizer': d_optimizer.state_dict(),\n 'g_optimizer': g_optimizer.state_dict()},\n '%s/Epoch_(%d).ckpt' % (ckpt_dir, ep + 1),\n max_keep=2)\n"
] |
[
[
"torch.cat",
"torch.randn",
"numpy.eye",
"torch.nn.functional.cross_entropy",
"torch.cuda.is_available",
"torch.device",
"numpy.random.randint"
]
] |
zooechiu/pyro2
|
[
"b0ca4aa7b1b0f0d445c6a8d0ab63fcc0bc8a431c"
] |
[
"compressible_sr/problems/rt.py"
] |
[
"from __future__ import print_function\n\nimport numpy as np\n\nimport sys\nimport mesh.patch as patch\nimport compressible_sr.eos as eos\nfrom util import msg\n\n\ndef init_data(my_data, rp):\n \"\"\" initialize the rt problem \"\"\"\n\n msg.bold(\"initializing the rt problem...\")\n\n # make sure that we are passed a valid patch object\n if not isinstance(my_data, patch.CellCenterData2d):\n print(\"ERROR: patch invalid in rt.py\")\n print(my_data.__class__)\n sys.exit()\n\n # get the density, momenta, and energy as separate variables\n dens = my_data.get_var(\"density\")\n xmom = my_data.get_var(\"x-momentum\")\n ymom = my_data.get_var(\"y-momentum\")\n ener = my_data.get_var(\"energy\")\n\n gamma = rp.get_param(\"eos.gamma\")\n\n grav = rp.get_param(\"compressible.grav\")\n\n dens1 = rp.get_param(\"rt.dens1\")\n dens2 = rp.get_param(\"rt.dens2\")\n p0 = rp.get_param(\"rt.p0\")\n amp = rp.get_param(\"rt.amp\")\n sigma = rp.get_param(\"rt.sigma\")\n\n # initialize the components, remember, that ener here is\n # rho*eint + 0.5*rho*v**2, where eint is the specific\n # internal energy (erg/g)\n xmom[:, :] = 0.0\n ymom[:, :] = 0.0\n dens[:, :] = 0.0\n\n # set the density to be stratified in the y-direction\n myg = my_data.grid\n\n ycenter = 0.5*(myg.ymin + myg.ymax)\n\n p = myg.scratch_array()\n\n p[:, :] = p0\n dens[:, :] = dens1\n\n for j in range(myg.jlo, myg.jhi+1):\n if (myg.y[j] < ycenter):\n dens[:, j] = dens1\n p[:, j] = p0 + dens1*grav*myg.y[j]\n\n else:\n dens[:, j] = dens2\n p[:, j] = p0 + dens1*grav*ycenter + dens2*grav*(myg.y[j] - ycenter)\n\n ymom[:, :] = amp*np.cos(2.0*np.pi*myg.x2d/(myg.xmax-myg.xmin))*np.exp(-(myg.y2d-ycenter)**2/sigma**2)\n\n rhoh = eos.rhoh_from_rho_p(gamma, dens, p)\n\n u = xmom\n v = ymom\n W = 1./np.sqrt(1-u**2-v**2)\n dens[:, :] *= W\n xmom[:, :] *= rhoh[:, :]*W**2\n ymom[:, :] *= rhoh[:, :]*W**2\n\n ener[:, :] = rhoh[:, :]*W**2 - p - dens[:, :]\n\n # set the energy (P = cs2*dens)\n # ener[:, :] = p[:, :]/(gamma - 1.0) + \\\n # 0.5*(xmom[:, :]**2 + ymom[:, :]**2)/dens[:, :]\n\n\ndef finalize():\n \"\"\" print out any information to the user at the end of the run \"\"\"\n pass\n"
] |
[
[
"numpy.cos",
"numpy.exp",
"numpy.sqrt"
]
] |
SaadChaouki/ml-eli5-cli5
|
[
"625a69edadf4737e41c58193873cf8a54273d7f0"
] |
[
"visualisations/linear_regression.py"
] |
[
"from supervised.regression.linearRegression import LinearRegression\nfrom visualisations.color_palette import two_colors\nfrom deep_learning.loss import MSELoss\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import make_regression\n\nfrom matplotlib.animation import FuncAnimation\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nimport numpy as np\nimport argparse\n\nmatplotlib.use(\"TkAgg\")\n\n\ndef update(i):\n y_pred = np.array([x for _, x in sorted(zip(X_train, model.error[i]))])\n plt.title(f'Iteration: {i + 1} | MSE: {round(MSELoss()(y_train, model.error[i]), 2)}')\n line.set_ydata(y_pred)\n\n\nif __name__ == '__main__':\n\n # Argument parsing.\n parser = argparse.ArgumentParser(description='Visualise a custom Linear Regression model in training.')\n parser.add_argument('--max_iter', type=int, help='Maximum number of iterations.', default=100)\n parser.add_argument('--random_state', type=int, help='Random state for data generation.', default=42)\n parser.add_argument('--n_samples', type=int, help='Number of data points.', default=500)\n parser.add_argument('--test_size', type=float, help='Test set size.', default=.2)\n parser.add_argument('--lr', type=float, help='Learning Rate.', default=.1)\n args = parser.parse_args()\n\n # Maximum iterations.\n max_iterations = args.max_iter\n\n # Generate regression data.\n X, y = make_regression(n_features=1, n_samples=args.n_samples, n_informative=1, noise=30,\n random_state=args.random_state, bias=500, tail_strength=1)\n\n # Train - Test Split.\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=args.test_size, random_state=42)\n\n # Model and Predictions\n model = LinearRegression(learning_rate=args.lr, iterations=max_iterations)\n model.fit(X_train, y_train)\n\n # Plot\n fig, ax = plt.subplots(figsize=(15, 6), dpi=80)\n fig.suptitle('Linear Regression', fontsize=20)\n\n # Plotting training and testing data.\n ax.scatter(X_train, y_train, color=two_colors[0], label='Train Data')\n ax.scatter(X_test, y_test, color=two_colors[1], label='Test Data')\n\n # Plot first iteration line.\n y_pred = np.array([x for _, x in sorted(zip(X_train, model.error[0]))])\n X_train_sorted = np.array(sorted(X_train))\n line, = ax.plot(X_train_sorted, y_pred, color='black', linewidth=2, label=\"Prediction\")\n\n # Labels and legend\n plt.xlabel('Feature')\n plt.ylabel('Target')\n plt.legend(loc='lower right')\n\n # Animation\n animation = FuncAnimation(fig, update, frames=max_iterations, interval=1, repeat=False)\n\n # Show plot\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"sklearn.model_selection.train_test_split",
"sklearn.datasets.make_regression",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
gaocegege/ray
|
[
"c852213b8349b6b9e9e7353573e2259a1b9ef925"
] |
[
"python/ray/tests/test_basic.py"
] |
[
"# coding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nfrom concurrent.futures import ThreadPoolExecutor\nimport json\nimport logging\nfrom multiprocessing import Process\nimport os\nimport random\nimport re\nimport setproctitle\nimport shutil\nimport six\nimport socket\nimport string\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nimport numpy as np\nimport pickle\nimport pytest\n\nimport ray\nimport ray.tests.cluster_utils\nimport ray.tests.utils\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_simple_serialization(ray_start_regular):\n primitive_objects = [\n # Various primitive types.\n 0,\n 0.0,\n 0.9,\n 1 << 62,\n 1 << 999,\n \"a\",\n string.printable,\n \"\\u262F\",\n u\"hello world\",\n u\"\\xff\\xfe\\x9c\\x001\\x000\\x00\",\n None,\n True,\n False,\n [],\n (),\n {},\n type,\n int,\n set(),\n # Collections types.\n collections.Counter([np.random.randint(0, 10) for _ in range(100)]),\n collections.OrderedDict([(\"hello\", 1), (\"world\", 2)]),\n collections.defaultdict(lambda: 0, [(\"hello\", 1), (\"world\", 2)]),\n collections.defaultdict(lambda: [], [(\"hello\", 1), (\"world\", 2)]),\n collections.deque([1, 2, 3, \"a\", \"b\", \"c\", 3.5]),\n # Numpy dtypes.\n np.int8(3),\n np.int32(4),\n np.int64(5),\n np.uint8(3),\n np.uint32(4),\n np.uint64(5),\n np.float32(1.9),\n np.float64(1.9),\n ]\n\n if sys.version_info < (3, 0):\n primitive_objects.append(long(0)) # noqa: E501,F821\n\n composite_objects = (\n [[obj]\n for obj in primitive_objects] + [(obj, )\n for obj in primitive_objects] + [{\n (): obj\n } for obj in primitive_objects])\n\n @ray.remote\n def f(x):\n return x\n\n # Check that we can pass arguments by value to remote functions and\n # that they are uncorrupted.\n for obj in primitive_objects + composite_objects:\n new_obj_1 = ray.get(f.remote(obj))\n new_obj_2 = ray.get(ray.put(obj))\n assert obj == new_obj_1\n assert obj == new_obj_2\n # TODO(rkn): The numpy dtypes currently come back as regular integers\n # or floats.\n if type(obj).__module__ != \"numpy\":\n assert type(obj) == type(new_obj_1)\n assert type(obj) == type(new_obj_2)\n\n\ndef test_complex_serialization(ray_start_regular):\n def assert_equal(obj1, obj2):\n module_numpy = (type(obj1).__module__ == np.__name__\n or type(obj2).__module__ == np.__name__)\n if module_numpy:\n empty_shape = ((hasattr(obj1, \"shape\") and obj1.shape == ())\n or (hasattr(obj2, \"shape\") and obj2.shape == ()))\n if empty_shape:\n # This is a special case because currently\n # np.testing.assert_equal fails because we do not properly\n # handle different numerical types.\n assert obj1 == obj2, (\"Objects {} and {} are \"\n \"different.\".format(obj1, obj2))\n else:\n np.testing.assert_equal(obj1, obj2)\n elif hasattr(obj1, \"__dict__\") and hasattr(obj2, \"__dict__\"):\n special_keys = [\"_pytype_\"]\n assert (set(list(obj1.__dict__.keys()) + special_keys) == set(\n list(obj2.__dict__.keys()) + special_keys)), (\n \"Objects {} and {} are different.\".format(obj1, obj2))\n for key in obj1.__dict__.keys():\n if key not in special_keys:\n assert_equal(obj1.__dict__[key], obj2.__dict__[key])\n elif type(obj1) is dict or type(obj2) is dict:\n assert_equal(obj1.keys(), obj2.keys())\n for key in obj1.keys():\n assert_equal(obj1[key], obj2[key])\n elif type(obj1) is list or type(obj2) is list:\n assert len(obj1) == len(obj2), (\"Objects {} and {} are lists with \"\n \"different lengths.\".format(\n obj1, 
obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n elif type(obj1) is tuple or type(obj2) is tuple:\n assert len(obj1) == len(obj2), (\"Objects {} and {} are tuples \"\n \"with different lengths.\".format(\n obj1, obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n elif (ray.serialization.is_named_tuple(type(obj1))\n or ray.serialization.is_named_tuple(type(obj2))):\n assert len(obj1) == len(obj2), (\n \"Objects {} and {} are named \"\n \"tuples with different lengths.\".format(obj1, obj2))\n for i in range(len(obj1)):\n assert_equal(obj1[i], obj2[i])\n else:\n assert obj1 == obj2, \"Objects {} and {} are different.\".format(\n obj1, obj2)\n\n if sys.version_info >= (3, 0):\n long_extras = [0, np.array([[\"hi\", u\"hi\"], [1.3, 1]])]\n else:\n\n long_extras = [\n long(0), # noqa: E501,F821\n np.array([\n [\"hi\", u\"hi\"],\n [1.3, long(1)] # noqa: E501,F821\n ])\n ]\n\n PRIMITIVE_OBJECTS = [\n 0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], \"a\",\n string.printable, \"\\u262F\", u\"hello world\",\n u\"\\xff\\xfe\\x9c\\x001\\x000\\x00\", None, True, False, [], (), {},\n np.int8(3),\n np.int32(4),\n np.int64(5),\n np.uint8(3),\n np.uint32(4),\n np.uint64(5),\n np.float32(1.9),\n np.float64(1.9),\n np.zeros([100, 100]),\n np.random.normal(size=[100, 100]),\n np.array([\"hi\", 3]),\n np.array([\"hi\", 3], dtype=object)\n ] + long_extras\n\n COMPLEX_OBJECTS = [\n [[[[[[[[[[[[]]]]]]]]]]]],\n {\n \"obj{}\".format(i): np.random.normal(size=[100, 100])\n for i in range(10)\n },\n # {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {\n # (): {(): {}}}}}}}}}}}}},\n (\n (((((((((), ), ), ), ), ), ), ), ), ),\n {\n \"a\": {\n \"b\": {\n \"c\": {\n \"d\": {}\n }\n }\n }\n },\n ]\n\n class Foo(object):\n def __init__(self, value=0):\n self.value = value\n\n def __hash__(self):\n return hash(self.value)\n\n def __eq__(self, other):\n return other.value == self.value\n\n class Bar(object):\n def __init__(self):\n for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):\n setattr(self, \"field{}\".format(i), val)\n\n class Baz(object):\n def __init__(self):\n self.foo = Foo()\n self.bar = Bar()\n\n def method(self, arg):\n pass\n\n class Qux(object):\n def __init__(self):\n self.objs = [Foo(), Bar(), Baz()]\n\n class SubQux(Qux):\n def __init__(self):\n Qux.__init__(self)\n\n class CustomError(Exception):\n pass\n\n Point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n NamedTupleExample = collections.namedtuple(\n \"Example\", \"field1, field2, field3, field4, field5\")\n\n CUSTOM_OBJECTS = [\n Exception(\"Test object.\"),\n CustomError(),\n Point(11, y=22),\n Foo(),\n Bar(),\n Baz(), # Qux(), SubQux(),\n NamedTupleExample(1, 1.0, \"hi\", np.zeros([3, 5]), [1, 2, 3]),\n ]\n\n # Test dataclasses in Python 3.7.\n if sys.version_info >= (3, 7):\n from dataclasses import make_dataclass\n\n DataClass0 = make_dataclass(\"DataClass0\", [(\"number\", int)])\n\n CUSTOM_OBJECTS.append(DataClass0(number=3))\n\n class CustomClass(object):\n def __init__(self, value):\n self.value = value\n\n DataClass1 = make_dataclass(\"DataClass1\", [(\"custom\", CustomClass)])\n\n class DataClass2(DataClass1):\n @classmethod\n def from_custom(cls, data):\n custom = CustomClass(data)\n return cls(custom)\n\n def __reduce__(self):\n return (self.from_custom, (self.custom.value, ))\n\n CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))\n\n BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS\n\n LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]\n 
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]\n # The check that type(obj).__module__ != \"numpy\" should be unnecessary, but\n # otherwise this seems to fail on Mac OS X on Travis.\n DICT_OBJECTS = ([{\n obj: obj\n } for obj in PRIMITIVE_OBJECTS if (\n obj.__hash__ is not None and type(obj).__module__ != \"numpy\")] + [{\n 0: obj\n } for obj in BASE_OBJECTS] + [{\n Foo(123): Foo(456)\n }])\n\n RAY_TEST_OBJECTS = (\n BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS)\n\n @ray.remote\n def f(x):\n return x\n\n # Check that we can pass arguments by value to remote functions and\n # that they are uncorrupted.\n for obj in RAY_TEST_OBJECTS:\n assert_equal(obj, ray.get(f.remote(obj)))\n assert_equal(obj, ray.get(ray.put(obj)))\n\n\ndef test_nested_functions(ray_start_regular):\n # Make sure that remote functions can use other values that are defined\n # after the remote function but before the first function invocation.\n @ray.remote\n def f():\n return g(), ray.get(h.remote())\n\n def g():\n return 1\n\n @ray.remote\n def h():\n return 2\n\n assert ray.get(f.remote()) == (1, 2)\n\n # Test a remote function that recursively calls itself.\n\n @ray.remote\n def factorial(n):\n if n == 0:\n return 1\n return n * ray.get(factorial.remote(n - 1))\n\n assert ray.get(factorial.remote(0)) == 1\n assert ray.get(factorial.remote(1)) == 1\n assert ray.get(factorial.remote(2)) == 2\n assert ray.get(factorial.remote(3)) == 6\n assert ray.get(factorial.remote(4)) == 24\n assert ray.get(factorial.remote(5)) == 120\n\n # Test remote functions that recursively call each other.\n\n @ray.remote\n def factorial_even(n):\n assert n % 2 == 0\n if n == 0:\n return 1\n return n * ray.get(factorial_odd.remote(n - 1))\n\n @ray.remote\n def factorial_odd(n):\n assert n % 2 == 1\n return n * ray.get(factorial_even.remote(n - 1))\n\n assert ray.get(factorial_even.remote(4)) == 24\n assert ray.get(factorial_odd.remote(5)) == 120\n\n\ndef test_ray_recursive_objects(ray_start_regular):\n class ClassA(object):\n pass\n\n # Make a list that contains itself.\n lst = []\n lst.append(lst)\n # Make an object that contains itself as a field.\n a1 = ClassA()\n a1.field = a1\n # Make two objects that contain each other as fields.\n a2 = ClassA()\n a3 = ClassA()\n a2.field = a3\n a3.field = a2\n # Make a dictionary that contains itself.\n d1 = {}\n d1[\"key\"] = d1\n # Create a list of recursive objects.\n recursive_objects = [lst, a1, a2, a3, d1]\n\n # Check that exceptions are thrown when we serialize the recursive\n # objects.\n for obj in recursive_objects:\n with pytest.raises(Exception):\n ray.put(obj)\n\n\ndef test_passing_arguments_by_value_out_of_the_box(ray_start_regular):\n @ray.remote\n def f(x):\n return x\n\n # Test passing lambdas.\n\n def temp():\n return 1\n\n assert ray.get(f.remote(temp))() == 1\n assert ray.get(f.remote(lambda x: x + 1))(3) == 4\n\n # Test sets.\n assert ray.get(f.remote(set())) == set()\n s = {1, (1, 2, \"hi\")}\n assert ray.get(f.remote(s)) == s\n\n # Test types.\n assert ray.get(f.remote(int)) == int\n assert ray.get(f.remote(float)) == float\n assert ray.get(f.remote(str)) == str\n\n class Foo(object):\n def __init__(self):\n pass\n\n # Make sure that we can put and get a custom type. 
Note that the result\n # won't be \"equal\" to Foo.\n ray.get(ray.put(Foo))\n\n\ndef test_putting_object_that_closes_over_object_id(ray_start_regular):\n # This test is here to prevent a regression of\n # https://github.com/ray-project/ray/issues/1317.\n\n class Foo(object):\n def __init__(self):\n self.val = ray.put(0)\n\n def method(self):\n f\n\n f = Foo()\n ray.put(f)\n\n\ndef test_put_get(shutdown_only):\n ray.init(num_cpus=0)\n\n for i in range(100):\n value_before = i * 10**6\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = i * 10**6 * 1.0\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = \"h\" * i\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n for i in range(100):\n value_before = [1] * i\n objectid = ray.put(value_before)\n value_after = ray.get(objectid)\n assert value_before == value_after\n\n\ndef test_custom_serializers(ray_start_regular):\n class Foo(object):\n def __init__(self):\n self.x = 3\n\n def custom_serializer(obj):\n return 3, \"string1\", type(obj).__name__\n\n def custom_deserializer(serialized_obj):\n return serialized_obj, \"string2\"\n\n ray.register_custom_serializer(\n Foo, serializer=custom_serializer, deserializer=custom_deserializer)\n\n assert ray.get(ray.put(Foo())) == ((3, \"string1\", Foo.__name__), \"string2\")\n\n class Bar(object):\n def __init__(self):\n self.x = 3\n\n ray.register_custom_serializer(\n Bar, serializer=custom_serializer, deserializer=custom_deserializer)\n\n @ray.remote\n def f():\n return Bar()\n\n assert ray.get(f.remote()) == ((3, \"string1\", Bar.__name__), \"string2\")\n\n\ndef test_serialization_final_fallback(ray_start_regular):\n pytest.importorskip(\"catboost\")\n # This test will only run when \"catboost\" is installed.\n from catboost import CatBoostClassifier\n\n model = CatBoostClassifier(\n iterations=2,\n depth=2,\n learning_rate=1,\n loss_function=\"Logloss\",\n logging_level=\"Verbose\")\n\n reconstructed_model = ray.get(ray.put(model))\n assert set(model.get_params().items()) == set(\n reconstructed_model.get_params().items())\n\n\ndef test_register_class(ray_start_2_cpus):\n # Check that putting an object of a class that has not been registered\n # throws an exception.\n class TempClass(object):\n pass\n\n ray.get(ray.put(TempClass()))\n\n # Test passing custom classes into remote functions from the driver.\n @ray.remote\n def f(x):\n return x\n\n class Foo(object):\n def __init__(self, value=0):\n self.value = value\n\n def __hash__(self):\n return hash(self.value)\n\n def __eq__(self, other):\n return other.value == self.value\n\n foo = ray.get(f.remote(Foo(7)))\n assert foo == Foo(7)\n\n regex = re.compile(r\"\\d+\\.\\d*\")\n new_regex = ray.get(f.remote(regex))\n # This seems to fail on the system Python 3 that comes with\n # Ubuntu, so it is commented out for now:\n # assert regex == new_regex\n # Instead, we do this:\n assert regex.pattern == new_regex.pattern\n\n class TempClass1(object):\n def __init__(self):\n self.value = 1\n\n # Test returning custom classes created on workers.\n @ray.remote\n def g():\n class TempClass2(object):\n def __init__(self):\n self.value = 2\n\n return TempClass1(), TempClass2()\n\n object_1, object_2 = ray.get(g.remote())\n assert object_1.value == 1\n assert object_2.value == 2\n\n # Test exporting custom class 
definitions from one worker to another\n # when the worker is blocked in a get.\n class NewTempClass(object):\n def __init__(self, value):\n self.value = value\n\n @ray.remote\n def h1(x):\n return NewTempClass(x)\n\n @ray.remote\n def h2(x):\n return ray.get(h1.remote(x))\n\n assert ray.get(h2.remote(10)).value == 10\n\n # Test registering multiple classes with the same name.\n @ray.remote(num_return_vals=3)\n def j():\n class Class0(object):\n def method0(self):\n pass\n\n c0 = Class0()\n\n class Class0(object):\n def method1(self):\n pass\n\n c1 = Class0()\n\n class Class0(object):\n def method2(self):\n pass\n\n c2 = Class0()\n\n return c0, c1, c2\n\n results = []\n for _ in range(5):\n results += j.remote()\n for i in range(len(results) // 3):\n c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])\n\n c0.method0()\n c1.method1()\n c2.method2()\n\n assert not hasattr(c0, \"method1\")\n assert not hasattr(c0, \"method2\")\n assert not hasattr(c1, \"method0\")\n assert not hasattr(c1, \"method2\")\n assert not hasattr(c2, \"method0\")\n assert not hasattr(c2, \"method1\")\n\n @ray.remote\n def k():\n class Class0(object):\n def method0(self):\n pass\n\n c0 = Class0()\n\n class Class0(object):\n def method1(self):\n pass\n\n c1 = Class0()\n\n class Class0(object):\n def method2(self):\n pass\n\n c2 = Class0()\n\n return c0, c1, c2\n\n results = ray.get([k.remote() for _ in range(5)])\n for c0, c1, c2 in results:\n c0.method0()\n c1.method1()\n c2.method2()\n\n assert not hasattr(c0, \"method1\")\n assert not hasattr(c0, \"method2\")\n assert not hasattr(c1, \"method0\")\n assert not hasattr(c1, \"method2\")\n assert not hasattr(c2, \"method0\")\n assert not hasattr(c2, \"method1\")\n\n\ndef test_keyword_args(ray_start_regular):\n @ray.remote\n def keyword_fct1(a, b=\"hello\"):\n return \"{} {}\".format(a, b)\n\n @ray.remote\n def keyword_fct2(a=\"hello\", b=\"world\"):\n return \"{} {}\".format(a, b)\n\n @ray.remote\n def keyword_fct3(a, b, c=\"hello\", d=\"world\"):\n return \"{} {} {} {}\".format(a, b, c, d)\n\n x = keyword_fct1.remote(1)\n assert ray.get(x) == \"1 hello\"\n x = keyword_fct1.remote(1, \"hi\")\n assert ray.get(x) == \"1 hi\"\n x = keyword_fct1.remote(1, b=\"world\")\n assert ray.get(x) == \"1 world\"\n x = keyword_fct1.remote(a=1, b=\"world\")\n assert ray.get(x) == \"1 world\"\n\n x = keyword_fct2.remote(a=\"w\", b=\"hi\")\n assert ray.get(x) == \"w hi\"\n x = keyword_fct2.remote(b=\"hi\", a=\"w\")\n assert ray.get(x) == \"w hi\"\n x = keyword_fct2.remote(a=\"w\")\n assert ray.get(x) == \"w world\"\n x = keyword_fct2.remote(b=\"hi\")\n assert ray.get(x) == \"hello hi\"\n x = keyword_fct2.remote(\"w\")\n assert ray.get(x) == \"w world\"\n x = keyword_fct2.remote(\"w\", \"hi\")\n assert ray.get(x) == \"w hi\"\n\n x = keyword_fct3.remote(0, 1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, b=1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(a=0, b=1, c=\"w\", d=\"hi\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, 1, d=\"hi\", c=\"w\")\n assert ray.get(x) == \"0 1 w hi\"\n x = keyword_fct3.remote(0, 1, c=\"w\")\n assert ray.get(x) == \"0 1 w world\"\n x = keyword_fct3.remote(0, 1, d=\"hi\")\n assert ray.get(x) == \"0 1 hello hi\"\n x = keyword_fct3.remote(0, 1)\n assert ray.get(x) == \"0 1 hello world\"\n x = keyword_fct3.remote(a=0, b=1)\n assert ray.get(x) == \"0 1 hello world\"\n\n # Check that we cannot pass invalid keyword arguments to functions.\n @ray.remote\n def f1():\n 
return\n\n @ray.remote\n def f2(x, y=0, z=0):\n return\n\n # Make sure we get an exception if too many arguments are passed in.\n with pytest.raises(Exception):\n f1.remote(3)\n\n with pytest.raises(Exception):\n f1.remote(x=3)\n\n with pytest.raises(Exception):\n f2.remote(0, w=0)\n\n with pytest.raises(Exception):\n f2.remote(3, x=3)\n\n # Make sure we get an exception if too many arguments are passed in.\n with pytest.raises(Exception):\n f2.remote(1, 2, 3, 4)\n\n @ray.remote\n def f3(x):\n return x\n\n assert ray.get(f3.remote(4)) == 4\n\n\ndef test_variable_number_of_args(shutdown_only):\n @ray.remote\n def varargs_fct1(*a):\n return \" \".join(map(str, a))\n\n @ray.remote\n def varargs_fct2(a, *b):\n return \" \".join(map(str, b))\n\n try:\n\n @ray.remote\n def kwargs_throw_exception(**c):\n return ()\n\n kwargs_exception_thrown = False\n except Exception:\n kwargs_exception_thrown = True\n\n ray.init(num_cpus=1)\n\n x = varargs_fct1.remote(0, 1, 2)\n assert ray.get(x) == \"0 1 2\"\n x = varargs_fct2.remote(0, 1, 2)\n assert ray.get(x) == \"1 2\"\n\n assert kwargs_exception_thrown\n\n @ray.remote\n def f1(*args):\n return args\n\n @ray.remote\n def f2(x, y, *args):\n return x, y, args\n\n assert ray.get(f1.remote()) == ()\n assert ray.get(f1.remote(1)) == (1, )\n assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)\n with pytest.raises(Exception):\n f2.remote()\n with pytest.raises(Exception):\n f2.remote(1)\n assert ray.get(f2.remote(1, 2)) == (1, 2, ())\n assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))\n assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))\n\n def testNoArgs(self):\n @ray.remote\n def no_op():\n pass\n\n self.ray_start()\n\n ray.get(no_op.remote())\n\n\ndef test_defining_remote_functions(shutdown_only):\n ray.init(num_cpus=3)\n\n # Test that we can define a remote function in the shell.\n @ray.remote\n def f(x):\n return x + 1\n\n assert ray.get(f.remote(0)) == 1\n\n # Test that we can redefine the remote function.\n @ray.remote\n def f(x):\n return x + 10\n\n while True:\n val = ray.get(f.remote(0))\n assert val in [1, 10]\n if val == 10:\n break\n else:\n logger.info(\"Still using old definition of f, trying again.\")\n\n # Test that we can close over plain old data.\n data = [\n np.zeros([3, 5]), (1, 2, \"a\"), [0.0, 1.0, 1 << 62], 1 << 60, {\n \"a\": np.zeros(3)\n }\n ]\n\n @ray.remote\n def g():\n return data\n\n ray.get(g.remote())\n\n # Test that we can close over modules.\n @ray.remote\n def h():\n return np.zeros([3, 5])\n\n assert np.alltrue(ray.get(h.remote()) == np.zeros([3, 5]))\n\n @ray.remote\n def j():\n return time.time()\n\n ray.get(j.remote())\n\n # Test that we can define remote functions that call other remote\n # functions.\n @ray.remote\n def k(x):\n return x + 1\n\n @ray.remote\n def k2(x):\n return ray.get(k.remote(x))\n\n @ray.remote\n def m(x):\n return ray.get(k2.remote(x))\n\n assert ray.get(k.remote(1)) == 2\n assert ray.get(k2.remote(1)) == 2\n assert ray.get(m.remote(1)) == 2\n\n\ndef test_submit_api(shutdown_only):\n ray.init(num_cpus=2, num_gpus=1, resources={\"Custom\": 1})\n\n @ray.remote\n def f(n):\n return list(range(n))\n\n @ray.remote\n def g():\n return ray.get_gpu_ids()\n\n assert f._remote([0], num_return_vals=0) is None\n id1 = f._remote(args=[1], num_return_vals=1)\n assert ray.get(id1) == [0]\n id1, id2 = f._remote(args=[2], num_return_vals=2)\n assert ray.get([id1, id2]) == [0, 1]\n id1, id2, id3 = f._remote(args=[3], num_return_vals=3)\n assert ray.get([id1, id2, id3]) == [0, 1, 2]\n assert ray.get(\n 
g._remote(args=[], num_cpus=1, num_gpus=1,\n resources={\"Custom\": 1})) == [0]\n infeasible_id = g._remote(args=[], resources={\"NonexistentCustom\": 1})\n assert ray.get(g._remote()) == []\n ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)\n assert len(ready_ids) == 0\n assert len(remaining_ids) == 1\n\n @ray.remote\n class Actor(object):\n def __init__(self, x, y=0):\n self.x = x\n self.y = y\n\n def method(self, a, b=0):\n return self.x, self.y, a, b\n\n def gpu_ids(self):\n return ray.get_gpu_ids()\n\n @ray.remote\n class Actor2(object):\n def __init__(self):\n pass\n\n def method(self):\n pass\n\n a = Actor._remote(\n args=[0], kwargs={\"y\": 1}, num_gpus=1, resources={\"Custom\": 1})\n\n a2 = Actor2._remote()\n ray.get(a2.method._remote())\n\n id1, id2, id3, id4 = a.method._remote(\n args=[\"test\"], kwargs={\"b\": 2}, num_return_vals=4)\n assert ray.get([id1, id2, id3, id4]) == [0, 1, \"test\", 2]\n\n\ndef test_many_fractional_resources(shutdown_only):\n ray.init(num_cpus=2, num_gpus=2, resources={\"Custom\": 2})\n\n @ray.remote\n def g():\n return 1\n\n @ray.remote\n def f(block, accepted_resources):\n true_resources = {\n resource: value[0][1]\n for resource, value in ray.get_resource_ids().items()\n }\n if block:\n ray.get(g.remote())\n return true_resources == accepted_resources\n\n # Check that the resource are assigned correctly.\n result_ids = []\n for rand1, rand2, rand3 in np.random.uniform(size=(100, 3)):\n resource_set = {\"CPU\": int(rand1 * 10000) / 10000}\n result_ids.append(f._remote([False, resource_set], num_cpus=rand1))\n\n resource_set = {\"CPU\": 1, \"GPU\": int(rand1 * 10000) / 10000}\n result_ids.append(f._remote([False, resource_set], num_gpus=rand1))\n\n resource_set = {\"CPU\": 1, \"Custom\": int(rand1 * 10000) / 10000}\n result_ids.append(\n f._remote([False, resource_set], resources={\"Custom\": rand1}))\n\n resource_set = {\n \"CPU\": int(rand1 * 10000) / 10000,\n \"GPU\": int(rand2 * 10000) / 10000,\n \"Custom\": int(rand3 * 10000) / 10000\n }\n result_ids.append(\n f._remote(\n [False, resource_set],\n num_cpus=rand1,\n num_gpus=rand2,\n resources={\"Custom\": rand3}))\n result_ids.append(\n f._remote(\n [True, resource_set],\n num_cpus=rand1,\n num_gpus=rand2,\n resources={\"Custom\": rand3}))\n assert all(ray.get(result_ids))\n\n # Check that the available resources at the end are the same as the\n # beginning.\n stop_time = time.time() + 10\n correct_available_resources = False\n while time.time() < stop_time:\n if ray.available_resources() == {\n \"CPU\": 2.0,\n \"GPU\": 2.0,\n \"Custom\": 2.0,\n }:\n correct_available_resources = True\n break\n if not correct_available_resources:\n assert False, \"Did not get correct available resources.\"\n\n\ndef test_get_multiple(ray_start_regular):\n object_ids = [ray.put(i) for i in range(10)]\n assert ray.get(object_ids) == list(range(10))\n\n # Get a random choice of object IDs with duplicates.\n indices = list(np.random.choice(range(10), 5))\n indices += indices\n results = ray.get([object_ids[i] for i in indices])\n assert results == indices\n\n\ndef test_get_multiple_experimental(ray_start_regular):\n object_ids = [ray.put(i) for i in range(10)]\n\n object_ids_tuple = tuple(object_ids)\n assert ray.experimental.get(object_ids_tuple) == list(range(10))\n\n object_ids_nparray = np.array(object_ids)\n assert ray.experimental.get(object_ids_nparray) == list(range(10))\n\n\ndef test_get_dict(ray_start_regular):\n d = {str(i): ray.put(i) for i in range(5)}\n for i in range(5, 10):\n d[str(i)] 
= i\n result = ray.experimental.get(d)\n expected = {str(i): i for i in range(10)}\n assert result == expected\n\n\ndef test_wait(ray_start_regular):\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n ready_ids, remaining_ids = ray.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)\n assert set(ready_ids) == set(objectids)\n assert remaining_ids == []\n\n objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n start_time = time.time()\n ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)\n assert time.time() - start_time < 2\n assert len(ready_ids) == 3\n assert len(remaining_ids) == 1\n ray.wait(objectids)\n objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]\n start_time = time.time()\n ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)\n assert time.time() - start_time < 5\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n\n # Verify that calling wait with duplicate object IDs throws an\n # exception.\n x = ray.put(1)\n with pytest.raises(Exception):\n ray.wait([x, x])\n\n # Make sure it is possible to call wait with an empty list.\n ready_ids, remaining_ids = ray.wait([])\n assert ready_ids == []\n assert remaining_ids == []\n\n # Test semantics of num_returns with no timeout.\n oids = [ray.put(i) for i in range(10)]\n (found, rest) = ray.wait(oids, num_returns=2)\n assert len(found) == 2\n assert len(rest) == 8\n\n # Verify that incorrect usage raises a TypeError.\n x = ray.put(1)\n with pytest.raises(TypeError):\n ray.wait(x)\n with pytest.raises(TypeError):\n ray.wait(1)\n with pytest.raises(TypeError):\n ray.wait([1])\n\n\ndef test_wait_iterables(ray_start_regular):\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))\n ready_ids, remaining_ids = ray.experimental.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n\n objectids = np.array(\n [f.remote(1.0),\n f.remote(0.5),\n f.remote(0.5),\n f.remote(0.5)])\n ready_ids, remaining_ids = ray.experimental.wait(objectids)\n assert len(ready_ids) == 1\n assert len(remaining_ids) == 3\n\n\ndef test_multiple_waits_and_gets(shutdown_only):\n # It is important to use three workers here, so that the three tasks\n # launched in this experiment can run at the same time.\n ray.init(num_cpus=3)\n\n @ray.remote\n def f(delay):\n time.sleep(delay)\n return 1\n\n @ray.remote\n def g(l):\n # The argument l should be a list containing one object ID.\n ray.wait([l[0]])\n\n @ray.remote\n def h(l):\n # The argument l should be a list containing one object ID.\n ray.get(l[0])\n\n # Make sure that multiple wait requests involving the same object ID\n # all return.\n x = f.remote(1)\n ray.get([g.remote([x]), g.remote([x])])\n\n # Make sure that multiple get requests involving the same object ID all\n # return.\n x = f.remote(1)\n ray.get([h.remote([x]), h.remote([x])])\n\n\ndef test_caching_functions_to_run(shutdown_only):\n # Test that we export functions to run on all workers before the driver\n # is connected.\n def f(worker_info):\n sys.path.append(1)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n def f(worker_info):\n sys.path.append(2)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n def g(worker_info):\n sys.path.append(3)\n\n 
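# g is exported as well; the asserts below expect all four functions to have run on every worker in submission order, appending 1, 2, 3, 4 to sys.path.\n 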
ray.worker.global_worker.run_function_on_all_workers(g)\n\n def f(worker_info):\n sys.path.append(4)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n ray.init(num_cpus=1)\n\n @ray.remote\n def get_state():\n time.sleep(1)\n return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]\n\n res1 = get_state.remote()\n res2 = get_state.remote()\n assert ray.get(res1) == (1, 2, 3, 4)\n assert ray.get(res2) == (1, 2, 3, 4)\n\n # Clean up the path on the workers.\n def f(worker_info):\n sys.path.pop()\n sys.path.pop()\n sys.path.pop()\n sys.path.pop()\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n\ndef test_running_function_on_all_workers(ray_start_regular):\n def f(worker_info):\n sys.path.append(\"fake_directory\")\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n @ray.remote\n def get_path1():\n return sys.path\n\n assert \"fake_directory\" == ray.get(get_path1.remote())[-1]\n\n def f(worker_info):\n sys.path.pop(-1)\n\n ray.worker.global_worker.run_function_on_all_workers(f)\n\n # Create a second remote function to guarantee that when we call\n # get_path2.remote(), the second function to run will have been run on\n # the worker.\n @ray.remote\n def get_path2():\n return sys.path\n\n assert \"fake_directory\" not in ray.get(get_path2.remote())\n\n\ndef test_profiling_api(ray_start_2_cpus):\n @ray.remote\n def f():\n with ray.profile(\n \"custom_event\",\n extra_data={\"name\": \"custom name\"}) as ray_prof:\n ray_prof.set_attribute(\"key\", \"value\")\n\n ray.put(1)\n object_id = f.remote()\n ray.wait([object_id])\n ray.get(object_id)\n\n # Wait until all of the profiling information appears in the profile\n # table.\n timeout_seconds = 20\n start_time = time.time()\n while True:\n if time.time() - start_time > timeout_seconds:\n raise Exception(\"Timed out while waiting for information in \"\n \"profile table.\")\n profile_data = ray.timeline()\n event_types = {event[\"cat\"] for event in profile_data}\n expected_types = [\n \"worker_idle\",\n \"task\",\n \"task:deserialize_arguments\",\n \"task:execute\",\n \"task:store_outputs\",\n \"wait_for_function\",\n \"ray.get\",\n \"ray.put\",\n \"ray.wait\",\n \"submit_task\",\n \"fetch_and_run_function\",\n \"register_remote_function\",\n \"custom_event\", # This is the custom one from ray.profile.\n ]\n\n if all(expected_type in event_types\n for expected_type in expected_types):\n break\n\n\ndef test_wait_cluster(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=1, resources={\"RemoteResource\": 1})\n cluster.add_node(num_cpus=1, resources={\"RemoteResource\": 1})\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote(resources={\"RemoteResource\": 1})\n def f():\n return\n\n # Make sure we have enough workers on the remote nodes to execute some\n # tasks.\n tasks = [f.remote() for _ in range(10)]\n start = time.time()\n ray.get(tasks)\n end = time.time()\n\n # Submit some more tasks that can only be executed on the remote nodes.\n tasks = [f.remote() for _ in range(10)]\n # Sleep for a bit to let the tasks finish.\n time.sleep((end - start) * 2)\n _, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)\n # All remote tasks should have finished.\n assert len(unready) == 0\n\n\ndef test_object_transfer_dump(ray_start_cluster):\n cluster = ray_start_cluster\n\n num_nodes = 3\n for i in range(num_nodes):\n cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f(x):\n 
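# f ignores its argument; passing an object ID to it only forces that object to be transferred to the node where the task runs.\n 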
return\n\n # These objects will live on different nodes.\n object_ids = [\n f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)\n ]\n\n # Broadcast each object from each machine to each other machine.\n for object_id in object_ids:\n ray.get([\n f._remote(args=[object_id], resources={str(i): 1})\n for i in range(num_nodes)\n ])\n\n # The profiling information only flushes once every second.\n time.sleep(1.1)\n\n transfer_dump = ray.object_transfer_timeline()\n # Make sure the transfer dump can be serialized with JSON.\n json.loads(json.dumps(transfer_dump))\n assert len(transfer_dump) >= num_nodes**2\n assert len({\n event[\"pid\"]\n for event in transfer_dump if event[\"name\"] == \"transfer_receive\"\n }) == num_nodes\n assert len({\n event[\"pid\"]\n for event in transfer_dump if event[\"name\"] == \"transfer_send\"\n }) == num_nodes\n\n\ndef test_identical_function_names(ray_start_regular):\n # Define a bunch of remote functions and make sure that we don't\n # accidentally call an older version.\n\n num_calls = 200\n\n @ray.remote\n def f():\n return 1\n\n results1 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 2\n\n results2 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 3\n\n results3 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 4\n\n results4 = [f.remote() for _ in range(num_calls)]\n\n @ray.remote\n def f():\n return 5\n\n results5 = [f.remote() for _ in range(num_calls)]\n\n assert ray.get(results1) == num_calls * [1]\n assert ray.get(results2) == num_calls * [2]\n assert ray.get(results3) == num_calls * [3]\n assert ray.get(results4) == num_calls * [4]\n assert ray.get(results5) == num_calls * [5]\n\n @ray.remote\n def g():\n return 1\n\n @ray.remote # noqa: F811\n def g():\n return 2\n\n @ray.remote # noqa: F811\n def g():\n return 3\n\n @ray.remote # noqa: F811\n def g():\n return 4\n\n @ray.remote # noqa: F811\n def g():\n return 5\n\n result_values = ray.get([g.remote() for _ in range(num_calls)])\n assert result_values == num_calls * [5]\n\n\ndef test_illegal_api_calls(ray_start_regular):\n\n # Verify that we cannot call put on an ObjectID.\n x = ray.put(1)\n with pytest.raises(Exception):\n ray.put(x)\n # Verify that we cannot call get on a regular value.\n with pytest.raises(Exception):\n ray.get(3)\n\n\n# TODO(hchen): This test currently doesn't work in Python 2. This is likely\n# because plasma client isn't thread-safe. This needs to be fixed from the\n# Arrow side. 
See #4107 for relevant discussions.\[email protected](six.PY2, reason=\"Doesn't work in Python 2.\")\ndef test_multithreading(ray_start_2_cpus):\n # This test requires at least 2 CPUs to finish since the worker does not\n # release resources when joining the threads.\n\n def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):\n \"\"\"A helper function that runs test cases in multiple threads.\"\"\"\n\n def wrapper():\n for _ in range(num_repeats):\n test_case()\n time.sleep(random.randint(0, 10) / 1000.0)\n return \"ok\"\n\n executor = ThreadPoolExecutor(max_workers=num_threads)\n futures = [executor.submit(wrapper) for _ in range(num_threads)]\n for future in futures:\n assert future.result() == \"ok\"\n\n @ray.remote\n def echo(value, delay_ms=0):\n if delay_ms > 0:\n time.sleep(delay_ms / 1000.0)\n return value\n\n @ray.remote\n class Echo(object):\n def echo(self, value):\n return value\n\n def test_api_in_multi_threads():\n \"\"\"Test using Ray api in multiple threads.\"\"\"\n\n # Test calling remote functions in multiple threads.\n def test_remote_call():\n value = random.randint(0, 1000000)\n result = ray.get(echo.remote(value))\n assert value == result\n\n run_test_in_multi_threads(test_remote_call)\n\n # Test multiple threads calling one actor.\n actor = Echo.remote()\n\n def test_call_actor():\n value = random.randint(0, 1000000)\n result = ray.get(actor.echo.remote(value))\n assert value == result\n\n run_test_in_multi_threads(test_call_actor)\n\n # Test put and get.\n def test_put_and_get():\n value = random.randint(0, 1000000)\n result = ray.get(ray.put(value))\n assert value == result\n\n run_test_in_multi_threads(test_put_and_get)\n\n # Test multiple threads waiting for objects.\n num_wait_objects = 10\n objects = [\n echo.remote(i, delay_ms=10) for i in range(num_wait_objects)\n ]\n\n def test_wait():\n ready, _ = ray.wait(\n objects,\n num_returns=len(objects),\n timeout=1000.0,\n )\n assert len(ready) == num_wait_objects\n assert ray.get(ready) == list(range(num_wait_objects))\n\n run_test_in_multi_threads(test_wait, num_repeats=1)\n\n # Run tests in a driver.\n test_api_in_multi_threads()\n\n # Run tests in a worker.\n @ray.remote\n def run_tests_in_worker():\n test_api_in_multi_threads()\n return \"ok\"\n\n assert ray.get(run_tests_in_worker.remote()) == \"ok\"\n\n # Test actor that runs background threads.\n @ray.remote\n class MultithreadedActor(object):\n def __init__(self):\n self.lock = threading.Lock()\n self.thread_results = []\n\n def background_thread(self, wait_objects):\n try:\n # Test wait\n ready, _ = ray.wait(\n wait_objects,\n num_returns=len(wait_objects),\n timeout=1000.0,\n )\n assert len(ready) == len(wait_objects)\n for _ in range(20):\n num = 10\n # Test remote call\n results = [echo.remote(i) for i in range(num)]\n assert ray.get(results) == list(range(num))\n # Test put and get\n objects = [ray.put(i) for i in range(num)]\n assert ray.get(objects) == list(range(num))\n time.sleep(random.randint(0, 10) / 1000.0)\n except Exception as e:\n with self.lock:\n self.thread_results.append(e)\n else:\n with self.lock:\n self.thread_results.append(\"ok\")\n\n def spawn(self):\n wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]\n self.threads = [\n threading.Thread(\n target=self.background_thread, args=(wait_objects, ))\n for _ in range(20)\n ]\n [thread.start() for thread in self.threads]\n\n def join(self):\n [thread.join() for thread in self.threads]\n assert self.thread_results == [\"ok\"] * len(self.threads)\n return 
\"ok\"\n\n actor = MultithreadedActor.remote()\n actor.spawn.remote()\n ray.get(actor.join.remote()) == \"ok\"\n\n\ndef test_free_objects_multi_node(ray_start_cluster):\n # This test will do following:\n # 1. Create 3 raylets that each hold an actor.\n # 2. Each actor creates an object which is the deletion target.\n # 3. Wait 0.1 second for the objects to be deleted.\n # 4. Check that the deletion targets have been deleted.\n # Caution: if remote functions are used instead of actor methods,\n # one raylet may create more than one worker to execute the\n # tasks, so the flushing operations may be executed in different\n # workers and the plasma client holding the deletion target\n # may not be flushed.\n cluster = ray_start_cluster\n config = json.dumps({\"object_manager_repeated_push_delay_ms\": 1000})\n for i in range(3):\n cluster.add_node(\n num_cpus=1,\n resources={\"Custom{}\".format(i): 1},\n _internal_config=config)\n ray.init(redis_address=cluster.redis_address)\n\n class RawActor(object):\n def get(self):\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n ActorOnNode0 = ray.remote(resources={\"Custom0\": 1})(RawActor)\n ActorOnNode1 = ray.remote(resources={\"Custom1\": 1})(RawActor)\n ActorOnNode2 = ray.remote(resources={\"Custom2\": 1})(RawActor)\n\n def create(actors):\n a = actors[0].get.remote()\n b = actors[1].get.remote()\n c = actors[2].get.remote()\n (l1, l2) = ray.wait([a, b, c], num_returns=3)\n assert len(l1) == 3\n assert len(l2) == 0\n return (a, b, c)\n\n def run_one_test(actors, local_only, delete_creating_tasks):\n (a, b, c) = create(actors)\n # The three objects should be generated on different object stores.\n assert ray.get(a) != ray.get(b)\n assert ray.get(a) != ray.get(c)\n assert ray.get(c) != ray.get(b)\n ray.internal.free(\n [a, b, c],\n local_only=local_only,\n delete_creating_tasks=delete_creating_tasks)\n # Wait for the objects to be deleted.\n time.sleep(0.1)\n return (a, b, c)\n\n actors = [\n ActorOnNode0.remote(),\n ActorOnNode1.remote(),\n ActorOnNode2.remote()\n ]\n # Case 1: run this local_only=False. All 3 objects will be deleted.\n (a, b, c) = run_one_test(actors, False, False)\n (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)\n # All the objects are deleted.\n assert len(l1) == 0\n assert len(l2) == 3\n # Case 2: run this local_only=True. 
Only 1 object will be deleted.\n (a, b, c) = run_one_test(actors, True, False)\n (l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)\n # One object is deleted and 2 objects are not.\n assert len(l1) == 2\n assert len(l2) == 1\n # The deleted object will have the same store with the driver.\n local_return = ray.worker.global_worker.plasma_client.store_socket_name\n for object_id in l1:\n assert ray.get(object_id) != local_return\n\n # Case3: These cases test the deleting creating tasks for the object.\n (a, b, c) = run_one_test(actors, False, False)\n task_table = ray.tasks()\n for obj in [a, b, c]:\n assert ray._raylet.compute_task_id(obj).hex() in task_table\n\n (a, b, c) = run_one_test(actors, False, True)\n task_table = ray.tasks()\n for obj in [a, b, c]:\n assert ray._raylet.compute_task_id(obj).hex() not in task_table\n\n\ndef test_local_mode(shutdown_only):\n @ray.remote\n def local_mode_f():\n return np.array([0, 0])\n\n @ray.remote\n def local_mode_g(x):\n x[0] = 1\n return x\n\n ray.init(local_mode=True)\n\n @ray.remote\n def f():\n return np.ones([3, 4, 5])\n\n xref = f.remote()\n # Remote functions should return ObjectIDs.\n assert isinstance(xref, ray.ObjectID)\n assert np.alltrue(ray.get(xref) == np.ones([3, 4, 5]))\n y = np.random.normal(size=[11, 12])\n # Check that ray.get(ray.put) is the identity.\n assert np.alltrue(y == ray.get(ray.put(y)))\n\n # Make sure objects are immutable, this example is why we need to copy\n # arguments before passing them into remote functions in python mode\n aref = local_mode_f.remote()\n assert np.alltrue(ray.get(aref) == np.array([0, 0]))\n bref = local_mode_g.remote(ray.get(aref))\n # Make sure local_mode_g does not mutate aref.\n assert np.alltrue(ray.get(aref) == np.array([0, 0]))\n assert np.alltrue(ray.get(bref) == np.array([1, 0]))\n\n # wait should return the first num_returns values passed in as the\n # first list and the remaining values as the second list\n num_returns = 5\n object_ids = [ray.put(i) for i in range(20)]\n ready, remaining = ray.wait(\n object_ids, num_returns=num_returns, timeout=None)\n assert ready == object_ids[:num_returns]\n assert remaining == object_ids[num_returns:]\n\n # Check that ray.put() and ray.internal.free() work in local mode.\n\n v1 = np.ones(10)\n v2 = np.zeros(10)\n\n k1 = ray.put(v1)\n assert np.alltrue(v1 == ray.get(k1))\n k2 = ray.put(v2)\n assert np.alltrue(v2 == ray.get(k2))\n\n ray.internal.free([k1, k2])\n with pytest.raises(Exception):\n ray.get(k1)\n with pytest.raises(Exception):\n ray.get(k2)\n\n # Should fail silently.\n ray.internal.free([k1, k2])\n\n # Test actors in LOCAL_MODE.\n\n @ray.remote\n class LocalModeTestClass(object):\n def __init__(self, array):\n self.array = array\n\n def set_array(self, array):\n self.array = array\n\n def get_array(self):\n return self.array\n\n def modify_and_set_array(self, array):\n array[0] = -1\n self.array = array\n\n @ray.method(num_return_vals=3)\n def returns_multiple(self):\n return 1, 2, 3\n\n test_actor = LocalModeTestClass.remote(np.arange(10))\n obj = test_actor.get_array.remote()\n assert isinstance(obj, ray.ObjectID)\n assert np.alltrue(ray.get(obj) == np.arange(10))\n\n test_array = np.arange(10)\n # Remote actor functions should not mutate arguments\n test_actor.modify_and_set_array.remote(test_array)\n assert np.alltrue(test_array == np.arange(10))\n # Remote actor functions should keep state\n test_array[0] = -1\n assert np.alltrue(test_array == ray.get(test_actor.get_array.remote()))\n\n # Check that actor handles work 
in local mode.\n\n @ray.remote\n def use_actor_handle(handle):\n array = np.ones(10)\n handle.set_array.remote(array)\n assert np.alltrue(array == ray.get(handle.get_array.remote()))\n\n ray.get(use_actor_handle.remote(test_actor))\n\n # Check that exceptions are deferred until ray.get().\n\n exception_str = \"test_basic remote task exception\"\n\n @ray.remote\n def throws():\n raise Exception(exception_str)\n\n obj = throws.remote()\n with pytest.raises(Exception, match=exception_str):\n ray.get(obj)\n\n # Check that multiple return values are handled properly.\n\n @ray.remote(num_return_vals=3)\n def returns_multiple():\n return 1, 2, 3\n\n obj1, obj2, obj3 = returns_multiple.remote()\n assert ray.get(obj1) == 1\n assert ray.get(obj2) == 2\n assert ray.get(obj3) == 3\n assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]\n\n obj1, obj2, obj3 = test_actor.returns_multiple.remote()\n assert ray.get(obj1) == 1\n assert ray.get(obj2) == 2\n assert ray.get(obj3) == 3\n assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]\n\n @ray.remote(num_return_vals=2)\n def returns_multiple_throws():\n raise Exception(exception_str)\n\n obj1, obj2 = returns_multiple_throws.remote()\n with pytest.raises(Exception, match=exception_str):\n ray.get(obj)\n ray.get(obj1)\n with pytest.raises(Exception, match=exception_str):\n ray.get(obj2)\n\n\ndef test_resource_constraints(shutdown_only):\n num_workers = 20\n ray.init(num_cpus=10, num_gpus=2)\n\n @ray.remote(num_cpus=0)\n def get_worker_id():\n time.sleep(0.1)\n return os.getpid()\n\n # Attempt to wait for all of the workers to start up.\n while True:\n if len(\n set(\n ray.get([\n get_worker_id.remote() for _ in range(num_workers)\n ]))) == num_workers:\n break\n\n time_buffer = 2\n\n # At most 10 copies of this can run at once.\n @ray.remote(num_cpus=1)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(10)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(11)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n @ray.remote(num_cpus=3)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(3)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(4)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n @ray.remote(num_gpus=1)\n def f(n):\n time.sleep(n)\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(2)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(3)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([f.remote(0.5) for _ in range(4)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n\ndef test_multi_resource_constraints(shutdown_only):\n num_workers = 20\n ray.init(num_cpus=10, num_gpus=10)\n\n @ray.remote(num_cpus=0)\n def get_worker_id():\n time.sleep(0.1)\n return os.getpid()\n\n # Attempt to wait for all of the workers to start up.\n while True:\n if len(\n set(\n ray.get([\n get_worker_id.remote() for _ in range(num_workers)\n ]))) == num_workers:\n break\n\n 
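# f and g each nearly saturate one resource type: two copies of the same task would need 18 of the 10 available GPUs (or CPUs) and must run serially, while one f plus one g fits exactly (10 CPUs, 10 GPUs) and can run concurrently.\n 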
@ray.remote(num_cpus=1, num_gpus=9)\n def f(n):\n time.sleep(n)\n\n @ray.remote(num_cpus=9, num_gpus=1)\n def g(n):\n time.sleep(n)\n\n time_buffer = 2\n\n start_time = time.time()\n ray.get([f.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 0.5 + time_buffer\n assert duration > 0.5\n\n start_time = time.time()\n ray.get([f.remote(0.5), f.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([g.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n start_time = time.time()\n ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])\n duration = time.time() - start_time\n assert duration < 1 + time_buffer\n assert duration > 1\n\n\ndef test_gpu_ids(shutdown_only):\n num_gpus = 10\n ray.init(num_cpus=10, num_gpus=num_gpus)\n\n def get_gpu_ids(num_gpus_per_worker):\n time.sleep(0.1)\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == num_gpus_per_worker\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n for gpu_id in gpu_ids:\n assert gpu_id in range(num_gpus)\n return gpu_ids\n\n f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))\n f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))\n f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))\n f4 = ray.remote(num_gpus=4)(lambda: get_gpu_ids(4))\n f5 = ray.remote(num_gpus=5)(lambda: get_gpu_ids(5))\n\n # Wait for all workers to start up.\n @ray.remote\n def f():\n time.sleep(0.1)\n return os.getpid()\n\n start_time = time.time()\n while True:\n if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:\n break\n if time.time() > start_time + 10:\n raise Exception(\"Timed out while waiting for workers to start \"\n \"up.\")\n\n list_of_ids = ray.get([f0.remote() for _ in range(10)])\n assert list_of_ids == 10 * [[]]\n\n list_of_ids = ray.get([f1.remote() for _ in range(10)])\n set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}\n assert set_of_ids == {(i, ) for i in range(10)}\n\n list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])\n all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]\n assert set(all_ids) == set(range(10))\n\n # There are only 10 GPUs, and each task uses 5 GPUs, so there should only\n # be 2 tasks scheduled at a given time.\n t1 = time.time()\n ray.get([f5.remote() for _ in range(20)])\n assert time.time() - t1 >= 10 * 0.1\n\n # Test that actors have CUDA_VISIBLE_DEVICES set properly.\n\n @ray.remote\n class Actor0(object):\n def __init__(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 0\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n # Set self.x to make sure that we got here.\n self.x = 1\n\n def test(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 0\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n return self.x\n\n @ray.remote(num_gpus=1)\n class Actor1(object):\n def __init__(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n # Set self.x to make sure that we got here.\n self.x = 1\n\n def test(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert (os.environ[\"CUDA_VISIBLE_DEVICES\"] == \",\".join(\n [str(i) for i in gpu_ids]))\n return self.x\n\n a0 = Actor0.remote()\n 
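# Creating Actor0 runs __init__, which asserts that CUDA_VISIBLE_DEVICES matches the assigned (empty) GPU list; test() re-checks the same from a method call.\n 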
ray.get(a0.test.remote())\n\n a1 = Actor1.remote()\n ray.get(a1.test.remote())\n\n\ndef test_zero_cpus(shutdown_only):\n ray.init(num_cpus=0)\n\n # We should be able to execute a task that requires 0 CPU resources.\n @ray.remote(num_cpus=0)\n def f():\n return 1\n\n ray.get(f.remote())\n\n # We should be able to create an actor that requires 0 CPU resources.\n @ray.remote(num_cpus=0)\n class Actor(object):\n def method(self):\n pass\n\n a = Actor.remote()\n x = a.method.remote()\n ray.get(x)\n\n\ndef test_zero_cpus_actor(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=0)\n cluster.add_node(num_cpus=2)\n ray.init(redis_address=cluster.redis_address)\n\n local_plasma = ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote\n class Foo(object):\n def method(self):\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # Make sure tasks and actors run on the remote raylet.\n a = Foo.remote()\n assert ray.get(a.method.remote()) != local_plasma\n\n\ndef test_fractional_resources(shutdown_only):\n ray.init(num_cpus=6, num_gpus=3, resources={\"Custom\": 1})\n\n @ray.remote(num_gpus=0.5)\n class Foo1(object):\n def method(self):\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n return gpu_ids[0]\n\n foos = [Foo1.remote() for _ in range(6)]\n gpu_ids = ray.get([f.method.remote() for f in foos])\n for i in range(3):\n assert gpu_ids.count(i) == 2\n del foos\n\n @ray.remote\n class Foo2(object):\n def method(self):\n pass\n\n # Create an actor that requires 0.7 of the custom resource.\n f1 = Foo2._remote([], {}, resources={\"Custom\": 0.7})\n ray.get(f1.method.remote())\n # Make sure that we cannot create an actor that requires 0.7 of the\n # custom resource. TODO(rkn): Re-enable this once ray.wait is\n # implemented.\n f2 = Foo2._remote([], {}, resources={\"Custom\": 0.7})\n ready, _ = ray.wait([f2.method.remote()], timeout=0.5)\n assert len(ready) == 0\n # Make sure we can start an actor that requries only 0.3 of the custom\n # resource.\n f3 = Foo2._remote([], {}, resources={\"Custom\": 0.3})\n ray.get(f3.method.remote())\n\n del f1, f3\n\n # Make sure that we get exceptions if we submit tasks that require a\n # fractional number of resources greater than 1.\n\n @ray.remote(num_cpus=1.5)\n def test():\n pass\n\n with pytest.raises(ValueError):\n test.remote()\n\n with pytest.raises(ValueError):\n Foo2._remote([], {}, resources={\"Custom\": 1.5})\n\n\ndef test_multiple_raylets(ray_start_cluster):\n # This test will define a bunch of tasks that can only be assigned to\n # specific raylets, and we will check that they are assigned\n # to the correct raylets.\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=11, num_gpus=0)\n cluster.add_node(num_cpus=5, num_gpus=5)\n cluster.add_node(num_cpus=10, num_gpus=1)\n ray.init(redis_address=cluster.redis_address)\n cluster.wait_for_nodes()\n\n # Define a bunch of remote functions that all return the socket name of\n # the plasma store. 
Since there is a one-to-one correspondence between\n # plasma stores and raylets (at least right now), this can be\n # used to identify which raylet the task was assigned to.\n\n # This must be run on the zeroth raylet.\n @ray.remote(num_cpus=11)\n def run_on_0():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the first raylet.\n @ray.remote(num_gpus=2)\n def run_on_1():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the second raylet.\n @ray.remote(num_cpus=6, num_gpus=1)\n def run_on_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This can be run anywhere.\n @ray.remote(num_cpus=0, num_gpus=0)\n def run_on_0_1_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the first or second raylet.\n @ray.remote(num_gpus=1)\n def run_on_1_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This must be run on the zeroth or second raylet.\n @ray.remote(num_cpus=8)\n def run_on_0_2():\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n def run_lots_of_tasks():\n names = []\n results = []\n for i in range(100):\n index = np.random.randint(6)\n if index == 0:\n names.append(\"run_on_0\")\n results.append(run_on_0.remote())\n elif index == 1:\n names.append(\"run_on_1\")\n results.append(run_on_1.remote())\n elif index == 2:\n names.append(\"run_on_2\")\n results.append(run_on_2.remote())\n elif index == 3:\n names.append(\"run_on_0_1_2\")\n results.append(run_on_0_1_2.remote())\n elif index == 4:\n names.append(\"run_on_1_2\")\n results.append(run_on_1_2.remote())\n elif index == 5:\n names.append(\"run_on_0_2\")\n results.append(run_on_0_2.remote())\n return names, results\n\n client_table = ray.nodes()\n store_names = []\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"].get(\"GPU\", 0) == 0\n ]\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"].get(\"GPU\", 0) == 5\n ]\n store_names += [\n client[\"ObjectStoreSocketName\"] for client in client_table\n if client[\"Resources\"].get(\"GPU\", 0) == 1\n ]\n assert len(store_names) == 3\n\n def validate_names_and_results(names, results):\n for name, result in zip(names, ray.get(results)):\n if name == \"run_on_0\":\n assert result in [store_names[0]]\n elif name == \"run_on_1\":\n assert result in [store_names[1]]\n elif name == \"run_on_2\":\n assert result in [store_names[2]]\n elif name == \"run_on_0_1_2\":\n assert (result in [\n store_names[0], store_names[1], store_names[2]\n ])\n elif name == \"run_on_1_2\":\n assert result in [store_names[1], store_names[2]]\n elif name == \"run_on_0_2\":\n assert result in [store_names[0], store_names[2]]\n else:\n raise Exception(\"This should be unreachable.\")\n assert set(ray.get(results)) == set(store_names)\n\n names, results = run_lots_of_tasks()\n validate_names_and_results(names, results)\n\n # Make sure the same thing works when this is nested inside of a task.\n\n @ray.remote\n def run_nested1():\n names, results = run_lots_of_tasks()\n return names, results\n\n @ray.remote\n def run_nested2():\n names, results = ray.get(run_nested1.remote())\n return names, results\n\n names, results = ray.get(run_nested2.remote())\n validate_names_and_results(names, results)\n\n\ndef test_custom_resources(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(num_cpus=3, 
resources={\"CustomResource\": 0})\n cluster.add_node(num_cpus=3, resources={\"CustomResource\": 1})\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource\": 1})\n def g():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource\": 1})\n def h():\n ray.get([f.remote() for _ in range(5)])\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # The f tasks should be scheduled on both raylets.\n assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2\n\n local_plasma = ray.worker.global_worker.plasma_client.store_socket_name\n\n # The g tasks should be scheduled only on the second raylet.\n raylet_ids = set(ray.get([g.remote() for _ in range(50)]))\n assert len(raylet_ids) == 1\n assert list(raylet_ids)[0] != local_plasma\n\n # Make sure that resource bookkeeping works when a task that uses a\n # custom resources gets blocked.\n ray.get([h.remote() for _ in range(5)])\n\n\ndef test_two_custom_resources(ray_start_cluster):\n cluster = ray_start_cluster\n cluster.add_node(\n num_cpus=3, resources={\n \"CustomResource1\": 1,\n \"CustomResource2\": 2\n })\n cluster.add_node(\n num_cpus=3, resources={\n \"CustomResource1\": 3,\n \"CustomResource2\": 4\n })\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote(resources={\"CustomResource1\": 1})\n def f():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource2\": 1})\n def g():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource1\": 1, \"CustomResource2\": 3})\n def h():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource1\": 4})\n def j():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n @ray.remote(resources={\"CustomResource3\": 1})\n def k():\n time.sleep(0.001)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # The f and g tasks should be scheduled on both raylets.\n assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2\n assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2\n\n local_plasma = ray.worker.global_worker.plasma_client.store_socket_name\n\n # The h tasks should be scheduled only on the second raylet.\n raylet_ids = set(ray.get([h.remote() for _ in range(50)]))\n assert len(raylet_ids) == 1\n assert list(raylet_ids)[0] != local_plasma\n\n # Make sure that tasks with unsatisfied custom resource requirements do\n # not get scheduled.\n ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)\n assert ready_ids == []\n\n\ndef test_many_custom_resources(shutdown_only):\n num_custom_resources = 10000\n total_resources = {\n str(i): np.random.randint(1, 7)\n for i in range(num_custom_resources)\n }\n ray.init(num_cpus=5, resources=total_resources)\n\n def f():\n return 1\n\n remote_functions = []\n for _ in range(20):\n num_resources = np.random.randint(0, num_custom_resources + 1)\n permuted_resources = np.random.permutation(\n num_custom_resources)[:num_resources]\n random_resources = {\n str(i): total_resources[str(i)]\n for i in permuted_resources\n }\n remote_function = ray.remote(resources=random_resources)(f)\n 
remote_functions.append(remote_function)\n\n remote_functions.append(ray.remote(f))\n remote_functions.append(ray.remote(resources=total_resources)(f))\n\n results = []\n for remote_function in remote_functions:\n results.append(remote_function.remote())\n results.append(remote_function.remote())\n results.append(remote_function.remote())\n\n ray.get(results)\n\n\n# TODO: 5 retry attempts may be too little for Travis and we may need to\n# increase it if this test begins to be flaky on Travis.\ndef test_zero_capacity_deletion_semantics(shutdown_only):\n ray.init(num_cpus=2, num_gpus=1, resources={\"test_resource\": 1})\n\n def test():\n resources = ray.available_resources()\n MAX_RETRY_ATTEMPTS = 5\n retry_count = 0\n\n while resources and retry_count < MAX_RETRY_ATTEMPTS:\n time.sleep(0.1)\n resources = ray.available_resources()\n retry_count += 1\n\n if retry_count >= MAX_RETRY_ATTEMPTS:\n raise RuntimeError(\n \"Resources were available even after five retries.\")\n\n return resources\n\n function = ray.remote(\n num_cpus=2, num_gpus=1, resources={\"test_resource\": 1})(test)\n cluster_resources = ray.get(function.remote())\n\n # All cluster resources should be utilized and\n # cluster_resources must be empty\n assert cluster_resources == {}\n\n\[email protected]\ndef save_gpu_ids_shutdown_only():\n # Record the curent value of this environment variable so that we can\n # reset it after the test.\n original_gpu_ids = os.environ.get(\"CUDA_VISIBLE_DEVICES\", None)\n\n yield None\n\n # The code after the yield will run as teardown code.\n ray.shutdown()\n # Reset the environment variable.\n if original_gpu_ids is not None:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = original_gpu_ids\n else:\n del os.environ[\"CUDA_VISIBLE_DEVICES\"]\n\n\ndef test_specific_gpus(save_gpu_ids_shutdown_only):\n allowed_gpu_ids = [4, 5, 6]\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \",\".join(\n [str(i) for i in allowed_gpu_ids])\n ray.init(num_gpus=3)\n\n @ray.remote(num_gpus=1)\n def f():\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 1\n assert gpu_ids[0] in allowed_gpu_ids\n\n @ray.remote(num_gpus=2)\n def g():\n gpu_ids = ray.get_gpu_ids()\n assert len(gpu_ids) == 2\n assert gpu_ids[0] in allowed_gpu_ids\n assert gpu_ids[1] in allowed_gpu_ids\n\n ray.get([f.remote() for _ in range(100)])\n ray.get([g.remote() for _ in range(100)])\n\n\ndef test_blocking_tasks(ray_start_regular):\n @ray.remote\n def f(i, j):\n return (i, j)\n\n @ray.remote\n def g(i):\n # Each instance of g submits and blocks on the result of another\n # remote task.\n object_ids = [f.remote(i, j) for j in range(2)]\n return ray.get(object_ids)\n\n @ray.remote\n def h(i):\n # Each instance of g submits and blocks on the result of another\n # remote task using ray.wait.\n object_ids = [f.remote(i, j) for j in range(2)]\n return ray.wait(object_ids, num_returns=len(object_ids))\n\n ray.get([h.remote(i) for i in range(4)])\n\n @ray.remote\n def _sleep(i):\n time.sleep(0.01)\n return (i)\n\n @ray.remote\n def sleep():\n # Each instance of sleep submits and blocks on the result of\n # another remote task, which takes some time to execute.\n ray.get([_sleep.remote(i) for i in range(10)])\n\n ray.get(sleep.remote())\n\n\ndef test_max_call_tasks(ray_start_regular):\n @ray.remote(max_calls=1)\n def f():\n return os.getpid()\n\n pid = ray.get(f.remote())\n ray.tests.utils.wait_for_pid_to_exit(pid)\n\n @ray.remote(max_calls=2)\n def f():\n return os.getpid()\n\n pid1 = ray.get(f.remote())\n pid2 = ray.get(f.remote())\n assert pid1 == pid2\n 
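# With max_calls=2 the same worker process serves both invocations and is only expected to exit after the second call.\n 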
ray.tests.utils.wait_for_pid_to_exit(pid1)\n\n\ndef attempt_to_load_balance(remote_function,\n args,\n total_tasks,\n num_nodes,\n minimum_count,\n num_attempts=100):\n attempts = 0\n while attempts < num_attempts:\n locations = ray.get(\n [remote_function.remote(*args) for _ in range(total_tasks)])\n names = set(locations)\n counts = [locations.count(name) for name in names]\n logger.info(\"Counts are {}.\".format(counts))\n if (len(names) == num_nodes\n and all(count >= minimum_count for count in counts)):\n break\n attempts += 1\n assert attempts < num_attempts\n\n\ndef test_load_balancing(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets\n # in a roughly equal manner.\n cluster = ray_start_cluster\n num_nodes = 3\n num_cpus = 7\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=num_cpus)\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f():\n time.sleep(0.01)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n attempt_to_load_balance(f, [], 100, num_nodes, 10)\n attempt_to_load_balance(f, [], 1000, num_nodes, 100)\n\n\ndef test_load_balancing_with_dependencies(ray_start_cluster):\n # This test ensures that tasks are being assigned to all raylets in a\n # roughly equal manner even when the tasks have dependencies.\n cluster = ray_start_cluster\n num_nodes = 3\n for _ in range(num_nodes):\n cluster.add_node(num_cpus=1)\n ray.init(redis_address=cluster.redis_address)\n\n @ray.remote\n def f(x):\n time.sleep(0.010)\n return ray.worker.global_worker.plasma_client.store_socket_name\n\n # This object will be local to one of the raylets. Make sure\n # this doesn't prevent tasks from being scheduled on other raylets.\n x = ray.put(np.zeros(1000000))\n\n attempt_to_load_balance(f, [x], 100, num_nodes, 25)\n\n\ndef wait_for_num_tasks(num_tasks, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.tasks()) >= num_tasks:\n return\n time.sleep(0.1)\n raise Exception(\"Timed out while waiting for global state.\")\n\n\ndef wait_for_num_objects(num_objects, timeout=10):\n start_time = time.time()\n while time.time() - start_time < timeout:\n if len(ray.objects()) >= num_objects:\n return\n time.sleep(0.1)\n raise Exception(\"Timed out while waiting for global state.\")\n\n\[email protected](\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_global_state_api(shutdown_only):\n\n error_message = (\"The ray global state API cannot be used \"\n \"before ray.init has been called.\")\n\n with pytest.raises(Exception, match=error_message):\n ray.objects()\n\n with pytest.raises(Exception, match=error_message):\n ray.tasks()\n\n with pytest.raises(Exception, match=error_message):\n ray.nodes()\n\n with pytest.raises(Exception, match=error_message):\n ray.jobs()\n\n ray.init(num_cpus=5, num_gpus=3, resources={\"CustomResource\": 1})\n\n resources = {\"CPU\": 5, \"GPU\": 3, \"CustomResource\": 1}\n assert ray.cluster_resources() == resources\n\n assert ray.objects() == {}\n\n job_id = ray.utils.compute_job_id_from_driver(\n ray.WorkerID(ray.worker.global_worker.worker_id))\n driver_task_id = ray.worker.global_worker.current_task_id.hex()\n\n # One task is put in the task table which corresponds to this driver.\n wait_for_num_tasks(1)\n task_table = ray.tasks()\n assert len(task_table) == 1\n assert driver_task_id == list(task_table.keys())[0]\n task_spec = task_table[driver_task_id][\"TaskSpec\"]\n nil_unique_id_hex = 
ray.UniqueID.nil().hex()\n nil_actor_id_hex = ray.ActorID.nil().hex()\n\n assert task_spec[\"TaskID\"] == driver_task_id\n assert task_spec[\"ActorID\"] == nil_actor_id_hex\n assert task_spec[\"Args\"] == []\n assert task_spec[\"JobID\"] == job_id.hex()\n assert task_spec[\"FunctionID\"] == nil_unique_id_hex\n assert task_spec[\"ReturnObjectIDs\"] == []\n\n client_table = ray.nodes()\n node_ip_address = ray.worker.global_worker.node_ip_address\n\n assert len(client_table) == 1\n assert client_table[0][\"NodeManagerAddress\"] == node_ip_address\n\n @ray.remote\n def f(*xs):\n return 1\n\n x_id = ray.put(1)\n result_id = f.remote(1, \"hi\", x_id)\n\n # Wait for one additional task to complete.\n wait_for_num_tasks(1 + 1)\n task_table = ray.tasks()\n assert len(task_table) == 1 + 1\n task_id_set = set(task_table.keys())\n task_id_set.remove(driver_task_id)\n task_id = list(task_id_set)[0]\n\n task_spec = task_table[task_id][\"TaskSpec\"]\n assert task_spec[\"ActorID\"] == nil_actor_id_hex\n assert task_spec[\"Args\"] == [1, \"hi\", x_id]\n assert task_spec[\"JobID\"] == job_id.hex()\n assert task_spec[\"ReturnObjectIDs\"] == [result_id]\n\n assert task_table[task_id] == ray.tasks(task_id)\n\n # Wait for two objects, one for the x_id and one for result_id.\n wait_for_num_objects(2)\n\n def wait_for_object_table():\n timeout = 10\n start_time = time.time()\n while time.time() - start_time < timeout:\n object_table = ray.objects()\n tables_ready = (object_table[x_id][\"ManagerIDs\"] is not None and\n object_table[result_id][\"ManagerIDs\"] is not None)\n if tables_ready:\n return\n time.sleep(0.1)\n raise Exception(\"Timed out while waiting for object table to \"\n \"update.\")\n\n object_table = ray.objects()\n assert len(object_table) == 2\n\n assert object_table[x_id] == ray.objects(x_id)\n object_table_entry = ray.objects(result_id)\n assert object_table[result_id] == object_table_entry\n\n job_table = ray.jobs()\n\n assert len(job_table) == 1\n assert job_table[0][\"JobID\"] == job_id.hex()\n assert job_table[0][\"NodeManagerAddress\"] == node_ip_address\n\n\n# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we\n# should use those, but they seem to conflict with Ray's use of faulthandler.\nclass CaptureOutputAndError(object):\n \"\"\"Capture stdout and stderr of some span.\n\n This can be used as follows.\n\n captured = {}\n with CaptureOutputAndError(captured):\n # Do stuff.\n # Access captured[\"out\"] and captured[\"err\"].\n \"\"\"\n\n def __init__(self, captured_output_and_error):\n if sys.version_info >= (3, 0):\n import io\n self.output_buffer = io.StringIO()\n self.error_buffer = io.StringIO()\n else:\n import cStringIO\n self.output_buffer = cStringIO.StringIO()\n self.error_buffer = cStringIO.StringIO()\n self.captured_output_and_error = captured_output_and_error\n\n def __enter__(self):\n sys.stdout.flush()\n sys.stderr.flush()\n self.old_stdout = sys.stdout\n self.old_stderr = sys.stderr\n sys.stdout = self.output_buffer\n sys.stderr = self.error_buffer\n\n def __exit__(self, exc_type, exc_value, traceback):\n sys.stdout.flush()\n sys.stderr.flush()\n sys.stdout = self.old_stdout\n sys.stderr = self.old_stderr\n self.captured_output_and_error[\"out\"] = self.output_buffer.getvalue()\n self.captured_output_and_error[\"err\"] = self.error_buffer.getvalue()\n\n\ndef test_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=True)\n\n @ray.remote\n def f():\n # It's important to make sure that these print statements occur even\n # without 
calling sys.stdout.flush() and sys.stderr.flush().\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n for i in range(200):\n assert str(i) in output_lines\n\n # TODO(rkn): Check that no additional logs appear beyond what we expect\n # and that there are no duplicate logs. Once we address the issue\n # described in https://github.com/ray-project/ray/pull/5462, we should\n # also check that nothing is logged to stderr.\n\n\ndef test_not_logging_to_driver(shutdown_only):\n ray.init(num_cpus=1, log_to_driver=False)\n\n @ray.remote\n def f():\n for i in range(100):\n print(i)\n print(100 + i, file=sys.stderr)\n sys.stdout.flush()\n sys.stderr.flush()\n\n captured = {}\n with CaptureOutputAndError(captured):\n ray.get(f.remote())\n time.sleep(1)\n\n output_lines = captured[\"out\"]\n assert len(output_lines) == 0\n\n # TODO(rkn): Check that no additional logs appear beyond what we expect\n # and that there are no duplicate logs. Once we address the issue\n # described in https://github.com/ray-project/ray/pull/5462, we should\n # also check that nothing is logged to stderr.\n\n\[email protected](\n os.environ.get(\"RAY_USE_NEW_GCS\") == \"on\",\n reason=\"New GCS API doesn't have a Python API yet.\")\ndef test_workers(shutdown_only):\n num_workers = 3\n ray.init(num_cpus=num_workers)\n\n @ray.remote\n def f():\n return id(ray.worker.global_worker), os.getpid()\n\n # Wait until all of the workers have started.\n worker_ids = set()\n while len(worker_ids) != num_workers:\n worker_ids = set(ray.get([f.remote() for _ in range(10)]))\n\n\ndef test_specific_job_id():\n dummy_driver_id = ray.JobID.from_int(1)\n ray.init(num_cpus=1, job_id=dummy_driver_id)\n\n # in driver\n assert dummy_driver_id == ray._get_runtime_context().current_driver_id\n\n # in worker\n @ray.remote\n def f():\n return ray._get_runtime_context().current_driver_id\n\n assert dummy_driver_id == ray.get(f.remote())\n\n ray.shutdown()\n\n\ndef test_object_id_properties():\n id_bytes = b\"00112233445566778899\"\n object_id = ray.ObjectID(id_bytes)\n assert object_id.binary() == id_bytes\n object_id = ray.ObjectID.nil()\n assert object_id.is_nil()\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectID(id_bytes + b\"1234\")\n with pytest.raises(ValueError, match=r\".*needs to have length 20.*\"):\n ray.ObjectID(b\"0123456789\")\n object_id = ray.ObjectID.from_random()\n assert not object_id.is_nil()\n assert object_id.binary() != id_bytes\n id_dumps = pickle.dumps(object_id)\n id_from_dumps = pickle.loads(id_dumps)\n assert id_from_dumps == object_id\n file_prefix = \"test_object_id_properties\"\n\n # Make sure the ids are fork safe.\n def write(index):\n str = ray.ObjectID.from_random().hex()\n with open(\"{}{}\".format(file_prefix, index), \"w\") as fo:\n fo.write(str)\n\n def read(index):\n with open(\"{}{}\".format(file_prefix, index), \"r\") as fi:\n for line in fi:\n return line\n\n processes = [Process(target=write, args=(_, )) for _ in range(4)]\n for process in processes:\n process.start()\n for process in processes:\n process.join()\n hexes = {read(i) for i in range(4)}\n [os.remove(\"{}{}\".format(file_prefix, i)) for i in range(4)]\n assert len(hexes) == 4\n\n\[email protected]\ndef shutdown_only_with_initialization_check():\n yield None\n # The code after the yield will run as teardown code.\n ray.shutdown()\n assert not 
ray.is_initialized()\n\n\ndef test_initialized(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0)\n assert ray.is_initialized()\n\n\ndef test_initialized_local_mode(shutdown_only_with_initialization_check):\n assert not ray.is_initialized()\n ray.init(num_cpus=0, local_mode=True)\n assert ray.is_initialized()\n\n\ndef test_wait_reconstruction(shutdown_only):\n ray.init(num_cpus=1, object_store_memory=10**8)\n\n @ray.remote\n def f():\n return np.zeros(6 * 10**7, dtype=np.uint8)\n\n x_id = f.remote()\n ray.wait([x_id])\n ray.wait([f.remote()])\n assert not ray.worker.global_worker.plasma_client.contains(\n ray.pyarrow.plasma.ObjectID(x_id.binary()))\n ready_ids, _ = ray.wait([x_id])\n assert len(ready_ids) == 1\n\n\ndef test_ray_setproctitle(ray_start_2_cpus):\n @ray.remote\n class UniqueName(object):\n def __init__(self):\n assert setproctitle.getproctitle() == \"ray_UniqueName:__init__()\"\n\n def f(self):\n assert setproctitle.getproctitle() == \"ray_UniqueName:f()\"\n\n @ray.remote\n def unique_1():\n assert setproctitle.getproctitle(\n ) == \"ray_worker:ray.tests.test_basic.unique_1()\"\n\n actor = UniqueName.remote()\n ray.get(actor.f.remote())\n ray.get(unique_1.remote())\n\n\ndef test_duplicate_error_messages(shutdown_only):\n ray.init(num_cpus=0)\n\n driver_id = ray.WorkerID.nil()\n error_data = ray.gcs_utils.construct_error_message(driver_id, \"test\",\n \"message\", 0)\n\n # Push the same message to the GCS twice (they are the same because we\n # do not include a timestamp).\n\n r = ray.worker.global_worker.redis_client\n\n r.execute_command(\"RAY.TABLE_APPEND\",\n ray.gcs_utils.TablePrefix.Value(\"ERROR_INFO\"),\n ray.gcs_utils.TablePubsub.Value(\"ERROR_INFO_PUBSUB\"),\n driver_id.binary(), error_data)\n\n # Before https://github.com/ray-project/ray/pull/3316 this would\n # give an error\n r.execute_command(\"RAY.TABLE_APPEND\",\n ray.gcs_utils.TablePrefix.Value(\"ERROR_INFO\"),\n ray.gcs_utils.TablePubsub.Value(\"ERROR_INFO_PUBSUB\"),\n driver_id.binary(), error_data)\n\n\[email protected](\n os.getenv(\"TRAVIS\") is None,\n reason=\"This test should only be run on Travis.\")\ndef test_ray_stack(ray_start_2_cpus):\n def unique_name_1():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_2():\n time.sleep(1000)\n\n @ray.remote\n def unique_name_3():\n unique_name_1()\n\n unique_name_2.remote()\n unique_name_3.remote()\n\n success = False\n start_time = time.time()\n while time.time() - start_time < 30:\n # Attempt to parse the \"ray stack\" call.\n output = ray.utils.decode(subprocess.check_output([\"ray\", \"stack\"]))\n if (\"unique_name_1\" in output and \"unique_name_2\" in output\n and \"unique_name_3\" in output):\n success = True\n break\n\n if not success:\n raise Exception(\"Failed to find necessary information with \"\n \"'ray stack'\")\n\n\ndef test_pandas_parquet_serialization():\n # Only test this if pandas is installed\n pytest.importorskip(\"pandas\")\n\n import pandas as pd\n import pyarrow as pa\n import pyarrow.parquet as pq\n\n tempdir = tempfile.mkdtemp()\n filename = os.path.join(tempdir, \"parquet-test\")\n pd.DataFrame({\"col1\": [0, 1], \"col2\": [0, 1]}).to_parquet(filename)\n with open(os.path.join(tempdir, \"parquet-compression\"), \"wb\") as f:\n table = pa.Table.from_arrays([pa.array([1, 2, 3])], [\"hello\"])\n pq.write_table(table, f, compression=\"lz4\")\n # Clean up\n shutil.rmtree(tempdir)\n\n\ndef test_socket_dir_not_existing(shutdown_only):\n random_name = ray.ObjectID.from_random().hex()\n 
temp_raylet_socket_dir = \"/tmp/ray/tests/{}\".format(random_name)\n temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,\n \"raylet_socket\")\n ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)\n\n\ndef test_raylet_is_robust_to_random_messages(ray_start_regular):\n node_manager_address = None\n node_manager_port = None\n for client in ray.nodes():\n if \"NodeManagerAddress\" in client:\n node_manager_address = client[\"NodeManagerAddress\"]\n node_manager_port = client[\"NodeManagerPort\"]\n assert node_manager_address\n assert node_manager_port\n # Try to bring down the node manager:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((node_manager_address, node_manager_port))\n s.send(1000 * b\"asdf\")\n\n @ray.remote\n def f():\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\ndef test_non_ascii_comment(ray_start_regular):\n @ray.remote\n def f():\n # 日本語 Japanese comment\n return 1\n\n assert ray.get(f.remote()) == 1\n\n\[email protected]\ndef echo(x):\n return x\n\n\[email protected]\nclass WithConstructor(object):\n def __init__(self, data):\n self.data = data\n\n def get_data(self):\n return self.data\n\n\[email protected]\nclass WithoutConstructor(object):\n def set_data(self, data):\n self.data = data\n\n def get_data(self):\n return self.data\n\n\nclass BaseClass(object):\n def __init__(self, data):\n self.data = data\n\n def get_data(self):\n return self.data\n\n\[email protected]\nclass DerivedClass(BaseClass):\n def __init__(self, data):\n # Due to different behaviors of super in Python 2 and Python 3,\n # we use BaseClass directly here.\n BaseClass.__init__(self, data)\n\n\ndef test_load_code_from_local(shutdown_only):\n ray.init(load_code_from_local=True, num_cpus=4)\n message = \"foo\"\n # Test normal function.\n assert ray.get(echo.remote(message)) == message\n # Test actor class with constructor.\n actor = WithConstructor.remote(1)\n assert ray.get(actor.get_data.remote()) == 1\n # Test actor class without constructor.\n actor = WithoutConstructor.remote()\n actor.set_data.remote(1)\n assert ray.get(actor.get_data.remote()) == 1\n # Test derived actor class.\n actor = DerivedClass.remote(1)\n assert ray.get(actor.get_data.remote()) == 1\n # Test using ray.remote decorator on raw classes.\n base_actor_class = ray.remote(num_cpus=1)(BaseClass)\n base_actor = base_actor_class.remote(message)\n assert ray.get(base_actor.get_data.remote()) == message\n\n\ndef test_shutdown_disconnect_global_state():\n ray.init(num_cpus=0)\n ray.shutdown()\n\n with pytest.raises(Exception) as e:\n ray.objects()\n assert str(e.value).endswith(\"ray.init has been called.\")\n\n\[email protected](\n \"ray_start_object_store_memory\", [10**8], indirect=True)\ndef test_redis_lru_with_set(ray_start_object_store_memory):\n x = np.zeros(8 * 10**7, dtype=np.uint8)\n x_id = ray.put(x)\n\n # Remove the object from the object table to simulate Redis LRU eviction.\n removed = False\n start_time = time.time()\n while time.time() < start_time + 10:\n if ray.state.state.redis_clients[0].delete(b\"OBJECT\" +\n x_id.binary()) == 1:\n removed = True\n break\n assert removed\n\n # Now evict the object from the object store.\n ray.put(x) # This should not crash.\n\n\ndef test_decorated_function(ray_start_regular):\n def function_invocation_decorator(f):\n def new_f(args, kwargs):\n # Reverse the arguments.\n return f(args[::-1], {\"d\": 5}), kwargs\n\n return new_f\n\n def f(a, b, c, d=None):\n return a, b, c, d\n\n f.__ray_invocation_decorator__ = 
function_invocation_decorator\n f = ray.remote(f)\n\n result_id, kwargs = f.remote(1, 2, 3, d=4)\n assert kwargs == {\"d\": 4}\n assert ray.get(result_id) == (3, 2, 1, 5)\n\n\ndef test_get_postprocess(ray_start_regular):\n def get_postprocessor(object_ids, values):\n return [value for value in values if value > 0]\n\n ray.worker.global_worker._post_get_hooks.append(get_postprocessor)\n\n assert ray.get(\n [ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]\n\n\ndef test_export_after_shutdown(ray_start_regular):\n # This test checks that we can use actor and remote function definitions\n # across multiple Ray sessions.\n\n @ray.remote\n def f():\n pass\n\n @ray.remote\n class Actor(object):\n def method(self):\n pass\n\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray and use the remote function and actor again.\n ray.init(num_cpus=1)\n ray.get(f.remote())\n a = Actor.remote()\n ray.get(a.method.remote())\n\n ray.shutdown()\n\n # Start Ray again and make sure that these definitions can be exported from\n # workers.\n ray.init(num_cpus=2)\n\n @ray.remote\n def export_definitions_from_worker(remote_function, actor_class):\n ray.get(remote_function.remote())\n actor_handle = actor_class.remote()\n ray.get(actor_handle.method.remote())\n\n ray.get(export_definitions_from_worker.remote(f, Actor))\n"
] |
[
[
"numpy.testing.assert_equal",
"numpy.uint32",
"numpy.arange",
"numpy.uint8",
"numpy.int32",
"numpy.int8",
"pandas.DataFrame",
"numpy.ones",
"numpy.int64",
"numpy.random.normal",
"numpy.random.permutation",
"numpy.uint64",
"numpy.float64",
"numpy.float32",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |
Bifaxin/pandas
|
[
"2ec7f2f279d770b286c9c7679ba7ad0e2f14dcbe"
] |
[
"pandas/core/indexes/interval.py"
] |
[
"\"\"\" define the IntervalIndex \"\"\"\nfrom operator import le, lt\nimport textwrap\nfrom typing import Any, Optional, Tuple, Union\nimport warnings\n\nimport numpy as np\n\nfrom pandas._config import get_option\n\nfrom pandas._libs import Timedelta, Timestamp, lib\nfrom pandas._libs.interval import Interval, IntervalMixin, IntervalTree\nfrom pandas.util._decorators import Appender, Substitution, cache_readonly\nfrom pandas.util._exceptions import rewrite_exception\n\nfrom pandas.core.dtypes.cast import (\n find_common_type,\n infer_dtype_from_scalar,\n maybe_downcast_to_dtype,\n)\nfrom pandas.core.dtypes.common import (\n ensure_platform_int,\n is_datetime64tz_dtype,\n is_datetime_or_timedelta_dtype,\n is_dtype_equal,\n is_float,\n is_float_dtype,\n is_integer,\n is_integer_dtype,\n is_interval_dtype,\n is_list_like,\n is_number,\n is_object_dtype,\n is_scalar,\n)\nfrom pandas.core.dtypes.generic import ABCSeries\nfrom pandas.core.dtypes.missing import isna\n\nfrom pandas._typing import AnyArrayLike\nfrom pandas.core.arrays.interval import IntervalArray, _interval_shared_docs\nimport pandas.core.common as com\nimport pandas.core.indexes.base as ibase\nfrom pandas.core.indexes.base import (\n Index,\n InvalidIndexError,\n _index_shared_docs,\n default_pprint,\n ensure_index,\n)\nfrom pandas.core.indexes.datetimes import DatetimeIndex, date_range\nfrom pandas.core.indexes.multi import MultiIndex\nfrom pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range\nfrom pandas.core.ops import get_op_result_name\n\nfrom pandas.tseries.frequencies import to_offset\nfrom pandas.tseries.offsets import DateOffset\n\n_VALID_CLOSED = {\"left\", \"right\", \"both\", \"neither\"}\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n\n_index_doc_kwargs.update(\n dict(\n klass=\"IntervalIndex\",\n qualname=\"IntervalIndex\",\n target_klass=\"IntervalIndex or list of Intervals\",\n name=textwrap.dedent(\n \"\"\"\\\n name : object, optional\n Name to be stored in the index.\n \"\"\"\n ),\n )\n)\n\n\ndef _get_next_label(label):\n dtype = getattr(label, \"dtype\", type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = \"datetime64\"\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label + np.timedelta64(1, \"ns\")\n elif is_integer_dtype(dtype):\n return label + 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, np.infty)\n else:\n raise TypeError(\n \"cannot determine next label for type {typ!r}\".format(typ=type(label))\n )\n\n\ndef _get_prev_label(label):\n dtype = getattr(label, \"dtype\", type(label))\n if isinstance(label, (Timestamp, Timedelta)):\n dtype = \"datetime64\"\n if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):\n return label - np.timedelta64(1, \"ns\")\n elif is_integer_dtype(dtype):\n return label - 1\n elif is_float_dtype(dtype):\n return np.nextafter(label, -np.infty)\n else:\n raise TypeError(\n \"cannot determine next label for type {typ!r}\".format(typ=type(label))\n )\n\n\ndef _get_interval_closed_bounds(interval):\n \"\"\"\n Given an Interval or IntervalIndex, return the corresponding interval with\n closed bounds.\n \"\"\"\n left, right = interval.left, interval.right\n if interval.open_left:\n left = _get_next_label(left)\n if interval.open_right:\n right = _get_prev_label(right)\n return left, right\n\n\ndef _new_IntervalIndex(cls, d):\n \"\"\"\n This is called upon unpickling, rather than the default which doesn't have\n arguments and breaks __new__.\n \"\"\"\n return 
cls.from_arrays(**d)\n\n\nclass SetopCheck:\n \"\"\"\n This is called to decorate the set operations of IntervalIndex\n to perform the type check in advance.\n \"\"\"\n\n def __init__(self, op_name):\n self.op_name = op_name\n\n def __call__(self, setop):\n def func(intvidx_self, other, sort=False):\n intvidx_self._assert_can_do_setop(other)\n other = ensure_index(other)\n\n if not isinstance(other, IntervalIndex):\n result = getattr(intvidx_self.astype(object), self.op_name)(other)\n if self.op_name in (\"difference\",):\n result = result.astype(intvidx_self.dtype)\n return result\n elif intvidx_self.closed != other.closed:\n msg = (\n \"can only do set operations between two IntervalIndex \"\n \"objects that are closed on the same side\"\n )\n raise ValueError(msg)\n\n # GH 19016: ensure set op will not return a prohibited dtype\n subtypes = [intvidx_self.dtype.subtype, other.dtype.subtype]\n common_subtype = find_common_type(subtypes)\n if is_object_dtype(common_subtype):\n msg = (\n \"can only do {op} between two IntervalIndex \"\n \"objects that have compatible dtypes\"\n )\n raise TypeError(msg.format(op=self.op_name))\n\n return setop(intvidx_self, other, sort)\n\n return func\n\n\n@Appender(\n _interval_shared_docs[\"class\"]\n % dict(\n klass=\"IntervalIndex\",\n summary=\"Immutable index of intervals that are closed on the same side.\",\n name=_index_doc_kwargs[\"name\"],\n versionadded=\"0.20.0\",\n extra_attributes=\"is_overlapping\\nvalues\\n\",\n extra_methods=\"\",\n examples=textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n A new ``IntervalIndex`` is typically constructed using\n :func:`interval_range`:\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],\n closed='right',\n dtype='interval[int64]')\n\n It may also be constructed using one of the constructor\n methods: :meth:`IntervalIndex.from_arrays`,\n :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.\n\n See further examples in the doc strings of ``interval_range`` and the\n mentioned constructor methods.\n \"\"\"\n ),\n )\n)\nclass IntervalIndex(IntervalMixin, Index):\n _typ = \"intervalindex\"\n _comparables = [\"name\"]\n _attributes = [\"name\", \"closed\"]\n\n # we would like our indexing holder to defer to us\n _defer_to_indexing = True\n\n # Immutable, so we are able to cache computations like isna in '_mask'\n _mask = None\n\n # --------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls, data, closed=None, dtype=None, copy=False, name=None, verify_integrity=True\n ):\n\n if name is None and hasattr(data, \"name\"):\n name = data.name\n\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray(\n data,\n closed=closed,\n copy=copy,\n dtype=dtype,\n verify_integrity=verify_integrity,\n )\n\n return cls._simple_new(array, name)\n\n @classmethod\n def _simple_new(cls, array, name, closed=None):\n \"\"\"\n Construct from an IntervalArray\n\n Parameters\n ----------\n array : IntervalArray\n name : str\n Attached as result.name\n closed : Any\n Ignored.\n \"\"\"\n result = IntervalMixin.__new__(cls)\n result._data = array\n result.name = name\n result._reset_identity()\n return result\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_breaks\"]\n % dict(\n klass=\"IntervalIndex\",\n examples=textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])\n IntervalIndex([(0, 1], (1, 2], (2, 3]],\n closed='right',\n 
dtype='interval[int64]')\n \"\"\"\n ),\n )\n )\n def from_breaks(cls, breaks, closed=\"right\", name=None, copy=False, dtype=None):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_breaks(\n breaks, closed=closed, copy=copy, dtype=dtype\n )\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_arrays\"]\n % dict(\n klass=\"IntervalIndex\",\n examples=textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])\n IntervalIndex([(0, 1], (1, 2], (2, 3]],\n closed='right',\n dtype='interval[int64]')\n \"\"\"\n ),\n )\n )\n def from_arrays(\n cls, left, right, closed=\"right\", name=None, copy=False, dtype=None\n ):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n array = IntervalArray.from_arrays(\n left, right, closed, copy=copy, dtype=dtype\n )\n return cls._simple_new(array, name=name)\n\n @classmethod\n @Appender(\n _interval_shared_docs[\"from_tuples\"]\n % dict(\n klass=\"IntervalIndex\",\n examples=textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])\n IntervalIndex([(0, 1], (1, 2]],\n closed='right',\n dtype='interval[int64]')\n \"\"\"\n ),\n )\n )\n def from_tuples(cls, data, closed=\"right\", name=None, copy=False, dtype=None):\n with rewrite_exception(\"IntervalArray\", cls.__name__):\n arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)\n return cls._simple_new(arr, name=name)\n\n # --------------------------------------------------------------------\n\n @Appender(_index_shared_docs[\"_shallow_copy\"])\n def _shallow_copy(self, left=None, right=None, **kwargs):\n result = self._data._shallow_copy(left=left, right=right)\n attributes = self._get_attributes_dict()\n attributes.update(kwargs)\n return self._simple_new(result, **attributes)\n\n @cache_readonly\n def _isnan(self):\n \"\"\"Return a mask indicating if each value is NA\"\"\"\n if self._mask is None:\n self._mask = isna(self.left)\n return self._mask\n\n @cache_readonly\n def _engine(self):\n left = self._maybe_convert_i8(self.left)\n right = self._maybe_convert_i8(self.right)\n return IntervalTree(left, right, closed=self.closed)\n\n def __contains__(self, key):\n \"\"\"\n return a boolean if this key is IN the index\n We *only* accept an Interval\n\n Parameters\n ----------\n key : Interval\n\n Returns\n -------\n boolean\n \"\"\"\n if not isinstance(key, Interval):\n return False\n\n try:\n self.get_loc(key)\n return True\n except KeyError:\n return False\n\n @Appender(\n _interval_shared_docs[\"to_tuples\"]\n % dict(\n return_type=\"Index\",\n examples=\"\"\"\n Examples\n --------\n >>> idx = pd.IntervalIndex.from_arrays([0, np.nan, 2], [1, np.nan, 3])\n >>> idx.to_tuples()\n Index([(0.0, 1.0), (nan, nan), (2.0, 3.0)], dtype='object')\n >>> idx.to_tuples(na_tuple=False)\n Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object')\n \"\"\",\n )\n )\n def to_tuples(self, na_tuple=True):\n tuples = self._data.to_tuples(na_tuple=na_tuple)\n return Index(tuples)\n\n @cache_readonly\n def _multiindex(self):\n return MultiIndex.from_arrays([self.left, self.right], names=[\"left\", \"right\"])\n\n @property\n def left(self):\n \"\"\"\n Return the left endpoints of each Interval in the IntervalIndex as\n an Index.\n \"\"\"\n return self._data._left\n\n @property\n def right(self):\n \"\"\"\n Return the right endpoints of each Interval in the IntervalIndex as\n an Index.\n \"\"\"\n return self._data._right\n\n 
@property\n def closed(self):\n \"\"\"\n Whether the intervals are closed on the left-side, right-side, both or\n neither.\n \"\"\"\n return self._data._closed\n\n @Appender(\n _interval_shared_docs[\"set_closed\"]\n % dict(\n klass=\"IntervalIndex\",\n examples=textwrap.dedent(\n \"\"\"\\\n Examples\n --------\n >>> index = pd.interval_range(0, 3)\n >>> index\n IntervalIndex([(0, 1], (1, 2], (2, 3]],\n closed='right',\n dtype='interval[int64]')\n >>> index.set_closed('both')\n IntervalIndex([[0, 1], [1, 2], [2, 3]],\n closed='both',\n dtype='interval[int64]')\n \"\"\"\n ),\n )\n )\n def set_closed(self, closed):\n if closed not in _VALID_CLOSED:\n msg = \"invalid option for 'closed': {closed}\"\n raise ValueError(msg.format(closed=closed))\n\n # return self._shallow_copy(closed=closed)\n array = self._data.set_closed(closed)\n return self._simple_new(array, self.name)\n\n @property\n def length(self):\n \"\"\"\n Return an Index with entries denoting the length of each Interval in\n the IntervalIndex.\n \"\"\"\n return self._data.length\n\n @property\n def size(self):\n # Avoid materializing ndarray[Interval]\n return self._data.size\n\n @property\n def itemsize(self):\n msg = (\n \"IntervalIndex.itemsize is deprecated and will be removed in \"\n \"a future version\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=2)\n\n # suppress the warning from the underlying left/right itemsize\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return self.left.itemsize + self.right.itemsize\n\n def __len__(self) -> int:\n return len(self.left)\n\n @cache_readonly\n def values(self):\n \"\"\"\n Return the IntervalIndex's data as an IntervalArray.\n \"\"\"\n return self._data\n\n @cache_readonly\n def _values(self):\n return self._data\n\n @cache_readonly\n def _ndarray_values(self):\n return np.array(self._data)\n\n def __array__(self, result=None):\n \"\"\" the array interface, return my values \"\"\"\n return self._ndarray_values\n\n def __array_wrap__(self, result, context=None):\n # we don't want the superclass implementation\n return result\n\n def __reduce__(self):\n d = dict(left=self.left, right=self.right)\n d.update(self._get_attributes_dict())\n return _new_IntervalIndex, (self.__class__, d), None\n\n @Appender(_index_shared_docs[\"copy\"])\n def copy(self, deep=False, name=None):\n array = self._data\n if deep:\n array = array.copy()\n attributes = self._get_attributes_dict()\n if name is not None:\n attributes.update(name=name)\n\n return self._simple_new(array, **attributes)\n\n @Appender(_index_shared_docs[\"astype\"])\n def astype(self, dtype, copy=True):\n with rewrite_exception(\"IntervalArray\", self.__class__.__name__):\n new_values = self.values.astype(dtype, copy=copy)\n if is_interval_dtype(new_values):\n return self._shallow_copy(new_values.left, new_values.right)\n return super().astype(dtype, copy=copy)\n\n @cache_readonly\n def dtype(self):\n \"\"\"Return the dtype object of the underlying data\"\"\"\n return self._data.dtype\n\n @property\n def inferred_type(self) -> str:\n \"\"\"Return a string of the type inferred from the values\"\"\"\n return \"interval\"\n\n @Appender(Index.memory_usage.__doc__)\n def memory_usage(self, deep=False):\n # we don't use an explicit engine\n # so return the bytes here\n return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)\n\n @cache_readonly\n def mid(self):\n \"\"\"\n Return the midpoint of each Interval in the IntervalIndex as an Index.\n \"\"\"\n return self._data.mid\n\n @cache_readonly\n 
def is_monotonic(self):\n \"\"\"\n Return True if the IntervalIndex is monotonic increasing (only equal or\n increasing values), else False\n \"\"\"\n return self.is_monotonic_increasing\n\n @cache_readonly\n def is_monotonic_increasing(self):\n \"\"\"\n Return True if the IntervalIndex is monotonic increasing (only equal or\n increasing values), else False\n \"\"\"\n return self._engine.is_monotonic_increasing\n\n @cache_readonly\n def is_monotonic_decreasing(self) -> bool:\n \"\"\"\n Return True if the IntervalIndex is monotonic decreasing (only equal or\n decreasing values), else False\n \"\"\"\n return self[::-1].is_monotonic_increasing\n\n @cache_readonly\n def is_unique(self):\n \"\"\"\n Return True if the IntervalIndex contains unique elements, else False\n \"\"\"\n left = self.left\n right = self.right\n\n if self.isna().sum() > 1:\n return False\n\n if left.is_unique or right.is_unique:\n return True\n\n seen_pairs = set()\n check_idx = np.where(left.duplicated(keep=False))[0]\n for idx in check_idx:\n pair = (left[idx], right[idx])\n if pair in seen_pairs:\n return False\n seen_pairs.add(pair)\n\n return True\n\n @cache_readonly\n @Appender(_interval_shared_docs[\"is_non_overlapping_monotonic\"] % _index_doc_kwargs)\n def is_non_overlapping_monotonic(self):\n return self._data.is_non_overlapping_monotonic\n\n @property\n def is_overlapping(self):\n \"\"\"\n Return True if the IntervalIndex has overlapping intervals, else False.\n\n Two intervals overlap if they share a common point, including closed\n endpoints. Intervals that only have an open endpoint in common do not\n overlap.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n bool\n Boolean indicating if the IntervalIndex has overlapping intervals.\n\n See Also\n --------\n Interval.overlaps : Check whether two Interval objects overlap.\n IntervalIndex.overlaps : Check an IntervalIndex elementwise for\n overlaps.\n\n Examples\n --------\n >>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])\n >>> index\n IntervalIndex([(0, 2], (1, 3], (4, 5]],\n closed='right',\n dtype='interval[int64]')\n >>> index.is_overlapping\n True\n\n Intervals that share closed endpoints overlap:\n\n >>> index = pd.interval_range(0, 3, closed='both')\n >>> index\n IntervalIndex([[0, 1], [1, 2], [2, 3]],\n closed='both',\n dtype='interval[int64]')\n >>> index.is_overlapping\n True\n\n Intervals that only have an open endpoint in common do not overlap:\n\n >>> index = pd.interval_range(0, 3, closed='left')\n >>> index\n IntervalIndex([[0, 1), [1, 2), [2, 3)],\n closed='left',\n dtype='interval[int64]')\n >>> index.is_overlapping\n False\n \"\"\"\n # GH 23309\n return self._engine.is_overlapping\n\n @Appender(_index_shared_docs[\"_convert_scalar_indexer\"])\n def _convert_scalar_indexer(self, key, kind=None):\n if kind == \"iloc\":\n return super()._convert_scalar_indexer(key, kind=kind)\n return key\n\n def _maybe_cast_slice_bound(self, label, side, kind):\n return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)\n\n @Appender(_index_shared_docs[\"_convert_list_indexer\"])\n def _convert_list_indexer(self, keyarr, kind=None):\n \"\"\"\n we are passed a list-like indexer. 
Return the\n indexer for matching intervals.\n \"\"\"\n locs = self.get_indexer_for(keyarr)\n\n # we have missing values\n if (locs == -1).any():\n raise KeyError\n\n return locs\n\n def _maybe_cast_indexed(self, key):\n \"\"\"\n we need to cast the key, which could be a scalar\n or an array-like to the type of our subtype\n \"\"\"\n if isinstance(key, IntervalIndex):\n return key\n\n subtype = self.dtype.subtype\n if is_float_dtype(subtype):\n if is_integer(key):\n key = float(key)\n elif isinstance(key, (np.ndarray, Index)):\n key = key.astype(\"float64\")\n elif is_integer_dtype(subtype):\n if is_integer(key):\n key = int(key)\n\n return key\n\n def _can_reindex(self, indexer: np.ndarray) -> None:\n \"\"\"\n Check if we are allowing reindexing with this particular indexer.\n\n Parameters\n ----------\n indexer : an integer indexer\n\n Raises\n ------\n ValueError if its a duplicate axis\n \"\"\"\n\n # trying to reindex on an axis with duplicates\n if self.is_overlapping and len(indexer):\n raise ValueError(\"cannot reindex from an overlapping axis\")\n\n def _needs_i8_conversion(self, key):\n \"\"\"\n Check if a given key needs i8 conversion. Conversion is necessary for\n Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An\n Interval-like requires conversion if it's endpoints are one of the\n aforementioned types.\n\n Assumes that any list-like data has already been cast to an Index.\n\n Parameters\n ----------\n key : scalar or Index-like\n The key that should be checked for i8 conversion\n\n Returns\n -------\n boolean\n \"\"\"\n if is_interval_dtype(key) or isinstance(key, Interval):\n return self._needs_i8_conversion(key.left)\n\n i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)\n return isinstance(key, i8_types)\n\n def _maybe_convert_i8(self, key):\n \"\"\"\n Maybe convert a given key to it's equivalent i8 value(s). Used as a\n preprocessing step prior to IntervalTree queries (self._engine), which\n expects numeric data.\n\n Parameters\n ----------\n key : scalar or list-like\n The key that should maybe be converted to i8.\n\n Returns\n -------\n key: scalar or list-like\n The original key if no conversion occurred, int if converted scalar,\n Int64Index if converted list-like.\n \"\"\"\n original = key\n if is_list_like(key):\n key = ensure_index(key)\n\n if not self._needs_i8_conversion(key):\n return original\n\n scalar = is_scalar(key)\n if is_interval_dtype(key) or isinstance(key, Interval):\n # convert left/right and reconstruct\n left = self._maybe_convert_i8(key.left)\n right = self._maybe_convert_i8(key.right)\n constructor = Interval if scalar else IntervalIndex.from_arrays\n return constructor(left, right, closed=self.closed)\n\n if scalar:\n # Timestamp/Timedelta\n key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True)\n else:\n # DatetimeIndex/TimedeltaIndex\n key_dtype, key_i8 = key.dtype, Index(key.asi8)\n if key.hasnans:\n # convert NaT from it's i8 value to np.nan so it's not viewed\n # as a valid value, maybe causing errors (e.g. 
is_overlapping)\n key_i8 = key_i8.where(~key._isnan)\n\n # ensure consistency with IntervalIndex subtype\n subtype = self.dtype.subtype\n msg = (\n \"Cannot index an IntervalIndex of subtype {subtype} with \"\n \"values of dtype {other}\"\n )\n if not is_dtype_equal(subtype, key_dtype):\n raise ValueError(msg.format(subtype=subtype, other=key_dtype))\n\n return key_i8\n\n def _check_method(self, method):\n if method is None:\n return\n\n if method in [\"bfill\", \"backfill\", \"pad\", \"ffill\", \"nearest\"]:\n msg = \"method {method} not yet implemented for IntervalIndex\"\n raise NotImplementedError(msg.format(method=method))\n\n raise ValueError(\"Invalid fill method\")\n\n def _searchsorted_monotonic(self, label, side, exclude_label=False):\n if not self.is_non_overlapping_monotonic:\n raise KeyError(\n \"can only get slices from an IntervalIndex if \"\n \"bounds are non-overlapping and all monotonic \"\n \"increasing or decreasing\"\n )\n\n if isinstance(label, IntervalMixin):\n msg = \"Interval objects are not currently supported\"\n raise NotImplementedError(msg)\n\n # GH 20921: \"not is_monotonic_increasing\" for the second condition\n # instead of \"is_monotonic_decreasing\" to account for single element\n # indexes being both increasing and decreasing\n if (side == \"left\" and self.left.is_monotonic_increasing) or (\n side == \"right\" and not self.left.is_monotonic_increasing\n ):\n sub_idx = self.right\n if self.open_right or exclude_label:\n label = _get_next_label(label)\n else:\n sub_idx = self.left\n if self.open_left or exclude_label:\n label = _get_prev_label(label)\n\n return sub_idx._searchsorted_monotonic(label, side)\n\n def _find_non_overlapping_monotonic_bounds(self, key):\n if isinstance(key, IntervalMixin):\n start = self._searchsorted_monotonic(\n key.left, \"left\", exclude_label=key.open_left\n )\n stop = self._searchsorted_monotonic(\n key.right, \"right\", exclude_label=key.open_right\n )\n elif isinstance(key, slice):\n # slice\n start, stop = key.start, key.stop\n if (key.step or 1) != 1:\n raise NotImplementedError(\"cannot slice with a slice step\")\n if start is None:\n start = 0\n else:\n start = self._searchsorted_monotonic(start, \"left\")\n if stop is None:\n stop = len(self)\n else:\n stop = self._searchsorted_monotonic(stop, \"right\")\n else:\n # scalar or index-like\n\n start = self._searchsorted_monotonic(key, \"left\")\n stop = self._searchsorted_monotonic(key, \"right\")\n return start, stop\n\n def get_loc(\n self, key: Any, method: Optional[str] = None, tolerance=None\n ) -> Union[int, slice, np.ndarray]:\n \"\"\"\n Get integer location, slice or boolean mask for requested label.\n\n Parameters\n ----------\n key : label\n method : {None}, optional\n * default: matches where the label is within an interval only.\n\n Returns\n -------\n loc : int if unique index, slice if monotonic index, else mask\n\n Examples\n --------\n >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)\n >>> index = pd.IntervalIndex([i1, i2])\n >>> index.get_loc(1)\n 0\n\n You can also supply a point inside an interval.\n\n >>> index.get_loc(1.5)\n 1\n\n If a label is in several intervals, you get the locations of all the\n relevant intervals.\n\n >>> i3 = pd.Interval(0, 2)\n >>> overlapping_index = pd.IntervalIndex([i1, i2, i3])\n >>> overlapping_index.get_loc(0.5)\n array([ True, False, True])\n\n Only exact matches will be returned if an interval is provided.\n\n >>> index.get_loc(pd.Interval(0, 1))\n 0\n \"\"\"\n self._check_method(method)\n\n # list-like are invalid 
labels for II but in some cases may work, e.g\n # single element array of comparable type, so guard against them early\n if is_list_like(key):\n raise KeyError(key)\n\n if isinstance(key, Interval):\n if self.closed != key.closed:\n raise KeyError(key)\n mask = (self.left == key.left) & (self.right == key.right)\n else:\n # assume scalar\n op_left = le if self.closed_left else lt\n op_right = le if self.closed_right else lt\n try:\n mask = op_left(self.left, key) & op_right(key, self.right)\n except TypeError:\n # scalar is not comparable to II subtype --> invalid label\n raise KeyError(key)\n\n matches = mask.sum()\n if matches == 0:\n raise KeyError(key)\n elif matches == 1:\n return mask.argmax()\n return lib.maybe_booleans_to_slice(mask.view(\"u1\"))\n\n @Substitution(\n **dict(\n _index_doc_kwargs,\n **{\n \"raises_section\": textwrap.dedent(\n \"\"\"\n Raises\n ------\n NotImplementedError\n If any method argument other than the default of\n None is specified as these are not yet implemented.\n \"\"\"\n )\n }\n )\n )\n @Appender(_index_shared_docs[\"get_indexer\"])\n def get_indexer(\n self,\n target: AnyArrayLike,\n method: Optional[str] = None,\n limit: Optional[int] = None,\n tolerance: Optional[Any] = None,\n ) -> np.ndarray:\n\n self._check_method(method)\n\n if self.is_overlapping:\n msg = (\n \"cannot handle overlapping indices; use \"\n \"IntervalIndex.get_indexer_non_unique\"\n )\n raise InvalidIndexError(msg)\n\n target_as_index = ensure_index(target)\n\n if isinstance(target_as_index, IntervalIndex):\n # equal indexes -> 1:1 positional match\n if self.equals(target_as_index):\n return np.arange(len(self), dtype=\"intp\")\n\n # different closed or incompatible subtype -> no matches\n common_subtype = find_common_type(\n [self.dtype.subtype, target_as_index.dtype.subtype]\n )\n if self.closed != target_as_index.closed or is_object_dtype(common_subtype):\n return np.repeat(np.intp(-1), len(target_as_index))\n\n # non-overlapping -> at most one match per interval in target_as_index\n # want exact matches -> need both left/right to match, so defer to\n # left/right get_indexer, compare elementwise, equality -> match\n left_indexer = self.left.get_indexer(target_as_index.left)\n right_indexer = self.right.get_indexer(target_as_index.right)\n indexer = np.where(left_indexer == right_indexer, left_indexer, -1)\n elif not is_object_dtype(target_as_index):\n # homogeneous scalar index: use IntervalTree\n target_as_index = self._maybe_convert_i8(target_as_index)\n indexer = self._engine.get_indexer(target_as_index.values)\n else:\n # heterogeneous scalar index: defer elementwise to get_loc\n # (non-overlapping so get_loc guarantees scalar of KeyError)\n indexer = []\n for key in target_as_index:\n try:\n loc = self.get_loc(key)\n except KeyError:\n loc = -1\n indexer.append(loc)\n\n return ensure_platform_int(indexer)\n\n @Appender(_index_shared_docs[\"get_indexer_non_unique\"] % _index_doc_kwargs)\n def get_indexer_non_unique(\n self, target: AnyArrayLike\n ) -> Tuple[np.ndarray, np.ndarray]:\n target_as_index = ensure_index(target)\n\n # check that target_as_index IntervalIndex is compatible\n if isinstance(target_as_index, IntervalIndex):\n common_subtype = find_common_type(\n [self.dtype.subtype, target_as_index.dtype.subtype]\n )\n if self.closed != target_as_index.closed or is_object_dtype(common_subtype):\n # different closed or incompatible subtype -> no matches\n return (\n np.repeat(-1, len(target_as_index)),\n np.arange(len(target_as_index)),\n )\n\n if 
is_object_dtype(target_as_index) or isinstance(\n target_as_index, IntervalIndex\n ):\n # target_as_index might contain intervals: defer elementwise to get_loc\n indexer, missing = [], []\n for i, key in enumerate(target_as_index):\n try:\n locs = self.get_loc(key)\n if isinstance(locs, slice):\n locs = np.arange(locs.start, locs.stop, locs.step, dtype=\"intp\")\n locs = np.array(locs, ndmin=1)\n except KeyError:\n missing.append(i)\n locs = np.array([-1])\n indexer.append(locs)\n indexer = np.concatenate(indexer)\n else:\n target_as_index = self._maybe_convert_i8(target_as_index)\n indexer, missing = self._engine.get_indexer_non_unique(\n target_as_index.values\n )\n\n return ensure_platform_int(indexer), ensure_platform_int(missing)\n\n def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray:\n \"\"\"\n Guaranteed return of an indexer even when overlapping.\n\n This dispatches to get_indexer or get_indexer_non_unique\n as appropriate.\n\n Returns\n -------\n numpy.ndarray\n List of indices.\n \"\"\"\n if self.is_overlapping:\n return self.get_indexer_non_unique(target)[0]\n return self.get_indexer(target, **kwargs)\n\n @Appender(_index_shared_docs[\"get_value\"] % _index_doc_kwargs)\n def get_value(self, series: ABCSeries, key: Any) -> Any:\n\n if com.is_bool_indexer(key):\n loc = key\n elif is_list_like(key):\n if self.is_overlapping:\n loc, missing = self.get_indexer_non_unique(key)\n if len(missing):\n raise KeyError\n else:\n loc = self.get_indexer(key)\n elif isinstance(key, slice):\n if not (key.step is None or key.step == 1):\n raise ValueError(\"cannot support not-default step in a slice\")\n loc = self._convert_slice_indexer(key, kind=\"getitem\")\n else:\n loc = self.get_loc(key)\n return series.iloc[loc]\n\n @Appender(_index_shared_docs[\"where\"])\n def where(self, cond, other=None):\n if other is None:\n other = self._na_value\n values = np.where(cond, self.values, other)\n return self._shallow_copy(values)\n\n def delete(self, loc):\n \"\"\"\n Return a new IntervalIndex with passed location(-s) deleted\n\n Returns\n -------\n new_index : IntervalIndex\n \"\"\"\n new_left = self.left.delete(loc)\n new_right = self.right.delete(loc)\n return self._shallow_copy(new_left, new_right)\n\n def insert(self, loc, item):\n \"\"\"\n Return a new IntervalIndex inserting new item at location. Follows\n Python list.append semantics for negative values. 
Only Interval\n objects and NA can be inserted into an IntervalIndex\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n new_index : IntervalIndex\n \"\"\"\n if isinstance(item, Interval):\n if item.closed != self.closed:\n raise ValueError(\n \"inserted item must be closed on the same side as the index\"\n )\n left_insert = item.left\n right_insert = item.right\n elif is_scalar(item) and isna(item):\n # GH 18295\n left_insert = right_insert = item\n else:\n raise ValueError(\n \"can only insert Interval objects and NA into an IntervalIndex\"\n )\n\n new_left = self.left.insert(loc, left_insert)\n new_right = self.right.insert(loc, right_insert)\n return self._shallow_copy(new_left, new_right)\n\n def _concat_same_dtype(self, to_concat, name):\n \"\"\"\n assert that we all have the same .closed\n we allow a 0-len index here as well\n \"\"\"\n if not len({i.closed for i in to_concat if len(i)}) == 1:\n msg = (\n \"can only append two IntervalIndex objects \"\n \"that are closed on the same side\"\n )\n raise ValueError(msg)\n return super()._concat_same_dtype(to_concat, name)\n\n @Appender(_index_shared_docs[\"take\"] % _index_doc_kwargs)\n def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):\n result = self._data.take(\n indices, axis=axis, allow_fill=allow_fill, fill_value=fill_value, **kwargs\n )\n attributes = self._get_attributes_dict()\n return self._simple_new(result, **attributes)\n\n def __getitem__(self, value):\n result = self._data[value]\n if isinstance(result, IntervalArray):\n return self._shallow_copy(result)\n else:\n # scalar\n return result\n\n # --------------------------------------------------------------------\n # Rendering Methods\n # __repr__ associated methods are based on MultiIndex\n\n def _format_with_header(self, header, **kwargs):\n return header + list(self._format_native_types(**kwargs))\n\n def _format_native_types(self, na_rep=\"NaN\", quoting=None, **kwargs):\n # GH 28210: use base method but with different default na_rep\n return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs)\n\n def _format_data(self, name=None):\n\n # TODO: integrate with categorical and make generic\n # name argument is unused here; just for compat with base / categorical\n n = len(self)\n max_seq_items = min((get_option(\"display.max_seq_items\") or n) // 10, 10)\n\n formatter = str\n\n if n == 0:\n summary = \"[]\"\n elif n == 1:\n first = formatter(self[0])\n summary = \"[{first}]\".format(first=first)\n elif n == 2:\n first = formatter(self[0])\n last = formatter(self[-1])\n summary = \"[{first}, {last}]\".format(first=first, last=last)\n else:\n\n if n > max_seq_items:\n n = min(max_seq_items // 2, 10)\n head = [formatter(x) for x in self[:n]]\n tail = [formatter(x) for x in self[-n:]]\n summary = \"[{head} ... 
{tail}]\".format(\n head=\", \".join(head), tail=\", \".join(tail)\n )\n else:\n tail = [formatter(x) for x in self]\n summary = \"[{tail}]\".format(tail=\", \".join(tail))\n\n return summary + \",\" + self._format_space()\n\n def _format_attrs(self):\n attrs = [(\"closed\", repr(self.closed))]\n if self.name is not None:\n attrs.append((\"name\", default_pprint(self.name)))\n attrs.append((\"dtype\", \"'{dtype}'\".format(dtype=self.dtype)))\n return attrs\n\n def _format_space(self):\n space = \" \" * (len(self.__class__.__name__) + 1)\n return \"\\n{space}\".format(space=space)\n\n # --------------------------------------------------------------------\n\n def argsort(self, *args, **kwargs):\n return np.lexsort((self.right, self.left))\n\n def equals(self, other):\n \"\"\"\n Determines if two IntervalIndex objects contain the same elements\n \"\"\"\n if self.is_(other):\n return True\n\n # if we can coerce to an II\n # then we can compare\n if not isinstance(other, IntervalIndex):\n if not is_interval_dtype(other):\n return False\n other = Index(getattr(other, \".values\", other))\n\n return (\n self.left.equals(other.left)\n and self.right.equals(other.right)\n and self.closed == other.closed\n )\n\n @Appender(\n _interval_shared_docs[\"contains\"]\n % dict(\n klass=\"IntervalIndex\",\n examples=textwrap.dedent(\n \"\"\"\\\n >>> intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 3), (2, 4)])\n >>> intervals\n IntervalIndex([(0, 1], (1, 3], (2, 4]],\n closed='right',\n dtype='interval[int64]')\n >>> intervals.contains(0.5)\n array([ True, False, False])\n \"\"\"\n ),\n )\n )\n def contains(self, other):\n return self._data.contains(other)\n\n @Appender(\n _interval_shared_docs[\"overlaps\"]\n % dict(\n klass=\"IntervalIndex\",\n examples=textwrap.dedent(\n \"\"\"\\\n >>> intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 3), (2, 4)])\n >>> intervals\n IntervalIndex([(0, 1], (1, 3], (2, 4]],\n closed='right',\n dtype='interval[int64]')\n \"\"\"\n ),\n )\n )\n def overlaps(self, other):\n return self._data.overlaps(other)\n\n @Appender(_index_shared_docs[\"intersection\"])\n @SetopCheck(op_name=\"intersection\")\n def intersection(\n self, other: \"IntervalIndex\", sort: bool = False\n ) -> \"IntervalIndex\":\n if self.left.is_unique and self.right.is_unique:\n taken = self._intersection_unique(other)\n elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:\n # Swap other/self if other is unique and self does not have\n # multiple NaNs\n taken = other._intersection_unique(self)\n else:\n # duplicates\n taken = self._intersection_non_unique(other)\n\n if sort is None:\n taken = taken.sort_values()\n\n return taken\n\n def _intersection_unique(self, other: \"IntervalIndex\") -> \"IntervalIndex\":\n \"\"\"\n Used when the IntervalIndex does not have any common endpoint,\n no mater left or right.\n Return the intersection with another IntervalIndex.\n\n Parameters\n ----------\n other : IntervalIndex\n\n Returns\n -------\n taken : IntervalIndex\n \"\"\"\n lindexer = self.left.get_indexer(other.left)\n rindexer = self.right.get_indexer(other.right)\n\n match = (lindexer == rindexer) & (lindexer != -1)\n indexer = lindexer.take(match.nonzero()[0])\n\n return self.take(indexer)\n\n def _intersection_non_unique(self, other: \"IntervalIndex\") -> \"IntervalIndex\":\n \"\"\"\n Used when the IntervalIndex does have some common endpoints,\n on either sides.\n Return the intersection with another IntervalIndex.\n\n Parameters\n ----------\n other : IntervalIndex\n\n 
Returns\n -------\n taken : IntervalIndex\n \"\"\"\n mask = np.zeros(len(self), dtype=bool)\n\n if self.hasnans and other.hasnans:\n first_nan_loc = np.arange(len(self))[self.isna()][0]\n mask[first_nan_loc] = True\n\n other_tups = set(zip(other.left, other.right))\n for i, tup in enumerate(zip(self.left, self.right)):\n if tup in other_tups:\n mask[i] = True\n\n return self[mask]\n\n def _setop(op_name: str, sort=None):\n @SetopCheck(op_name=op_name)\n def func(self, other, sort=sort):\n result = getattr(self._multiindex, op_name)(other._multiindex, sort=sort)\n result_name = get_op_result_name(self, other)\n\n # GH 19101: ensure empty results have correct dtype\n if result.empty:\n result = result.values.astype(self.dtype.subtype)\n else:\n result = result.values\n\n return type(self).from_tuples(result, closed=self.closed, name=result_name)\n\n return func\n\n @property\n def is_all_dates(self) -> bool:\n \"\"\"\n This is False even when left/right contain datetime-like objects,\n as the check is done on the Interval itself\n \"\"\"\n return False\n\n union = _setop(\"union\")\n difference = _setop(\"difference\")\n symmetric_difference = _setop(\"symmetric_difference\")\n\n # TODO: arithmetic operations\n\n\nIntervalIndex._add_logical_methods_disabled()\n\n\ndef _is_valid_endpoint(endpoint):\n \"\"\"helper for interval_range to check if start/end are valid types\"\"\"\n return any(\n [\n is_number(endpoint),\n isinstance(endpoint, Timestamp),\n isinstance(endpoint, Timedelta),\n endpoint is None,\n ]\n )\n\n\ndef _is_type_compatible(a, b):\n \"\"\"helper for interval_range to check type compat of start/end/freq\"\"\"\n is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))\n is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))\n return (\n (is_number(a) and is_number(b))\n or (is_ts_compat(a) and is_ts_compat(b))\n or (is_td_compat(a) and is_td_compat(b))\n or com.any_none(a, b)\n )\n\n\ndef interval_range(\n start=None, end=None, periods=None, freq=None, name=None, closed=\"right\"\n):\n \"\"\"\n Return a fixed frequency IntervalIndex.\n\n Parameters\n ----------\n start : numeric or datetime-like, default None\n Left bound for generating intervals.\n end : numeric or datetime-like, default None\n Right bound for generating intervals.\n periods : int, default None\n Number of periods to generate.\n freq : numeric, str, or DateOffset, default None\n The length of each interval. Must be consistent with the type of start\n and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1\n for numeric and 'D' for datetime-like.\n name : str, default None\n Name of the resulting IntervalIndex.\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n\n Returns\n -------\n IntervalIndex\n\n See Also\n --------\n IntervalIndex : An Index of intervals that are all closed on the same side.\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. 
If ``freq`` is omitted, the resulting\n ``IntervalIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end``, inclusively.\n\n To learn more about datetime-like frequency strings, please see `this link\n <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n Numeric ``start`` and ``end`` is supported.\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],\n closed='right', dtype='interval[int64]')\n\n Additionally, datetime-like input is also supported.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n ... end=pd.Timestamp('2017-01-04'))\n IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],\n (2017-01-03, 2017-01-04]],\n closed='right', dtype='interval[datetime64[ns]]')\n\n The ``freq`` parameter specifies the frequency between the left and right.\n endpoints of the individual intervals within the ``IntervalIndex``. For\n numeric ``start`` and ``end``, the frequency must also be numeric.\n\n >>> pd.interval_range(start=0, periods=4, freq=1.5)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],\n closed='right', dtype='interval[float64]')\n\n Similarly, for datetime-like ``start`` and ``end``, the frequency must be\n convertible to a DateOffset.\n\n >>> pd.interval_range(start=pd.Timestamp('2017-01-01'),\n ... periods=3, freq='MS')\n IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],\n (2017-03-01, 2017-04-01]],\n closed='right', dtype='interval[datetime64[ns]]')\n\n Specify ``start``, ``end``, and ``periods``; the frequency is generated\n automatically (linearly spaced).\n\n >>> pd.interval_range(start=0, end=6, periods=4)\n IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],\n closed='right',\n dtype='interval[float64]')\n\n The ``closed`` parameter specifies which endpoints of the individual\n intervals within the ``IntervalIndex`` are closed.\n\n >>> pd.interval_range(end=5, periods=4, closed='both')\n IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],\n closed='both', dtype='interval[int64]')\n \"\"\"\n start = com.maybe_box_datetimelike(start)\n end = com.maybe_box_datetimelike(end)\n endpoint = start if start is not None else end\n\n if freq is None and com.any_none(periods, start, end):\n freq = 1 if is_number(endpoint) else \"D\"\n\n if com.count_not_none(start, end, periods, freq) != 3:\n raise ValueError(\n \"Of the four parameters: start, end, periods, and \"\n \"freq, exactly three must be specified\"\n )\n\n if not _is_valid_endpoint(start):\n msg = \"start must be numeric or datetime-like, got {start}\"\n raise ValueError(msg.format(start=start))\n elif not _is_valid_endpoint(end):\n msg = \"end must be numeric or datetime-like, got {end}\"\n raise ValueError(msg.format(end=end))\n\n if is_float(periods):\n periods = int(periods)\n elif not is_integer(periods) and periods is not None:\n msg = \"periods must be a number, got {periods}\"\n raise TypeError(msg.format(periods=periods))\n\n if freq is not None and not is_number(freq):\n try:\n freq = to_offset(freq)\n except ValueError:\n raise ValueError(\n \"freq must be numeric or convertible to \"\n \"DateOffset, got {freq}\".format(freq=freq)\n )\n\n # verify type compatibility\n if not all(\n [\n _is_type_compatible(start, end),\n _is_type_compatible(start, freq),\n _is_type_compatible(end, freq),\n ]\n ):\n raise TypeError(\"start, end, freq need to be type compatible\")\n\n # +1 to convert interval count to breaks count (n 
breaks = n-1 intervals)\n if periods is not None:\n periods += 1\n\n if is_number(endpoint):\n # force consistency between start/end/freq (lower end if freq skips it)\n if com.all_not_none(start, end, freq):\n end -= (end - start) % freq\n\n # compute the period/start/end if unspecified (at most one)\n if periods is None:\n periods = int((end - start) // freq) + 1\n elif start is None:\n start = end - (periods - 1) * freq\n elif end is None:\n end = start + (periods - 1) * freq\n\n breaks = np.linspace(start, end, periods)\n if all(is_integer(x) for x in com.not_none(start, end, freq)):\n # np.linspace always produces float output\n breaks = maybe_downcast_to_dtype(breaks, \"int64\")\n else:\n # delegate to the appropriate range function\n if isinstance(endpoint, Timestamp):\n range_func = date_range\n else:\n range_func = timedelta_range\n\n breaks = range_func(start=start, end=end, periods=periods, freq=freq)\n\n return IntervalIndex.from_breaks(breaks, name=name, closed=closed)\n"
] |
[
[
"pandas.tseries.frequencies.to_offset",
"numpy.linspace",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas._libs.interval.IntervalTree",
"pandas.core.indexes.base.Index",
"numpy.concatenate",
"pandas._config.get_option",
"numpy.where",
"numpy.nextafter",
"pandas.core.dtypes.common.is_interval_dtype",
"pandas.core.common.all_not_none",
"pandas.core.common.not_none",
"numpy.arange",
"numpy.lexsort",
"pandas.core.common.any_none",
"pandas.core.dtypes.common.is_number",
"pandas.core.ops.get_op_result_name",
"pandas.core.dtypes.common.is_float_dtype",
"pandas.core.dtypes.common.is_float",
"pandas.core.arrays.interval.IntervalArray",
"pandas.util._exceptions.rewrite_exception",
"pandas.core.indexes.base.default_pprint",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.util._decorators.Appender",
"pandas.core.common.maybe_box_datetimelike",
"pandas.core.indexes.base.ensure_index",
"pandas.core.dtypes.cast.infer_dtype_from_scalar",
"numpy.timedelta64",
"pandas.core.dtypes.common.ensure_platform_int",
"numpy.array",
"pandas.core.arrays.interval.IntervalArray.from_tuples",
"pandas.core.arrays.interval.IntervalArray.from_arrays",
"pandas.core.dtypes.cast.maybe_downcast_to_dtype",
"pandas.core.common.count_not_none",
"pandas.core.common.is_bool_indexer",
"numpy.intp",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_integer",
"pandas.core.dtypes.cast.find_common_type",
"pandas.core.indexes.multi.MultiIndex.from_arrays",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.dtypes.common.is_datetime_or_timedelta_dtype",
"pandas.core.dtypes.missing.isna",
"pandas._libs.interval.IntervalMixin.__new__",
"pandas.core.arrays.interval.IntervalArray.from_breaks",
"pandas.core.indexes.base.InvalidIndexError"
]
] |
myforkmachine/pyprobml
|
[
"a750b6e33e849ca75300fec1b9ee4b61def80c52"
] |
[
"auto_generated_scripts/combining_kernels_by_summation.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\ntry:\n import jax\nexcept:\n get_ipython().run_line_magic('pip', 'install jax jaxlib')\n import jax\nimport jax.numpy as jnp\n\ntry:\n import matplotlib.pyplot as plt\nexcept:\n get_ipython().run_line_magic('pip', 'install matplotlib')\n import matplotlib.pyplot as plt\n\ntry:\n import seaborn as sns\nexcept:\n get_ipython().run_line_magic('pip', 'install seaborn')\n import seaborn as sns\n\ntry:\n import tinygp\nexcept ModuleNotFoundError:\n get_ipython().run_line_magic('pip', 'install -qqq tinygp')\n import tinygp\nkernels = tinygp.kernels\nfrom tinygp import GaussianProcess\n\n\n# In[2]:\n\n\nimport os\n\ndev_mode = \"DEV_MODE\" in os.environ\n\nif dev_mode:\n import sys\n\n sys.path.append(\"scripts\")\n from plot_utils import latexify, savefig\n\n latexify(width_scale_factor=4, height_scale_factor=1.5 / 2)\n\n\n# In[3]:\n\n\ndef plot_sample(data, save_name):\n if dev_mode:\n fig, ax = plt.subplots(2, 1)\n else:\n fig, ax = plt.subplots(2, 1, figsize=(6.4, 6))\n\n # Plot kernel\n kernel = data[\"kernel1\"] + data[\"kernel2\"]\n x2 = jnp.array([1.0]).reshape(-1, 1)\n kernel_values = kernel(x, x2)\n ax[0].plot(x.ravel(), kernel_values.ravel(), color=\"k\")\n\n # Plot samples\n gp = GaussianProcess(kernel, x)\n samples = gp.sample(key, (2,))\n for sample in samples:\n ax[1].plot(x, sample)\n ax[0].set_title(data[\"title\"])\n ax[1].set_xlabel(data[\"xlabel\"])\n for axes in ax:\n axes.set_xticks([])\n ax[0].set_xlabel(\"$x$ (with $x'=1$)\")\n plt.tight_layout()\n sns.despine()\n if dev_mode and len(save_name) > 0:\n savefig(save_name)\n return fig, ax\n\n\nx = jnp.arange(-3.0, 5.1, 0.1).reshape(-1, 1)\nN = len(x)\n\nkey = jax.random.PRNGKey(4)\n\nfig, ax = plot_sample(\n {\n \"kernel1\": kernels.Polynomial(order=1),\n \"kernel2\": kernels.ExpSineSquared(scale=1.5, gamma=1.0),\n \"title\": \"Lin + Per\",\n \"xlabel\": \"periodic plus trend\",\n },\n save_name=\"kernel_sum_lin_per_latexified.pdf\",\n)\n\nfig, ax = plot_sample(\n {\n \"kernel1\": kernels.ExpSquared(scale=1.0),\n \"kernel2\": kernels.ExpSineSquared(scale=2.0, gamma=1.0),\n \"title\": \"SE + Per\",\n \"xlabel\": \"periodic plus noise\",\n },\n save_name=\"kernel_sum_se_per_latexified.pdf\",\n)\n\nfig, ax = plot_sample(\n {\n \"kernel1\": kernels.ExpSquared(scale=1.0),\n \"kernel2\": kernels.Polynomial(order=1),\n \"title\": \"SE + Lin\",\n \"xlabel\": \"linear plus variation\",\n },\n save_name=\"kernel_sum_lin_se_latexified.pdf\",\n)\n\nfig, ax = plot_sample(\n {\n \"kernel1\": kernels.ExpSquared(scale=5.0),\n \"kernel2\": kernels.ExpSquared(scale=0.5),\n \"title\": \"SE (long) + SE (short)}\",\n \"xlabel\": \"slow & fast variation\",\n },\n save_name=\"kernel_sum_se_se_latexified.pdf\",\n)\n\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots"
]
] |
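As a usage note for the combining_kernels_by_summation.py entry above: adding two tinygp kernels produces a kernel whose covariance is the sum of the two, so GP prior samples show both components (e.g. a smooth trend plus a periodic signal). Below is a minimal standalone sketch, assuming jax and tinygp are installed; the random key, length scales, and printed shapes are arbitrary illustration choices, not values prescribed by that script.

import jax
import jax.numpy as jnp
import tinygp
from tinygp import GaussianProcess

kernels = tinygp.kernels

# 1-D inputs as a column vector, as in the script above
x = jnp.arange(-3.0, 5.1, 0.1).reshape(-1, 1)
key = jax.random.PRNGKey(0)

# Sum of a squared-exponential and a periodic kernel ("SE + Per")
kernel = kernels.ExpSquared(scale=1.0) + kernels.ExpSineSquared(scale=2.0, gamma=1.0)

# Kernel values k(x, x') against a fixed second argument x' = 1
k_vals = kernel(x, jnp.array([1.0]).reshape(-1, 1))

# Two prior samples from the GP built on the summed kernel
gp = GaussianProcess(kernel, x)
samples = gp.sample(key, (2,))

print(k_vals.shape, samples.shape)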
iyanmv/galois
|
[
"a5e6386a684e3e0b47af608217002795dc25c702"
] |
[
"galois/_fields/_main.py"
] |
[
"\"\"\"\nA module that contains the main classes for Galois fields -- FieldClass, FieldArray,\nand Poly. They're all in one file because they have circular dependencies. The specific GF2\nFieldClass is also included.\n\"\"\"\nimport inspect\nimport math\nimport random\nfrom typing import Tuple, List, Sequence, Iterable, Optional, Union\nfrom typing_extensions import Literal\n\nimport numba\nimport numpy as np\n\nfrom .._factor import divisors\nfrom .._overrides import set_module\nfrom .._poly_conversion import integer_to_poly, poly_to_integer, str_to_integer, poly_to_str, sparse_poly_to_integer, sparse_poly_to_str, str_to_sparse_poly\n\nfrom ._dtypes import DTYPES\nfrom ._linalg import dot, row_reduce, lu_decompose, lup_decompose\nfrom ._functions import FunctionMeta\nfrom ._ufuncs import UfuncMeta\n\n__all__ = [\"FieldClass\", \"FieldArray\", \"GF2\", \"Poly\"]\n\n\n###############################################################################\n# NumPy ndarray subclass for Galois fields\n###############################################################################\n\n@set_module(\"galois\")\nclass FieldClass(FunctionMeta, UfuncMeta):\n \"\"\"\n Defines a metaclass for all :obj:`galois.FieldArray` classes.\n\n Important\n ---------\n :obj:`galois.FieldClass` is a metaclass for :obj:`galois.FieldArray` subclasses created with the class factory\n :func:`galois.GF` and should not be instantiated directly. This metaclass gives :obj:`galois.FieldArray` subclasses\n methods and attributes related to their Galois fields.\n\n This class is included in the API to allow the user to test if a class is a Galois field array class.\n\n .. ipython:: python\n\n GF = galois.GF(7)\n isinstance(GF, galois.FieldClass)\n \"\"\"\n # pylint: disable=no-value-for-parameter,unsupported-membership-test,abstract-method,too-many-public-methods\n\n def __new__(cls, name, bases, namespace, **kwargs): # pylint: disable=unused-argument\n return super().__new__(cls, name, bases, namespace)\n\n def __init__(cls, name, bases, namespace, **kwargs):\n super().__init__(name, bases, namespace, **kwargs)\n cls._characteristic = kwargs.get(\"characteristic\", 0)\n cls._degree = kwargs.get(\"degree\", 0)\n cls._order = kwargs.get(\"order\", 0)\n cls._order_str = None\n cls._ufunc_mode = None\n cls._ufunc_target = None\n cls._dtypes = cls._determine_dtypes()\n\n if \"irreducible_poly\" in kwargs:\n cls._irreducible_poly = kwargs[\"irreducible_poly\"]\n cls._irreducible_poly_int = cls._irreducible_poly.integer\n else:\n cls._irreducible_poly = None\n cls._irreducible_poly_int = 0\n cls._primitive_element = kwargs.get(\"primitive_element\", None)\n\n cls._is_primitive_poly = kwargs.get(\"is_primitive_poly\", None)\n cls._prime_subfield = None\n\n cls._display_mode = \"int\"\n\n if cls.degree == 1:\n cls._order_str = f\"order={cls.order}\"\n else:\n cls._order_str = f\"order={cls.characteristic}^{cls.degree}\"\n\n def __str__(cls):\n return f\"<class 'numpy.ndarray over {cls.name}'>\"\n\n def __repr__(cls):\n return str(cls)\n\n ###############################################################################\n # Helper methods\n ###############################################################################\n\n def _determine_dtypes(cls):\n \"\"\"\n At a minimum, valid dtypes are ones that can hold x for x in [0, order).\n \"\"\"\n dtypes = [dtype for dtype in DTYPES if np.iinfo(dtype).max >= cls.order - 1]\n if len(dtypes) == 0:\n dtypes = [np.object_]\n return dtypes\n\n 
###############################################################################\n # Class methods\n ###############################################################################\n\n def compile(cls, mode: str):\n \"\"\"\n Recompile the just-in-time compiled numba ufuncs for a new calculation mode.\n\n This function updates :obj:`ufunc_mode`.\n\n Parameters\n ----------\n mode : str\n The ufunc calculation mode.\n\n * `\"auto\"`: Selects \"jit-lookup\" for fields with order less than :math:`2^{20}`, \"jit-calculate\" for larger fields, and \"python-calculate\"\n for fields whose elements cannot be represented with :obj:`numpy.int64`.\n * `\"jit-lookup\"`: JIT compiles arithmetic ufuncs to use Zech log, log, and anti-log lookup tables for efficient computation.\n In the few cases where explicit calculation is faster than table lookup, explicit calculation is used.\n * `\"jit-calculate\"`: JIT compiles arithmetic ufuncs to use explicit calculation. The \"jit-calculate\" mode is designed for large\n fields that cannot or should not store lookup tables in RAM. Generally, the \"jit-calculate\" mode is slower than \"jit-lookup\".\n * `\"python-calculate\"`: Uses pure-python ufuncs with explicit calculation. This is reserved for fields whose elements cannot be\n represented with :obj:`numpy.int64` and instead use :obj:`numpy.object_` with python :obj:`int` (which has arbitrary precision).\n \"\"\"\n if not isinstance(mode, (type(None), str)):\n raise TypeError(f\"Argument `mode` must be a string, not {type(mode)}.\")\n # if not mode in [\"auto\", \"jit-lookup\", \"jit-calculate\", \"python-calculate\"]:\n # raise ValueError(f\"Argument `mode` must be in ['auto', 'jit-lookup', 'jit-calculate', 'python-calculate'], not {mode!r}.\")\n mode = cls.default_ufunc_mode if mode == \"auto\" else mode\n if mode not in cls.ufunc_modes:\n raise ValueError(f\"Argument `mode` must be in {cls.ufunc_modes} for {cls.name}, not {mode!r}.\")\n\n if mode == cls.ufunc_mode:\n # Don't need to rebuild these ufuncs\n return\n\n cls._ufunc_mode = mode\n cls._compile_ufuncs()\n\n def display(\n cls,\n mode: Literal[\"int\", \"poly\", \"power\"] = \"int\"\n ) -> \"DisplayContext\":\n r\"\"\"\n Sets the display mode for all Galois field arrays of this type.\n\n The display mode can be set to either the integer representation, polynomial representation, or power\n representation. This function updates :obj:`display_mode`.\n\n Warning\n -------\n For the power representation, :func:`np.log` is computed on each element. So for large fields without lookup\n tables, displaying arrays in the power representation may take longer than expected.\n\n Parameters\n ----------\n mode : str, optional\n The field element representation.\n\n * `\"int\"` (default): The element displayed as the integer representation of the polynomial. For example, :math:`2x^2 + x + 2` is an element of\n :math:`\\mathrm{GF}(3^3)` and is equivalent to the integer :math:`23 = 2 \\cdot 3^2 + 3 + 2`.\n * `\"poly\"`: The element as a polynomial over :math:`\\mathrm{GF}(p)` of degree less than :math:`m`. For example, :math:`2x^2 + x + 2` is an element\n of :math:`\\mathrm{GF}(3^3)`.\n * `\"power\"`: The element as a power of the primitive element, see :obj:`FieldClass.primitive_element`. For example, :math:`2x^2 + x + 2 = \\alpha^5`\n in :math:`\\mathrm{GF}(3^3)` with irreducible polynomial :math:`x^3 + 2x + 1` and primitive element :math:`\\alpha = x`.\n\n Returns\n -------\n DisplayContext\n A context manager for use in a `with` statement. 
If permanently setting the display mode, disregard the\n return value.\n\n Examples\n --------\n Change the display mode by calling the :func:`display` method.\n\n .. ipython:: python\n\n GF = galois.GF(3**3)\n print(GF.properties)\n a = GF(23); a\n\n # Permanently set the display mode to the polynomial representation\n GF.display(\"poly\"); a\n # Permanently set the display mode to the power representation\n GF.display(\"power\"); a\n # Permanently reset the default display mode to the integer representation\n GF.display(); a\n\n The :func:`display` method can also be used as a context manager, as shown below.\n\n For the polynomial representation, when the primitive element is :math:`\\alpha = x` in :math:`\\mathrm{GF}(p)[x]` the polynomial\n indeterminate used is :math:`\\alpha`.\n\n .. ipython:: python\n\n GF = galois.GF(2**8)\n print(GF.properties)\n a = GF.Random()\n print(GF.display_mode, a)\n with GF.display(\"poly\"):\n print(GF.display_mode, a)\n with GF.display(\"power\"):\n print(GF.display_mode, a)\n # The display mode is reset after exiting the context manager\n print(GF.display_mode, a)\n\n But when the primitive element is :math:`\\alpha \\ne x` in :math:`\\mathrm{GF}(p)[x]`, the polynomial\n indeterminate used is :math:`x`.\n\n .. ipython:: python\n\n GF = galois.GF(2**8, irreducible_poly=galois.Poly.Degrees([8,4,3,1,0]))\n print(GF.properties)\n a = GF.Random()\n print(GF.display_mode, a)\n with GF.display(\"poly\"):\n print(GF.display_mode, a)\n with GF.display(\"power\"):\n print(GF.display_mode, a)\n # The display mode is reset after exiting the context manager\n print(GF.display_mode, a)\n \"\"\"\n if not isinstance(mode, (type(None), str)):\n raise TypeError(f\"Argument `mode` must be a string, not {type(mode)}.\")\n if mode not in [\"int\", \"poly\", \"power\"]:\n raise ValueError(f\"Argument `mode` must be in ['int', 'poly', 'power'], not {mode!r}.\")\n\n context = DisplayContext(cls)\n cls._display_mode = mode # Set the new state\n\n return context\n\n def repr_table(\n cls,\n primitive_element: Optional[Union[int, str, np.ndarray, \"FieldArray\"]] = None,\n sort: Literal[\"power\", \"poly\", \"vector\", \"int\"] = \"power\"\n ) -> str:\n r\"\"\"\n Generates a field element representation table comparing the power, polynomial, vector, and integer representations.\n\n Parameters\n ----------\n primitive_element : int, str, np.ndarray, galois.FieldArray, optional\n The primitive element to use for the power representation. The default is `None` which uses the field's\n default primitive element, :obj:`primitive_element`. If an array, it must be a 0-D array.\n sort : str, optional\n The sorting method for the table, either `\"power\"` (default), `\"poly\"`, `\"vector\"`, or `\"int\"`. Sorting by \"power\" will order\n the rows of the table by ascending powers of the primitive element. Sorting by any of the others will order the rows in\n lexicographically-increasing polynomial/vector order, which is equivalent to ascending order of the integer representation.\n\n Returns\n -------\n str\n A UTF-8 formatted table comparing the power, polynomial, vector, and integer representations of each\n field element.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(2**4)\n print(GF.properties)\n\n Generate a representation table for :math:`\\mathrm{GF}(2^4)`. Since :math:`x^4 + x + 1` is a primitive polynomial,\n :math:`x` is a primitive element of the field. Notice, :math:`\\textrm{ord}(x) = 15`.\n\n .. 
ipython:: python\n\n print(GF.repr_table())\n\n Generate a representation table for :math:`\\mathrm{GF}(2^4)` using a different primitive element :math:`x^3 + x^2 + x`.\n Notice, :math:`\\textrm{ord}(x^3 + x^2 + x) = 15`.\n\n .. ipython:: python\n\n alpha = GF.primitive_elements[-1]\n print(GF.repr_table(alpha))\n\n Generate a representation table for :math:`\\mathrm{GF}(2^4)` using a non-primitive element :math:`x^3 + x^2`. Notice,\n :math:`\\textrm{ord}(x^3 + x^2) = 5 \\ne 15`.\n\n .. ipython:: python\n\n beta = GF(\"x^3 + x^2\")\n print(GF.repr_table(beta))\n \"\"\"\n if sort not in [\"power\", \"poly\", \"vector\", \"int\"]:\n raise ValueError(f\"Argument `sort` must be in ['power', 'poly', 'vector', 'int'], not {sort!r}.\")\n if primitive_element is None:\n primitive_element = cls.primitive_element\n\n degrees = np.arange(0, cls.order - 1)\n x = primitive_element**degrees\n if sort != \"power\":\n idxs = np.argsort(x)\n degrees, x = degrees[idxs], x[idxs]\n x = np.concatenate((np.atleast_1d(cls(0)), x)) # Add 0 = alpha**-Inf\n prim = poly_to_str(integer_to_poly(primitive_element, cls.characteristic))\n\n # Define print helper functions\n if len(prim) > 1:\n print_power = lambda power: \"0\" if power is None else f\"({prim})^{power}\"\n else:\n print_power = lambda power: \"0\" if power is None else f\"{prim}^{power}\"\n print_poly = lambda x: poly_to_str(integer_to_poly(x, cls.characteristic))\n print_vec = lambda x: str(integer_to_poly(x, cls.characteristic, degree=cls.degree-1))\n print_int = lambda x: str(int(x))\n\n # Determine column widths\n N_power = max([len(print_power(max(degrees))), len(\"Power\")]) + 2\n N_poly = max([len(print_poly(e)) for e in x] + [len(\"Polynomial\")]) + 2\n N_vec = max([len(print_vec(e)) for e in x] + [len(\"Vector\")]) + 2\n N_int = max([len(print_int(e)) for e in x] + [len(\"Integer\")]) + 2\n\n # Useful characters: https://www.utf8-chartable.de/unicode-utf8-table.pl?start=9472\n string = \"╔\" + \"═\"*N_power + \"╤\" + \"═\"*N_poly + \"╤\" + \"═\"*N_vec + \"╤\" + \"═\"*N_int + \"╗\"\n string += \"\\n║\" + \"Power\".center(N_power) + \"│\" + \"Polynomial\".center(N_poly) + \"│\" + \"Vector\".center(N_vec) + \"│\" + \"Integer\".center(N_int) + \"║\"\n string += \"\\n║\" + \"═\"*N_power + \"╪\" + \"═\"*N_poly + \"╪\" + \"═\"*N_vec + \"╪\" + \"═\"*N_int + \"║\"\n\n for i in range(x.size):\n d = None if i == 0 else degrees[i - 1]\n string += \"\\n║\" + print_power(d).center(N_power) + \"│\" + poly_to_str(integer_to_poly(x[i], cls.characteristic)).center(N_poly) + \"│\" + str(integer_to_poly(x[i], cls.characteristic, degree=cls.degree-1)).center(N_vec) + \"│\" + cls._print_int(x[i]).center(N_int) + \"║\"\n\n if i < x.size - 1:\n string += \"\\n╟\" + \"─\"*N_power + \"┼\" + \"─\"*N_poly + \"┼\" + \"─\"*N_vec + \"┼\" + \"─\"*N_int + \"╢\"\n\n string += \"\\n╚\" + \"═\"*N_power + \"╧\" + \"═\"*N_poly + \"╧\"+ \"═\"*N_vec + \"╧\" + \"═\"*N_int + \"╝\"\n\n return string\n\n def arithmetic_table(\n cls,\n operation: Literal[\"+\", \"-\", \"*\", \"/\"],\n x: Optional[\"FieldArray\"] = None,\n y: Optional[\"FieldArray\"] = None\n ) -> str:\n r\"\"\"\n Generates the specified arithmetic table for the Galois field.\n\n Parameters\n ----------\n operation : str\n The arithmetic operation, either `\"+\"`, `\"-\"`, `\"*\"`, or `\"/\"`.\n x : galois.FieldArray, optional\n Optionally specify the :math:`x` values for the arithmetic table. 
The default is `None`\n which represents :math:`\\{0, \\dots, p^m - 1\\}`.\n y : galois.FieldArray, optional\n Optionally specify the :math:`y` values for the arithmetic table. The default is `None`\n which represents :math:`\\{0, \\dots, p^m - 1\\}` for addition, subtraction, and multiplication and\n :math:`\\{1, \\dots, p^m - 1\\}` for division.\n\n Returns\n -------\n str\n A UTF-8 formatted arithmetic table.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(3**2)\n print(GF.arithmetic_table(\"+\"))\n\n .. ipython:: python\n\n GF.display(\"poly\");\n print(GF.arithmetic_table(\"+\"))\n\n .. ipython:: python\n\n GF.display(\"power\");\n print(GF.arithmetic_table(\"+\"))\n\n .. ipython:: python\n\n GF.display(\"poly\");\n x = GF.Random(5); x\n y = GF.Random(3); y\n print(GF.arithmetic_table(\"+\", x=x, y=y))\n GF.display();\n \"\"\"\n if not operation in [\"+\", \"-\", \"*\", \"/\"]:\n raise ValueError(f\"Argument `operation` must be in ['+', '-', '*', '/'], not {operation!r}.\")\n\n if cls.display_mode == \"power\":\n # Order elements by powers of the primitive element\n x_default = np.concatenate((np.atleast_1d(cls(0)), cls.primitive_element**np.arange(0, cls.order - 1, dtype=cls.dtypes[-1])))\n else:\n x_default = cls.Elements()\n y_default = x_default if operation != \"/\" else x_default[1:]\n\n x = x_default if x is None else cls(x)\n y = y_default if y is None else cls(y)\n X, Y = np.meshgrid(x, y, indexing=\"ij\")\n\n if operation == \"+\":\n Z = X + Y\n elif operation == \"-\":\n Z = X - Y\n elif operation == \"*\":\n Z = X * Y\n else:\n Z = X / Y\n\n if cls.display_mode == \"int\":\n print_element = cls._print_int\n elif cls.display_mode == \"poly\":\n print_element = cls._print_poly\n else:\n cls._set_print_power_vars(x)\n print_element = cls._print_power\n\n operation_str = f\"x {operation} y\"\n\n N = max([len(print_element(e)) for e in x]) + 2\n N_left = max(N, len(operation_str) + 2)\n\n # Useful characters: https://www.utf8-chartable.de/unicode-utf8-table.pl?start=9472\n string = \"╔\" + \"═\"*N_left + \"╦\" + (\"═\"*N + \"╤\")*(y.size - 1) + \"═\"*N + \"╗\"\n string += \"\\n║\" + operation_str.rjust(N_left - 1) + \" ║\"\n for j in range(y.size):\n string += print_element(y[j]).center(N)\n string += \"│\" if j < y.size - 1 else \"║\"\n string += \"\\n╠\" + \"═\"*N_left + \"╬\" + (\"═\"*N + \"╪\")*(y.size - 1) + \"═\"*N + \"╣\"\n\n for i in range(x.size):\n string += \"\\n║\" + print_element(x[i]).rjust(N_left - 1) + \" ║\"\n for j in range(y.size):\n string += print_element(Z[i,j]).center(N)\n string += \"│\" if j < y.size - 1 else \"║\"\n\n if i < x.size - 1:\n string += \"\\n╟\" + \"─\"*N_left + \"╫\" + (\"─\"*N + \"┼\")*(y.size - 1) + \"─\"*N + \"╢\"\n\n string += \"\\n╚\" + \"═\"*N_left + \"╩\" + (\"═\"*N + \"╧\")*(y.size - 1) + \"═\"*N + \"╝\"\n\n return string\n\n ###############################################################################\n # Array display methods\n ###############################################################################\n\n def _formatter(cls, array):\n # pylint: disable=attribute-defined-outside-init\n formatter = {}\n if cls.display_mode == \"poly\":\n formatter[\"int\"] = cls._print_poly\n formatter[\"object\"] = cls._print_poly\n elif cls.display_mode == \"power\":\n cls._set_print_power_vars(array)\n formatter[\"int\"] = cls._print_power\n formatter[\"object\"] = cls._print_power\n elif array.dtype == np.object_:\n formatter[\"object\"] = cls._print_int\n return formatter\n\n def _print_int(cls, element): # pylint: 
disable=no-self-use\n return f\"{int(element)}\"\n\n def _print_poly(cls, element):\n poly = integer_to_poly(element, cls.characteristic)\n poly_var = \"α\" if cls.primitive_element == cls.characteristic else \"x\"\n return poly_to_str(poly, poly_var=poly_var)\n\n def _set_print_power_vars(cls, array):\n nonzero_idxs = np.nonzero(array)\n if array.ndim > 1:\n max_power = np.max(cls._ufunc(\"log\")(array[nonzero_idxs], cls.primitive_element))\n if max_power > 1:\n cls._display_power_width = 2 + len(str(max_power))\n else:\n cls._display_power_width = 1\n else:\n cls._display_power_width = None\n\n def _print_power(cls, element):\n if element == 0:\n s = \"0\"\n else:\n power = cls._ufunc(\"log\")(element, cls.primitive_element)\n if power > 1:\n s = f\"α^{power}\"\n elif power == 1:\n s = \"α\"\n else:\n s = \"1\"\n\n if cls._display_power_width:\n return s.rjust(cls._display_power_width)\n else:\n return s\n\n ###############################################################################\n # Class attributes\n ###############################################################################\n\n @property\n def name(cls) -> str:\n \"\"\"\n str: The Galois field name.\n\n Examples\n --------\n .. ipython:: python\n\n galois.GF(2).name\n galois.GF(2**8).name\n galois.GF(31).name\n galois.GF(7**5).name\n \"\"\"\n if cls._degree == 1:\n return f\"GF({cls._characteristic})\"\n else:\n return f\"GF({cls._characteristic}^{cls._degree})\"\n\n @property\n def characteristic(cls) -> int:\n r\"\"\"\n int: The prime characteristic :math:`p` of the Galois field :math:`\\mathrm{GF}(p^m)`. Adding\n :math:`p` copies of any element will always result in :math:`0`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(2**8, display=\"poly\")\n GF.characteristic\n a = GF.Random(low=1); a\n a * GF.characteristic\n @suppress\n GF.display();\n\n .. ipython:: python\n\n GF = galois.GF(31)\n GF.characteristic\n a = GF.Random(low=1); a\n a * GF.characteristic\n \"\"\"\n return cls._characteristic\n\n @property\n def degree(cls) -> int:\n r\"\"\"\n int: The prime characteristic's degree :math:`m` of the Galois field :math:`\\mathrm{GF}(p^m)`. The degree\n is a positive integer.\n\n Examples\n --------\n .. ipython:: python\n\n galois.GF(2).degree\n galois.GF(2**8).degree\n galois.GF(31).degree\n galois.GF(7**5).degree\n \"\"\"\n return cls._degree\n\n @property\n def order(cls) -> int:\n r\"\"\"\n int: The order :math:`p^m` of the Galois field :math:`\\mathrm{GF}(p^m)`. The order of the field is also equal to\n the field's size.\n\n Examples\n --------\n .. ipython:: python\n\n galois.GF(2).order\n galois.GF(2**8).order\n galois.GF(31).order\n galois.GF(7**5).order\n \"\"\"\n return cls._order\n\n @property\n def irreducible_poly(cls) -> \"Poly\":\n r\"\"\"\n galois.Poly: The irreducible polynomial :math:`f(x)` of the Galois field :math:`\\mathrm{GF}(p^m)`. The irreducible\n polynomial is of degree :math:`m` over :math:`\\mathrm{GF}(p)`.\n\n Examples\n --------\n .. ipython:: python\n\n galois.GF(2).irreducible_poly\n galois.GF(2**8).irreducible_poly\n galois.GF(31).irreducible_poly\n galois.GF(7**5).irreducible_poly\n \"\"\"\n # Ensure accesses of this property don't alter it\n return cls._irreducible_poly.copy()\n\n @property\n def is_primitive_poly(cls) -> bool:\n r\"\"\"\n bool: Indicates whether the :obj:`irreducible_poly` is a primitive polynomial. If so, :math:`x` is a primitive element\n of the Galois field.\n\n Examples\n --------\n .. 
ipython:: python\n\n GF = galois.GF(2**8, display=\"poly\")\n GF.irreducible_poly\n GF.primitive_element\n\n # The irreducible polynomial is a primitive polynomial if the primitive element is a root\n GF.irreducible_poly(GF.primitive_element, field=GF)\n GF.is_primitive_poly\n @suppress\n GF.display();\n\n Here is an example using the :math:`\\mathrm{GF}(2^8)` field from AES, which does not use a primitive polynomial.\n\n .. ipython:: python\n\n GF = galois.GF(2**8, irreducible_poly=galois.Poly.Degrees([8,4,3,1,0]), display=\"poly\")\n GF.irreducible_poly\n GF.primitive_element\n\n # The irreducible polynomial is a primitive polynomial if the primitive element is a root\n GF.irreducible_poly(GF.primitive_element, field=GF)\n GF.is_primitive_poly\n @suppress\n GF.display();\n \"\"\"\n return cls._is_primitive_poly\n\n @property\n def primitive_element(cls) -> \"FieldArray\":\n r\"\"\"\n galois.FieldArray: A primitive element :math:`\\alpha` of the Galois field :math:`\\mathrm{GF}(p^m)`. A primitive element is a multiplicative\n generator of the field, such that :math:`\\mathrm{GF}(p^m) = \\{0, 1, \\alpha, \\alpha^2, \\dots, \\alpha^{p^m - 2}\\}`.\n\n A primitive element is a root of the primitive polynomial :math:`f(x)`, such that :math:`f(\\alpha) = 0` over\n :math:`\\mathrm{GF}(p^m)`.\n\n Examples\n --------\n .. ipython:: python\n\n galois.GF(2).primitive_element\n galois.GF(2**8).primitive_element\n galois.GF(31).primitive_element\n galois.GF(7**5).primitive_element\n \"\"\"\n # Ensure accesses of this property doesn't alter it\n return cls(cls._primitive_element) # pylint: disable=no-value-for-parameter\n\n @property\n def primitive_elements(cls) -> \"FieldArray\":\n r\"\"\"\n galois.FieldArray: All primitive elements :math:`\\alpha` of the Galois field :math:`\\mathrm{GF}(p^m)`. A primitive element is a multiplicative\n generator of the field, such that :math:`\\mathrm{GF}(p^m) = \\{0, 1, \\alpha, \\alpha^2, \\dots, \\alpha^{p^m - 2}\\}`.\n\n Examples\n --------\n .. ipython:: python\n\n galois.GF(2).primitive_elements\n galois.GF(2**8).primitive_elements\n galois.GF(31).primitive_elements\n galois.GF(7**5).primitive_elements\n \"\"\"\n n = cls.order - 1\n totatives = [t for t in range(1, n + 1) if math.gcd(n, t) == 1]\n powers = np.array(totatives)\n return np.sort(cls.primitive_element ** powers)\n\n @property\n def quadratic_residues(cls) -> \"FieldArray\":\n r\"\"\"\n galois.FieldArray: All quadratic residues in the Galois field.\n\n An element :math:`x` in :math:`\\mathrm{GF}(p^m)` is a *quadratic residue* if there exists a :math:`y` such that\n :math:`y^2 = x` in the field.\n\n In fields with characteristic 2, every element is a quadratic residue. In fields with characteristic greater than 2,\n exactly half of the nonzero elements are quadratic residues (and they have two unique square roots).\n\n See also :func:`FieldArray.is_quadratic_residue`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(11)\n x = GF.quadratic_residues; x\n r = np.sqrt(x)\n r, -r\n r**2\n (-r)**2\n\n .. 
ipython:: python\n\n GF = galois.GF(2**4)\n x = GF.quadratic_residues; x\n r = np.sqrt(x)\n r, -r\n r**2\n (-r)**2\n \"\"\"\n x = cls.Elements()\n is_quadratic_residue = x.is_quadratic_residue()\n return x[is_quadratic_residue]\n\n @property\n def quadratic_non_residues(cls) -> \"FieldArray\":\n r\"\"\"\n galois.FieldArray: All quadratic non-residues in the Galois field.\n\n An element :math:`x` in :math:`\\mathrm{GF}(p^m)` is a *quadratic non-residue* if there does not exist a :math:`y` such that\n :math:`y^2 = x` in the field.\n\n In fields with characteristic 2, no elements are quadratic non-residues. In fields with characteristic greater than 2,\n exactly half of the nonzero elements are quadratic non-residues.\n\n See also :func:`FieldArray.is_quadratic_residue`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(11)\n GF.quadratic_non_residues\n\n .. ipython:: python\n\n GF = galois.GF(2**4)\n GF.quadratic_non_residues\n \"\"\"\n x = cls.Elements()\n is_quadratic_residue = x.is_quadratic_residue()\n return x[~is_quadratic_residue]\n\n @property\n def is_prime_field(cls) -> bool:\n \"\"\"\n bool: Indicates if the field's order is prime.\n\n Examples\n --------\n .. ipython:: python\n\n galois.GF(2).is_prime_field\n galois.GF(2**8).is_prime_field\n galois.GF(31).is_prime_field\n galois.GF(7**5).is_prime_field\n \"\"\"\n return cls._degree == 1\n\n @property\n def is_extension_field(cls) -> bool:\n \"\"\"\n bool: Indicates if the field's order is a prime power.\n\n Examples\n --------\n .. ipython:: python\n\n galois.GF(2).is_extension_field\n galois.GF(2**8).is_extension_field\n galois.GF(31).is_extension_field\n galois.GF(7**5).is_extension_field\n \"\"\"\n return cls._degree > 1\n\n @property\n def prime_subfield(cls) -> \"FieldClass\":\n r\"\"\"\n galois.FieldClass: The prime subfield :math:`\\mathrm{GF}(p)` of the extension field :math:`\\mathrm{GF}(p^m)`.\n\n Examples\n --------\n .. ipython:: python\n\n print(galois.GF(2).prime_subfield.properties)\n print(galois.GF(2**8).prime_subfield.properties)\n print(galois.GF(31).prime_subfield.properties)\n print(galois.GF(7**5).prime_subfield.properties)\n \"\"\"\n return cls._prime_subfield\n\n @property\n def dtypes(cls) -> List[np.dtype]:\n \"\"\"\n list: List of valid integer :obj:`numpy.dtype` values that are compatible with this Galois field. Creating an array with an\n unsupported dtype will throw a `TypeError` exception.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(2); GF.dtypes\n GF = galois.GF(2**8); GF.dtypes\n GF = galois.GF(31); GF.dtypes\n GF = galois.GF(7**5); GF.dtypes\n\n For Galois fields that cannot be represented by :obj:`numpy.int64`, the only valid dtype is :obj:`numpy.object_`.\n\n .. ipython:: python\n\n GF = galois.GF(2**100); GF.dtypes\n GF = galois.GF(36893488147419103183); GF.dtypes\n \"\"\"\n return cls._dtypes\n\n @property\n def display_mode(cls) -> str:\n r\"\"\"\n str: The representation of Galois field elements, either `\"int\"`, `\"poly\"`, or `\"power\"`. This can be\n changed with :func:`display`.\n\n Examples\n --------\n For the polynomial representation, when the primitive element is :math:`\\alpha = x` in :math:`\\mathrm{GF}(p)[x]` the polynomial\n indeterminate used is :math:`\\alpha`.\n\n .. 
ipython:: python\n\n GF = galois.GF(2**8)\n print(GF.properties)\n a = GF.Random()\n print(GF.display_mode, a)\n with GF.display(\"poly\"):\n print(GF.display_mode, a)\n with GF.display(\"power\"):\n print(GF.display_mode, a)\n # The display mode is reset after exiting the context manager\n print(GF.display_mode, a)\n\n But when the primitive element is :math:`\\alpha \\ne x` in :math:`\\mathrm{GF}(p)[x]`, the polynomial\n indeterminate used is :math:`x`.\n\n .. ipython:: python\n\n GF = galois.GF(2**8, irreducible_poly=galois.Poly.Degrees([8,4,3,1,0]))\n print(GF.properties)\n a = GF.Random()\n print(GF.display_mode, a)\n with GF.display(\"poly\"):\n print(GF.display_mode, a)\n with GF.display(\"power\"):\n print(GF.display_mode, a)\n # The display mode is reset after exiting the context manager\n print(GF.display_mode, a)\n\n The power representation displays elements as powers of :math:`\\alpha` the primitive element, see\n :obj:`FieldClass.primitive_element`.\n\n .. ipython:: python\n\n with GF.display(\"power\"):\n print(GF.display_mode, a)\n # The display mode is reset after exiting the context manager\n print(GF.display_mode, a)\n \"\"\"\n return cls._display_mode\n\n @property\n def ufunc_mode(cls) -> str:\n \"\"\"\n str: The mode for ufunc compilation, either `\"jit-lookup\"`, `\"jit-calculate\"`, or `\"python-calculate\"`.\n\n Examples\n --------\n .. ipython:: python\n\n galois.GF(2).ufunc_mode\n galois.GF(2**8).ufunc_mode\n galois.GF(31).ufunc_mode\n galois.GF(7**5).ufunc_mode\n \"\"\"\n return cls._ufunc_mode\n\n @property\n def ufunc_modes(cls) -> List[str]:\n \"\"\"\n list: All supported ufunc modes for this Galois field array class.\n\n Examples\n --------\n .. ipython:: python\n\n galois.GF(2).ufunc_modes\n galois.GF(2**8).ufunc_modes\n galois.GF(31).ufunc_modes\n galois.GF(2**100).ufunc_modes\n \"\"\"\n if cls.dtypes == [np.object_]:\n return [\"python-calculate\"]\n else:\n return [\"jit-lookup\", \"jit-calculate\"]\n\n @property\n def default_ufunc_mode(cls) -> str:\n \"\"\"\n str: The default ufunc arithmetic mode for this Galois field.\n\n Examples\n --------\n .. ipython:: python\n\n galois.GF(2).default_ufunc_mode\n galois.GF(2**8).default_ufunc_mode\n galois.GF(31).default_ufunc_mode\n galois.GF(2**100).default_ufunc_mode\n \"\"\"\n if cls.dtypes == [np.object_]:\n return \"python-calculate\"\n elif cls.order <= 2**20:\n return \"jit-lookup\"\n else:\n return \"jit-calculate\"\n\n @property\n def properties(cls) -> str:\n \"\"\"\n str: A formatted string displaying relevant properties of the Galois field.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(2); print(GF.properties)\n GF = galois.GF(2**8); print(GF.properties)\n GF = galois.GF(31); print(GF.properties)\n GF = galois.GF(7**5); print(GF.properties)\n \"\"\"\n string = f\"{cls.name}:\"\n string += f\"\\n characteristic: {cls.characteristic}\"\n string += f\"\\n degree: {cls.degree}\"\n string += f\"\\n order: {cls.order}\"\n string += f\"\\n irreducible_poly: {cls.irreducible_poly.string}\"\n string += f\"\\n is_primitive_poly: {cls.is_primitive_poly}\"\n string += f\"\\n primitive_element: {poly_to_str(integer_to_poly(cls.primitive_element, cls.characteristic))}\"\n return string\n\n\nclass DirMeta(type):\n \"\"\"\n A mixin metaclass that overrides __dir__ so that dir() and tab-completion in ipython of `FieldArray` classes\n (which are `FieldClass` instances) include the methods and properties from the metaclass. 
Python does not\n natively include metaclass properties in dir().\n\n This is a separate class because it will be mixed in to `GF2Meta`, `GF2mMeta`, `GFpMeta`, and `GFpmMeta` separately. Otherwise, the\n sphinx documentation of `FieldArray` gets messed up.\n\n Also, to not mess up the sphinx documentation of `GF2`, we had to create a custom sphinx template `class_gf2.rst` that\n manually includes all the classmethods and methods. This is because there is no way to redefine __dir__ for `GF2` and not have\n sphinx get confused when using autoclass.\n \"\"\"\n\n def __dir__(cls):\n if isinstance(cls, FieldClass):\n meta_dir = dir(type(cls))\n classmethods = [attribute for attribute in super().__dir__() if attribute[0] != \"_\" and inspect.ismethod(getattr(cls, attribute))]\n return sorted(meta_dir + classmethods)\n else:\n return super().__dir__()\n\n\nclass DisplayContext:\n \"\"\"\n Simple context manager for the :obj:`FieldClass.display` method.\n \"\"\"\n\n def __init__(self, cls):\n # Save the previous state\n self.cls = cls\n self.mode = cls.display_mode\n\n def __enter__(self):\n # Don't need to do anything, we already set the new mode in the display() method\n pass\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Reset mode and upon exiting the context\n self.cls._display_mode = self.mode\n\n\n###############################################################################\n# NumPy arrays over Galois fields\n###############################################################################\n\n@set_module(\"galois\")\nclass FieldArray(np.ndarray, metaclass=FieldClass):\n r\"\"\"\n An array over :math:`\\mathrm{GF}(p^m)`.\n\n Important\n ---------\n :obj:`galois.FieldArray` is an abstract base class for all Galois field array classes and cannot be instantiated\n directly. Instead, :obj:`galois.FieldArray` subclasses are created using the class factory :func:`galois.GF`.\n\n This class is included in the API to allow the user to test if an array is a Galois field array subclass.\n\n .. ipython:: python\n\n GF = galois.GF(7)\n issubclass(GF, galois.FieldArray)\n x = GF([1,2,3]); x\n isinstance(x, galois.FieldArray)\n\n Notes\n -----\n :obj:`galois.FieldArray` is an abstract base class and cannot be instantiated directly. Instead, the user creates a :obj:`galois.FieldArray`\n subclass for the field :math:`\\mathrm{GF}(p^m)` by calling the class factory :func:`galois.GF`, e.g. `GF = galois.GF(p**m)`. In this case,\n `GF` is a subclass of :obj:`galois.FieldArray` and an instance of :obj:`galois.FieldClass`, a metaclass that defines special methods and attributes\n related to the Galois field.\n\n :obj:`galois.FieldArray`, and `GF`, is a subclass of :obj:`numpy.ndarray` and its constructor `x = GF(array_like)` has the same syntax as\n :func:`numpy.array`. The returned :obj:`galois.FieldArray` instance `x` is a :obj:`numpy.ndarray` that is acted upon like any other\n numpy array, except all arithmetic is performed in :math:`\\mathrm{GF}(p^m)` not in :math:`\\mathbb{Z}` or :math:`\\mathbb{R}`.\n\n Examples\n --------\n Construct the Galois field class for :math:`\\mathrm{GF}(2^8)` using the class factory :func:`galois.GF` and then display\n some relevant properties of the field. See :obj:`galois.FieldClass` for a complete list of Galois field array class\n methods and attributes.\n\n .. ipython:: python\n\n GF256 = galois.GF(2**8)\n GF256\n print(GF256.properties)\n\n Depending on the field's order, only certain numpy dtypes are supported. 
See :obj:`galois.FieldClass.dtypes` for more details.\n\n .. ipython:: python\n\n GF256.dtypes\n\n Galois field arrays can be created from existing numpy arrays.\n\n .. ipython:: python\n\n x = np.array([155, 232, 162, 159, 63, 29, 247, 141, 75, 189], dtype=int)\n\n # Explicit Galois field array creation -- a copy is performed\n GF256(x)\n\n # Or view an existing numpy array as a Galois field array -- no copy is performed\n x.view(GF256)\n\n Galois field arrays can also be created explicitly by converting an \"array-like\" object.\n\n .. ipython:: python\n\n # A scalar GF(2^8) element from its integer representation\n GF256(37)\n\n # A scalar GF(2^8) element from its polynomial representation\n GF256(\"x^5 + x^2 + 1\")\n\n # A GF(2^8) array from a list of elements in their integer representation\n GF256([[142, 27], [92, 253]])\n\n # A GF(2^8) array from a list of elements in their integer and polynomial representations\n GF256([[142, \"x^5 + x^2 + 1\"], [92, 253]])\n\n There's also an alternate constructor :func:`Vector` (and accompanying :func:`vector` method) to convert an array of coefficients\n over :math:`\\mathrm{GF}(p)` with last dimension :math:`m` into Galois field elements in :math:`\\mathrm{GF}(p^m)`.\n\n .. ipython:: python\n\n # A scalar GF(2^8) element from its vector representation\n GF256.Vector([0, 0, 1, 0, 0, 1, 0, 1])\n\n # A GF(2^8) array from a list of elements in their vector representation\n GF256.Vector([[[1, 0, 0, 0, 1, 1, 1, 0], [0, 0, 0, 1, 1, 0, 1, 1]], [[0, 1, 0, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 0, 1]]])\n\n Newly-created arrays will use the smallest unsigned dtype, unless otherwise specified.\n\n .. ipython:: python\n\n a = GF256([66, 166, 27, 182, 125]); a\n a.dtype\n b = GF256([66, 166, 27, 182, 125], dtype=np.int64); b\n b.dtype\n \"\"\"\n # pylint: disable=unsupported-membership-test,not-an-iterable,too-many-public-methods\n\n def __new__(\n cls,\n array: Union[int, str, Iterable, np.ndarray, \"FieldArray\"],\n dtype: Optional[Union[np.dtype, int, object]] = None,\n copy: bool = True,\n order: Literal[\"K\", \"A\", \"C\", \"F\"] = \"K\",\n ndmin: int = 0\n ) -> \"FieldArray\":\n if cls is FieldArray:\n raise NotImplementedError(\"FieldArray is an abstract base class that cannot be directly instantiated. Instead, create a FieldArray subclass for GF(p^m) arithmetic using `GF = galois.GF(p**m)` and instantiate an array using `x = GF(array_like)`.\")\n return cls._array(array, dtype=dtype, copy=copy, order=order, ndmin=ndmin)\n\n def __init__(\n self,\n array: Union[int, str, Iterable, np.ndarray, \"FieldArray\"],\n dtype: Optional[Union[np.dtype, int, object]] = None,\n copy: bool = True,\n order: Literal[\"K\", \"A\", \"C\", \"F\"] = \"K\",\n ndmin: int = 0\n ):\n r\"\"\"\n Creates an array over :math:`\\mathrm{GF}(p^m)`.\n\n Parameters\n ----------\n array : int, str, tuple, list, numpy.ndarray, galois.FieldArray\n The input array-like object to be converted to a Galois field array. See the examples section for demonstations of array creation\n using each input type. 
See see :func:`galois.FieldClass.display` and :obj:`galois.FieldClass.display_mode` for a description of the\n \"integer\" and \"polynomial\" representation of Galois field elements.\n\n * :obj:`int`: A single integer, which is the \"integer representation\" of a Galois field element, creates a 0-D array.\n * :obj:`str`: A single string, which is the \"polynomial representation\" of a Galois field element, creates a 0-D array.\n * :obj:`tuple`, :obj:`list`: A list or tuple (or nested lists/tuples) of ints or strings (which can be mix-and-matched) creates an array of\n Galois field elements from their integer or polynomial representations.\n * :obj:`numpy.ndarray`, :obj:`galois.FieldArray`: An array of ints creates a copy of the array over this specific field.\n\n dtype : numpy.dtype, optional\n The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned\n dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.\n copy : bool, optional\n The `copy` keyword argument from :func:`numpy.array`. The default is `True` which makes a copy of the input array.\n order : str, optional\n The `order` keyword argument from :func:`numpy.array`. Valid values are `\"K\"` (default), `\"A\"`, `\"C\"`, or `\"F\"`.\n ndmin : int, optional\n The `ndmin` keyword argument from :func:`numpy.array`. The minimum number of dimensions of the output.\n The default is 0.\n\n Returns\n -------\n galois.FieldArray\n An array over :math:`\\mathrm{GF}(p^m)`.\n \"\"\"\n # pylint: disable=unused-argument,super-init-not-called\n # Adding __init__ and not doing anything is done to overwrite the superclass's __init__ docstring\n return\n\n @classmethod\n def _get_dtype(cls, dtype):\n if dtype is None:\n return cls.dtypes[0]\n\n # Convert \"dtype\" to a numpy dtype. 
This does platform specific conversion, if necessary.\n # For example, np.dtype(int) == np.int64 (on some systems).\n dtype = np.dtype(dtype)\n if dtype not in cls.dtypes:\n raise TypeError(f\"{cls.name} arrays only support dtypes {[np.dtype(d).name for d in cls.dtypes]}, not {dtype.name!r}.\")\n\n return dtype\n\n @classmethod\n def _array(cls, array_like, dtype=None, copy=True, order=\"K\", ndmin=0):\n dtype = cls._get_dtype(dtype)\n array_like = cls._check_array_like_object(array_like)\n array = np.array(array_like, dtype=dtype, copy=copy, order=order, ndmin=ndmin)\n return array.view(cls)\n\n @classmethod\n def _check_array_like_object(cls, array_like):\n if isinstance(array_like, cls):\n # If this was a previously-created and vetted array, there's no need to reverify\n return array_like\n\n if isinstance(array_like, str):\n # Convert the string to an integer and verify it's in range\n array_like = cls._check_string_value(array_like)\n cls._check_array_values(array_like)\n elif isinstance(array_like, (int, np.integer)):\n # Just check that the single int is in range\n cls._check_array_values(array_like)\n elif isinstance(array_like, (list, tuple)):\n # Recursively check the items in the iterable to ensure they're of the correct type\n # and that their values are in range\n array_like = cls._check_iterable_types_and_values(array_like)\n elif isinstance(array_like, np.ndarray):\n # If this a NumPy array, but not a FieldArray, verify the array\n if array_like.dtype == np.object_:\n array_like = cls._check_array_types_dtype_object(array_like)\n elif not np.issubdtype(array_like.dtype, np.integer):\n raise TypeError(f\"{cls.name} arrays must have integer dtypes, not {array_like.dtype}.\")\n cls._check_array_values(array_like)\n else:\n raise TypeError(f\"{cls.name} arrays can be created with scalars of type int, not {type(array_like)}.\")\n\n return array_like\n\n @classmethod\n def _check_iterable_types_and_values(cls, iterable):\n new_iterable = []\n for item in iterable:\n if isinstance(item, (list, tuple)):\n item = cls._check_iterable_types_and_values(item)\n new_iterable.append(item)\n continue\n\n if isinstance(item, str):\n item = cls._check_string_value(item)\n elif not isinstance(item, (int, np.integer, FieldArray)):\n raise TypeError(f\"When {cls.name} arrays are created/assigned with an iterable, each element must be an integer. Found type {type(item)}.\")\n\n cls._check_array_values(item)\n # if not 0 <= item < cls.order:\n # raise ValueError(f\"{cls.name} arrays must have elements in 0 <= x < {cls.order}, not {item}.\")\n\n # Ensure the type is int so dtype=object classes don't get all mixed up\n new_iterable.append(int(item))\n\n return new_iterable\n\n @classmethod\n def _check_array_types_dtype_object(cls, array):\n if array.size == 0:\n return array\n if array.ndim == 0:\n if not isinstance(array[()], (int, np.integer, FieldArray)):\n raise TypeError(f\"When {cls.name} arrays are created/assigned with a numpy array with `dtype=object`, each element must be an integer. Found type {type(array[()])}.\")\n return int(array)\n\n iterator = np.nditer(array, flags=[\"multi_index\", \"refs_ok\"])\n for _ in iterator:\n a = array[iterator.multi_index]\n if not isinstance(a, (int, np.integer, FieldArray)):\n raise TypeError(f\"When {cls.name} arrays are created/assigned with a numpy array with `dtype=object`, each element must be an integer. 
Found type {type(a)}.\")\n\n # Ensure the type is int so dtype=object classes don't get all mixed up\n array[iterator.multi_index] = int(a)\n\n return array\n\n @classmethod\n def _check_array_values(cls, array):\n if not isinstance(array, np.ndarray):\n # Convert single integer to array so next step doesn't fail\n array = np.array(array)\n\n # Check the value of the \"field elements\" and make sure they are valid\n if np.any(array < 0) or np.any(array >= cls.order):\n idxs = np.logical_or(array < 0, array >= cls.order)\n values = array if array.ndim == 0 else array[idxs]\n raise ValueError(f\"{cls.name} arrays must have elements in `0 <= x < {cls.order}`, not {values}.\")\n\n @classmethod\n def _check_string_value(cls, string):\n return str_to_integer(string, cls.prime_subfield)\n\n ###############################################################################\n # Alternate constructors\n ###############################################################################\n\n @classmethod\n def Zeros(\n cls,\n shape: Union[int, Sequence[int]],\n dtype: Optional[Union[np.dtype, int, object]] = None\n ) -> \"FieldArray\":\n \"\"\"\n Creates a Galois field array with all zeros.\n\n Parameters\n ----------\n shape : int, tuple\n A numpy-compliant `shape` tuple, see :obj:`numpy.ndarray.shape`. An empty tuple `()` represents a scalar.\n A single integer or 1-tuple, e.g. `N` or `(N,)`, represents the size of a 1-D array. A 2-tuple, e.g.\n `(M,N)`, represents a 2-D array with each element indicating the size in each dimension.\n dtype : numpy.dtype, optional\n The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned\n dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.\n\n Returns\n -------\n galois.FieldArray\n A Galois field array of zeros.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(31)\n GF.Zeros((2,5))\n \"\"\"\n dtype = cls._get_dtype(dtype)\n array = np.zeros(shape, dtype=dtype)\n return array.view(cls)\n\n @classmethod\n def Ones(\n cls,\n shape: Union[int, Sequence[int]],\n dtype: Optional[Union[np.dtype, int, object]] = None\n ) -> \"FieldArray\":\n \"\"\"\n Creates a Galois field array with all ones.\n\n Parameters\n ----------\n shape : int, tuple\n A numpy-compliant `shape` tuple, see :obj:`numpy.ndarray.shape`. An empty tuple `()` represents a scalar.\n A single integer or 1-tuple, e.g. `N` or `(N,)`, represents the size of a 1-D array. A 2-tuple, e.g.\n `(M,N)`, represents a 2-D array with each element indicating the size in each dimension.\n dtype : numpy.dtype, optional\n The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned\n dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.\n\n Returns\n -------\n galois.FieldArray\n A Galois field array of ones.\n\n Examples\n --------\n .. 
ipython:: python\n\n GF = galois.GF(31)\n GF.Ones((2,5))\n \"\"\"\n dtype = cls._get_dtype(dtype)\n array = np.ones(shape, dtype=dtype)\n return array.view(cls)\n\n @classmethod\n def Range(\n cls,\n start: int,\n stop: int,\n step: Optional[int] = 1,\n dtype: Optional[Union[np.dtype, int, object]] = None\n ) -> \"FieldArray\":\n \"\"\"\n Creates a 1-D Galois field array with a range of field elements.\n\n Parameters\n ----------\n start : int\n The starting Galois field value (inclusive) in its integer representation.\n stop : int\n The stopping Galois field value (exclusive) in its integer representation.\n step : int, optional\n The space between values. The default is 1.\n dtype : numpy.dtype, optional\n The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned\n dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.\n\n Returns\n -------\n galois.FieldArray\n A 1-D Galois field array of a range of field elements.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(31)\n GF.Range(10,20)\n \"\"\"\n if not stop <= cls.order:\n raise ValueError(f\"The stopping value must be less than the field order of {cls.order}, not {stop}.\")\n dtype = cls._get_dtype(dtype)\n array = np.arange(start, stop, step=step, dtype=dtype)\n return array.view(cls)\n\n @classmethod\n def Random(\n cls,\n shape: Union[int, Sequence[int]] = (),\n low: Optional[int] = 0,\n high: Optional[int] = None,\n seed: Optional[Union[int, np.random.Generator]] = None,\n dtype: Optional[Union[np.dtype, int, object]] = None\n ) -> \"FieldArray\":\n \"\"\"\n Creates a Galois field array with random field elements.\n\n Parameters\n ----------\n shape : int, tuple\n A numpy-compliant `shape` tuple, see :obj:`numpy.ndarray.shape`. An empty tuple `()` represents a scalar.\n A single integer or 1-tuple, e.g. `N` or `(N,)`, represents the size of a 1-D array. A 2-tuple, e.g.\n `(M,N)`, represents a 2-D array with each element indicating the size in each dimension.\n low : int, optional\n The lowest value (inclusive) of a random field element in its integer representation. The default is 0.\n high : int, optional\n The highest value (exclusive) of a random field element in its integer representation. The default is `None`\n which represents the field's order :math:`p^m`.\n seed: int, numpy.random.Generator, optional\n Non-negative integer used to initialize the PRNG. The default is `None` which means that unpredictable\n entropy will be pulled from the OS to be used as the seed. A :obj:`numpy.random.Generator` can also be passed. If so,\n it is used directly when `dtype != np.object_`. Its state is used to seed `random.seed()`, otherwise.\n dtype : numpy.dtype, optional\n The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned\n dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.\n\n Returns\n -------\n galois.FieldArray\n A Galois field array of random field elements.\n\n Examples\n --------\n Generate a random matrix with an unpredictable seed.\n\n .. ipython:: python\n\n GF = galois.GF(31)\n GF.Random((2,5))\n\n Generate a random array with a specified seed. This produces repeatable outputs.\n\n .. ipython:: python\n\n GF.Random(10, seed=123456789)\n GF.Random(10, seed=123456789)\n\n Generate a group of random arrays with one global seed.\n\n .. 
ipython:: python\n\n rng = np.random.default_rng(123456789)\n GF.Random(10, seed=rng)\n GF.Random(10, seed=rng)\n \"\"\"\n dtype = cls._get_dtype(dtype)\n high = cls.order if high is None else high\n if not 0 <= low < high <= cls.order:\n raise ValueError(f\"Arguments must satisfy `0 <= low < high <= order`, not `0 <= {low} < {high} <= {cls.order}`.\")\n\n if seed is not None:\n if not isinstance(seed, (int, np.integer, np.random.Generator)):\n raise ValueError(\"Seed must be an integer, a numpy.random.Generator or None.\")\n if isinstance(seed, (int, np.integer)) and seed < 0:\n raise ValueError(\"Seed must be non-negative.\")\n\n if dtype != np.object_:\n rng = np.random.default_rng(seed)\n array = rng.integers(low, high, shape, dtype=dtype)\n else:\n array = np.empty(shape, dtype=dtype)\n iterator = np.nditer(array, flags=[\"multi_index\", \"refs_ok\"])\n _seed = None\n if seed is not None:\n if isinstance(seed, np.integer):\n # np.integers not supported by random and seeding based on hashing deprecated since Python 3.9\n _seed = seed.item()\n elif isinstance(seed, np.random.Generator):\n _seed = seed.bit_generator.state['state']['state']\n seed.bit_generator.advance(1)\n else: # int\n _seed = seed\n random.seed(_seed)\n for _ in iterator:\n array[iterator.multi_index] = random.randint(low, high - 1)\n\n return array.view(cls)\n\n @classmethod\n def Elements(\n cls,\n dtype: Optional[Union[np.dtype, int, object]] = None\n ) -> \"FieldArray\":\n r\"\"\"\n Creates a 1-D Galois field array of the field's elements :math:`\\{0, \\dots, p^m-1\\}`.\n\n Parameters\n ----------\n dtype : numpy.dtype, optional\n The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned\n dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.\n\n Returns\n -------\n galois.FieldArray\n A 1-D Galois field array of all the field's elements.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(2**4)\n GF.Elements()\n\n As usual, Galois field elements can be displayed in either the \"integer\" (default), \"polynomial\", or \"power\" representation.\n This can be changed by calling :func:`galois.FieldClass.display`.\n\n .. ipython:: python\n\n # Permanently set the display mode to \"poly\"\n GF.display(\"poly\");\n GF.Elements()\n # Temporarily set the display mode to \"power\"\n with GF.display(\"power\"):\n print(GF.Elements())\n # Reset the display mode to \"int\"\n GF.display();\n \"\"\"\n return cls.Range(0, cls.order, step=1, dtype=dtype)\n\n @classmethod\n def Identity(\n cls,\n size: int,\n dtype: Optional[Union[np.dtype, int, object]] = None\n ) -> \"FieldArray\":\n r\"\"\"\n Creates an :math:`n \\times n` Galois field identity matrix.\n\n Parameters\n ----------\n size : int\n The size :math:`n` along one axis of the matrix. The resulting array has shape `(size, size)`.\n dtype : numpy.dtype, optional\n The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned\n dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.\n\n Returns\n -------\n galois.FieldArray\n A Galois field identity matrix of shape `(size, size)`.\n\n Examples\n --------\n .. 
ipython:: python\n\n GF = galois.GF(31)\n GF.Identity(4)\n \"\"\"\n dtype = cls._get_dtype(dtype)\n array = np.identity(size, dtype=dtype)\n return array.view(cls)\n\n @classmethod\n def Vandermonde(\n cls,\n a: Union[int, \"FieldArray\"],\n m: int,\n n: int,\n dtype: Optional[Union[np.dtype, int, object]] = None\n ) -> \"FieldArray\":\n r\"\"\"\n Creates an :math:`m \\times n` Vandermonde matrix of :math:`a \\in \\mathrm{GF}(p^m)`.\n\n Parameters\n ----------\n a : int, galois.FieldArray\n An element of :math:`\\mathrm{GF}(p^m)`.\n m : int\n The number of rows in the Vandermonde matrix.\n n : int\n The number of columns in the Vandermonde matrix.\n dtype : numpy.dtype, optional\n The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned\n dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.\n\n Returns\n -------\n galois.FieldArray\n The :math:`m \\times n` Vandermonde matrix.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(2**3)\n a = GF.primitive_element\n V = GF.Vandermonde(a, 7, 7)\n with GF.display(\"power\"):\n print(V)\n \"\"\"\n if not isinstance(a, (int, np.integer, cls)):\n raise TypeError(f\"Argument `a` must be an integer or element of {cls.name}, not {type(a)}.\")\n if not isinstance(m, (int, np.integer)):\n raise TypeError(f\"Argument `m` must be an integer, not {type(m)}.\")\n if not isinstance(n, (int, np.integer)):\n raise TypeError(f\"Argument `n` must be an integer, not {type(n)}.\")\n if not m > 0:\n raise ValueError(f\"Argument `m` must be non-negative, not {m}.\")\n if not n > 0:\n raise ValueError(f\"Argument `n` must be non-negative, not {n}.\")\n\n dtype = cls._get_dtype(dtype)\n a = cls(a, dtype=dtype)\n if not a.ndim == 0:\n raise ValueError(f\"Argument `a` must be a scalar, not {a.ndim}-D.\")\n\n v = a ** np.arange(0, m)\n V = np.power.outer(v, np.arange(0, n))\n\n return V\n\n @classmethod\n def Vector(\n cls,\n array: Union[Iterable, np.ndarray, \"FieldArray\"],\n dtype: Optional[Union[np.dtype, int, object]] = None\n ) -> \"FieldArray\":\n r\"\"\"\n Creates a Galois field array over :math:`\\mathrm{GF}(p^m)` from length-:math:`m` vectors over the prime subfield :math:`\\mathrm{GF}(p)`.\n\n This function is the inverse operation of the :func:`vector` method.\n\n Parameters\n ----------\n array : array_like\n The input array with field elements in :math:`\\mathrm{GF}(p)` to be converted to a Galois field array in :math:`\\mathrm{GF}(p^m)`.\n The last dimension of the input array must be :math:`m`. An input array with shape `(n1, n2, m)` has output shape `(n1, n2)`. By convention,\n the vectors are ordered from highest degree to 0-th degree.\n dtype : numpy.dtype, optional\n The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned\n dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.\n\n Returns\n -------\n galois.FieldArray\n A Galois field array over :math:`\\mathrm{GF}(p^m)`.\n\n Examples\n --------\n .. 
ipython:: python\n\n GF = galois.GF(2**6)\n vec = galois.GF2.Random((3,6)); vec\n a = GF.Vector(vec); a\n with GF.display(\"poly\"):\n print(a)\n a.vector()\n \"\"\"\n order = cls.prime_subfield.order\n degree = cls.degree\n array = cls.prime_subfield(array).view(np.ndarray).astype(cls.dtypes[-1]) # Use the largest dtype so computation doesn't overflow\n if not array.shape[-1] == degree:\n raise ValueError(f\"The last dimension of `array` must be the field extension dimension {cls.degree}, not {array.shape[-1]}.\")\n degrees = np.arange(degree - 1, -1, -1, dtype=cls.dtypes[-1])\n array = np.sum(array * order**degrees, axis=-1)\n return cls(array, dtype=dtype)\n\n ###############################################################################\n # Instance methods\n ###############################################################################\n\n def additive_order(self) -> Union[np.integer, np.ndarray]:\n r\"\"\"\n Computes the additive order of each element in :math:`x`.\n\n Returns\n -------\n numpy.integer, numpy.ndarray\n An integer array of the additive order of each element in :math:`x`. The return value is a single integer if the\n input array :math:`x` is a scalar.\n\n Notes\n -----\n The additive order :math:`a` of :math:`x` in :math:`\\mathrm{GF}(p^m)` is the smallest integer :math:`a`\n such that :math:`x a = 0`. With the exception of :math:`0`, the additive order of every element is\n the finite field's characteristic.\n\n Examples\n --------\n Below is the additive order of each element of :math:`\\mathrm{GF}(2^4)`.\n\n .. ipython:: python\n\n GF = galois.GF(2**4)\n x = GF.Elements(); x\n order = x.additive_order(); order\n x*order\n \"\"\"\n x = self\n field = type(self)\n\n if x.ndim == 0:\n order = np.int64(1) if x == 0 else np.int64(field.characteristic)\n else:\n order = field.characteristic * np.ones(x.shape, dtype=np.int64)\n order[np.where(x == 0)] = 1\n\n return order\n\n def multiplicative_order(self) -> Union[np.integer, np.ndarray]:\n r\"\"\"\n Computes the multiplicative order :math:`\\textrm{ord}(x)` of each element in :math:`x`.\n\n Returns\n -------\n numpy.integer, numpy.ndarray\n An integer array of the multiplicative order of each element in :math:`x`. The return value is a single integer if the\n input array :math:`x` is a scalar.\n\n Notes\n -----\n The multiplicative order :math:`\\textrm{ord}(x) = a` of :math:`x` in :math:`\\mathrm{GF}(p^m)` is the smallest power :math:`a`\n such that :math:`x^a = 1`. If :math:`a = p^m - 1`, :math:`a` is said to be a generator of the multiplicative group\n :math:`\\mathrm{GF}(p^m)^\\times`.\n\n The multiplicative order of :math:`0` is not defined and will raise an :obj:`ArithmeticError`.\n\n :func:`FieldArray.multiplicative_order` should not be confused with :obj:`FieldClass.order`. The former is a method on a\n Galois field array that returns the multiplicative order of elements. The latter is a property of the field, namely\n the finite field's order or size.\n\n Examples\n --------\n Below is the multiplicative order of each non-zero element of :math:`\\mathrm{GF}(2^4)`. The elements with\n :math:`\\textrm{ord}(x) = 15` are multiplicative generators of :math:`\\mathrm{GF}(2^4)^\\times`\n\n .. 
ipython:: python\n\n GF = galois.GF(2**4)\n # The multiplicative order of 0 is not defined\n x = GF.Range(1, GF.order); x\n order = x.multiplicative_order(); order\n # Elements with order of 15 are the primitive elements (generators) of the field\n GF.primitive_elements\n x**order\n \"\"\"\n if not np.count_nonzero(self) == self.size:\n raise ArithmeticError(\"The multiplicative order of 0 is not defined.\")\n\n x = self\n field = type(self)\n\n if field.ufunc_mode == \"jit-lookup\":\n # This algorithm is faster if np.log() has a lookup table\n # β = α^k\n # ord(α) = p^m - 1\n # ord(β) = (p^m - 1) / gcd(p^m - 1, k)\n k = np.log(x) # x as an exponent of α\n order = (field.order - 1) // np.gcd(field.order - 1, k)\n else:\n d = np.array(divisors(field.order - 1)) # Divisors d such that d | p^m - 1\n y = np.power.outer(x, d) # x^d -- the first divisor d for which x^d == 1 is the order of x\n idxs = np.argmin(np.abs(y.view(np.ndarray) - 1), axis=-1) # First index of divisors, which is the order of x\n order = d[idxs] # The order of each element of x\n\n return order\n\n def is_quadratic_residue(self) -> Union[np.bool_, np.ndarray]:\n r\"\"\"\n Determines if the elements of :math:`x` are quadratic residues in the Galois field.\n\n Returns\n -------\n numpy.bool_, numpy.ndarray\n An boolean array indicating if each element in :math:`x` is a quadratic residue. The return value is a single boolean if the\n input array :math:`x` is a scalar.\n\n Notes\n -----\n An element :math:`x` in :math:`\\mathrm{GF}(p^m)` is a *quadratic residue* if there exists a :math:`y` such that\n :math:`y^2 = x` in the field.\n\n In fields with characteristic 2, every element is a quadratic residue. In fields with characteristic greater than 2,\n exactly half of the nonzero elements are quadratic residues (and they have two unique square roots).\n\n References\n ----------\n * Section 3.5.1 from https://cacr.uwaterloo.ca/hac/about/chap3.pdf.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(11)\n x = GF.Elements(); x\n x.is_quadratic_residue()\n\n .. ipython:: python\n\n GF = galois.GF(2**4)\n x = GF.Elements(); x\n x.is_quadratic_residue()\n\n .. ipython:: python\n\n GF = galois.GF(3**3)\n x = GF.Elements(); x\n x.is_quadratic_residue()\n \"\"\"\n x = self\n field = type(self)\n\n if field.characteristic == 2:\n # All elements are quadratic residues if the field's characteristic is 2\n return np.ones(x.shape, dtype=bool) if x.ndim > 0 else np.bool_(True)\n else:\n # Compute the Legendre symbol on each element\n return x ** ((field.order - 1)//2) != field.characteristic - 1\n\n def vector(\n self,\n dtype: Optional[Union[np.dtype, int, object]] = None\n ) -> \"FieldArray\":\n r\"\"\"\n Converts the Galois field array over :math:`\\mathrm{GF}(p^m)` to length-:math:`m` vectors over the prime subfield :math:`\\mathrm{GF}(p)`.\n\n This function is the inverse operation of the :func:`Vector` constructor. For an array with shape `(n1, n2)`, the output shape\n is `(n1, n2, m)`. By convention, the vectors are ordered from highest degree to 0-th degree.\n\n Parameters\n ----------\n dtype : numpy.dtype, optional\n The :obj:`numpy.dtype` of the array elements. The default is `None` which represents the smallest unsigned\n dtype for this class, i.e. the first element in :obj:`galois.FieldClass.dtypes`.\n\n Returns\n -------\n galois.FieldArray\n A Galois field array of length-:math:`m` vectors over :math:`\\mathrm{GF}(p)`.\n\n Examples\n --------\n .. 
ipython:: python\n\n GF = galois.GF(2**6)\n a = GF.Random(3); a\n with GF.display(\"poly\"):\n print(a)\n vec = a.vector(); vec\n GF.Vector(vec)\n \"\"\"\n order = type(self).prime_subfield.order\n degree = type(self).degree\n array = self.view(np.ndarray)\n array = np.repeat(array, degree).reshape(*array.shape, degree)\n x = 0\n for i in range(degree):\n q = (array[...,i] - x) // order**(degree - 1 - i)\n array[...,i] = q\n x += q*order**(degree - 1 - i)\n return type(self).prime_subfield(array, dtype=dtype) # pylint: disable=unexpected-keyword-arg\n\n def row_reduce(\n self,\n ncols: Optional[int] = None\n ) -> \"FieldArray\":\n r\"\"\"\n Performs Gaussian elimination on the matrix to achieve reduced row echelon form.\n\n **Row reduction operations**\n\n 1. Swap the position of any two rows.\n 2. Multiply a row by a non-zero scalar.\n 3. Add one row to a scalar multiple of another row.\n\n Parameters\n ----------\n ncols : int, optional\n The number of columns to perform Gaussian elimination over. The default is `None` which represents\n the number of columns of the input array.\n\n Returns\n -------\n galois.FieldArray\n The reduced row echelon form of the input array.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(31)\n A = GF.Random((4,4)); A\n A.row_reduce()\n np.linalg.matrix_rank(A)\n\n One column is a linear combination of another.\n\n .. ipython:: python\n\n GF = galois.GF(31)\n A = GF.Random((4,4)); A\n A[:,2] = A[:,1] * GF(17); A\n A.row_reduce()\n np.linalg.matrix_rank(A)\n\n One row is a linear combination of another.\n\n .. ipython:: python\n\n GF = galois.GF(31)\n A = GF.Random((4,4)); A\n A[3,:] = A[2,:] * GF(8); A\n A.row_reduce()\n np.linalg.matrix_rank(A)\n \"\"\"\n return row_reduce(self, ncols=ncols)\n\n def lu_decompose(self) -> \"FieldArray\":\n r\"\"\"\n Decomposes the input array into the product of lower and upper triangular matrices.\n\n Returns\n -------\n galois.FieldArray\n The lower triangular matrix.\n galois.FieldArray\n The upper triangular matrix.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(5)\n\n # Not every square matrix has an LU decomposition\n A = GF([[2, 4, 4, 1], [3, 3, 1, 4], [4, 3, 4, 2], [4, 4, 3, 1]])\n L, U = A.lu_decompose()\n L\n U\n\n # A = L U\n np.array_equal(A, L @ U)\n \"\"\"\n return lu_decompose(self)\n\n def lup_decompose(self) -> \"FieldArray\":\n r\"\"\"\n Decomposes the input array into the product of lower and upper triangular matrices using partial pivoting.\n\n Returns\n -------\n galois.FieldArray\n The lower triangular matrix.\n galois.FieldArray\n The upper triangular matrix.\n galois.FieldArray\n The permutation matrix.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(5)\n A = GF([[1, 3, 2, 0], [3, 4, 2, 3], [0, 2, 1, 4], [4, 3, 3, 1]])\n L, U, P = A.lup_decompose()\n L\n U\n P\n\n # P A = L U\n np.array_equal(P @ A, L @ U)\n \"\"\"\n return lup_decompose(self)\n\n def field_trace(self) -> \"FieldArray\":\n r\"\"\"\n Computes the field trace :math:`\\mathrm{Tr}_{L / K}(x)` of the elements of :math:`x`.\n\n Returns\n -------\n galois.FieldArray\n The field trace of :math:`x` in the prime subfield :math:`\\mathrm{GF}(p)`.\n\n Notes\n -----\n The `self` array :math:`x` is over the extension field :math:`L = \\mathrm{GF}(p^m)`. The field trace of :math:`x` is\n over the subfield :math:`K = \\mathrm{GF}(p)`. 
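# Hedged aside (illustration only, not part of the original source): a minimal check that
# field_trace() matches the Notes formula Tr(x) = x + x**p + ... + x**(p**(m-1)), using only
# the public galois API documented above (galois.GF, FieldArray.field_trace).
import numpy as np
import galois

GF = galois.GF(3**2)
x = GF.Random(5)
p, m = GF.characteristic, GF.degree

manual = x
for i in range(1, m):
    manual = manual + x**(p**i)  # sum of the Galois conjugates, still computed in GF(3**2)

# The trace values lie in the prime subfield GF(3), so the integer representations agree
assert np.array_equal(manual.view(np.ndarray), x.field_trace().view(np.ndarray))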
In other words, :math:`\\mathrm{Tr}_{L / K}(x) : L \\rightarrow K`.\n\n For finite fields, since :math:`L` is a Galois extension of :math:`K`, the field trace of :math:`x` is defined as a sum\n of the Galois conjugates of :math:`x`.\n\n .. math:: \\mathrm{Tr}_{L / K}(x) = \\sum_{i=0}^{m-1} x^{p^i}\n\n References\n ----------\n * https://en.wikipedia.org/wiki/Field_trace\n\n Examples\n --------\n The field trace of the elements of :math:`\\mathrm{GF}(3^2)` is shown below.\n\n .. ipython:: python\n\n GF = galois.GF(3**2, display=\"poly\")\n x = GF.Elements(); x\n y = x.field_trace(); y\n \"\"\"\n if not type(self).is_extension_field:\n raise TypeError(f\"The Galois field must be an extension field to compute the field trace, not {type(self)}.\")\n field = type(self)\n subfield = field.prime_subfield\n p = field.characteristic\n m = field.degree\n conjugates = np.power.outer(self, p**np.arange(0, m, dtype=field.dtypes[-1]))\n trace = np.add.reduce(conjugates, axis=-1)\n return subfield(trace)\n\n def field_norm(self) -> \"FieldArray\":\n r\"\"\"\n Computes the field norm :math:`\\mathrm{N}_{L / K}(x)` of the elements of :math:`x`.\n\n Returns\n -------\n galois.FieldArray\n The field norm of :math:`x` in the prime subfield :math:`\\mathrm{GF}(p)`.\n\n Notes\n -----\n The `self` array :math:`x` is over the extension field :math:`L = \\mathrm{GF}(p^m)`. The field norm of :math:`x` is\n over the subfield :math:`K = \\mathrm{GF}(p)`. In other words, :math:`\\mathrm{N}_{L / K}(x) : L \\rightarrow K`.\n\n For finite fields, since :math:`L` is a Galois extension of :math:`K`, the field norm of :math:`x` is defined as a product\n of the Galois conjugates of :math:`x`.\n\n .. math:: \\mathrm{N}_{L / K}(x) = \\prod_{i=0}^{m-1} x^{p^i} = x^{(p^m - 1) / (p - 1)}\n\n References\n ----------\n * https://en.wikipedia.org/wiki/Field_norm\n\n Examples\n --------\n The field norm of the elements of :math:`\\mathrm{GF}(3^2)` is shown below.\n\n .. ipython:: python\n\n GF = galois.GF(3**2, display=\"poly\")\n x = GF.Elements(); x\n y = x.field_norm(); y\n \"\"\"\n if not type(self).is_extension_field:\n raise TypeError(f\"The Galois field must be an extension field to compute the field norm, not {type(self)}.\")\n field = type(self)\n subfield = field.prime_subfield\n p = field.characteristic\n m = field.degree\n norm = self**((p**m - 1) // (p - 1))\n return subfield(norm)\n\n def characteristic_poly(self) -> \"Poly\":\n r\"\"\"\n Computes the characteristic polynomial of a finite field element :math:`a` or a square matrix :math:`\\mathbf{A}`.\n\n This function can be invoked on single finite field elements (scalar 0-D arrays) or square :math:`n \\times n`\n matrices (2-D arrays).\n\n Returns\n -------\n Poly\n For scalar inputs, the degree-:math:`m` characteristic polynomial :math:`p_a(x)` of :math:`a` over :math:`\\mathrm{GF}(p)`.\n For square :math:`n \\times n` matrix inputs, the degree-:math:`n` characteristic polynomial :math:`p_A(x)` of\n :math:`\\mathbf{A}` over :math:`\\mathrm{GF}(p^m)`.\n\n Notes\n -----\n An element :math:`a` of :math:`\\mathrm{GF}(p^m)` has characteristic polynomial :math:`p_a(x)` over :math:`\\mathrm{GF}(p)`.\n The characteristic polynomial when evaluated in :math:`\\mathrm{GF}(p^m)` annihilates :math:`a`, i.e. 
:math:`p_a(a) = 0`.\n In prime fields :math:`\\mathrm{GF}(p)`, the characteristic polynomial of :math:`a` is simply :math:`p_a(x) = x - a`.\n\n An :math:`n \\times n` matrix :math:`\\mathbf{A}` has characteristic polynomial\n :math:`p_A(x) = \\textrm{det}(x\\mathbf{I} - \\mathbf{A})` over :math:`\\mathrm{GF}(p^m)`. The constant coefficient of the\n characteristic polynomial is :math:`\\textrm{det}(-\\mathbf{A})`. The :math:`x^{n-1}` coefficient of the characteristic\n polynomial is :math:`-\\textrm{Tr}(\\mathbf{A})`. The characteristic polynomial annihilates :math:`\\mathbf{A}`, i.e.\n :math:`p_A(\\mathbf{A}) = \\mathbf{0}`.\n\n References\n ----------\n * https://en.wikipedia.org/wiki/Characteristic_polynomial\n\n Examples\n --------\n The characteristic polynomial of the element :math:`a`.\n\n .. ipython:: python\n\n GF = galois.GF(3**5)\n a = GF.Random(); a\n poly = a.characteristic_poly(); poly\n # The characteristic polynomial annihilates a\n poly(a, field=GF)\n\n The characteristic polynomial of the square matrix :math:`\\mathbf{A}`.\n\n .. ipython:: python\n\n GF = galois.GF(3**5)\n A = GF.Random((3,3)); A\n poly = A.characteristic_poly(); poly\n # The x^0 coefficient is det(-A)\n poly.coeffs[-1] == np.linalg.det(-A)\n # The x^n-1 coefficient is -Tr(A)\n poly.coeffs[1] == -np.trace(A)\n # The characteristic polynomial annihilates the matrix A\n poly(A, elementwise=False)\n \"\"\"\n if self.ndim == 0:\n return self._characteristic_poly_element()\n elif self.ndim == 2:\n return self._characteristic_poly_matrix()\n else:\n raise ValueError(f\"The array must be either 0-D to return the characteristic polynomial of a single element or 2-D to return the characteristic polynomial of a square matrix, not have shape {self.shape}.\")\n\n def _characteristic_poly_element(self):\n field = type(self)\n a = self\n x = Poly.Identity(field)\n\n if field.is_prime_field:\n return x - a\n else:\n powers = a**(field.characteristic**np.arange(0, field.degree, dtype=field.dtypes[-1]))\n poly = Poly.Roots(powers, field=field)\n poly = Poly(poly.coeffs, field=field.prime_subfield)\n return poly\n\n def _characteristic_poly_matrix(self):\n if not self.shape[0] == self.shape[1]:\n raise ValueError(f\"The 2-D array must be square to compute its characteristic polynomial, not have shape {self.shape}.\")\n\n field = type(self)\n A = self\n\n # Compute P = xI - A\n P = np.zeros(self.shape, dtype=object)\n for i in range(self.shape[0]):\n for j in range(self.shape[0]):\n if i == j:\n P[i,j] = Poly([1, -A[i,j]], field=field)\n else:\n P[i,j] = Poly([-A[i,j]], field=field)\n\n # Compute det(P)\n return self._compute_poly_det(P)\n\n def _compute_poly_det(self, A):\n if A.shape == (2,2):\n return A[0,0]*A[1,1] - A[0,1]*A[1,0]\n\n field = type(self)\n n = A.shape[0] # Size of the nxn matrix\n\n det = Poly.Zero(field)\n for i in range(n):\n idxs = np.delete(np.arange(0, n), i)\n if i % 2 == 0:\n det += A[0,i] * self._compute_poly_det(A[1:,idxs])\n else:\n det -= A[0,i] * self._compute_poly_det(A[1:,idxs])\n\n return det\n\n def minimal_poly(self) -> \"Poly\":\n r\"\"\"\n Computes the minimal polynomial of a finite field element :math:`a`.\n\n This function can be invoked only on single finite field elements (scalar 0-D arrays).\n\n Returns\n -------\n Poly\n For scalar inputs, the minimal polynomial :math:`p_a(x)` of :math:`a` over :math:`\\mathrm{GF}(p)`.\n\n Notes\n -----\n An element :math:`a` of :math:`\\mathrm{GF}(p^m)` has minimal polynomial :math:`p_a(x)` over :math:`\\mathrm{GF}(p)`.\n The minimal polynomial 
when evaluated in :math:`\\mathrm{GF}(p^m)` annihilates :math:`a`, i.e. :math:`p_a(a) = 0`.\n The minimal polynomial always divides the characteristic polynomial. In prime fields :math:`\\mathrm{GF}(p)`, the\n minimal polynomial of :math:`a` is simply :math:`p_a(x) = x - a`.\n\n References\n ----------\n * https://en.wikipedia.org/wiki/Minimal_polynomial_(field_theory)\n * https://en.wikipedia.org/wiki/Minimal_polynomial_(linear_algebra)\n\n Examples\n --------\n The characteristic polynomial of the element :math:`a`.\n\n .. ipython:: python\n\n GF = galois.GF(3**5)\n a = GF.Random(); a\n poly = a.minimal_poly(); poly\n # The minimal polynomial annihilates a\n poly(a, field=GF)\n # The minimal polynomial always divides the characteristic polynomial\n a.characteristic_poly() / poly\n \"\"\"\n if self.ndim == 0:\n return self._minimal_poly_element()\n # elif self.ndim == 2:\n # return self._minimal_poly_matrix()\n else:\n raise ValueError(f\"The array must be either 0-D to return the minimal polynomial of a single element or 2-D to return the minimal polynomial of a square matrix, not have shape {self.shape}.\")\n\n def _minimal_poly_element(self):\n field = type(self)\n a = self\n x = Poly.Identity(field)\n\n if field.is_prime_field:\n return x - a\n else:\n conjugates = np.unique(a**(field.characteristic**np.arange(0, field.degree, dtype=field.dtypes[-1])))\n poly = Poly.Roots(conjugates, field=field)\n poly = Poly(poly.coeffs, field=field.prime_subfield)\n return poly\n\n ###############################################################################\n # Special methods (redefined to add docstrings)\n ###############################################################################\n\n def __add__(self, other): # pylint: disable=useless-super-delegation\n \"\"\"\n Adds two Galois field arrays element-wise.\n\n `Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over\n the same Galois field.\n\n Parameters\n ----------\n other : galois.FieldArray\n The other Galois field array.\n\n Returns\n -------\n galois.FieldArray\n The Galois field array `self + other`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(7)\n a = GF.Random((2,5)); a\n b = GF.Random(5); b\n a + b\n \"\"\"\n return super().__add__(other)\n\n def __sub__(self, other): # pylint: disable=useless-super-delegation\n \"\"\"\n Subtracts two Galois field arrays element-wise.\n\n `Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over\n the same Galois field.\n\n Parameters\n ----------\n other : galois.FieldArray\n The other Galois field array.\n\n Returns\n -------\n galois.FieldArray\n The Galois field array `self - other`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(7)\n a = GF.Random((2,5)); a\n b = GF.Random(5); b\n a - b\n \"\"\"\n return super().__sub__(other)\n\n def __mul__(self, other): # pylint: disable=useless-super-delegation\n \"\"\"\n Multiplies two Galois field arrays element-wise.\n\n `Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over\n the same Galois field.\n\n Warning\n -------\n When both multiplicands are :obj:`galois.FieldArray`, that indicates a Galois field multiplication. 
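# Hedged aside (illustration only, not from the original source): scalar multiplication
# (repeated addition) by the field's characteristic always yields 0, consistent with
# additive_order() documented earlier.
import galois

GF = galois.GF(2**4)
a = GF.Random(5)
bool((a * GF.characteristic == 0).all())  # True: a + a = 0 in characteristic 2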
When one\n multiplicand is an integer or integer :obj:`numpy.ndarray`, that indicates a scalar multiplication (repeated addition).\n Galois field multiplication and scalar multiplication are equivalent in prime fields, but not in extension fields.\n\n Parameters\n ----------\n other : numpy.ndarray, galois.FieldArray\n A :obj:`numpy.ndarray` of integers for scalar multiplication or a :obj:`galois.FieldArray` of Galois field elements\n for finite field multiplication.\n\n Returns\n -------\n galois.FieldArray\n The Galois field array `self * other`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(7)\n a = GF.Random((2,5)); a\n b = GF.Random(5); b\n a * b\n\n When both multiplicands are Galois field elements, that indicates a Galois field multiplication.\n\n .. ipython:: python\n\n GF = galois.GF(2**4, display=\"poly\")\n a = GF(7); a\n b = GF(2); b\n a * b\n @suppress\n GF.display();\n\n When one multiplicand is an integer, that indicates a scalar multiplication (repeated addition).\n\n .. ipython:: python\n\n a * 2\n a + a\n \"\"\"\n return super().__mul__(other)\n\n def __truediv__(self, other): # pylint: disable=useless-super-delegation\n \"\"\"\n Divides two Galois field arrays element-wise.\n\n `Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over\n the same Galois field. In Galois fields, true division and floor division are equivalent.\n\n Parameters\n ----------\n other : galois.FieldArray\n The other Galois field array.\n\n Returns\n -------\n galois.FieldArray\n The Galois field array `self / other`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(7)\n a = GF.Random((2,5)); a\n b = GF.Random(5, low=1); b\n a / b\n \"\"\"\n return super().__truediv__(other)\n\n def __floordiv__(self, other): # pylint: disable=useless-super-delegation\n \"\"\"\n Divides two Galois field arrays element-wise.\n\n `Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over\n the same Galois field. In Galois fields, true division and floor division are equivalent.\n\n Parameters\n ----------\n other : galois.FieldArray\n The other Galois field array.\n\n Returns\n -------\n galois.FieldArray\n The Galois field array `self // other`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(7)\n a = GF.Random((2,5)); a\n b = GF.Random(5, low=1); b\n a // b\n \"\"\"\n return super().__floordiv__(other) # pylint: disable=too-many-function-args\n\n def __divmod__(self, other): # pylint: disable=useless-super-delegation\n \"\"\"\n Divides two Galois field arrays element-wise and returns the quotient and remainder.\n\n `Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over\n the same Galois field. In Galois fields, true division and floor division are equivalent. In Galois fields, the remainder\n is always zero.\n\n Parameters\n ----------\n other : galois.FieldArray\n The other Galois field array.\n\n Returns\n -------\n galois.FieldArray\n The Galois field array `self // other`.\n galois.FieldArray\n The Galois field array `self % other`.\n\n Examples\n --------\n .. 
ipython:: python\n\n GF = galois.GF(7)\n a = GF.Random((2,5)); a\n b = GF.Random(5, low=1); b\n q, r = divmod(a, b)\n q, r\n b*q + r\n \"\"\"\n return super().__divmod__(other)\n\n def __mod__(self, other): # pylint: disable=useless-super-delegation\n \"\"\"\n Divides two Galois field arrays element-wise and returns the remainder.\n\n `Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. Both arrays must be over\n the same Galois field. In Galois fields, true division and floor division are equivalent. In Galois fields, the remainder\n is always zero.\n\n Parameters\n ----------\n other : galois.FieldArray\n The other Galois field array.\n\n Returns\n -------\n galois.FieldArray\n The Galois field array `self % other`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(7)\n a = GF.Random((2,5)); a\n b = GF.Random(5, low=1); b\n a % b\n \"\"\"\n return super().__mod__(other)\n\n def __pow__(self, other):\n \"\"\"\n Exponentiates a Galois field array element-wise.\n\n `Broadcasting <https://numpy.org/doc/stable/user/basics.broadcasting.html>`_ rules apply. The first array must be a\n Galois field array and the second must be an integer or integer array.\n\n Parameters\n ----------\n other : int, numpy.ndarray\n The exponent(s) as an integer or integer array.\n\n Returns\n -------\n galois.FieldArray\n The Galois field array `self ** other`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(7)\n a = GF.Random((2,5)); a\n b = np.random.default_rng().integers(0, 10, 5); b\n a ** b\n \"\"\"\n # NOTE: Calling power here instead of `super().__pow__(other)` because when doing so `x ** GF(2)` will invoke `np.square(x)` and not throw\n # an error. This way `np.power(x, GF(2))` is called which correctly checks whether the second argument is an integer.\n return np.power(self, other)\n\n ###############################################################################\n # Overridden numpy methods\n ###############################################################################\n\n def __array_finalize__(self, obj):\n \"\"\"\n A numpy dunder method that is called after \"new\", \"view\", or \"new from template\". It is used here to ensure\n that view casting to a Galois field array has the appropriate dtype and that the values are in the field.\n \"\"\"\n if obj is not None and not isinstance(obj, FieldArray):\n # Only invoked on view casting\n if obj.dtype not in type(self).dtypes:\n raise TypeError(f\"{type(self).name} can only have integer dtypes {type(self).dtypes}, not {obj.dtype}.\")\n self._check_array_values(obj)\n\n def __getitem__(self, key):\n item = super().__getitem__(key)\n if np.isscalar(item):\n # Return scalar array elements as 0-dimensional Galois field arrays. 
This enables Galois field arithmetic\n # on scalars, which would otherwise be implemented using standard integer arithmetic.\n item = self.__class__(item, dtype=self.dtype)\n return item\n\n def __setitem__(self, key, value):\n # Verify the values to be written to the Galois field array are in the field\n value = self._check_array_like_object(value)\n super().__setitem__(key, value)\n\n def __array_function__(self, func, types, args, kwargs):\n if func in type(self)._OVERRIDDEN_FUNCTIONS:\n output = getattr(type(self), type(self)._OVERRIDDEN_FUNCTIONS[func])(*args, **kwargs)\n\n elif func in type(self)._OVERRIDDEN_LINALG_FUNCTIONS:\n output = type(self)._OVERRIDDEN_LINALG_FUNCTIONS[func](*args, **kwargs)\n\n elif func in type(self)._UNSUPPORTED_FUNCTIONS:\n raise NotImplementedError(f\"The numpy function {func.__name__!r} is not supported on Galois field arrays. If you believe this function should be supported, please submit a GitHub issue at https://github.com/mhostetter/galois/issues.\\n\\nIf you'd like to perform this operation on the data (but not necessarily a Galois field array), you should first call `array = array.view(np.ndarray)` and then call the function.\")\n\n else:\n if func is np.insert:\n args = list(args)\n args[2] = self._check_array_like_object(args[2])\n args = tuple(args)\n\n output = super().__array_function__(func, types, args, kwargs) # pylint: disable=no-member\n\n if func in type(self)._FUNCTIONS_REQUIRING_VIEW:\n output = output.view(type(self)) if not np.isscalar(output) else type(self)(output, dtype=self.dtype)\n\n return output\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n meta = {}\n meta[\"types\"] = [type(inputs[i]) for i in range(len(inputs))]\n meta[\"operands\"] = list(range(len(inputs)))\n if method in [\"at\", \"reduceat\"]:\n # Remove the second argument for \"at\" ufuncs which is the indices list\n meta[\"operands\"].pop(1)\n meta[\"field_operands\"] = [i for i in meta[\"operands\"] if isinstance(inputs[i], self.__class__)]\n meta[\"non_field_operands\"] = [i for i in meta[\"operands\"] if not isinstance(inputs[i], self.__class__)]\n meta[\"field\"] = self.__class__\n meta[\"dtype\"] = self.dtype\n # meta[\"ufuncs\"] = self._ufuncs\n\n if ufunc in type(self)._OVERRIDDEN_UFUNCS:\n # Set all ufuncs with \"casting\" keyword argument to \"unsafe\" so we can cast unsigned integers\n # to integers. We know this is safe because we already verified the inputs.\n if method not in [\"reduce\", \"accumulate\", \"at\", \"reduceat\"]:\n kwargs[\"casting\"] = \"unsafe\"\n\n # Need to set the intermediate dtype for reduction operations or an error will be thrown. We\n # use the largest valid dtype for this field.\n if method in [\"reduce\"]:\n kwargs[\"dtype\"] = type(self).dtypes[-1]\n\n return getattr(type(self), type(self)._OVERRIDDEN_UFUNCS[ufunc])(ufunc, method, inputs, kwargs, meta)\n\n elif ufunc in type(self)._UNSUPPORTED_UFUNCS:\n raise NotImplementedError(f\"The numpy ufunc {ufunc.__name__!r} is not supported on {type(self).name} arrays. 
If you believe this ufunc should be supported, please submit a GitHub issue at https://github.com/mhostetter/galois/issues.\")\n\n else:\n if ufunc in [np.bitwise_and, np.bitwise_or, np.bitwise_xor] and method not in [\"reduce\", \"accumulate\", \"at\", \"reduceat\"]:\n kwargs[\"casting\"] = \"unsafe\"\n\n inputs, kwargs = type(self)._view_inputs_as_ndarray(inputs, kwargs)\n output = super().__array_ufunc__(ufunc, method, *inputs, **kwargs) # pylint: disable=no-member\n\n if ufunc in type(self)._UFUNCS_REQUIRING_VIEW and output is not None:\n output = output.view(type(self)) if not np.isscalar(output) else type(self)(output, dtype=self.dtype)\n\n return output\n\n def astype(self, dtype, **kwargs): # pylint: disable=arguments-differ\n if dtype not in type(self).dtypes:\n raise TypeError(f\"{type(self).name} arrays can only be cast as integer dtypes in {type(self).dtypes}, not {dtype}.\")\n return super().astype(dtype, **kwargs)\n\n def dot(self, b, out=None):\n # `np.dot(a, b)` is also available as `a.dot(b)`. Need to override this here for proper results.\n return dot(self, b, out=out)\n\n ###############################################################################\n # Display methods\n ###############################################################################\n\n def __str__(self):\n return self.__repr__()\n # formatter = type(self)._formatter(self)\n\n # with np.printoptions(formatter=formatter):\n # string = super().__str__()\n\n # return string\n\n def __repr__(self):\n formatter = type(self)._formatter(self)\n\n cls = type(self)\n class_name = cls.__name__\n with np.printoptions(formatter=formatter):\n cls.__name__ = \"GF\" # Rename the class so very large fields don't create large indenting\n string = super().__repr__()\n cls.__name__ = class_name\n\n # Remove the dtype from the repr and add the Galois field order\n dtype_idx = string.find(\"dtype\")\n if dtype_idx == -1:\n string = string[:-1] + f\", {cls._order_str})\"\n else:\n string = string[:dtype_idx] + f\"{cls._order_str})\"\n\n return string\n\n\n###############################################################################\n# Special GF2 FieldArray subclass\n###############################################################################\n\nclass GF2Meta(FieldClass, DirMeta):\n \"\"\"\n A metaclass for the GF(2) class.\n \"\"\"\n # pylint: disable=no-value-for-parameter\n\n # Need to have a unique cache of \"calculate\" functions for GF(2)\n _FUNC_CACHE_CALCULATE = {}\n\n def __init__(cls, name, bases, namespace, **kwargs):\n super().__init__(name, bases, namespace, **kwargs)\n cls._prime_subfield = cls\n cls._is_primitive_poly = True\n\n cls.compile(kwargs[\"compile\"])\n\n @property\n def ufunc_modes(cls):\n return [\"jit-calculate\"]\n\n @property\n def default_ufunc_mode(cls):\n return \"jit-calculate\"\n\n def _compile_ufuncs(cls):\n super()._compile_ufuncs()\n assert cls.ufunc_mode == \"jit-calculate\"\n\n cls._ufuncs[\"add\"] = np.bitwise_xor\n cls._ufuncs[\"negative\"] = np.positive\n cls._ufuncs[\"subtract\"] = np.bitwise_xor\n cls._ufuncs[\"multiply\"] = np.bitwise_and\n cls._ufuncs[\"reciprocal\"] = np.positive\n cls._ufuncs[\"divide\"] = np.bitwise_and\n\n ###############################################################################\n # Override ufunc routines to use native numpy bitwise ufuncs for GF(2)\n # arithmetic, which is faster than custom ufuncs\n ###############################################################################\n\n def _ufunc_routine_reciprocal(cls, ufunc, method, inputs, 
kwargs, meta): # pylint: disable=unused-argument\n \"\"\"\n a, b in GF(2)\n b = 1 / a, a = 1 is the only valid element with a multiplicative inverse, which is 1\n = a\n \"\"\"\n cls._verify_unary_method_not_reduction(ufunc, method)\n if np.count_nonzero(inputs[0]) != inputs[0].size:\n raise ZeroDivisionError(\"Cannot compute the multiplicative inverse of 0 in a Galois field.\")\n output = getattr(cls._ufunc(\"reciprocal\"), method)(*inputs, **kwargs)\n return output\n\n def _ufunc_routine_divide(cls, ufunc, method, inputs, kwargs, meta):\n \"\"\"\n Need to re-implement this to manually throw ZeroDivisionError if necessary\n \"\"\"\n cls._verify_operands_in_same_field(ufunc, inputs, meta)\n if np.count_nonzero(inputs[meta[\"operands\"][-1]]) != inputs[meta[\"operands\"][-1]].size:\n raise ZeroDivisionError(\"Cannot compute the multiplicative inverse of 0 in a Galois field.\")\n output = getattr(cls._ufunc(\"divide\"), method)(*inputs, **kwargs)\n output = cls._view_output_as_field(output, meta[\"field\"], meta[\"dtype\"])\n return output\n\n def _ufunc_routine_square(cls, ufunc, method, inputs, kwargs, meta): # pylint: disable=unused-argument\n \"\"\"\n a, c in GF(2)\n c = a ** 2\n = a * a\n = a\n \"\"\"\n cls._verify_unary_method_not_reduction(ufunc, method)\n return inputs[0]\n\n ###############################################################################\n # Arithmetic functions using explicit calculation\n ###############################################################################\n\n @staticmethod\n def _add_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):\n \"\"\"\n Not actually used. `np.bitwise_xor()` is faster.\n \"\"\"\n return a ^ b\n\n @staticmethod\n def _negative_calculate(a, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):\n \"\"\"\n Not actually used. `np.positive()` is faster.\n \"\"\"\n return a\n\n @staticmethod\n def _subtract_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):\n \"\"\"\n Not actually used. `np.bitwise_xor()` is faster.\n \"\"\"\n return a ^ b\n\n @staticmethod\n def _multiply_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):\n \"\"\"\n Not actually used. 
`np.bitwise_and()` is faster.\n \"\"\"\n return a & b\n\n @staticmethod\n def _reciprocal_calculate(a, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):\n if a == 0:\n raise ZeroDivisionError(\"Cannot compute the multiplicative inverse of 0 in a Galois field.\")\n\n return 1\n\n @staticmethod\n def _divide_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):\n if b == 0:\n raise ZeroDivisionError(\"Cannot compute the multiplicative inverse of 0 in a Galois field.\")\n\n return a & b\n\n @staticmethod\n @numba.extending.register_jitable\n def _power_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):\n if a == 0 and b < 0:\n raise ZeroDivisionError(\"Cannot compute the multiplicative inverse of 0 in a Galois field.\")\n\n if b == 0:\n return 1\n else:\n return a\n\n @staticmethod\n @numba.extending.register_jitable\n def _log_calculate(a, b, CHARACTERISTIC, DEGREE, IRREDUCIBLE_POLY):\n if a == 0:\n raise ArithmeticError(\"Cannot compute the discrete logarithm of 0 in a Galois field.\")\n if b != 1:\n raise ArithmeticError(\"In GF(2), 1 is the only multiplicative generator.\")\n\n return 0\n\n ###############################################################################\n # Ufuncs written in NumPy operations (not JIT compiled)\n ###############################################################################\n\n @staticmethod\n def _sqrt(a):\n return a.copy()\n\n\n@set_module(\"galois\")\nclass GF2(FieldArray, metaclass=GF2Meta, characteristic=2, degree=1, order=2, primitive_element=1, compile=\"jit-calculate\"):\n r\"\"\"\n An array over :math:`\\mathrm{GF}(2)`.\n\n This class is a pre-generated :obj:`galois.FieldArray` subclass generated with `galois.GF(2)` and is included in the API\n for convenience. See :obj:`galois.FieldArray` and :obj:`galois.FieldClass` for more complete documentation and examples.\n\n Examples\n --------\n This class is equivalent (and, in fact, identical) to the class returned from the Galois field class constructor.\n\n .. ipython:: python\n\n print(galois.GF2)\n GF2 = galois.GF(2); print(GF2)\n GF2 is galois.GF2\n\n The Galois field properties can be viewed by class attributes, see :obj:`galois.FieldClass`.\n\n .. ipython:: python\n\n # View a summary of the field's properties\n print(galois.GF2.properties)\n\n # Or access each attribute individually\n galois.GF2.irreducible_poly\n galois.GF2.is_prime_field\n\n The class's constructor mimics the call signature of :func:`numpy.array`.\n\n .. 
ipython:: python\n\n # Construct a Galois field array from an iterable\n galois.GF2([1,0,1,1,0,0,0,1])\n\n # Or an iterable of iterables\n galois.GF2([[1,0], [1,1]])\n\n # Or a single integer\n galois.GF2(1)\n \"\"\"\n\n\n###############################################################################\n# Polynomials over Galois fields\n###############################################################################\n\n# Values were obtained by running scripts/sparse_poly_performance_test.py\nSPARSE_VS_BINARY_POLY_FACTOR = 0.00_05\nSPARSE_VS_BINARY_POLY_MIN_COEFFS = int(1 / SPARSE_VS_BINARY_POLY_FACTOR)\nSPARSE_VS_DENSE_POLY_FACTOR = 0.00_5\nSPARSE_VS_DENSE_POLY_MIN_COEFFS = int(1 / SPARSE_VS_DENSE_POLY_FACTOR)\n\n\n@set_module(\"galois\")\nclass Poly:\n r\"\"\"\n Create a polynomial :math:`f(x)` over :math:`\\mathrm{GF}(p^m)`.\n\n The polynomial :math:`f(x) = a_d x^d + a_{d-1} x^{d-1} + \\dots + a_1 x + a_0` has coefficients :math:`\\{a_{d}, a_{d-1}, \\dots, a_1, a_0\\}`\n in :math:`\\mathrm{GF}(p^m)`.\n\n Parameters\n ----------\n coeffs : tuple, list, numpy.ndarray, galois.FieldArray\n The polynomial coefficients :math:`\\{a_d, a_{d-1}, \\dots, a_1, a_0\\}` with type :obj:`galois.FieldArray`. Alternatively, an iterable :obj:`tuple`,\n :obj:`list`, or :obj:`numpy.ndarray` may be provided and the Galois field domain is taken from the `field` keyword argument.\n field : galois.FieldClass, optional\n The Galois field :math:`\\mathrm{GF}(p^m)` the polynomial is over.\n\n * :obj:`None` (default): If the coefficients are a :obj:`galois.FieldArray`, they won't be modified. If the coefficients are not explicitly\n in a Galois field, they are assumed to be from :math:`\\mathrm{GF}(2)` and are converted using `galois.GF2(coeffs)`.\n * :obj:`galois.FieldClass`: The coefficients are explicitly converted to this Galois field `field(coeffs)`.\n\n order : str, optional\n The interpretation of the coefficient degrees.\n\n * `\"desc\"` (default): The first element of `coeffs` is the highest degree coefficient, i.e. :math:`\\{a_d, a_{d-1}, \\dots, a_1, a_0\\}`.\n * `\"asc\"`: The first element of `coeffs` is the lowest degree coefficient, i.e. :math:`\\{a_0, a_1, \\dots, a_{d-1}, a_d\\}`.\n\n Returns\n -------\n galois.Poly\n The polynomial :math:`f(x)`.\n\n Examples\n --------\n Create a polynomial over :math:`\\mathrm{GF}(2)`.\n\n .. ipython:: python\n\n galois.Poly([1,0,1,1])\n galois.Poly.Degrees([3,1,0])\n\n Create a polynomial over :math:`\\mathrm{GF}(2^8)`.\n\n .. ipython:: python\n\n GF = galois.GF(2**8)\n galois.Poly([124,0,223,0,0,15], field=GF)\n\n # Alternate way of constructing the same polynomial\n galois.Poly.Degrees([5,3,0], coeffs=[124,223,15], field=GF)\n\n Polynomial arithmetic using binary operators.\n\n .. 
ipython:: python\n\n a = galois.Poly([117,0,63,37], field=GF); a\n b = galois.Poly([224,0,21], field=GF); b\n\n a + b\n a - b\n\n # Compute the quotient of the polynomial division\n a / b\n\n # True division and floor division are equivalent\n a / b == a // b\n\n # Compute the remainder of the polynomial division\n a % b\n\n # Compute both the quotient and remainder in one pass\n divmod(a, b)\n \"\"\"\n # pylint: disable=too-many-public-methods\n\n # Increase my array priority so numpy will call my __radd__ instead of its own __add__\n __array_priority__ = 100\n\n def __new__(\n cls,\n coeffs: Union[Tuple[int], List[int], np.ndarray, FieldArray],\n field: Optional[FieldClass] = None,\n order: Literal[\"desc\", \"asc\"] = \"desc\"\n ) -> \"Poly\":\n if not isinstance(coeffs, (list, tuple, np.ndarray, FieldArray)):\n raise TypeError(f\"Argument `coeffs` must array-like, not {type(coeffs)}.\")\n if not isinstance(field, (type(None), FieldClass)):\n raise TypeError(f\"Argument `field` must be a Galois field array class, not {field}.\")\n if not isinstance(order, str):\n raise TypeError(f\"Argument `order` must be a str, not {type(order)}.\")\n if isinstance(coeffs, (FieldArray, np.ndarray)) and not coeffs.ndim <= 1:\n raise ValueError(f\"Argument `coeffs` can have dimension at most 1, not {coeffs.ndim}.\")\n if not order in [\"desc\", \"asc\"]:\n raise ValueError(f\"Argument `order` must be either 'desc' or 'asc', not {order!r}.\")\n\n if isinstance(coeffs, (FieldArray, np.ndarray)):\n coeffs = np.atleast_1d(coeffs)\n\n if order == \"asc\":\n coeffs = coeffs[::-1] # Ensure it's in descending-degree order\n\n coeffs, field = cls._convert_coeffs(coeffs, field)\n\n if field is GF2:\n if len(coeffs) >= SPARSE_VS_BINARY_POLY_MIN_COEFFS and np.count_nonzero(coeffs) <= SPARSE_VS_BINARY_POLY_FACTOR*len(coeffs):\n degrees = np.arange(coeffs.size - 1, -1, -1)\n return SparsePoly(degrees, coeffs, field=field)\n else:\n integer = poly_to_integer(coeffs, 2)\n return BinaryPoly(integer)\n else:\n if len(coeffs) >= SPARSE_VS_DENSE_POLY_MIN_COEFFS and np.count_nonzero(coeffs) <= SPARSE_VS_DENSE_POLY_FACTOR*len(coeffs):\n degrees = np.arange(coeffs.size - 1, -1, -1)\n return SparsePoly(degrees, coeffs, field=field)\n else:\n return DensePoly(coeffs, field=field)\n\n @classmethod\n def _convert_coeffs(cls, coeffs, field):\n if isinstance(coeffs, FieldArray) and field is None:\n # Use the field of the coefficients\n field = type(coeffs)\n else:\n # Convert coefficients to the specified field (or GF2 if unspecified), taking into\n # account negative coefficients\n field = GF2 if field is None else field\n coeffs = np.array(coeffs, dtype=field.dtypes[-1])\n idxs = coeffs < 0\n coeffs = field(np.abs(coeffs))\n coeffs[idxs] *= -1\n\n return coeffs, field\n\n ###############################################################################\n # Alternate constructors\n ###############################################################################\n\n @classmethod\n def Zero(cls, field: Optional[FieldClass] = GF2) -> \"Poly\":\n r\"\"\"\n Constructs the polynomial :math:`f(x) = 0` over :math:`\\mathrm{GF}(p^m)`.\n\n Parameters\n ----------\n field : galois.FieldClass, optional\n The Galois field :math:`\\mathrm{GF}(p^m)` the polynomial is over. The default is :obj:`galois.GF2`.\n\n Returns\n -------\n galois.Poly\n The polynomial :math:`f(x) = 0`.\n\n Examples\n --------\n Construct the zero polynomial over :math:`\\mathrm{GF}(2)`.\n\n .. 
ipython:: python\n\n galois.Poly.Zero()\n\n Construct the zero polynomial over :math:`\\mathrm{GF}(2^8)`.\n\n .. ipython:: python\n\n GF = galois.GF(2**8)\n galois.Poly.Zero(field=GF)\n \"\"\"\n return Poly([0], field=field)\n\n @classmethod\n def One(cls, field: Optional[FieldClass] = GF2) -> \"Poly\":\n r\"\"\"\n Constructs the polynomial :math:`f(x) = 1` over :math:`\\mathrm{GF}(p^m)`.\n\n Parameters\n ----------\n field : galois.FieldClass, optional\n The Galois field :math:`\\mathrm{GF}(p^m)` the polynomial is over. The default is :obj:`galois.GF2`.\n\n Returns\n -------\n galois.Poly\n The polynomial :math:`f(x) = 1`.\n\n Examples\n --------\n Construct the one polynomial over :math:`\\mathrm{GF}(2)`.\n\n .. ipython:: python\n\n galois.Poly.One()\n\n Construct the one polynomial over :math:`\\mathrm{GF}(2^8)`.\n\n .. ipython:: python\n\n GF = galois.GF(2**8)\n galois.Poly.One(field=GF)\n \"\"\"\n return Poly([1], field=field)\n\n @classmethod\n def Identity(cls, field: Optional[FieldClass] = GF2) -> \"Poly\":\n r\"\"\"\n Constructs the polynomial :math:`f(x) = x` over :math:`\\mathrm{GF}(p^m)`.\n\n Parameters\n ----------\n field : galois.FieldClass, optional\n The Galois field :math:`\\mathrm{GF}(p^m)` the polynomial is over. The default is :obj:`galois.GF2`.\n\n Returns\n -------\n galois.Poly\n The polynomial :math:`f(x) = x`.\n\n Examples\n --------\n Construct the identity polynomial over :math:`\\mathrm{GF}(2)`.\n\n .. ipython:: python\n\n galois.Poly.Identity()\n\n Construct the identity polynomial over :math:`\\mathrm{GF}(2^8)`.\n\n .. ipython:: python\n\n GF = galois.GF(2**8)\n galois.Poly.Identity(field=GF)\n \"\"\"\n return Poly([1, 0], field=field)\n\n @classmethod\n def Random(\n cls,\n degree: int,\n seed: Optional[Union[int, np.random.Generator]] = None,\n field: Optional[FieldClass] = GF2\n ) -> \"Poly\":\n r\"\"\"\n Constructs a random polynomial over :math:`\\mathrm{GF}(p^m)` with degree :math:`d`.\n\n Parameters\n ----------\n degree : int\n The degree of the polynomial.\n seed: int, numpy.random.Generator, optional\n Non-negative integer used to initialize the PRNG. The default is `None` which means that unpredictable\n entropy will be pulled from the OS to be used as the seed. A :obj:`numpy.random.Generator` can also be passed. If so,\n it is used directly when `dtype != np.object_`. Its state is used to seed `random.seed()`, otherwise.\n field : galois.FieldClass, optional\n The Galois field :math:`\\mathrm{GF}(p^m)` the polynomial is over. The default is :obj:`galois.GF2`.\n\n Returns\n -------\n galois.Poly\n The polynomial :math:`f(x)`.\n\n Examples\n --------\n Construct a random degree-:math:`5` polynomial over :math:`\\mathrm{GF}(2)`.\n\n .. ipython:: python\n\n galois.Poly.Random(5)\n\n Construct a random degree-:math:`5` polynomial over :math:`\\mathrm{GF}(2^8)` with a given seed. This produces repeatable results.\n\n .. ipython:: python\n\n GF = galois.GF(2**8)\n galois.Poly.Random(5, seed=123456789, field=GF)\n galois.Poly.Random(5, seed=123456789, field=GF)\n\n Construct multiple polynomials with one global seed.\n\n .. 
ipython:: python\n\n rng = np.random.default_rng(123456789)\n galois.Poly.Random(5, seed=rng, field=GF)\n galois.Poly.Random(5, seed=rng, field=GF)\n \"\"\"\n if not isinstance(degree, (int, np.integer)):\n raise TypeError(f\"Argument `degree` must be an integer, not {type(degree)}.\")\n if seed is not None:\n if not isinstance(seed, (int, np.integer, np.random.Generator)):\n raise ValueError(\"Seed must be an integer, a numpy.random.Generator or None.\")\n if isinstance(seed, (int, np.integer)) and seed < 0:\n raise ValueError(\"Seed must be non-negative.\")\n if not isinstance(field, FieldClass):\n raise TypeError(f\"Argument `field` must be a Galois field class, not {type(field)}.\")\n if not degree >= 0:\n raise ValueError(f\"Argument `degree` must be non-negative, not {degree}.\")\n\n rng = np.random.default_rng(seed) # Make the seed a PRNG object so it can \"step\" its state if the below \"if\" statement is invoked\n coeffs = field.Random(degree + 1, seed=rng)\n if coeffs[0] == 0:\n coeffs[0] = field.Random(low=1, seed=rng) # Ensure leading coefficient is non-zero\n\n return Poly(coeffs, field=field)\n\n @classmethod\n def Integer(cls, integer: int, field: Optional[FieldClass] = GF2) -> \"Poly\":\n r\"\"\"\n Constructs a polynomial over :math:`\\mathrm{GF}(p^m)` from its integer representation.\n\n Parameters\n ----------\n integer : int\n The integer representation of the polynomial :math:`f(x)`.\n field : galois.FieldClass, optional\n The Galois field :math:`\\mathrm{GF}(p^m)` the polynomial is over. The default is :obj:`galois.GF2`.\n\n Returns\n -------\n galois.Poly\n The polynomial :math:`f(x)`.\n\n Notes\n -----\n The integer value :math:`i` represents the polynomial :math:`f(x) = a_d x^{d} + a_{d-1} x^{d-1} + \\dots + a_1 x + a_0`\n over the field :math:`\\mathrm{GF}(p^m)` if :math:`i = a_{d}(p^m)^{d} + a_{d-1}(p^m)^{d-1} + \\dots + a_1(p^m) + a_0` using integer arithmetic,\n not finite field arithmetic.\n\n Said differently, if the polynomial coefficients :math:`\\{a_d, a_{d-1}, \\dots, a_1, a_0\\}` are considered as the \"digits\" of a radix-:math:`p^m`\n value, the polynomial's integer representation is the decimal value (radix-:math:`10`).\n\n Examples\n --------\n Construct a polynomial over :math:`\\mathrm{GF}(2)` from its integer representation.\n\n .. ipython:: python\n\n galois.Poly.Integer(5)\n\n Construct a polynomial over :math:`\\mathrm{GF}(2^8)` from its integer representation.\n\n .. ipython:: python\n\n GF = galois.GF(2**8)\n galois.Poly.Integer(13*256**3 + 117, field=GF)\n \"\"\"\n if not isinstance(integer, (int, np.integer)):\n raise TypeError(f\"Argument `integer` be an integer, not {type(integer)}\")\n if not isinstance(field, FieldClass):\n raise TypeError(f\"Argument `field` must be a Galois field class, not {type(field)}.\")\n if not integer >= 0:\n raise ValueError(f\"Argument `integer` must be non-negative, not {integer}.\")\n\n if field is GF2:\n # Explicitly create a binary poly\n return BinaryPoly(integer)\n else:\n coeffs = integer_to_poly(integer, field.order)\n return Poly(coeffs, field=field)\n\n @classmethod\n def String(cls, string: str, field: Optional[FieldClass] = GF2) -> \"Poly\":\n r\"\"\"\n Constructs a polynomial over :math:`\\mathrm{GF}(p^m)` from its string representation.\n\n Parameters\n ----------\n string : str\n The string representation of the polynomial :math:`f(x)`.\n field : galois.FieldClass, optional\n The Galois field :math:`\\mathrm{GF}(p^m)` the polynomial is over. 
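# Hedged aside (illustration only): the radix-(p**m) correspondence described in the
# Poly.Integer notes, rebuilt with ordinary integer arithmetic from Poly.coeffs and Poly.degree.
import galois

GF = galois.GF(2**8)
i = 13*256**3 + 117
f = galois.Poly.Integer(i, field=GF)
digits = [int(c) for c in f.coeffs]  # descending-degree coefficients read as base-256 "digits"
assert sum(d * GF.order**e for d, e in zip(digits, range(f.degree, -1, -1))) == i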
The default is :obj:`galois.GF2`.\n\n Returns\n -------\n galois.Poly\n The polynomial :math:`f(x)`.\n\n Notes\n -----\n The string parsing rules include:\n\n * Either `^` or `**` may be used for indicating the polynomial degrees. For example, `\"13x^3 + 117\"` or `\"13x**3 + 117\"`.\n * Multiplication operators `*` may be used between coefficients and the polynomial indeterminate `x`, but are not required. For example,\n `\"13x^3 + 117\"` or `\"13*x^3 + 117\"`.\n * Polynomial coefficients of 1 may be specified or omitted. For example, `\"x^3 + 117\"` or `\"1*x^3 + 117\"`.\n * The polynomial indeterminate can be any single character, but must be consistent. For example, `\"13x^3 + 117\"` or `\"13y^3 + 117\"`.\n * Spaces are not required between terms. For example, `\"13x^3 + 117\"` or `\"13x^3+117\"`.\n * Any combination of the above rules is acceptable.\n\n Examples\n --------\n Construct a polynomial over :math:`\\mathrm{GF}(2)` from its string representation.\n\n .. ipython:: python\n\n galois.Poly.String(\"x^2 + 1\")\n\n Construct a polynomial over :math:`\\mathrm{GF}(2^8)` from its string representation.\n\n .. ipython:: python\n\n GF = galois.GF(2**8)\n galois.Poly.String(\"13x^3 + 117\", field=GF)\n \"\"\"\n if not isinstance(string, str):\n raise TypeError(f\"Argument `string` be an string, not {type(string)}\")\n\n return Poly.Degrees(*str_to_sparse_poly(string), field=field)\n\n\n @classmethod\n def Degrees(\n cls,\n degrees: Union[Tuple[int], List[int], np.ndarray],\n coeffs: Optional[Union[Tuple[int], List[int], np.ndarray, FieldArray]] = None,\n field: Optional[FieldClass] = None\n ) -> \"Poly\":\n r\"\"\"\n Constructs a polynomial over :math:`\\mathrm{GF}(p^m)` from its non-zero degrees.\n\n Parameters\n ----------\n degrees : tuple, list, numpy.ndarray\n The polynomial degrees with non-zero coefficients.\n coeffs : tuple, list, numpy.ndarray, galois.FieldArray, optional\n The corresponding non-zero polynomial coefficients with type :obj:`galois.FieldArray`. Alternatively, an iterable :obj:`tuple`,\n :obj:`list`, or :obj:`numpy.ndarray` may be provided and the Galois field domain is taken from the `field` keyword argument. The\n default is `None` which corresponds to all ones.\n field : galois.FieldClass, optional\n The Galois field :math:`\\mathrm{GF}(p^m)` the polynomial is over.\n\n * :obj:`None` (default): If the coefficients are a :obj:`galois.FieldArray`, they won't be modified. If the coefficients are not explicitly\n in a Galois field, they are assumed to be from :math:`\\mathrm{GF}(2)` and are converted using `galois.GF2(coeffs)`.\n * :obj:`galois.FieldClass`: The coefficients are explicitly converted to this Galois field `field(coeffs)`.\n\n Returns\n -------\n galois.Poly\n The polynomial :math:`f(x)`.\n\n Examples\n --------\n Construct a polynomial over :math:`\\mathrm{GF}(2)` by specifying the degrees with non-zero coefficients.\n\n .. ipython:: python\n\n galois.Poly.Degrees([3,1,0])\n\n Construct a polynomial over :math:`\\mathrm{GF}(2^8)` by specifying the degrees with non-zero coefficients.\n\n .. 
ipython:: python\n\n GF = galois.GF(2**8)\n galois.Poly.Degrees([3,1,0], coeffs=[251,73,185], field=GF)\n \"\"\"\n if not isinstance(degrees, (list, tuple, np.ndarray)):\n raise TypeError(f\"Argument `degrees` must array-like, not {type(degrees)}.\")\n if not isinstance(coeffs, (type(None), list, tuple, np.ndarray, FieldArray)):\n raise TypeError(f\"Argument `coeffs` must array-like, not {type(coeffs)}.\")\n if not isinstance(field, (type(None), FieldClass)):\n raise TypeError(f\"Argument `field` must be a Galois field array class, not {type(field)}.\")\n\n degrees = np.array(degrees, dtype=np.int64)\n coeffs = [1,]*len(degrees) if coeffs is None else coeffs\n coeffs, field = cls._convert_coeffs(coeffs, field)\n\n if not degrees.ndim <= 1:\n raise ValueError(f\"Argument `degrees` can have dimension at most 1, not {degrees.ndim}.\")\n if not degrees.size == np.unique(degrees).size:\n raise ValueError(f\"Argument `degrees` must have unique entries, not {degrees}.\")\n if not np.all(degrees >= 0):\n raise ValueError(f\"Argument `degrees` must have non-negative values, not {degrees}.\")\n if not coeffs.ndim <= 1:\n raise ValueError(f\"Argument `coeffs` can have dimension at most 1, not {coeffs.ndim}.\")\n if not degrees.size == coeffs.size:\n raise ValueError(f\"Arguments `degrees` and `coeffs` must have the same length, not {degrees.size} and {coeffs.size}.\")\n\n # No nonzero degrees means it's the zero polynomial\n if len(degrees) == 0:\n degrees, coeffs = np.array([0]), field([0])\n\n if field is GF2:\n if len(degrees) < SPARSE_VS_BINARY_POLY_FACTOR*max(degrees):\n # Explicitly create a sparse poly over GF(2)\n return SparsePoly(degrees, coeffs=coeffs, field=field)\n else:\n integer = sparse_poly_to_integer(degrees, coeffs, 2)\n return BinaryPoly(integer)\n else:\n if len(degrees) < SPARSE_VS_DENSE_POLY_FACTOR*max(degrees):\n # Explicitly create a sparse poly over GF(p^m)\n return SparsePoly(degrees, coeffs=coeffs, field=field)\n else:\n degree = max(degrees) # The degree of the polynomial\n all_coeffs = type(coeffs).Zeros(degree + 1)\n all_coeffs[degree - degrees] = coeffs\n return DensePoly(all_coeffs)\n\n @classmethod\n def Roots(\n cls,\n roots: Union[Tuple[int], List[int], np.ndarray, FieldArray],\n multiplicities: Optional[Union[Tuple[int], List[int], np.ndarray]] = None,\n field: Optional[FieldClass] = None\n ) -> \"Poly\":\n r\"\"\"\n Constructs a monic polynomial over :math:`\\mathrm{GF}(p^m)` from its roots.\n\n Parameters\n ----------\n roots : tuple, list, numpy.ndarray, galois.FieldArray\n The roots of the desired polynomial with type :obj:`galois.FieldArray`. Alternatively, an iterable :obj:`tuple`,\n :obj:`list`, or :obj:`numpy.ndarray` may be provided and the Galois field domain is taken from the `field` keyword argument.\n multiplicities : tuple, list, numpy.ndarray, optional\n The corresponding root multiplicities. The default is `None` which corresponds to all ones, i.e. `[1,]*len(roots)`.\n field : galois.FieldClass, optional\n The Galois field :math:`\\mathrm{GF}(p^m)` the polynomial is over.\n\n * :obj:`None` (default): If the roots are a :obj:`galois.FieldArray`, they won't be modified. 
If the roots are not explicitly\n in a Galois field, they are assumed to be from :math:`\\mathrm{GF}(2)` and are converted using `galois.GF2(roots)`.\n * :obj:`galois.FieldClass`: The roots are explicitly converted to this Galois field `field(roots)`.\n\n Returns\n -------\n galois.Poly\n The polynomial :math:`f(x)`.\n\n Notes\n -----\n The polynomial :math:`f(x)` with :math:`k` roots :math:`\\{r_1, r_2, \\dots, r_k\\}` with multiplicities\n :math:`\\{m_1, m_2, \\dots, m_k\\}` is\n\n .. math::\n\n f(x) &= (x - r_1)^{m_1} (x - r_2)^{m_2} \\dots (x - r_k)^{m_k}\n\n f(x) &= a_d x^d + a_{d-1} x^{d-1} + \\dots + a_1 x + a_0\n\n with degree :math:`d = \\sum_{i=1}^{k} m_i`.\n\n Examples\n --------\n Construct a polynomial over :math:`\\mathrm{GF}(2)` from a list of its roots.\n\n .. ipython:: python\n\n roots = [0, 0, 1]\n p = galois.Poly.Roots(roots); p\n # Evaluate the polynomial at its roots\n p(roots)\n\n Construct a polynomial over :math:`\\mathrm{GF}(2^8)` from a list of its roots with specific multiplicities.\n\n .. ipython:: python\n\n GF = galois.GF(2**8)\n roots = [121, 198, 225]\n multiplicities = [1, 2, 1]\n p = galois.Poly.Roots(roots, multiplicities=multiplicities, field=GF); p\n # Evaluate the polynomial at its roots\n p(roots)\n \"\"\"\n multiplicities = [1,]*len(roots) if multiplicities is None else multiplicities\n if not isinstance(roots, (tuple, list, np.ndarray, FieldArray)):\n raise TypeError(f\"Argument `roots` must be array-like, not {type(roots)}.\")\n if not isinstance(multiplicities, (tuple, list, np.ndarray)):\n raise TypeError(f\"Argument `multiplicities` must be array-like, not {type(multiplicities)}.\")\n if not isinstance(field, (type(None), FieldClass)):\n raise TypeError(f\"Argument `field` must be a Galois field array class, not {field}.\")\n\n roots, field = cls._convert_coeffs(roots, field)\n\n roots = field(roots).flatten()\n if not len(roots) == len(multiplicities):\n raise ValueError(f\"Arguments `roots` and `multiplicities` must have the same length, not {len(roots)} and {len(multiplicities)}.\")\n\n poly = Poly.One(field=field)\n x = Poly.Identity(field=field)\n for root, multiplicity in zip(roots, multiplicities):\n poly *= (x - root)**multiplicity\n\n return poly\n\n ###############################################################################\n # Methods\n ###############################################################################\n\n def coefficients(\n self,\n size: Optional[int] = None,\n order: Literal[\"desc\", \"asc\"] = \"desc\"\n ) -> FieldArray:\n \"\"\"\n Returns the polynomial coefficients in the order and size specified.\n\n Parameters\n ----------\n size : int, optional\n The fixed size of the coefficient array. Zeros will be added for higher-order terms. This value must be\n at least `degree + 1` or a :obj:`ValueError` will be raised. The default is `None` which corresponds\n to `degree + 1`.\n\n order : str, optional\n The interpretation of the coefficient degrees.\n\n * `\"desc\"` (default): The first element returned is the highest degree coefficient.\n * `\"asc\"`: The first element returned is the lowest degree coefficient.\n\n Returns\n -------\n galois.FieldArray\n An array of the polynomial coefficients with length `size`, either in ascending order or descending order.\n\n Notes\n -----\n This accessor is similar to :obj:`coeffs`, but it has more settings. By default, `Poly.coeffs == Poly.coefficients()`.\n\n Examples\n --------\n .. 
ipython:: python\n\n GF = galois.GF(7)\n p = galois.Poly([3, 0, 5, 2], field=GF); p\n p.coeffs\n p.coefficients()\n # Return the coefficients in ascending order\n p.coefficients(order=\"asc\")\n # Return the coefficients in ascending order with size 8\n p.coefficients(8, order=\"asc\")\n \"\"\"\n if not isinstance(size, (type(None), int, np.integer)):\n raise TypeError(f\"Argument `size` must be an integer, not {type(size)}.\")\n if not isinstance(order, str):\n raise TypeError(f\"Argument `order` must be a str, not {type(order)}.\")\n size = len(self) if size is None else size\n if not size >= len(self):\n raise ValueError(f\"Argument `size` must be at least `degree + 1` which is {len(self)}, not {size}.\")\n if not order in [\"desc\", \"asc\"]:\n raise ValueError(f\"Argument `order` must be either 'desc' or 'asc', not {order!r}.\")\n\n coeffs = self.field.Zeros(size)\n coeffs[-len(self):] = self.coeffs\n if order == \"asc\":\n coeffs = np.flip(coeffs)\n\n return coeffs\n\n def copy(self) -> \"Poly\":\n \"\"\"\n Deep copies the polynomial.\n\n Returns\n -------\n galois.Poly\n A copy of the original polynomial.\n \"\"\"\n raise NotImplementedError\n\n def reverse(self) -> \"Poly\":\n r\"\"\"\n Returns the :math:`d`-th reversal :math:`x^d f(\\frac{1}{x})` of the polynomial :math:`f(x)` with degree :math:`d`.\n\n Returns\n -------\n galois.Poly\n The :math:`n`-th reversal :math:`x^n f(\\frac{1}{x})`.\n\n Notes\n -----\n For a polynomial :math:`f(x) = a_d x^d + a_{d-1} x^{d-1} + \\dots + a_1 x + a_0` with degree :math:`d`, the :math:`d`-th\n reversal is equivalent to reversing the coefficients.\n\n .. math::\n \\textrm{rev}_d f(x) = x^d f(x^{-1}) = a_0 x^d + a_{1} x^{d-1} + \\dots + a_{d-1} x + a_d\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(7)\n f = galois.Poly([5, 0, 3, 4], field=GF); f\n f.reverse()\n \"\"\"\n return Poly(self.coeffs[::-1])\n\n def roots(self, multiplicity: bool = False) -> FieldArray:\n r\"\"\"\n Calculates the roots :math:`r` of the polynomial :math:`f(x)`, such that :math:`f(r) = 0`.\n\n Parameters\n ----------\n multiplicity : bool, optional\n Optionally return the multiplicity of each root. The default is `False` which only returns the unique\n roots.\n\n Returns\n -------\n galois.FieldArray\n Galois field array of roots of :math:`f(x)`. The roots are ordered in increasing order.\n np.ndarray\n The multiplicity of each root, only returned if `multiplicity=True`.\n\n Notes\n -----\n This implementation uses Chien's search to find the roots :math:`\\{r_1, r_2, \\dots, r_k\\}` of the degree-:math:`d`\n polynomial\n\n .. math::\n f(x) = a_{d}x^{d} + a_{d-1}x^{d-1} + \\dots + a_1x + a_0,\n\n where :math:`k \\le d`. Then, :math:`f(x)` can be factored as\n\n .. math::\n f(x) = (x - r_1)^{m_1} (x - r_2)^{m_2} \\dots (x - r_k)^{m_k},\n\n where :math:`m_i` is the multiplicity of root :math:`r_i` and :math:`d = \\sum_{i=1}^{k} m_i`.\n\n The Galois field elements can be represented as :math:`\\mathrm{GF}(p^m) = \\{0, 1, \\alpha, \\alpha^2, \\dots, \\alpha^{p^m-2}\\}`,\n where :math:`\\alpha` is a primitive element of :math:`\\mathrm{GF}(p^m)`.\n\n :math:`0` is a root of :math:`f(x)` if :math:`a_0 = 0`. :math:`1` is a root of :math:`f(x)` if :math:`\\sum_{j=0}^{d} a_j = 0`. The\n remaining elements of :math:`\\mathrm{GF}(p^m)` are powers of :math:`\\alpha`. The following equations calculate :math:`f(\\alpha^i)`,\n where :math:`\\alpha^i` is a root of :math:`f(x)` if :math:`f(\\alpha^i) = 0`.\n\n .. 
math::\n f(\\alpha^i) &= a_{d}(\\alpha^i)^{d} + a_{d-1}(\\alpha^i)^{d-1} + \\dots + a_1(\\alpha^i) + a_0\n\n f(\\alpha^i) &\\overset{\\Delta}{=} \\lambda_{i,d} + \\lambda_{i,d-1} + \\dots + \\lambda_{i,1} + \\lambda_{i,0}\n\n f(\\alpha^i) &= \\sum_{j=0}^{d} \\lambda_{i,j}\n\n The next power of :math:`\\alpha` can be easily calculated from the previous calculation.\n\n .. math::\n f(\\alpha^{i+1}) &= a_{d}(\\alpha^{i+1})^{d} + a_{d-1}(\\alpha^{i+1})^{d-1} + \\dots + a_1(\\alpha^{i+1}) + a_0\n\n f(\\alpha^{i+1}) &= a_{d}(\\alpha^i)^{d}\\alpha^d + a_{d-1}(\\alpha^i)^{d-1}\\alpha^{d-1} + \\dots + a_1(\\alpha^i)\\alpha + a_0\n\n f(\\alpha^{i+1}) &= \\lambda_{i,d}\\alpha^d + \\lambda_{i,d-1}\\alpha^{d-1} + \\dots + \\lambda_{i,1}\\alpha + \\lambda_{i,0}\n\n f(\\alpha^{i+1}) &= \\sum_{j=0}^{d} \\lambda_{i,j}\\alpha^j\n\n References\n ----------\n * https://en.wikipedia.org/wiki/Chien_search\n\n Examples\n --------\n Find the roots of a polynomial over :math:`\\mathrm{GF}(2)`.\n\n .. ipython:: python\n\n p = galois.Poly.Roots([0,]*7 + [1,]*13); p\n p.roots()\n p.roots(multiplicity=True)\n\n Find the roots of a polynomial over :math:`\\mathrm{GF}(2^8)`.\n\n .. ipython:: python\n\n GF = galois.GF(2**8)\n p = galois.Poly.Roots([18,]*7 + [155,]*13 + [227,]*9, field=GF); p\n p.roots()\n p.roots(multiplicity=True)\n \"\"\"\n if not isinstance(multiplicity, bool):\n raise TypeError(f\"Argument `multiplicity` must be a bool, not {type(multiplicity)}.\")\n\n roots = self.field._poly_roots(self.nonzero_degrees, self.nonzero_coeffs)\n\n if not multiplicity:\n return roots\n else:\n multiplicities = np.array([self._root_multiplicity(root) for root in roots])\n return roots, multiplicities\n\n def _root_multiplicity(self, root):\n poly = self.copy()\n multiplicity = 1\n\n while True:\n # If the root is also a root of the derivative, then its a multiple root.\n poly = poly.derivative()\n\n if poly == 0:\n # Cannot test whether p'(root) = 0 because p'(x) = 0. We've exhausted the non-zero derivatives. For\n # any Galois field, taking `characteristic` derivatives results in p'(x) = 0. For a root with multiplicity\n # greater than the field's characteristic, we need factor to the polynomial. Here we factor out (x - root)^m,\n # where m is the current multiplicity.\n poly = self.copy() // (Poly([1, -root], field=self.field)**multiplicity)\n\n if poly(root) == 0:\n multiplicity += 1\n else:\n break\n\n return multiplicity\n\n def derivative(self, k: int = 1) -> \"Poly\":\n r\"\"\"\n Computes the :math:`k`-th formal derivative :math:`\\frac{d^k}{dx^k} f(x)` of the polynomial :math:`f(x)`.\n\n Parameters\n ----------\n k : int, optional\n The number of derivatives to compute. 1 corresponds to :math:`p'(x)`, 2 corresponds to :math:`p''(x)`, etc.\n The default is 1.\n\n Returns\n -------\n galois.Poly\n The :math:`k`-th formal derivative of the polynomial :math:`f(x)`.\n\n Notes\n -----\n For the polynomial\n\n .. math::\n f(x) = a_d x^d + a_{d-1} x^{d-1} + \\dots + a_1 x + a_0\n\n the first formal derivative is defined as\n\n .. math::\n f'(x) = (d) \\cdot a_{d} x^{d-1} + (d-1) \\cdot a_{d-1} x^{d-2} + \\dots + (2) \\cdot a_{2} x + a_1\n\n where :math:`\\cdot` represents scalar multiplication (repeated addition), not finite field multiplication.\n For example, :math:`3 \\cdot a = a + a + a`.\n\n References\n ----------\n * https://en.wikipedia.org/wiki/Formal_derivative\n\n Examples\n --------\n Compute the derivatives of a polynomial over :math:`\\mathrm{GF}(2)`.\n\n .. 
ipython:: python\n\n p = galois.Poly.Random(7); p\n p.derivative()\n\n # k derivatives of a polynomial where k is the Galois field's characteristic will always result in 0\n p.derivative(2)\n\n Compute the derivatives of a polynomial over :math:`\\mathrm{GF}(7)`.\n\n .. ipython:: python\n\n GF = galois.GF(7)\n p = galois.Poly.Random(11, field=GF); p\n p.derivative()\n p.derivative(2)\n p.derivative(3)\n\n # k derivatives of a polynomial where k is the Galois field's characteristic will always result in 0\n p.derivative(7)\n\n Compute the derivatives of a polynomial over :math:`\\mathrm{GF}(2^8)`.\n\n .. ipython:: python\n\n GF = galois.GF(2**8)\n p = galois.Poly.Random(7, field=GF); p\n p.derivative()\n\n # k derivatives of a polynomial where k is the Galois field's characteristic will always result in 0\n p.derivative(2)\n \"\"\"\n if not isinstance(k, (int, np.integer)):\n raise TypeError(f\"Argument `k` must be an integer, not {type(k)}.\")\n if not k > 0:\n raise ValueError(f\"Argument `k` must be a positive integer, not {k}.\")\n\n if 0 in self.nonzero_degrees:\n # Cut off the 0th degree\n degrees = self.nonzero_degrees[:-1] - 1\n coeffs = self.nonzero_coeffs[:-1] * self.nonzero_degrees[:-1] # Scalar multiplication\n else:\n degrees = self.nonzero_degrees - 1\n coeffs = self.nonzero_coeffs * self.nonzero_degrees # Scalar multiplication\n\n p_prime = Poly.Degrees(degrees, coeffs, field=self.field)\n\n k -= 1\n if k > 0:\n return p_prime.derivative(k)\n else:\n return p_prime\n\n ###############################################################################\n # Overridden dunder methods\n ###############################################################################\n\n def __str__(self):\n return f\"Poly({self.string}, {self.field.name})\"\n\n def __repr__(self):\n return str(self)\n\n def __hash__(self):\n t = tuple([self.field.order,] + self.nonzero_degrees.tolist() + self.nonzero_coeffs.tolist())\n return hash(t)\n\n def __call__(self, x: FieldArray, field: Optional[FieldClass] = None, elementwise: bool = True) -> FieldArray:\n \"\"\"\n Evaluates the polynomial at :math:`x`.\n\n Parameters\n ----------\n x : galois.FieldArray\n An array (or 0-D scalar) of field elements to evaluate the polynomial over.\n field : galois.FieldClass, optional\n The Galois field to evaluate the polynomial over. The default is `None` which represents\n the polynomial's current field, i.e. :obj:`field`.\n elementwise : bool, optional\n Indicates to evaluate arrays elementwise. The default is `True`. If `False`, the polynomial\n indeterminate is evaluated at the square matrix :math:`X`.\n\n Returns\n -------\n galois.FieldArray\n The result of the polynomial evaluation of the same shape as :math:`x`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(2**8)\n p = galois.Poly([37, 123, 0, 201], field=GF); p\n\n Evaluate the polynomial elementwise at :math:`x`.\n\n .. ipython:: python\n\n x = GF.Random(4); x\n p(x)\n GF(37)*x**3 + GF(123)*x**2 + GF(201)\n\n Evaluate the polynomial at the matrix :math:`X`.\n\n .. 
ipython:: python\n\n X = GF.Random((2,2)); X\n p(X, elementwise=False)\n GF(37)*np.linalg.matrix_power(X,3) + GF(123)*np.linalg.matrix_power(X,2) + GF(201)*GF.Identity(2)\n \"\"\"\n if not isinstance(field, (type(None), FieldClass)):\n raise TypeError(f\"Argument `field` must be a Galois field array class, not {type(field)}.\")\n\n field = self.field if field is None else field\n coeffs = field(self.coeffs)\n x = field(x)\n\n if elementwise:\n return field._poly_evaluate(coeffs, x)\n else:\n if not (x.ndim == 2 and x.shape[0] == x.shape[1]):\n raise ValueError(f\"Argument `x` must be a square matrix when evaluating the polynomial not elementwise, not have shape {x.shape}.\")\n return field._poly_evaluate_matrix(coeffs, x)\n\n def __len__(self) -> int:\n \"\"\"\n Returns the length of the coefficient array.\n\n The length of the coefficient array is `Poly.degree + 1`.\n\n Returns\n -------\n int\n The length of the coefficient array.\n\n Examples\n --------\n .. ipython:: python\n\n a = galois.Poly.Random(5); a\n a.coeffs\n len(a)\n a.degree + 1\n \"\"\"\n return self.degree + 1\n\n def _check_inputs_are_polys(self, a, b):\n \"\"\"\n Verify polynomial arithmetic operands are either galois.Poly or scalars in a finite field.\n \"\"\"\n if not isinstance(a, (Poly, self.field)):\n raise TypeError(f\"Both operands must be a galois.Poly or a single element of its field {self.field.name}, not {type(a)}.\")\n if not isinstance(b, (Poly, self.field)):\n raise TypeError(f\"Both operands must be a galois.Poly or a single element of its field {self.field.name}, not {type(b)}.\")\n if (isinstance(a, Poly) and isinstance(b, Poly)) and not a.field is b.field:\n raise TypeError(f\"Both polynomial operands must be over the same field, not {a.field.name} and {b.field.name}.\")\n\n def _check_inputs_are_polys_or_ints(self, a, b):\n \"\"\"\n Verify polynomial arithmetic operands are either galois.Poly, scalars in a finite field, or an integer (scalar multiplication).\n \"\"\"\n if not isinstance(a, (Poly, self.field, int, np.integer)):\n raise TypeError(f\"Both operands must be a galois.Poly, a single element of its field {self.field.name}, or an integer, not {type(a)}.\")\n if not isinstance(b, (Poly, self.field, int, np.integer)):\n raise TypeError(f\"Both operands must be a galois.Poly, a single element of its field {self.field.name}, or an integer, not {type(b)}.\")\n if (isinstance(a, Poly) and isinstance(b, Poly)) and not a.field is b.field:\n raise TypeError(f\"Both polynomial operands must be over the same field, not {a.field.name} and {b.field.name}.\")\n\n def _convert_field_scalars_to_polys(self, a, b):\n \"\"\"\n Convert finite field scalars to 0-degree polynomials in that field.\n \"\"\"\n # Promote a single field element to a 0-degree polynomial\n if isinstance(a, self.field):\n if not a.size == 1:\n raise ValueError(f\"Arguments that are Galois field elements must have size 1 (equivalently a 0-degree polynomial), not size {a.size}.\")\n a = Poly(np.atleast_1d(a))\n if isinstance(b, self.field):\n if not b.size == 1:\n raise ValueError(f\"Arguments that are Galois field elements must have size 1 (equivalently a 0-degree polynomial), not size {b.size}.\")\n b = Poly(np.atleast_1d(b))\n\n return a, b\n\n @staticmethod\n def _determine_poly_class(a, b):\n \"\"\"\n Determine the type of polynomial arithmetic to perform.\n \"\"\"\n if isinstance(a, SparsePoly) or isinstance(b, SparsePoly):\n return SparsePoly\n elif isinstance(a, BinaryPoly) or isinstance(b, BinaryPoly):\n return BinaryPoly\n else:\n 
return DensePoly\n\n def __add__(self, other):\n \"\"\"\n Adds two polynomials.\n\n Parameters\n ----------\n other : galois.Poly\n The polynomial :math:`b(x)`.\n\n Returns\n -------\n galois.Poly\n The polynomial :math:`c(x) = a(x) + b(x)`.\n\n Examples\n --------\n .. ipython:: python\n\n a = galois.Poly.Random(5); a\n b = galois.Poly.Random(3); b\n a + b\n \"\"\"\n self._check_inputs_are_polys(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n cls = self._determine_poly_class(a, b)\n return cls._add(a, b)\n\n def __radd__(self, other):\n self._check_inputs_are_polys(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n cls = self._determine_poly_class(a, b)\n return cls._add(b, a)\n\n def __sub__(self, other):\n \"\"\"\n Subtracts two polynomials.\n\n Parameters\n ----------\n other : galois.Poly\n The polynomial :math:`b(x)`.\n\n Returns\n -------\n galois.Poly\n The polynomial :math:`c(x) = a(x) - b(x)`.\n\n Examples\n --------\n .. ipython:: python\n\n a = galois.Poly.Random(5); a\n b = galois.Poly.Random(3); b\n a - b\n \"\"\"\n self._check_inputs_are_polys(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n cls = self._determine_poly_class(a, b)\n return cls._sub(a, b)\n\n def __rsub__(self, other):\n self._check_inputs_are_polys(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n cls = self._determine_poly_class(a, b)\n return cls._sub(b, a)\n\n def __mul__(self, other):\n \"\"\"\n Multiplies two polynomials.\n\n Parameters\n ----------\n other : galois.Poly\n The polynomial :math:`b(x)`.\n\n Returns\n -------\n galois.Poly\n The polynomial :math:`c(x) = a(x) b(x)`.\n\n Examples\n --------\n .. ipython:: python\n\n a = galois.Poly.Random(5); a\n b = galois.Poly.Random(3); b\n a * b\n \"\"\"\n self._check_inputs_are_polys_or_ints(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n if isinstance(a, (int, np.integer)):\n # Ensure the integer is in the second operand for scalar multiplication\n a, b = b, a\n cls = self._determine_poly_class(a, b)\n return cls._mul(a, b)\n\n def __rmul__(self, other):\n self._check_inputs_are_polys_or_ints(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n if isinstance(b, (int, np.integer)):\n # Ensure the integer is in the second operand for scalar multiplication\n b, a = a, b\n cls = self._determine_poly_class(a, b)\n return cls._mul(b, a)\n\n def __divmod__(self, other):\n \"\"\"\n Divides two polynomials and returns the quotient and remainder.\n\n Parameters\n ----------\n other : galois.Poly\n The polynomial :math:`b(x)`.\n\n Returns\n -------\n galois.Poly\n The quotient polynomial :math:`q(x)` such that :math:`a(x) = b(x)q(x) + r(x)`.\n galois.Poly\n The remainder polynomial :math:`r(x)` such that :math:`a(x) = b(x)q(x) + r(x)`.\n\n Examples\n --------\n .. 
ipython:: python\n\n a = galois.Poly.Random(5); a\n b = galois.Poly.Random(3); b\n q, r = divmod(a, b)\n q, r\n b*q + r\n \"\"\"\n self._check_inputs_are_polys(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n cls = self._determine_poly_class(a, b)\n return cls._divmod(a, b)\n\n def __rdivmod__(self, other):\n self._check_inputs_are_polys(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n cls = self._determine_poly_class(a, b)\n return cls._divmod(b, a)\n\n def __truediv__(self, other):\n \"\"\"\n Divides two polynomials and returns the quotient.\n\n True division and floor division are equivalent.\n\n Parameters\n ----------\n other : galois.Poly\n The polynomial :math:`b(x)`.\n\n Returns\n -------\n galois.Poly\n The quotient polynomial :math:`q(x)` such that :math:`a(x) = b(x)q(x) + r(x)`.\n\n Examples\n --------\n .. ipython:: python\n\n a = galois.Poly.Random(5); a\n b = galois.Poly.Random(3); b\n divmod(a, b)\n a / b\n \"\"\"\n self._check_inputs_are_polys(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n cls = self._determine_poly_class(a, b)\n return cls._divmod(a, b)[0]\n\n def __rtruediv__(self, other):\n self._check_inputs_are_polys(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n cls = self._determine_poly_class(a, b)\n return cls._divmod(b, a)[0]\n\n def __floordiv__(self, other):\n \"\"\"\n Divides two polynomials and returns the quotient.\n\n True division and floor division are equivalent.\n\n Parameters\n ----------\n other : galois.Poly\n The polynomial :math:`b(x)`.\n\n Returns\n -------\n galois.Poly\n The quotient polynomial :math:`q(x)` such that :math:`a(x) = b(x)q(x) + r(x)`.\n\n Examples\n --------\n .. ipython:: python\n\n a = galois.Poly.Random(5); a\n b = galois.Poly.Random(3); b\n divmod(a, b)\n a // b\n \"\"\"\n self._check_inputs_are_polys(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n cls = self._determine_poly_class(a, b)\n return cls._divmod(a, b)[0]\n\n def __rfloordiv__(self, other):\n self._check_inputs_are_polys(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n cls = self._determine_poly_class(a, b)\n return cls._divmod(b, a)[0]\n\n def __mod__(self, other):\n \"\"\"\n Divides two polynomials and returns the remainder.\n\n Parameters\n ----------\n other : galois.Poly\n The polynomial :math:`b(x)`.\n\n Returns\n -------\n galois.Poly\n The remainder polynomial :math:`r(x)` such that :math:`a(x) = b(x)q(x) + r(x)`.\n\n Examples\n --------\n .. ipython:: python\n\n a = galois.Poly.Random(5); a\n b = galois.Poly.Random(3); b\n divmod(a, b)\n a % b\n \"\"\"\n self._check_inputs_are_polys(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n cls = self._determine_poly_class(a, b)\n return cls._mod(a, b)\n\n def __rmod__(self, other):\n self._check_inputs_are_polys(self, other)\n a, b = self._convert_field_scalars_to_polys(self, other)\n cls = self._determine_poly_class(a, b)\n return cls._mod(b, a)\n\n def __pow__(self, other):\n \"\"\"\n Exponentiates the polynomial to an integer power.\n\n Parameters\n ----------\n other : int\n The non-negative integer exponent.\n\n Returns\n -------\n galois.Poly\n The polynomial :math:`a(x)^b`.\n\n Examples\n --------\n .. 
ipython:: python\n\n a = galois.Poly.Random(5); a\n a**3\n a * a * a\n \"\"\"\n if not isinstance(other, (int, np.integer)):\n raise TypeError(f\"For polynomial exponentiation, the second argument must be an int, not {other}.\")\n if not other >= 0:\n raise ValueError(f\"Can only exponentiate polynomials to non-negative integers, not {other}.\")\n a, power = self, other\n field = self.field\n\n # c(x) = a(x) ** power\n if power == 0:\n return Poly.One(field)\n\n c_square = a # The \"squaring\" part\n c_mult = Poly.One(field) # The \"multiplicative\" part\n\n while power > 1:\n if power % 2 == 0:\n c_square *= c_square\n power //= 2\n else:\n c_mult *= c_square\n power -= 1\n c = c_mult * c_square\n\n return c\n\n def __neg__(self):\n raise NotImplementedError\n\n def __eq__(self, other):\n if isinstance(other, (int, np.integer)):\n # Compare poly to a integer scalar (assumed to be from the same field)\n return self.degree == 0 and np.array_equal(self.coeffs, [other])\n\n elif isinstance(other, FieldArray):\n # Compare poly to a finite field scalar (may or may not be from the same field)\n if not other.ndim == 0:\n raise ValueError(f\"Can only compare galois.Poly to a 0-D galois.FieldArray scalar, not shape {other.shape}.\")\n return self.field is type(other) and self.degree == 0 and np.array_equal(self.coeffs, np.atleast_1d(other))\n\n elif not isinstance(other, Poly):\n raise TypeError(f\"Can only compare galois.Poly and galois.Poly / int / galois.FieldArray scalar objects, not {type(other)}.\")\n\n else:\n # Compare two poly objects to each other\n return self.field is other.field and np.array_equal(self.nonzero_degrees, other.nonzero_degrees) and np.array_equal(self.nonzero_coeffs, other.nonzero_coeffs)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @classmethod\n def _add(cls, a, b):\n raise NotImplementedError\n\n @classmethod\n def _sub(cls, a, b):\n raise NotImplementedError\n\n @classmethod\n def _mul(cls, a, b):\n raise NotImplementedError\n\n @classmethod\n def _divmod(cls, a, b):\n raise NotImplementedError\n\n @classmethod\n def _mod(cls, a, b):\n raise NotImplementedError\n\n ###############################################################################\n # Instance properties\n ###############################################################################\n\n @property\n def field(self) -> FieldClass:\n \"\"\"\n galois.FieldClass: The Galois field array class to which the coefficients belong.\n\n Examples\n --------\n .. ipython:: python\n\n a = galois.Poly.Random(5); a\n a.field\n\n .. ipython:: python\n\n GF = galois.GF(2**8)\n b = galois.Poly.Random(5, field=GF); b\n b.field\n \"\"\"\n raise NotImplementedError\n\n @property\n def degree(self) -> int:\n \"\"\"\n int: The degree of the polynomial, i.e. the highest degree with non-zero coefficient.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(7)\n p = galois.Poly([3, 0, 5, 2], field=GF); p\n p.degree\n \"\"\"\n raise NotImplementedError\n\n @property\n def nonzero_degrees(self) -> np.ndarray:\n \"\"\"\n numpy.ndarray: An array of the polynomial degrees that have non-zero coefficients, in degree-descending order. The entries of\n :obj:`nonzero_degrees` are paired with :obj:`nonzero_coeffs`.\n\n Examples\n --------\n .. 
ipython:: python\n\n GF = galois.GF(7)\n p = galois.Poly([3, 0, 5, 2], field=GF); p\n p.nonzero_degrees\n \"\"\"\n raise NotImplementedError\n\n @property\n def nonzero_coeffs(self) -> FieldArray:\n \"\"\"\n galois.FieldArray: The non-zero coefficients of the polynomial in degree-descending order. The entries of :obj:`nonzero_degrees`\n are paired with :obj:`nonzero_coeffs`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(7)\n p = galois.Poly([3, 0, 5, 2], field=GF); p\n p.nonzero_coeffs\n \"\"\"\n raise NotImplementedError\n\n @property\n def degrees(self) -> np.ndarray:\n \"\"\"\n numpy.ndarray: An array of the polynomial degrees in degree-descending order. The entries of :obj:`degrees`\n are paired with :obj:`coeffs`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(7)\n p = galois.Poly([3, 0, 5, 2], field=GF); p\n p.degrees\n \"\"\"\n raise NotImplementedError\n\n @property\n def coeffs(self) -> FieldArray:\n \"\"\"\n galois.FieldArray: The coefficients of the polynomial in degree-descending order. The entries of :obj:`degrees` are\n paired with :obj:`coeffs`.\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(7)\n p = galois.Poly([3, 0, 5, 2], field=GF); p\n p.coeffs\n \"\"\"\n raise NotImplementedError\n\n @property\n def integer(self) -> int:\n r\"\"\"\n int: The integer representation of the polynomial. For the polynomial :math:`f(x) = a_d x^d + a_{d-1} x^{d-1} + \\dots + a_1 x + a_0`\n over the field :math:`\\mathrm{GF}(p^m)`, the integer representation is :math:`i = a_d (p^m)^{d} + a_{d-1} (p^m)^{d-1} + \\dots + a_1 (p^m) + a_0`\n using integer arithmetic, not finite field arithmetic.\n\n Said differently, if the polynomial coefficients :math:`\\{a_d, a_{d-1}, \\dots, a_1, a_0\\}` are considered as the \"digits\" of a radix-:math:`p^m`\n value, the polynomial's integer representation is the decimal value (radix-:math:`10`).\n\n Examples\n --------\n .. ipython:: python\n\n GF = galois.GF(7)\n p = galois.Poly([3, 0, 5, 2], field=GF); p\n p.integer\n p.integer == 3*7**3 + 5*7**1 + 2*7**0\n \"\"\"\n return sparse_poly_to_integer(self.nonzero_degrees, self.nonzero_coeffs, self.field.order)\n\n @property\n def string(self) -> str:\n \"\"\"\n str: The string representation of the polynomial, without specifying the Galois field.\n\n Examples\n --------\n .. 
ipython:: python\n\n GF = galois.GF(7)\n p = galois.Poly([3, 0, 5, 2], field=GF); p\n p.string\n \"\"\"\n return sparse_poly_to_str(self.nonzero_degrees, self.nonzero_coeffs)\n\n\nclass DensePoly(Poly):\n \"\"\"\n Implementation of dense polynomials over Galois fields.\n \"\"\"\n\n __slots__ = [\"_coeffs\"]\n\n def __new__(cls, coeffs, field=None): # pylint: disable=signature-differs\n # Arguments aren't verified in Poly.__new__()\n obj = object.__new__(cls)\n obj._coeffs = coeffs\n\n if obj._coeffs.size > 1:\n # Remove leading zero coefficients\n idxs = np.nonzero(obj._coeffs)[0]\n if idxs.size > 0:\n obj._coeffs = obj._coeffs[idxs[0]:]\n else:\n obj._coeffs = obj._coeffs[-1]\n\n # Ensure the coefficient array isn't 0-dimensional\n obj._coeffs = np.atleast_1d(obj._coeffs)\n\n return obj\n\n ###############################################################################\n # Methods\n ###############################################################################\n\n def copy(self):\n return DensePoly(self._coeffs.copy())\n\n ###############################################################################\n # Arithmetic methods\n ###############################################################################\n\n def __neg__(self):\n return DensePoly(-self._coeffs)\n\n @classmethod\n def _add(cls, a, b):\n field = a.field\n\n # c(x) = a(x) + b(x)\n c_coeffs = field.Zeros(max(a.coeffs.size, b.coeffs.size))\n c_coeffs[-a.coeffs.size:] = a.coeffs\n c_coeffs[-b.coeffs.size:] += b.coeffs\n\n return Poly(c_coeffs)\n\n @classmethod\n def _sub(cls, a, b):\n field = a.field\n\n # c(x) = a(x) + b(x)\n c_coeffs = field.Zeros(max(a.coeffs.size, b.coeffs.size))\n c_coeffs[-a.coeffs.size:] = a.coeffs\n c_coeffs[-b.coeffs.size:] -= b.coeffs\n\n return Poly(c_coeffs)\n\n @classmethod\n def _mul(cls, a, b):\n if isinstance(b, (int, np.integer)):\n # Scalar multiplication (p * 3 = p + p + p)\n c_coeffs = a.coeffs * b\n else:\n # c(x) = a(x) * b(x)\n c_coeffs = np.convolve(a.coeffs, b.coeffs)\n\n return Poly(c_coeffs)\n\n @classmethod\n def _divmod(cls, a, b):\n field = a.field\n zero = Poly.Zero(field)\n\n # q(x)*b(x) + r(x) = a(x)\n if b.degree == 0:\n return Poly(a.coeffs // b.coeffs), zero\n\n elif a == 0:\n return zero, zero\n\n elif a.degree < b.degree:\n return zero, a.copy()\n\n else:\n q_coeffs, r_coeffs = field._poly_divmod(a.coeffs, b.coeffs)\n return Poly(q_coeffs), Poly(r_coeffs)\n\n @classmethod\n def _mod(cls, a, b):\n return cls._divmod(a, b)[1]\n\n ###############################################################################\n # Instance properties\n ###############################################################################\n\n @property\n def field(self):\n return type(self._coeffs)\n\n @property\n def degree(self):\n return self._coeffs.size - 1\n\n @property\n def nonzero_degrees(self):\n return self.degree - np.nonzero(self._coeffs)[0]\n\n @property\n def nonzero_coeffs(self):\n return self._coeffs[np.nonzero(self._coeffs)[0]]\n\n @property\n def degrees(self):\n return np.arange(self.degree, -1, -1)\n\n @property\n def coeffs(self):\n return self._coeffs.copy()\n\n\nclass BinaryPoly(Poly):\n \"\"\"\n Implementation of polynomials over GF(2).\n \"\"\"\n\n __slots__ = [\"_integer\", \"_coeffs\"]\n\n def __new__(cls, integer): # pylint: disable=signature-differs\n if not isinstance(integer, (int, np.integer)):\n raise TypeError(f\"Argument `integer` must be an integer, not {type(integer)}.\")\n if not integer >= 0:\n raise ValueError(f\"Argument `integer` must be non-negative, not 
{integer}.\")\n\n obj = object.__new__(cls)\n obj._integer = integer\n obj._coeffs = None # Only compute these if requested\n\n return obj\n\n ###############################################################################\n # Methods\n ###############################################################################\n\n def copy(self):\n return BinaryPoly(self._integer)\n\n ###############################################################################\n # Arithmetic methods\n ###############################################################################\n\n def __neg__(self):\n return self.copy()\n\n @classmethod\n def _add(cls, a, b):\n return BinaryPoly(a.integer ^ b.integer)\n\n @classmethod\n def _sub(cls, a, b):\n return BinaryPoly(a.integer ^ b.integer)\n\n @classmethod\n def _mul(cls, a, b):\n if isinstance(b, (int, np.integer)):\n # Scalar multiplication (p * 3 = p + p + p)\n return BinaryPoly(a.integer) if b % 2 == 1 else BinaryPoly(0)\n\n else:\n # Re-order operands such that a > b so the while loop has less loops\n a = a.integer\n b = b.integer\n if b > a:\n a, b = b, a\n\n c = 0\n while b > 0:\n if b & 0b1:\n c ^= a # Add a(x) to c(x)\n b >>= 1 # Divide b(x) by x\n a <<= 1 # Multiply a(x) by x\n\n return BinaryPoly(c)\n\n @classmethod\n def _divmod(cls, a, b):\n deg_a = a.degree\n deg_q = a.degree - b.degree\n deg_r = b.degree - 1\n a = a.integer\n b = b.integer\n\n q = 0\n mask = 1 << deg_a\n for i in range(deg_q, -1, -1):\n q <<= 1\n if a & mask:\n a ^= b << i\n q ^= 1 # Set the LSB then left shift\n assert a & mask == 0\n mask >>= 1\n\n # q = a >> deg_r\n mask = (1 << (deg_r + 1)) - 1 # The last deg_r + 1 bits of a\n r = a & mask\n\n return BinaryPoly(q), BinaryPoly(r)\n\n @classmethod\n def _mod(cls, a, b):\n return cls._divmod(a, b)[1]\n\n ###############################################################################\n # Instance properties\n ###############################################################################\n\n @property\n def field(self):\n return GF2\n\n @property\n def degree(self):\n if self._integer == 0:\n return 0\n else:\n return len(bin(self._integer)[2:]) - 1\n\n @property\n def nonzero_degrees(self):\n return self.degree - np.nonzero(self.coeffs)[0]\n\n @property\n def nonzero_coeffs(self):\n return self.coeffs[np.nonzero(self.coeffs)[0]]\n\n @property\n def degrees(self):\n return np.arange(self.degree, -1, -1)\n\n @property\n def coeffs(self):\n if self._coeffs is None:\n binstr = bin(self._integer)[2:]\n self._coeffs = GF2([int(b) for b in binstr])\n return self._coeffs.copy()\n\n @property\n def integer(self):\n return self._integer\n\n\nclass SparsePoly(Poly):\n \"\"\"\n Implementation of sparse polynomials over Galois fields.\n \"\"\"\n\n __slots__ = [\"_degrees\", \"_coeffs\"]\n\n def __new__(cls, degrees, coeffs=None, field=None): # pylint: disable=signature-differs\n coeffs = [1,]*len(degrees) if coeffs is None else coeffs\n if not isinstance(degrees, (list, tuple, np.ndarray)):\n raise TypeError(f\"Argument `degrees` must be array-like, not {type(degrees)}.\")\n if not isinstance(coeffs, (list, tuple, np.ndarray)):\n raise TypeError(f\"Argument `coeffs` must be array-like, not {type(coeffs)}.\")\n if not len(degrees) == len(coeffs):\n raise ValueError(f\"Arguments `degrees` and `coeffs` must have the same length, not {len(degrees)} and {len(coeffs)}.\")\n if not all(degree >= 0 for degree in degrees):\n raise ValueError(f\"Argument `degrees` must have non-negative values, not {degrees}.\")\n\n obj = object.__new__(cls)\n\n if 
isinstance(coeffs, FieldArray) and field is None:\n obj._degrees = np.array(degrees)\n obj._coeffs = coeffs\n else:\n field = GF2 if field is None else field\n if isinstance(coeffs, np.ndarray):\n # Ensure coeffs is an iterable\n coeffs = coeffs.tolist()\n obj._degrees = np.array(degrees)\n obj._coeffs = field([-field(abs(c)) if c < 0 else field(c) for c in coeffs])\n\n # Sort the degrees and coefficients in descending order\n idxs = np.argsort(degrees)[::-1]\n obj._degrees = obj._degrees[idxs]\n obj._coeffs = obj._coeffs[idxs]\n\n # Remove zero coefficients\n idxs = np.nonzero(obj._coeffs)[0]\n obj._degrees = obj._degrees[idxs]\n obj._coeffs = obj._coeffs[idxs]\n\n return obj\n\n ###############################################################################\n # Methods\n ###############################################################################\n\n def copy(self):\n return SparsePoly(self.degrees, self.coeffs)\n\n def reverse(self):\n return SparsePoly(self.degree - self.degrees, self.coeffs)\n\n ###############################################################################\n # Arithmetic methods\n ###############################################################################\n\n def __neg__(self):\n return SparsePoly(self._degrees, -self._coeffs)\n\n @classmethod\n def _add(cls, a, b):\n field = a.field\n\n # c(x) = a(x) + b(x)\n cc = dict(zip(a.nonzero_degrees, a.nonzero_coeffs))\n for b_degree, b_coeff in zip(b.nonzero_degrees, b.nonzero_coeffs):\n cc[b_degree] = cc.get(b_degree, field(0)) + b_coeff\n\n return Poly.Degrees(list(cc.keys()), list(cc.values()), field=field)\n\n @classmethod\n def _sub(cls, a, b):\n field = a.field\n\n # c(x) = a(x) - b(x)\n cc = dict(zip(a.nonzero_degrees, a.nonzero_coeffs))\n for b_degree, b_coeff in zip(b.nonzero_degrees, b.nonzero_coeffs):\n cc[b_degree] = cc.get(b_degree, field(0)) - b_coeff\n\n return Poly.Degrees(list(cc.keys()), list(cc.values()), field=field)\n\n @classmethod\n def _mul(cls, a, b):\n field = a.field\n\n if isinstance(b, (int, np.integer)):\n # Scalar multiplication (p * 3 = p + p + p)\n return Poly.Degrees(a.nonzero_degrees, a.nonzero_coeffs * b)\n\n else:\n # c(x) = a(x) * b(x)\n cc = {}\n for a_degree, a_coeff in zip(a.nonzero_degrees, a.nonzero_coeffs):\n for b_degree, b_coeff in zip(b.nonzero_degrees, b.nonzero_coeffs):\n cc[a_degree + b_degree] = cc.get(a_degree + b_degree, field(0)) + a_coeff*b_coeff\n\n return Poly.Degrees(list(cc.keys()), list(cc.values()), field=field)\n\n @classmethod\n def _divmod(cls, a, b):\n field = a.field\n zero = Poly.Zero(field)\n\n # q(x)*b(x) + r(x) = a(x)\n if b.degree == 0:\n q_degrees = a.nonzero_degrees\n q_coeffs = [a_coeff // b.coeffs[0] for a_coeff in a.nonzero_coeffs]\n return Poly.Degrees(q_degrees, q_coeffs, field=field), zero\n\n elif a == 0:\n return zero, zero\n\n elif a.degree < b.degree:\n return zero, a.copy()\n\n else:\n aa = dict(zip(a.nonzero_degrees, a.nonzero_coeffs))\n b_coeffs = b.coeffs\n\n q_degree = a.degree - b.degree\n r_degree = b.degree # One larger than final remainder\n qq = {}\n r_coeffs = field.Zeros(r_degree + 1)\n\n # Preset remainder so we can rotate at the start of loop\n for i in range(0, b.degree):\n r_coeffs[1 + i] = aa.get(a.degree - i, 0)\n\n for i in range(0, q_degree + 1):\n r_coeffs = np.roll(r_coeffs, -1)\n r_coeffs[-1] = aa.get(a.degree - (i + b.degree), 0)\n\n if r_coeffs[0] > 0:\n q = r_coeffs[0] // b_coeffs[0]\n r_coeffs -= q*b_coeffs\n qq[q_degree - i] = q\n\n return Poly.Degrees(list(qq.keys()), list(qq.values()), field=field), 
Poly(r_coeffs[1:])\n\n @classmethod\n def _mod(cls, a, b):\n field = a.field\n zero = Poly.Zero(field)\n\n # q(x)*b(x) + r(x) = a(x)\n if b.degree == 0:\n return zero\n\n elif a == 0:\n return zero\n\n elif a.degree < b.degree:\n return a.copy()\n\n else:\n aa = dict(zip(a.nonzero_degrees, a.nonzero_coeffs))\n b_coeffs = b.coeffs\n\n q_degree = a.degree - b.degree\n r_degree = b.degree # One larger than final remainder\n r_coeffs = field.Zeros(r_degree + 1)\n\n # Preset remainder so we can rotate at the start of loop\n for i in range(0, b.degree):\n r_coeffs[1 + i] = aa.get(a.degree - i, 0)\n\n for i in range(0, q_degree + 1):\n r_coeffs = np.roll(r_coeffs, -1)\n r_coeffs[-1] = aa.get(a.degree - (i + b.degree), 0)\n\n if r_coeffs[0] > 0:\n q = r_coeffs[0] // b_coeffs[0]\n r_coeffs -= q*b_coeffs\n\n return Poly(r_coeffs[1:])\n\n ###############################################################################\n # Instance properties\n ###############################################################################\n\n @property\n def field(self):\n return type(self._coeffs)\n\n @property\n def degree(self):\n return 0 if self._degrees.size == 0 else int(np.max(self._degrees))\n\n @property\n def nonzero_degrees(self):\n return self._degrees.copy()\n\n @property\n def nonzero_coeffs(self):\n return self._coeffs.copy()\n\n @property\n def degrees(self):\n return np.arange(self.degree, -1, -1)\n\n @property\n def coeffs(self):\n # Assemble a full list of coefficients, including zeros\n coeffs = self.field.Zeros(self.degree + 1)\n if self.nonzero_degrees.size > 0:\n coeffs[self.degree - self.nonzero_degrees] = self.nonzero_coeffs\n return coeffs\n\n\n# Define the GF(2) primitive polynomial here, not in _fields/_gf2.py, to avoid a circular dependency with `Poly`.\n# The primitive polynomial is p(x) = x - alpha, where alpha = 1. Over GF(2), this is equivalent\n# to p(x) = x + 1.\nGF2._irreducible_poly = Poly([1, 1]) # pylint: disable=protected-access\n"
] |
[
[
"numpy.issubdtype",
"numpy.dtype",
"numpy.all",
"numpy.max",
"numpy.any",
"numpy.iinfo",
"numpy.bool_",
"numpy.where",
"numpy.random.default_rng",
"numpy.roll",
"numpy.unique",
"numpy.arange",
"numpy.add.reduce",
"numpy.atleast_1d",
"numpy.count_nonzero",
"numpy.repeat",
"numpy.zeros",
"numpy.log",
"numpy.nonzero",
"numpy.power",
"numpy.power.outer",
"numpy.logical_or",
"numpy.int64",
"numpy.identity",
"numpy.argsort",
"numpy.meshgrid",
"numpy.array",
"numpy.sum",
"numpy.flip",
"numpy.convolve",
"numpy.printoptions",
"numpy.nditer",
"numpy.abs",
"numpy.array_equal",
"numpy.sort",
"numpy.ones",
"numpy.gcd",
"numpy.isscalar",
"numpy.empty"
]
] |
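The row above pairs the full `galois` polynomial implementation with the NumPy calls extracted from it. As a hedged illustration of how that class is used, the sketch below simply replays the usage shown in the row's own docstring examples; it assumes the `galois` package is installed and invents no names beyond those examples.

```python
# Usage sketch reproducing the docstring examples embedded in the row above.
import galois

GF = galois.GF(7)
p = galois.Poly([3, 0, 5, 2], field=GF)      # 3x^3 + 5x + 2 over GF(7)

print(p.coeffs)                               # degree-descending coefficients
print(p.degree)                               # 3
print(p.roots(multiplicity=True))             # roots and their multiplicities
print(p.derivative())                         # first formal derivative

q = galois.Poly.Roots([0, 0, 1])              # (x - 0)^2 (x - 1) over GF(2)
print(q)

a = galois.Poly.Random(5, field=GF)
b = galois.Poly.Random(3, field=GF)
quo, rem = divmod(a, b)                       # q(x), r(x) with a = b*q + r
print(b * quo + rem == a)                     # True
```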
iacercalixto/WALS
|
[
"7f4b5042591d536f6b371d5fb252616d2da7abaf"
] |
[
"onmt/train_single.py"
] |
[
"#!/usr/bin/env python\n\"\"\"\n Training on a single process\n\"\"\"\nfrom __future__ import division\n\nimport argparse\nimport os\nimport random\nimport torch\n\nimport onmt.opts as opts\n\nfrom onmt.inputters.inputter import build_dataset_iter, lazily_load_dataset, \\\n _load_fields, _collect_report_features\nfrom onmt.model_builder import build_model\nfrom onmt.utils.optimizers import build_optim\nfrom onmt.trainer import build_trainer\nfrom onmt.models import build_model_saver\nfrom onmt.utils.logging import init_logger, logger\n\nimport sqlite3\nfrom collections import defaultdict\nimport numpy as np\n\ndef get_feat_values(SimulationLanguages, WalsValues, FeaturesList, ListLanguages, FeatureTypes, FeatureNames) :\n\n FeatureValues, FeatureTensors = defaultdict(lambda: defaultdict(int)), defaultdict(lambda: defaultdict(int))\n\n for Language in SimulationLanguages: # For each language in the simulation...\n idx_language = ListLanguages.index(Language) \n for FeatureType in FeatureTypes: # For each feature type...\n for Feature in FeatureTypes[FeatureType]: # For each feature...\n idx_feature = FeatureNames.index(Feature)\n FeatureValues[Language][Feature] = WalsValues[idx_language][idx_feature+1] \n FeatureTensors[Feature] = torch.from_numpy(np.array(range(FeaturesList[idx_feature][1] + 1)))\n\n return FeatureValues, FeatureTensors\n\n\ndef _check_save_model_path(opt):\n save_model_path = os.path.abspath(opt.save_model)\n model_dirname = os.path.dirname(save_model_path)\n if not os.path.exists(model_dirname):\n os.makedirs(model_dirname)\n\n\ndef _tally_parameters(model):\n n_params = sum([p.nelement() for p in model.parameters()])\n enc = 0\n dec = 0\n for name, param in model.named_parameters():\n if 'encoder' in name:\n enc += param.nelement()\n elif 'decoder' or 'generator' in name:\n dec += param.nelement()\n return n_params, enc, dec\n\n\ndef training_opt_postprocessing(opt, device_id):\n if opt.word_vec_size != -1:\n opt.src_word_vec_size = opt.word_vec_size\n opt.tgt_word_vec_size = opt.word_vec_size\n\n if opt.layers != -1:\n opt.enc_layers = opt.layers\n opt.dec_layers = opt.layers\n\n opt.brnn = (opt.encoder_type == \"brnn\")\n\n if opt.rnn_type == \"SRU\" and not opt.gpu_ranks:\n raise AssertionError(\"Using SRU requires -gpu_ranks set.\")\n\n if torch.cuda.is_available() and not opt.gpu_ranks:\n logger.info(\"WARNING: You have a CUDA device, \\\n should run with -gpu_ranks\")\n\n if opt.seed > 0:\n torch.manual_seed(opt.seed)\n # this one is needed for torchtext random call (shuffled iterator)\n # in multi gpu it ensures datasets are read in the same order\n random.seed(opt.seed)\n # some cudnn methods can be random even after fixing the seed\n # unless you tell it to be deterministic\n torch.backends.cudnn.deterministic = True\n\n if device_id >= 0:\n torch.cuda.set_device(device_id)\n if opt.seed > 0:\n # These ensure same initialization in multi gpu mode\n torch.cuda.manual_seed(opt.seed)\n\n return opt\n\n\ndef main(opt, device_id):\n\n SimulationLanguages = [opt.wals_src, opt.wals_tgt]\n\n print('Loading WALS features from databases...')\n\n cwd = os.getcwd()\n print(cwd)\n\n db = sqlite3.connect(cwd + '/onmt/WalsValues.db')\n cursor = db.cursor()\n cursor.execute('SELECT * FROM WalsValues')\n WalsValues = cursor.fetchall()\n\n db = sqlite3.connect(cwd + '/onmt/FeaturesList.db')\n cursor = db.cursor()\n cursor.execute('SELECT * FROM FeaturesList')\n FeaturesList = cursor.fetchall()\n\n db = sqlite3.connect(cwd + '/onmt/FTInfos.db')\n cursor = db.cursor()\n 
cursor.execute('SELECT * FROM FTInfos')\n FTInfos = cursor.fetchall()\n\n db = sqlite3.connect(cwd + '/onmt/FTList.db')\n cursor = db.cursor()\n cursor.execute('SELECT * FROM FTList')\n FTList = cursor.fetchall()\n\n ListLanguages = []\n for i in WalsValues:\n ListLanguages.append(i[0])\n\n FeatureTypes = defaultdict(lambda: defaultdict(list))\n for i in FTList:\n FeatureTypes[i[0]] = i[1].split(',')\n\n FeatureNames = []\n for i in FeatureTypes:\n for j in FeatureTypes[i]:\n FeatureNames.append(j)\n\n FeatureTypesNames = []\n for i in FeatureTypes:\n FeatureTypesNames.append(i)\n\n FeatureValues, FeatureTensors = get_feat_values(SimulationLanguages, WalsValues, FeaturesList, ListLanguages, FeatureTypes, FeatureNames) \n\n print('WALS databases loaded!')\n\n # FeatureValues: defaultdict with feature values, per language.\n # FeatureTensors: tensor of possible outputs, per feature.\n\n opt = training_opt_postprocessing(opt, device_id)\n init_logger(opt.log_file)\n # Load checkpoint if we resume from a previous training.\n if opt.train_from:\n logger.info('Loading checkpoint from %s' % opt.train_from)\n checkpoint = torch.load(opt.train_from,\n map_location=lambda storage, loc: storage)\n model_opt = checkpoint['opt']\n else:\n checkpoint = None\n model_opt = opt\n\n # Peek the first dataset to determine the data_type.\n # (All datasets have the same data_type).\n first_dataset = next(lazily_load_dataset(\"train\", opt))\n data_type = first_dataset.data_type\n\n # Load fields generated from preprocess phase.\n fields = _load_fields(first_dataset, data_type, opt, checkpoint)\n\n # Report src/tgt features.\n\n src_features, tgt_features = _collect_report_features(fields)\n for j, feat in enumerate(src_features):\n logger.info(' * src feature %d size = %d'\n % (j, len(fields[feat].vocab)))\n for j, feat in enumerate(tgt_features):\n logger.info(' * tgt feature %d size = %d'\n % (j, len(fields[feat].vocab)))\n\n # Build model.\n model = build_model(model_opt, opt, fields, checkpoint, FeatureValues, FeatureTensors, FeatureTypes, FeaturesList, FeatureNames, FTInfos, FeatureTypesNames, SimulationLanguages)\n n_params, enc, dec = _tally_parameters(model)\n logger.info('encoder: %d' % enc)\n logger.info('decoder: %d' % dec)\n logger.info('* number of parameters: %d' % n_params)\n _check_save_model_path(opt)\n\n # Build optimizer.\n optim = build_optim(model, opt, checkpoint)\n\n # Build model saver\n model_saver = build_model_saver(model_opt, opt, model, fields, optim)\n\n trainer = build_trainer(opt, device_id, model, fields,\n optim, data_type, model_saver=model_saver)\n\n def train_iter_fct(): return build_dataset_iter(\n lazily_load_dataset(\"train\", opt), fields, opt)\n\n def valid_iter_fct(): return build_dataset_iter(\n lazily_load_dataset(\"valid\", opt), fields, opt, is_train=False)\n\n # Do training.\n trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,\n opt.valid_steps)\n\n if opt.tensorboard:\n trainer.report_manager.tensorboard_writer.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='train.py',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n opts.add_md_help_argument(parser)\n opts.model_opts(parser)\n opts.train_opts(parser)\n\n opt = parser.parse_args()\n main(opt)\n"
] |
[
[
"torch.cuda.set_device",
"torch.cuda.manual_seed",
"torch.load",
"torch.manual_seed",
"torch.cuda.is_available"
]
] |
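The `train_single.py` row above reads per-language WALS feature values out of SQLite tables and turns the range of possible values for each feature into a tensor. The sketch below mirrors only that access pattern; the in-memory database, its two features, and their value counts are invented for illustration.

```python
# Sketch of the WALS-feature lookup pattern used in train_single.py above:
# read per-language feature values from a SQLite table, then build a tensor
# of the possible values for each feature.
import sqlite3
from collections import defaultdict

import numpy as np
import torch

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE WalsValues (language TEXT, feat_81A INTEGER, feat_82A INTEGER)")
db.executemany("INSERT INTO WalsValues VALUES (?, ?, ?)",
               [("eng", 2, 1), ("deu", 7, 1)])
wals_values = db.execute("SELECT * FROM WalsValues").fetchall()

feature_names = ["feat_81A", "feat_82A"]
num_values = {"feat_81A": 7, "feat_82A": 3}   # possible values per feature (invented)

feature_values = defaultdict(dict)
feature_tensors = {}
for lang_row in wals_values:
    for j, feat in enumerate(feature_names):
        feature_values[lang_row[0]][feat] = lang_row[j + 1]
        feature_tensors[feat] = torch.from_numpy(np.arange(num_values[feat] + 1))

print(feature_values["eng"], feature_tensors["feat_81A"])
```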
HaoranZ99/RL-2
|
[
"253c2fd8c705f88d9cc79abd9f331dc99b5895eb"
] |
[
"logger.py"
] |
[
"import numpy as np\nimport time, datetime\nimport matplotlib.pyplot as plt\n\nclass Logger():\n def __init__(self, save_dir):\n self.save_log = save_dir / \"log\"\n with open(self.save_log, \"w\") as f:\n f.write(\n f\"{'Episode':>8}{'Step':>8}{'Epsilon':>10}{'MeanReward':>15}\"\n f\"{'MeanLength':>15}{'MeanLoss':>15}{'MeanQValue':>15}\"\n f\"{'TimeDelta':>15}{'Time':>20}\\n\"\n )\n self.ep_rewards_plot = save_dir / \"reward_plot.jpg\"\n self.ep_lengths_plot = save_dir / \"length_plot.jpg\"\n self.ep_avg_losses_plot = save_dir / \"loss_plot.jpg\"\n self.ep_avg_qs_plot = save_dir / \"q_plot.jpg\"\n\n # History metrics\n self.ep_rewards = []\n self.ep_lengths = []\n self.ep_avg_losses = []\n self.ep_avg_qs = []\n\n # Moving averages, added for every call to record()\n self.moving_avg_ep_rewards = []\n self.moving_avg_ep_lengths = []\n self.moving_avg_ep_avg_losses = []\n self.moving_avg_ep_avg_qs = []\n\n # Current episode metric\n self.init_episode()\n\n # Timing\n self.record_time = time.time()\n\n\n def log_step(self, reward, loss, q):\n self.curr_ep_reward += reward\n self.curr_ep_length += 1\n if loss:\n self.curr_ep_loss += loss\n self.curr_ep_q += q\n self.curr_ep_loss_length += 1\n\n def log_episode(self):\n \"Mark end of episode\"\n self.ep_rewards.append(self.curr_ep_reward)\n self.ep_lengths.append(self.curr_ep_length)\n if self.curr_ep_loss_length == 0:\n ep_avg_loss = 0\n ep_avg_q = 0\n else:\n ep_avg_loss = np.round(self.curr_ep_loss / self.curr_ep_loss_length, 5)\n ep_avg_q = np.round(self.curr_ep_q / self.curr_ep_loss_length, 5)\n self.ep_avg_losses.append(ep_avg_loss)\n self.ep_avg_qs.append(ep_avg_q)\n\n self.init_episode()\n\n def init_episode(self):\n self.curr_ep_reward = 0.0\n self.curr_ep_length = 0\n self.curr_ep_loss = 0.0\n self.curr_ep_q = 0.0\n self.curr_ep_loss_length = 0\n\n def record(self, episode, epsilon, step):\n mean_ep_reward = np.round(np.mean(self.ep_rewards[-100:]), 3)\n mean_ep_length = np.round(np.mean(self.ep_lengths[-100:]), 3)\n mean_ep_loss = np.round(np.mean(self.ep_avg_losses[-100:]), 3)\n mean_ep_q = np.round(np.mean(self.ep_avg_qs[-100:]), 3)\n self.moving_avg_ep_rewards.append(mean_ep_reward)\n self.moving_avg_ep_lengths.append(mean_ep_length)\n self.moving_avg_ep_avg_losses.append(mean_ep_loss)\n self.moving_avg_ep_avg_qs.append(mean_ep_q)\n\n\n last_record_time = self.record_time\n self.record_time = time.time()\n time_since_last_record = np.round(self.record_time - last_record_time, 3)\n\n print(\n f\"Episode {episode} - \"\n f\"Step {step} - \"\n f\"Epsilon {epsilon} - \"\n f\"Mean Reward {mean_ep_reward} - \"\n f\"Mean Length {mean_ep_length} - \"\n f\"Mean Loss {mean_ep_loss} - \"\n f\"Mean Q Value {mean_ep_q} - \"\n f\"Time Delta {time_since_last_record} - \"\n f\"Time {datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')}\"\n )\n\n with open(self.save_log, \"a\") as f:\n f.write(\n f\"{episode:8d}{step:8d}{epsilon:10.3f}\"\n f\"{mean_ep_reward:15.3f}{mean_ep_length:15.3f}{mean_ep_loss:15.3f}{mean_ep_q:15.3f}\"\n f\"{time_since_last_record:15.3f}\"\n f\"{datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'):>20}\\n\"\n )\n\n for metric in [\"ep_rewards\", \"ep_lengths\", \"ep_avg_losses\", \"ep_avg_qs\"]:\n plt.plot(getattr(self, f\"moving_avg_{metric}\"))\n plt.savefig(getattr(self, f\"{metric}_plot\"))\n plt.clf()\n \n def replay_log_step(self, _save_dir, _save_log, state, action):\n with open(_save_dir / _save_log, \"a\") as f:\n f.write(\n f\"The agent take action {action}, the state is {state}.\\n\"\n )\n\n def replay_log(self, 
_save_dir, _save_log, msg):\n with open(_save_dir / _save_log, \"a\") as f:\n f.write(\n f\"{msg}\\n\"\n )\n \n def repaly_brief(self, _save_dir, _save_log, dict, step):\n with open(_save_dir / _save_log, \"a\") as f:\n for key, val in dict.items():\n f.write(f\"{key} : {val / step * 100:10.3f}%.\\n\")\n \n def get_action_meanings(self):\n return {0: \"Eat\", 1: \"Send gift\", 2: \"Idle\", 3: \"Chat\", 4: \"Work\", 5: \"Comments on Moments\", \n 6:\"Like on Moments\", 7: \"Live room\", 8: \"Play games\", 9: \"Disco dancing\", 10: \"Pray\"}"
] |
[
[
"numpy.round",
"matplotlib.pyplot.clf",
"numpy.mean"
]
] |
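The `Logger` row above maintains 100-episode moving averages of rewards, lengths, losses and Q-values via `np.mean` and `np.round`. A minimal sketch of that bookkeeping, with invented reward values:

```python
# Moving-average bookkeeping in the spirit of Logger.record() above.
import numpy as np

ep_rewards = [1.0, 3.0, 2.5, 4.0, 0.5]        # invented episode rewards
moving_avg = []
for i in range(1, len(ep_rewards) + 1):
    # Average over the most recent (up to) 100 episodes, rounded to 3 decimals.
    moving_avg.append(np.round(np.mean(ep_rewards[max(0, i - 100):i]), 3))
print(moving_avg)
```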
petrux/LiteFlowX
|
[
"96197bf4b5a87e682c980d303a0e6429cdb34964"
] |
[
"liteflow/tests/test_layers_base.py"
] |
[
"\"\"\"Test the base class of the layers hierarchy.\"\"\"\n\n# Disable pylint warning about too many statements\n# and local variables since we are dealing with tests.\n# pylint: disable=R0914, R0915\n\nimport mock\nimport tensorflow as tf\n\nfrom liteflow import layers, utils\n\n\nclass Layer(layers.Layer):\n \"\"\"Dummy `layers.Layer` implementation.\"\"\"\n\n def __init__(self, scope=None):\n super(Layer, self).__init__(scope=scope)\n\n def _call_helper(self, *args, **kwargs): # pylint: disable=I0011,W0221\n return RuntimeError('This method should be patched!')\n\n\nclass ScopeTracker(object):\n \"\"\"Function class that just tracks the tf.VariableScope.\"\"\"\n\n def __init__(self, fn=None):\n self._scopes = []\n self._fn = fn if fn is not None else lambda *args, **kwargs: None\n\n def scopes(self):\n \"\"\"Returns the list of tracked scopes.\"\"\"\n return self._scopes\n\n def latest(self):\n \"\"\"Returns the latest tracked scope (or None).\"\"\"\n if not self._scopes:\n return None\n return self._scopes[-1]\n\n def __call__(self, *args, **kwargs):\n scope = tf.get_variable_scope()\n self._scopes.append(scope)\n return self._fn(*args, **kwargs)\n\n @classmethod\n def empty(cls):\n \"\"\"Create a ScopeTracker with an empty inner function.\"\"\"\n def _empty(*args, **kwargs):\n _, _ = args, kwargs\n return ScopeTracker(_empty)\n\n @classmethod\n def identity(cls):\n \"\"\"Create a ScopeTracker with an inner function that returns its only argument.\"\"\"\n return ScopeTracker(lambda x: x)\n\n\ndef scopetracker(func):\n \"\"\"Turns your function into a ScopeTracker.\n\n ```python\n # Use this function as a decorator\n\n import tensorflow as tf\n\n @scopetracker\n def fun(x, y, *args, **kwargs):\n return x + y\n\n witf tf.variable_scope('Scope') as scope:\n print(fun(22, 1)) # 23\n print fun.laters().name # 'Scope'\n ```\n \"\"\"\n return ScopeTracker(func)\n\n\nclass LayerTest(tf.test.TestCase):\n \"\"\"Test case for a generic liteflow.layers.Layer implementation.\"\"\"\n\n @mock.patch.object(Layer, '_call_helper')\n def _test_scope(self, scope, _call_helper=None):\n\n _call_helper.side_effect = ScopeTracker.identity()\n\n layer = Layer()\n self.assertIsNotNone(layer.name)\n self.assertRaises(ValueError, lambda: layer.scope)\n _call_helper.assert_not_called()\n\n layer(object(), scope=scope)\n _call_helper.assert_called_once()\n self.assertEqual(1, len(_call_helper.side_effect.scopes()))\n self.assertEqual(layer.scope, _call_helper.side_effect.latest().name)\n\n layer(object(), scope=scope)\n self.assertEqual(2, _call_helper.call_count)\n self.assertEqual(2, len(_call_helper.side_effect.scopes()))\n self.assertEqual(layer.scope, _call_helper.side_effect.latest().name)\n\n layer(object(), scope=None)\n self.assertEqual(3, _call_helper.call_count)\n self.assertEqual(3, len(_call_helper.side_effect.scopes()))\n self.assertEqual(layer.scope, _call_helper.side_effect.latest().name)\n\n layer(object(), scope=layer.scope + '__ANOTHER')\n self.assertEqual(4, _call_helper.call_count)\n self.assertEqual(4, len(_call_helper.side_effect.scopes()))\n self.assertEqual(layer.scope, _call_helper.side_effect.latest().name)\n\n def test_scope(self):\n \"\"\"Test the default behaviour of a layer.\"\"\"\n self._test_scope(None)\n self._test_scope('Scope')\n self._test_scope(utils.as_scope('Scope'))\n\n @mock.patch.object(Layer, '_call_helper')\n def test_reuse_variables(self, _call_helper=None):\n \"\"\"Test that the first invocation of the layer doesn't reuse variables.\"\"\"\n scope_name = 'Scope'\n 
var_name = 'Variable'\n def _call(*args, **kwargs):\n _, _ = args, kwargs\n _ = tf.get_variable(name=var_name, shape=0)\n _call_helper.side_effect = _call\n\n with tf.variable_scope(scope_name) as scope:\n _call()\n\n with tf.variable_scope(scope_name) as scope:\n self.assertRaises(ValueError, Layer(), scope=scope)\n\n with tf.variable_scope(scope_name) as scope:\n scope.reuse_variables()\n Layer().__call__(scope=scope)\n\n with tf.variable_scope(scope_name + '__ANOTHER') as scope:\n scope.reuse_variables()\n self.assertRaises(ValueError, Layer(), scope=scope)"
] |
[
[
"tensorflow.variable_scope",
"tensorflow.get_variable",
"tensorflow.get_variable_scope"
]
] |
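The LiteFlow test row above revolves around recording which `tf.VariableScope` is active when a layer is called. Below is a stripped-down sketch of that scope-tracking idea, written against the TF1-style API the row targets; reaching it through `tf.compat.v1` is an assumption about the installed TensorFlow version.

```python
# Minimal scope-tracking sketch in the spirit of ScopeTracker above.
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

tracked = []

def tracker(x):
    # Record the name of the variable scope active at call time.
    tracked.append(tf.get_variable_scope().name)
    return x

with tf.variable_scope("Scope"):
    tracker(42)

print(tracked)   # ['Scope']
```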
boknilev/fairseq
|
[
"5da0bffba945105ef7e0d4e7e5610e1cf966a459"
] |
[
"train.py"
] |
[
"#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nTrain a new model on one or across multiple GPUs.\n\"\"\"\n\nimport collections\nimport math\nimport random\nimport operator\n\nimport numpy as np\nimport torch\n\nfrom fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils\nfrom fairseq.data import iterators\nfrom fairseq.trainer import Trainer\nfrom fairseq.meters import AverageMeter, StopwatchMeter\n\n\ndef main(args, init_distributed=False):\n utils.import_user_module(args)\n\n assert args.max_tokens is not None or args.max_sentences is not None, \\\n 'Must specify batch size either with --max-tokens or --max-sentences'\n\n # Initialize CUDA and distributed training\n if torch.cuda.is_available() and not args.cpu:\n torch.cuda.set_device(args.device_id)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if init_distributed:\n args.distributed_rank = distributed_utils.distributed_init(args)\n\n if distributed_utils.is_master(args):\n checkpoint_utils.verify_checkpoint_directory(args.save_dir)\n\n # Print args\n print(args)\n\n # Setup task, e.g., translation, language modeling, etc.\n task = tasks.setup_task(args)\n\n # Load valid dataset (we load training data below, based on the latest checkpoint)\n for valid_sub_split in args.valid_subset.split(','):\n task.load_dataset(valid_sub_split, combine=False, epoch=0)\n\n # Build model and criterion\n model = task.build_model(args)\n criterion = task.build_criterion(args)\n print(model)\n print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))\n print('| num. model params: {} (num. 
trained: {})'.format(\n sum(p.numel() for p in model.parameters()),\n sum(p.numel() for p in model.parameters() if p.requires_grad),\n ))\n\n # Build trainer\n trainer = Trainer(args, task, model, criterion)\n print('| training on {} GPUs'.format(args.distributed_world_size))\n print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(\n args.max_tokens,\n args.max_sentences,\n ))\n\n # Load the latest checkpoint if one is available and restore the\n # corresponding train iterator\n extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)\n\n # Train until the learning rate gets too small\n max_epoch = args.max_epoch or math.inf\n max_update = args.max_update or math.inf\n lr = trainer.get_lr()\n train_meter = StopwatchMeter()\n train_meter.start()\n valid_subsets = args.valid_subset.split(',')\n early_stop_patience = 0\n best_valid_loss = -math.inf if args.maximize_best_checkpoint_metric else math.inf \n while (\n lr > args.min_lr\n and (epoch_itr.epoch < max_epoch or (epoch_itr.epoch == max_epoch\n and epoch_itr._next_epoch_itr is not None))\n and trainer.get_num_updates() < max_update\n and (args.patience <= 0 or early_stop_patience < args.patience)\n ):\n # train for one epoch\n train(args, trainer, task, epoch_itr)\n\n if not args.disable_validation and epoch_itr.epoch % args.validate_interval == 0:\n valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)\n else:\n valid_losses = [None]\n\n # only use first validation loss to update the learning rate\n lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])\n\n # save checkpoint\n if epoch_itr.epoch % args.save_interval == 0:\n checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])\n\n reload_dataset = ':' in getattr(args, 'data', '')\n # sharded data: get train iterator for next epoch\n epoch_itr = trainer.get_train_iterator(epoch_itr.epoch, load_dataset=reload_dataset)\n\n # check patience\n current_loss = valid_losses[0]\n #print('current_loss:', current_loss)\n #print('best_valid_loss:', best_valid_loss)\n better_function = operator.gt if args.maximize_best_checkpoint_metric else operator.lt \n if better_function(current_loss, best_valid_loss):\n early_stop_patience = 0\n best_valid_loss = current_loss\n else:\n early_stop_patience += 1\n if args.patience > 0 and early_stop_patience == args.patience:\n print('| reached patience of {0}, will stop'.format(args.patience))\n\n\n train_meter.stop()\n print('| done training in {:.1f} seconds'.format(train_meter.sum))\n\n\ndef train(args, trainer, task, epoch_itr):\n \"\"\"Train the model for one epoch.\"\"\"\n # Update parameters every N batches\n update_freq = args.update_freq[epoch_itr.epoch - 1] \\\n if epoch_itr.epoch <= len(args.update_freq) else args.update_freq[-1]\n\n # Initialize data iterator\n itr = epoch_itr.next_epoch_itr(\n fix_batches_to_gpus=args.fix_batches_to_gpus,\n shuffle=(epoch_itr.epoch >= args.curriculum),\n )\n itr = iterators.GroupedIterator(itr, update_freq)\n progress = progress_bar.build_progress_bar(\n args, itr, epoch_itr.epoch, no_progress_bar='simple',\n )\n\n extra_meters = collections.defaultdict(lambda: AverageMeter())\n valid_subsets = args.valid_subset.split(',')\n max_update = args.max_update or math.inf\n for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):\n log_output = trainer.train_step(samples)\n if log_output is None:\n continue\n\n # log mid-epoch stats\n stats = get_training_stats(trainer)\n for k, v in log_output.items():\n if k in ['loss', 
'nll_loss', 'ntokens', 'nsentences', 'sample_size']:\n continue # these are already logged above\n if 'loss' in k or k == 'accuracy':\n extra_meters[k].update(v, log_output['sample_size'])\n else:\n extra_meters[k].update(v)\n stats[k] = extra_meters[k].avg\n progress.log(stats, tag='train', step=stats['num_updates'])\n\n # ignore the first mini-batch in words-per-second and updates-per-second calculation\n if i == 0:\n trainer.get_meter('wps').reset()\n trainer.get_meter('ups').reset()\n\n num_updates = trainer.get_num_updates()\n if (\n not args.disable_validation\n and args.save_interval_updates > 0\n and num_updates % args.save_interval_updates == 0\n and num_updates > 0\n ):\n valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)\n checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])\n\n if num_updates >= max_update:\n break\n\n # log end-of-epoch stats\n stats = get_training_stats(trainer)\n for k, meter in extra_meters.items():\n stats[k] = meter.avg\n progress.print(stats, tag='train', step=stats['num_updates'])\n\n # reset training meters\n for k in [\n 'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',\n ]:\n meter = trainer.get_meter(k)\n if meter is not None:\n meter.reset()\n\n\ndef get_training_stats(trainer):\n stats = collections.OrderedDict()\n stats['loss'] = trainer.get_meter('train_loss')\n if trainer.get_meter('train_nll_loss').count > 0:\n nll_loss = trainer.get_meter('train_nll_loss')\n stats['nll_loss'] = nll_loss\n else:\n nll_loss = trainer.get_meter('train_loss')\n stats['ppl'] = utils.get_perplexity(nll_loss.avg)\n stats['wps'] = trainer.get_meter('wps')\n stats['ups'] = trainer.get_meter('ups')\n stats['wpb'] = trainer.get_meter('wpb')\n stats['bsz'] = trainer.get_meter('bsz')\n stats['num_updates'] = trainer.get_num_updates()\n stats['lr'] = trainer.get_lr()\n stats['gnorm'] = trainer.get_meter('gnorm')\n stats['clip'] = trainer.get_meter('clip')\n stats['oom'] = trainer.get_meter('oom')\n if trainer.get_meter('loss_scale') is not None:\n stats['loss_scale'] = trainer.get_meter('loss_scale')\n stats['wall'] = round(trainer.get_meter('wall').elapsed_time)\n stats['train_wall'] = trainer.get_meter('train_wall')\n return stats\n\n\ndef validate(args, trainer, task, epoch_itr, subsets):\n \"\"\"Evaluate the model on the validation set(s) and return the losses.\"\"\"\n\n if args.fixed_validation_seed is not None:\n # set fixed seed for every validation\n utils.set_torch_seed(args.fixed_validation_seed)\n\n valid_losses = []\n for subset in subsets:\n # Initialize data iterator\n itr = task.get_batch_iterator(\n dataset=task.dataset(subset),\n max_tokens=args.max_tokens_valid,\n max_sentences=args.max_sentences_valid,\n max_positions=utils.resolve_max_positions(\n task.max_positions(),\n trainer.get_model().max_positions(),\n ),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=args.required_batch_size_multiple,\n seed=args.seed,\n num_shards=args.distributed_world_size,\n shard_id=args.distributed_rank,\n num_workers=args.num_workers,\n ).next_epoch_itr(shuffle=False)\n progress = progress_bar.build_progress_bar(\n args, itr, epoch_itr.epoch,\n prefix='valid on \\'{}\\' subset'.format(subset),\n no_progress_bar='simple'\n )\n\n # reset validation loss meters\n for k in ['valid_loss', 'valid_nll_loss']:\n meter = trainer.get_meter(k)\n if meter is not None:\n meter.reset()\n extra_meters = collections.defaultdict(lambda: AverageMeter())\n\n for sample in 
progress:\n log_output = trainer.valid_step(sample)\n\n for k, v in log_output.items():\n if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:\n continue\n extra_meters[k].update(v)\n\n # log validation stats\n stats = get_valid_stats(trainer, args, extra_meters)\n for k, meter in extra_meters.items():\n stats[k] = meter.avg\n progress.print(stats, tag=subset, step=trainer.get_num_updates())\n\n valid_losses.append(\n stats[args.best_checkpoint_metric].avg\n if args.best_checkpoint_metric == 'loss'\n else stats[args.best_checkpoint_metric]\n )\n return valid_losses\n\n\ndef get_valid_stats(trainer, args, extra_meters=None):\n stats = collections.OrderedDict()\n stats['loss'] = trainer.get_meter('valid_loss')\n if trainer.get_meter('valid_nll_loss').count > 0:\n nll_loss = trainer.get_meter('valid_nll_loss')\n stats['nll_loss'] = nll_loss\n else:\n nll_loss = stats['loss']\n stats['ppl'] = utils.get_perplexity(nll_loss.avg)\n stats['num_updates'] = trainer.get_num_updates()\n if hasattr(checkpoint_utils.save_checkpoint, 'best'):\n key = 'best_{0}'.format(args.best_checkpoint_metric)\n best_function = max if args.maximize_best_checkpoint_metric else min\n\n current_metric = None\n if args.best_checkpoint_metric == 'loss':\n current_metric = stats['loss'].avg\n elif args.best_checkpoint_metric in extra_meters:\n current_metric = extra_meters[args.best_checkpoint_metric].avg\n elif args.best_checkpoint_metric in stats:\n current_metric = stats[args.best_checkpoint_metric]\n else:\n raise ValueError(\"best_checkpoint_metric not found in logs\")\n\n stats[key] = best_function(\n checkpoint_utils.save_checkpoint.best,\n current_metric,\n )\n return stats\n\n\ndef distributed_main(i, args, start_rank=0):\n args.device_id = i\n if args.distributed_rank is None: # torch.multiprocessing.spawn\n args.distributed_rank = start_rank + i\n main(args, init_distributed=True)\n\n\ndef cli_main():\n parser = options.get_training_parser()\n args = options.parse_args_and_arch(parser)\n\n if args.distributed_init_method is None:\n distributed_utils.infer_init_method(args)\n\n if args.distributed_init_method is not None:\n # distributed training\n if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:\n start_rank = args.distributed_rank\n args.distributed_rank = None # assign automatically\n torch.multiprocessing.spawn(\n fn=distributed_main,\n args=(args, start_rank),\n nprocs=torch.cuda.device_count(),\n )\n else:\n distributed_main(args.device_id, args)\n elif args.distributed_world_size > 1:\n # fallback for single node with multiple GPUs\n assert args.distributed_world_size <= torch.cuda.device_count()\n port = random.randint(10000, 20000)\n args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)\n args.distributed_rank = None # set based on device id\n if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':\n print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')\n torch.multiprocessing.spawn(\n fn=distributed_main,\n args=(args, ),\n nprocs=args.distributed_world_size,\n )\n else:\n # single GPU training\n main(args)\n\n\nif __name__ == '__main__':\n cli_main()\n"
] |
[
[
"torch.cuda.set_device",
"torch.multiprocessing.spawn",
"numpy.random.seed",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.cuda.device_count"
]
] |
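Aside: the api list above for this fairseq train.py row names the standard single-node, multi-GPU launch primitives. A minimal sketch of how those calls are typically combined is given below; run_worker and base_seed are illustrative names, not part of the source file, and this is not fairseq's actual entry point.

import numpy as np
import torch
import torch.multiprocessing as mp


def run_worker(device_id, base_seed):
    # Each spawned process pins itself to one GPU (when available) and seeds its RNGs.
    if torch.cuda.is_available():
        torch.cuda.set_device(device_id)
    np.random.seed(base_seed + device_id)
    torch.manual_seed(base_seed + device_id)
    print(f"worker {device_id} ready")


if __name__ == "__main__":
    world_size = max(torch.cuda.device_count(), 1)
    # spawn() starts `nprocs` processes and passes the process index as the first argument.
    mp.spawn(fn=run_worker, args=(42,), nprocs=world_size)

With the default join=True, spawn() blocks until every worker process returns, so no explicit join step is needed in the sketch.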
LSSTDESC/sims_TruthCatalog
|
[
"348f5d231997eed387aaa6e3fd4218c126e14cdb",
"348f5d231997eed387aaa6e3fd4218c126e14cdb"
] |
[
"bin.src/write_star_variability_stats.py",
"scripts/Run3.1i/write_lensed_agn_truth.py"
] |
[
"#!/usr/bin/env python\n\"\"\"\nScript to generate stellar_variability table using multiprocessing\non a single 32 core Cori-Haswell node.\n\"\"\"\nimport numpy as np\nimport multiprocessing\nfrom desc.sims_truthcatalog import write_star_variability_stats\n\n\nstars_db_file = ('/global/projecta/projectdirs/lsst/'\n 'groups/SSim/DC2/dc2_stellar_healpixel.db')\nrow_offset = 0\nprocesses = 32\nnum_rows = 20269973 # total number of rows in stars_db_file\nchunk_size = 10000\nrow_bounds = list(range(0, num_rows, num_rows//processes))\nrow_bounds.append(num_rows)\nrow_bounds = np.array(row_bounds) + row_offset\nworkers = []\nwith multiprocessing.Pool(processes=processes) as pool:\n for row_min, row_max in zip(row_bounds[:-1], row_bounds[1:]):\n outfile = f'star_lc_stats_{row_min:08d}_{row_max:08d}.db'\n workers.append(pool.apply_async(write_star_variability_stats,\n (stars_db_file, outfile, row_min,\n row_max), dict(chunk_size=chunk_size)))\n pool.close()\n pool.join()\n [_.get() for _ in workers]\n",
"import os\nimport multiprocessing\nimport numpy as np\nimport desc.sims_truthcatalog as stc\n\ndc2_info = '/global/cfs/cdirs/descssim/DC2'\nlensed_agn_truth_cat = os.path.join(dc2_info, 'Run3.0i', 'truth_tables',\n 'updated_lensed_agn_truth.db')\nopsim_db_file = os.path.join(dc2_info, 'minion_1016_desc_dithered_v4_trimmed.db')\nverbose = True\n\noutfile = 'lensed_agn_truth_cat.db'\nstc.write_lensed_agn_truth_summary(lensed_agn_truth_cat, outfile, verbose=verbose)\n\nnum_agn = 2437\nprocesses = 32\n#num_agn = 21\n#processes = 3\n\nobject_ranges = [int(_) for _ in np.linspace(0, num_agn, processes + 1)]\nwith multiprocessing.Pool(processes=processes) as pool:\n workers = []\n for xmin, xmax in zip(object_ranges[:-1], object_ranges[1:]):\n outfile = f'lensed_agn_variability_truth_{xmin:04d}_{xmax:04d}.db'\n func = stc.write_lensed_agn_variability_truth\n args = (opsim_db_file, lensed_agn_truth_cat, outfile)\n kwds = dict(verbose=verbose, object_range=(xmin, xmax))\n workers.append(pool.apply_async(func, args, kwds))\n pool.close()\n pool.join()\n _ = [worker.get() for worker in workers]\n"
] |
[
[
"numpy.array"
],
[
"numpy.linspace"
]
] |
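Aside: both scripts in this row share one pattern: split a row or object range into contiguous chunks and fan the chunks out over a multiprocessing.Pool with apply_async. A minimal, self-contained sketch of that pattern follows; process_rows, num_rows, and processes are placeholder names, not taken from the repository.

import multiprocessing

import numpy as np


def process_rows(row_min, row_max):
    # Stand-in for the real worker, which writes one output .db file per chunk.
    return row_max - row_min


if __name__ == "__main__":
    num_rows, processes = 1000, 4
    # Chunk boundaries, echoing the numpy.array / numpy.linspace usage listed above.
    bounds = np.array([int(_) for _ in np.linspace(0, num_rows, processes + 1)])
    with multiprocessing.Pool(processes=processes) as pool:
        workers = [pool.apply_async(process_rows, (lo, hi))
                   for lo, hi in zip(bounds[:-1], bounds[1:])]
        pool.close()
        pool.join()
        results = [w.get() for w in workers]
    print(results)

Calling .get() on each AsyncResult after join() re-raises any exception a worker hit, which is presumably why both original scripts collect the results even though they discard the values.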
Joshua-Elms/CSCI-B365
|
[
"f28dda6da3098ec4b9472ee546c3e6798d358ce8"
] |
[
"Meteorology_Modeling_Project/preprocessing/brute_force.py"
] |
[
"import pandas as pd\nfrom itertools import combinations, chain\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.impute import KNNImputer\nfrom numpy import corrcoef\nfrom random import shuffle\n\npath = \"/Users/joshuaelms/Desktop/github_repos/CSCI-B365/Meteorology_Modeling_Project/data/pretty_data.csv\"\ndf = pd.read_csv(path)\n\ndef power_set(iterable):\n pset = chain.from_iterable(combinations(iterable, r) for r in range(len(iterable)+1))\n return list(list(combo) for combo in pset if len(combo) > 0)\n\nindices = [i for i in range(21, 37)] # + [i for i in range(41, 50)]\n\nfield_pset = power_set([df.columns[i] for i in indices])\n# shuffle(field_pset)\n\nprint(field_pset)\n\ndf_knn = KNNImputer().fit_transform(df)\ndf_knn_actual = pd.DataFrame(df_knn)\ndf_knn_actual.columns = df.columns\n\nwith open(\"/Users/joshuaelms/Desktop/github_repos/CSCI-B365/Meteorology_Modeling_Project/data/log_file.txt\", \"w\") as f:\n max = 0\n for combination in field_pset:\n arr = df_knn_actual[combination].to_numpy()\n target = df_knn_actual[\"Hailstone Size\"].to_numpy()\n obj = LinearRegression().fit(X=arr, y=target)\n coefficients = obj.coef_\n linear_combination = (df_knn_actual[combination]*coefficients).sum(axis=1)\n correlation = corrcoef(linear_combination, target)\n\n corr = correlation[0][1]\n\n if corr > max:\n max = corr\n variable_str = f'{\", \".join(combination[:-1])}, and {combination[-1]}' if len(combination) > 2 else f'{combination[0]} and {combination[-1]}' if len(combination) == 2 else combination[0]\n f.write(f\"Variable{'s' if len(combination) > 1 else ''} {variable_str} yield{'' if len(combination) > 1 else 's'} a correlation of {corr}\\n\")"
] |
[
[
"pandas.read_csv",
"pandas.DataFrame",
"sklearn.linear_model.LinearRegression",
"sklearn.impute.KNNImputer",
"numpy.corrcoef"
]
] |
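Aside: the brute-force search above scores every feature subset the same way: impute gaps with KNNImputer, fit a LinearRegression, and take the Pearson correlation of the fitted values against the target with numpy.corrcoef. A minimal sketch of that scoring step on synthetic data is shown below; the DataFrame and column names are illustrative only, and model.predict() replaces the script's manual coefficient sum, which only shifts values by the intercept and leaves the correlation unchanged.

import numpy as np
import pandas as pd
from sklearn.impute import KNNImputer
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(100, 3)), columns=["a", "b", "target"])
df.iloc[::10, 0] = np.nan  # punch holes in column "a" for the imputer to fill

# Impute missing values, then rebuild a labeled DataFrame as the script does.
imputed = pd.DataFrame(KNNImputer().fit_transform(df), columns=df.columns)
features = imputed[["a", "b"]].to_numpy()
target = imputed["target"].to_numpy()

# Fit on the feature subset and score it by correlation with the target.
model = LinearRegression().fit(features, target)
corr = np.corrcoef(model.predict(features), target)[0][1]
print(f"correlation between fitted values and target: {corr:.3f}")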