repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
gbravoi/monte-carlo-tree-search | [
"578df8df925e5f569e7354daff6642e1781389b6"
] | [
"checkers/utils.py"
] | [
"\"\"\"\nMartin Kersner, [email protected]\nseoulai.com\n2018\n\nAdapted by Gabriela B. to work with python 2.7 and ROS\n\"\"\"\nimport random\n\n\nimport numpy as np\n\nfrom base import Constants\nfrom rules import Rules\n\n\nclass BoardEncoding(object):\n def __init__(self):\n self._constants = Constants()\n self._encoding = {}\n\n self.empty = 0\n self.dark = 20\n self.dark_king = 21\n self.light = 10\n self.light_king = 11\n\n def __getitem__(self, name):\n return self._encoding[name]\n\n @property\n def empty(self):\n return self._encoding[self._constants.EMPTY]\n\n @empty.setter\n def empty(self, value):\n self._encoding[self._constants.EMPTY] = value\n\n @property\n def dark(self):\n return self._encoding[self._constants.DARK]\n\n @dark.setter\n def dark(self, value):\n self._encoding[self._constants.DARK] = value\n\n @property\n def dark_king(self):\n return self._encoding[self._constants.DARK_KING]\n\n @dark_king.setter\n def dark_king(self, value):\n self._encoding[self._constants.DARK_KING] = value\n\n @property\n def light(self):\n return self._encoding[self._constants.LIGHT]\n\n @light.setter\n def light(self, value):\n self._encoding[self._constants.LIGHT] = value\n\n @property\n def light_king(self):\n return self._encoding[self._constants.LIGHT_KING]\n\n @light_king.setter\n def light_king(self, value):\n self._encoding[self._constants.LIGHT_KING] = value\n\n\ndef board_list2numpy(\n board_list,\n encoding) :\n \"\"\"Convert the state of game (`board_list`) into 2D NumPy Array using `encoding`.\n\n Args:\n board_list: (List[List[Piece]]) State of the game.\n encoding: (BoardEncoding) Optional argument. If not given default encoding will be utilized.\n\n Returns:\n board_numpy: (np.array)\n \"\"\"\n board_size = len(board_list)\n constants = Constants()\n board_numpy = encoding[constants.EMPTY] * np.ones((board_size, board_size))\n\n for row in range(board_size):\n for col in range(board_size):\n if board_list[row][col] is not None:\n ptype = board_list[row][col].ptype\n king = board_list[row][col].king\n\n if ptype == constants.LIGHT:\n if king:\n piece_type = constants.LIGHT_KING\n else:\n piece_type = constants.LIGHT\n else: # DARK\n if king:\n piece_type = constants.DARK_KING\n else:\n piece_type = constants.DARK\n\n board_numpy[row][col] = encoding[piece_type]\n\n return board_numpy\n\n\ndef generate_random_move(\n board,\n ptype,\n board_size):\n \"\"\"Generate random move from all `ptype` valid moves but does not execute it.\n\n Args:\n board: (List[List[Piece]]) State of the game.\n ptype: (int) type of piece for which random move will be generated\n board_size: (int) size of board\n \"\"\"\n valid_moves = Rules.generate_valid_moves(board, ptype, board_size)\n rand_from_row, rand_from_col = random.choice(list(valid_moves.keys()))\n rand_to_row, rand_to_col = random.choice(valid_moves[(rand_from_row, rand_from_col)])\n return rand_from_row, rand_from_col, rand_to_row, rand_to_col\n\n\n#new functions\ndef print_board(board_list):\n\t\"\"\" \n\tprint board for debugging putposes\n receives board as a board_list: List[List],\n\t\"\"\"\n\tnumpy_board=board_list2numpy(board_list)\n\tprint(numpy_board)\n"
] | [
[
"numpy.ones"
]
] |
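The `apis` cell above credits this file with a single extracted call, `numpy.ones`, used in `board_list2numpy` to prefill the board array. A minimal sketch of that usage, assuming an 8x8 board and the file's default empty encoding of 0:

```python
# Initialize a board-sized array filled with the "empty" encoding value,
# mirroring board_list2numpy's first step.
import numpy as np

board_size = 8       # assumed board size, for illustration only
empty_encoding = 0   # BoardEncoding's default for EMPTY
board_numpy = empty_encoding * np.ones((board_size, board_size))
print(board_numpy.shape)  # (8, 8)
```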
darrenluc93/web-scraping-challenge | [
"50a9a21161ab0920038c8e0d6a9390bb8e35c5f5"
] | [
"scrape_mars.py"
] | [
"#Import Libraries\n#Web Scraping tools \nfrom bs4 import BeautifulSoup as bs\nfrom selenium import webdriver\n#from splinter import Browser\n\n#DataFrame tools\nimport pandas as pd\n\n#Misc tools for web scraping\nimport time\nimport requests\n\n#Function to initianilze browser.\ndef init_browser():\n\n #Settings for headless mode.\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n\n #path to the driver and load the options.\n browser = webdriver.Chrome(\"/usr/local/bin/chromedriver\",chrome_options = options)\n\n #returns the brower.\n return browser\n\ndef scrapper():\n\n #Call browser function\n browser = init_browser()\n #Dictionary to store all the results.\n marsInfo_dict = {}\n\n #Code to get NASA Mars News ----------------------------------------------------------------------------------------------\n try:\n\n url = \"https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&year=2020%3Apublish_date&category=19%2C165%2C184%2C204&blank_scope=Latest\"\n\n #splinter option - open url\n #browser.visit(url)\n\n #Open url.\n browser.get(url)\n\n #Time to let the website load all the elements\n time.sleep(4) \n\n #splinter option - save HTML \n #html = browser.html \n\n #save the html source.\n html = browser.page_source\n\n #Use bs4 to parse the html response.\n soup = bs(html, \"html.parser\")\n\n #Collect the latest news title\n news_title = soup.find_all('li', class_=\"slide\")[0].find(class_=\"content_title\").text\n news_p = soup.find_all('li', class_=\"slide\")[0].text\n\n marsInfo_dict['news_title'] = news_title\n marsInfo_dict['news_p'] = news_p\n \n except :\n print(f\"Problem at website {url}\")\n\n #Code to get JPL Mars Space Images - Featured Image ---------------------------------------------------------------------------------\n try:\n\n url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n\n #splinter option - open url\n #browser.visit(url)\n\n #Opens the url.\n browser.get(url)\n\n #splinter option - FULL IMAGE BUTTON\n #browser.click_link_by_id(\"full_image\")\n\n #Interact with the FULL IMAGE BUTTON\n browser.find_element_by_id(\"full_image\").click()\n\n time.sleep(4)\n\n #splinter option - save HTML \n #html = browser.html \n\n #save the html source.\n html = browser.page_source\n\n #Use bs4 to parse the html response.\n soup = bs(html, \"html.parser\")\n\n featured_image_url = \"https://www.jpl.nasa.gov/\" + soup.find_all('img', class_=\"fancybox-image\")[0]['src']\n\n marsInfo_dict['featured_image_url'] = featured_image_url\n \n except :\n print(f\"Problem at website {url}\")\n \n #Mars Weather ------------------------------------------------------------------------------------------------------------------------\n try:\n url = \"https://twitter.com/marswxreport?lang=en\"\n \n #splinter option - open url\n #browser.visit(url)\n\n #Open the url.\n browser.get(url)\n\n #Time to let the website load all the elements\n time.sleep(4)\n\n #splinter option - save HTML \n #html = browser.html \n\n #save the html source.\n html = browser.page_source\n\n #Use bs4 to parse the html response.\n soup = bs(html, \"html.parser\")\n\n mars_weather = soup.find_all('article', class_=\"css-1dbjc4n r-1loqt21 r-18u37iz r-1ny4l3l r-o7ynqc r-6416eg\")[0].text.strip().replace('Mars Weather@MarsWxReport·19hInSight ','')\n\n marsInfo_dict['mars_weather'] = mars_weather\n \n except :\n print(mars_weather)\n print(f\"Problem at website {url}\")\n\n # Mars 
Facts--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n try:\n url = 'http://space-facts.com/mars/'\n\n #Load url to pandas read html.\n tables = pd.read_html(url)\n\n #Tables\n marsFacts_df = tables[0]\n earthMars_df = tables[1]\n\n #Rename columns\n marsFacts_df.columns = ['Facts', 'Values']\n\n\n #Outpout\n html_outputFacts = marsFacts_df.to_html(index = False)\n html_outputFacts = html_outputFacts.replace('\\n', '')\n\n html_outputMarsEarth = earthMars_df.to_html(index = False)\n html_outputMarsEarth = html_outputMarsEarth.replace('\\n', '')\n\n marsInfo_dict['html_outputFacts'] = html_outputFacts\n marsInfo_dict['html_outputMarsEarth'] = html_outputMarsEarth\n\n except :\n print(f\"Problem at website {url}\")\n\n #hemisphereImages ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n try:\n temp_list = []\n\n url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n\n #splinter option - open url\n #browser.visit(url)\n\n #Opens the url.\n browser.get(url)\n\n time.sleep(4)\n\n #splinter option - save HTML \n #html = browser.html \n\n #save the html source.\n html = browser.page_source\n\n # close web browser\n browser.close()\n\n #Use bs4 to parse the html response.\n soup = bs(html, \"html.parser\")\n\n links = soup.find_all('div', class_=\"description\")\n\n for link in links:\n\n highDef_url = f\"https://astrogeology.usgs.gov{link.find('a')['href']}\"\n\n responseHighDef = requests.get(highDef_url)\n\n soupHighDef = bs(responseHighDef.text, 'html.parser')\n\n highDef_url = soupHighDef.find_all(\"div\", class_=\"downloads\")[0].find('a')['href']\n\n title = link.find('h3').text \n\n temp_list.append({\"title\" : title, \"img_url\" : highDef_url})\n\n marsInfo_dict['hemisphere_image_urls'] = temp_list\n\n except :\n print(f\"Problem at website {url}\")\n\n return marsInfo_dict"
] | [
[
"pandas.read_html"
]
] |
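Here the extracted API is `pandas.read_html`, which the scraper uses for the Mars facts table instead of BeautifulSoup. A minimal sketch, assuming the space-facts URL from the file is still reachable:

```python
# read_html fetches every <table> element on the page and returns a list
# of DataFrames; the scraper keeps the first one and flattens its HTML.
import pandas as pd

tables = pd.read_html("http://space-facts.com/mars/")
mars_facts_df = tables[0]
mars_facts_df.columns = ["Facts", "Values"]
html_output = mars_facts_df.to_html(index=False).replace("\n", "")
```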
Sethan/deeplearning-graphics | [
"ce164847a323d3f07cfe241f4bbed6029777c58d"
] | [
"ssd/modeling/backbone/basic.py"
] | [
"import torch\n\n\nclass BasicModel(torch.nn.Module):\n \"\"\"\n This is a basic backbone for SSD.\n The feature extractor outputs a list of 6 feature maps, with the sizes:\n [shape(-1, output_channels[0], 38, 38),\n shape(-1, output_channels[1], 19, 19),\n shape(-1, output_channels[2], 10, 10),\n shape(-1, output_channels[3], 5, 5),\n shape(-1, output_channels[3], 3, 3),\n shape(-1, output_channels[4], 1, 1)]\n where \"output_channels\" is the same as cfg.BACKBONE.OUT_CHANNELS\n \"\"\"\n def __init__(self, cfg):\n super().__init__()\n image_size = cfg.INPUT.IMAGE_SIZE\n output_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS\n self.output_channels = output_channels\n image_channels = cfg.MODEL.BACKBONE.INPUT_CHANNELS\n self.output_feature_size = cfg.MODEL.PRIORS.FEATURE_MAPS\n self.num_filters = [32,64]\n \n \n self.feature_extractor38 = torch.nn.Sequential(\n #part 1 38x38\n torch.nn.Conv2d(\n in_channels=image_channels,\n out_channels=self.num_filters[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[0]),\n torch.nn.MaxPool2d(2, stride=2),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.05),\n torch.nn.Conv2d(\n in_channels=self.num_filters[0],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n \n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.06),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.07),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.08),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.09),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.01),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.MaxPool2d(2, stride=2),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.11),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.12),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.13),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.num_filters[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.num_filters[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.14),\n torch.nn.Conv2d(\n in_channels=self.num_filters[1],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=2,\n padding=1\n )\n )\n \n self.feature_extractor19 = torch.nn.Sequential(\n \n #part 2 19x19\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.15),\n torch.nn.Conv2d(\n 
in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.16),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.17),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.18),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.19),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.2),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.21),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[0],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[0]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.22),\n torch.nn.Conv2d(\n in_channels=self.output_channels[0],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=2,\n padding=1\n ))\n \n self.feature_extractor9 = torch.nn.Sequential(\n \n #part 3 10x10\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.23),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.24),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.25),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.26),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.27),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.28),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.29),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[1],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n 
torch.nn.BatchNorm2d(self.output_channels[1]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.30),\n torch.nn.Conv2d(\n in_channels=self.output_channels[1],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=2,\n padding=1\n ))\n \n self.feature_extractor5 = torch.nn.Sequential(\n #part 4 5x5\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.31),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.32),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.33),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.34),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.35),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.36),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.37),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[2],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[2]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.38),\n torch.nn.Conv2d(\n in_channels=self.output_channels[2],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=2,\n padding=1\n ))\n \n self.feature_extractor3 = torch.nn.Sequential(\n \n #part 5 3x3\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.39),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.40),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.41),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.42),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.43),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.44),\n torch.nn.Conv2d(\n 
in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.45),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[3],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[3]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.46),\n torch.nn.Conv2d(\n in_channels=self.output_channels[3],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=2,\n padding=1\n ))\n \n self.feature_extractor1 = torch.nn.Sequential(\n \n #part 6 1x1\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.48),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.49),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.50),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.51),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.52),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.53),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.54),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[4],\n kernel_size=3,\n stride=1,\n padding=1\n ),\n torch.nn.BatchNorm2d(self.output_channels[4]),\n torch.nn.ELU(),\n torch.nn.Dropout2d(0.55),\n torch.nn.Conv2d(\n in_channels=self.output_channels[4],\n out_channels=self.output_channels[5],\n kernel_size=3,\n stride=1,\n padding=0\n ))\n def forward(self, x):\n \"\"\"\n The forward functiom should output features with shape:\n [shape(-1, output_channels[0], 38, 38),\n shape(-1, output_channels[1], 19, 19),\n shape(-1, output_channels[2], 10, 10),\n shape(-1, output_channels[3], 5, 5),\n shape(-1, output_channels[3], 3, 3),\n shape(-1, output_channels[4], 1, 1)]\n We have added assertion tests to check this, iteration through out_features,\n where out_features[0] should have the shape:\n shape(-1, output_channels[0], 38, 38),\n \"\"\"\n \n out_features = []\n out = self.feature_extractor38(x)\n out_features.append(out)\n out = self.feature_extractor19(out)\n out_features.append(out)\n out = self.feature_extractor9(out)\n out_features.append(out)\n out = self.feature_extractor5(out)\n out_features.append(out)\n out = self.feature_extractor3(out)\n out_features.append(out)\n out = self.feature_extractor1(out)\n out_features.append(out)\n feature_list = [38,19,10,5,3,1]\n for idx, feature in 
enumerate(out_features):\n expected_shape = (self.output_channels[idx], feature_list[idx], feature_list[idx])\n assert feature.shape[1:] == expected_shape, \\\n f\"Expected shape: {expected_shape}, got: {feature.shape[1:]} at output IDX: {idx}\"\n return tuple(out_features)\n\n"
] | [
[
"torch.nn.Dropout2d",
"torch.nn.ELU",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d"
]
] |
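The five extracted APIs here form the building block this backbone repeats dozens of times. A minimal sketch of one such stage, with illustrative channel counts rather than the cfg-driven ones:

```python
# One Conv2d -> BatchNorm2d -> MaxPool2d -> ELU -> Dropout2d stage,
# the pattern the feature extractors above chain many times over.
import torch

block = torch.nn.Sequential(
    torch.nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1),
    torch.nn.BatchNorm2d(32),
    torch.nn.MaxPool2d(2, stride=2),
    torch.nn.ELU(),
    torch.nn.Dropout2d(0.05),
)
out = block(torch.rand(1, 3, 300, 300))
print(out.shape)  # torch.Size([1, 32, 150, 150])
```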
lego0901/pytea | [
"8ede650def2e68f4610ba816451d8b9e28f09f76",
"8ede650def2e68f4610ba816451d8b9e28f09f76",
"8ede650def2e68f4610ba816451d8b9e28f09f76",
"8ede650def2e68f4610ba816451d8b9e28f09f76",
"8ede650def2e68f4610ba816451d8b9e28f09f76"
] | [
"packages/pytea/pytest/unit_tests/passes/pass_argmax_dim01.py",
"packages/pytea/pytest/benchmarks/transformers/examples/rag/test_distributed_retriever.py",
"packages/pytea/pytest/unit_tests/passes/pass_conv2d_full01.py",
"packages/pytea/pytest/unit_tests/fails/fail_flatten_start_dim01.py",
"packages/pytea/pytest/benchmarks/transformers/tests/test_modeling_bert.py"
] | [
"'''\npass_argmax_dim01.py\nCopyright (c) Seoul National University\nLicensed under the MIT license.\nAuthor: Woo Sung Song\n\ntorch.Tensor.argmax with dim parameter.\n! This is not available since maximum stack size exceeding error has been occured\n'''\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\na = torch.rand(2, 3)\n#m = a.argmax(dim=1)\n\n# shape assertion\n#m + torch.rand(2, 4, 5)",
"import json\nimport os\nimport shutil\nimport sys\nimport tempfile\nimport unittest\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nimport numpy as np\nfrom datasets import Dataset\n\nimport faiss\nfrom transformers.configuration_bart import BartConfig\nfrom transformers.configuration_dpr import DPRConfig\nfrom transformers.configuration_rag import RagConfig\nfrom transformers.file_utils import is_datasets_available, is_faiss_available, is_psutil_available, is_torch_available\nfrom transformers.retrieval_rag import CustomHFIndex\nfrom transformers.testing_utils import require_torch_non_multi_gpu_but_fix_me\nfrom transformers.tokenization_bart import BartTokenizer\nfrom transformers.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES\nfrom transformers.tokenization_dpr import DPRQuestionEncoderTokenizer\nfrom transformers.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES\n\n\nsys.path.append(os.path.join(os.getcwd())) # noqa: E402 # noqa: E402 # isort:skip\n\nfrom distributed_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip\n\n\ndef require_distributed_retrieval(test_case):\n \"\"\"\n Decorator marking a test that requires a set of dependencies necessary for pefrorm retrieval with\n :class:`~transformers.RagRetriever`.\n\n These tests are skipped when respective libraries are not installed.\n\n \"\"\"\n if not (is_torch_available() and is_datasets_available() and is_faiss_available() and is_psutil_available()):\n test_case = unittest.skip(\"test requires PyTorch, Datasets, Faiss, psutil\")(test_case)\n return test_case\n\n\n@require_distributed_retrieval\nclass RagRetrieverTest(TestCase):\n def setUp(self):\n self.tmpdirname = tempfile.mkdtemp()\n self.retrieval_vector_size = 8\n\n # DPR tok\n vocab_tokens = [\n \"[UNK]\",\n \"[CLS]\",\n \"[SEP]\",\n \"[PAD]\",\n \"[MASK]\",\n \"want\",\n \"##want\",\n \"##ed\",\n \"wa\",\n \"un\",\n \"runn\",\n \"##ing\",\n \",\",\n \"low\",\n \"lowest\",\n ]\n dpr_tokenizer_path = os.path.join(self.tmpdirname, \"dpr_tokenizer\")\n os.makedirs(dpr_tokenizer_path, exist_ok=True)\n self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES[\"vocab_file\"])\n with open(self.vocab_file, \"w\", encoding=\"utf-8\") as vocab_writer:\n vocab_writer.write(\"\".join([x + \"\\n\" for x in vocab_tokens]))\n\n # BART tok\n vocab = [\n \"l\",\n \"o\",\n \"w\",\n \"e\",\n \"r\",\n \"s\",\n \"t\",\n \"i\",\n \"d\",\n \"n\",\n \"\\u0120\",\n \"\\u0120l\",\n \"\\u0120n\",\n \"\\u0120lo\",\n \"\\u0120low\",\n \"er\",\n \"\\u0120lowest\",\n \"\\u0120newer\",\n \"\\u0120wider\",\n \"<unk>\",\n ]\n vocab_tokens = dict(zip(vocab, range(len(vocab))))\n merges = [\"#version: 0.2\", \"\\u0120 l\", \"\\u0120l o\", \"\\u0120lo w\", \"e r\", \"\"]\n self.special_tokens_map = {\"unk_token\": \"<unk>\"}\n\n bart_tokenizer_path = os.path.join(self.tmpdirname, \"bart_tokenizer\")\n os.makedirs(bart_tokenizer_path, exist_ok=True)\n self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES[\"vocab_file\"])\n self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES[\"merges_file\"])\n with open(self.vocab_file, \"w\", encoding=\"utf-8\") as fp:\n fp.write(json.dumps(vocab_tokens) + \"\\n\")\n with open(self.merges_file, \"w\", encoding=\"utf-8\") as fp:\n fp.write(\"\\n\".join(merges))\n\n def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:\n return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, \"dpr_tokenizer\"))\n\n def 
get_bart_tokenizer(self) -> BartTokenizer:\n return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, \"bart_tokenizer\"))\n\n def tearDown(self):\n shutil.rmtree(self.tmpdirname)\n\n def get_dummy_dataset(self):\n dataset = Dataset.from_dict(\n {\n \"id\": [\"0\", \"1\"],\n \"text\": [\"foo\", \"bar\"],\n \"title\": [\"Foo\", \"Bar\"],\n \"embeddings\": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],\n }\n )\n dataset.add_faiss_index(\"embeddings\", string_factory=\"Flat\", metric_type=faiss.METRIC_INNER_PRODUCT)\n return dataset\n\n def get_dummy_pytorch_distributed_retriever(\n self, init_retrieval: bool, port=12345\n ) -> RagPyTorchDistributedRetriever:\n dataset = self.get_dummy_dataset()\n config = RagConfig(\n retrieval_vector_size=self.retrieval_vector_size,\n question_encoder=DPRConfig().to_dict(),\n generator=BartConfig().to_dict(),\n )\n with patch(\"transformers.retrieval_rag.load_dataset\") as mock_load_dataset:\n mock_load_dataset.return_value = dataset\n retriever = RagPyTorchDistributedRetriever(\n config,\n question_encoder_tokenizer=self.get_dpr_tokenizer(),\n generator_tokenizer=self.get_bart_tokenizer(),\n )\n if init_retrieval:\n retriever.init_retrieval(port)\n return retriever\n\n def get_dummy_custom_hf_index_retriever(self, init_retrieval: bool, from_disk: bool, port=12345):\n dataset = self.get_dummy_dataset()\n config = RagConfig(\n retrieval_vector_size=self.retrieval_vector_size,\n question_encoder=DPRConfig().to_dict(),\n generator=BartConfig().to_dict(),\n index_name=\"custom\",\n )\n if from_disk:\n config.passages_path = os.path.join(self.tmpdirname, \"dataset\")\n config.index_path = os.path.join(self.tmpdirname, \"index.faiss\")\n dataset.get_index(\"embeddings\").save(os.path.join(self.tmpdirname, \"index.faiss\"))\n dataset.drop_index(\"embeddings\")\n dataset.save_to_disk(os.path.join(self.tmpdirname, \"dataset\"))\n del dataset\n retriever = RagPyTorchDistributedRetriever(\n config,\n question_encoder_tokenizer=self.get_dpr_tokenizer(),\n generator_tokenizer=self.get_bart_tokenizer(),\n )\n else:\n retriever = RagPyTorchDistributedRetriever(\n config,\n question_encoder_tokenizer=self.get_dpr_tokenizer(),\n generator_tokenizer=self.get_bart_tokenizer(),\n index=CustomHFIndex(config.retrieval_vector_size, dataset),\n )\n if init_retrieval:\n retriever.init_retrieval(port)\n return retriever\n\n @require_torch_non_multi_gpu_but_fix_me\n def test_pytorch_distributed_retriever_retrieve(self):\n n_docs = 1\n retriever = self.get_dummy_pytorch_distributed_retriever(init_retrieval=True)\n hidden_states = np.array(\n [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32\n )\n retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)\n self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))\n self.assertEqual(len(doc_dicts), 2)\n self.assertEqual(sorted(doc_dicts[0]), [\"embeddings\", \"id\", \"text\", \"title\"])\n self.assertEqual(len(doc_dicts[0][\"id\"]), n_docs)\n self.assertEqual(doc_dicts[0][\"id\"][0], \"1\") # max inner product is reached with second doc\n self.assertEqual(doc_dicts[1][\"id\"][0], \"0\") # max inner product is reached with first doc\n self.assertListEqual(doc_ids.tolist(), [[1], [0]])\n\n @require_torch_non_multi_gpu_but_fix_me\n def test_custom_hf_index_retriever_retrieve(self):\n n_docs = 1\n retriever = self.get_dummy_custom_hf_index_retriever(init_retrieval=True, from_disk=False)\n 
hidden_states = np.array(\n [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32\n )\n retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)\n self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))\n self.assertEqual(len(doc_dicts), 2)\n self.assertEqual(sorted(doc_dicts[0]), [\"embeddings\", \"id\", \"text\", \"title\"])\n self.assertEqual(len(doc_dicts[0][\"id\"]), n_docs)\n self.assertEqual(doc_dicts[0][\"id\"][0], \"1\") # max inner product is reached with second doc\n self.assertEqual(doc_dicts[1][\"id\"][0], \"0\") # max inner product is reached with first doc\n self.assertListEqual(doc_ids.tolist(), [[1], [0]])\n\n @require_torch_non_multi_gpu_but_fix_me\n def test_custom_pytorch_distributed_retriever_retrieve_from_disk(self):\n n_docs = 1\n retriever = self.get_dummy_custom_hf_index_retriever(init_retrieval=True, from_disk=True)\n hidden_states = np.array(\n [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32\n )\n retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)\n self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))\n self.assertEqual(len(doc_dicts), 2)\n self.assertEqual(sorted(doc_dicts[0]), [\"embeddings\", \"id\", \"text\", \"title\"])\n self.assertEqual(len(doc_dicts[0][\"id\"]), n_docs)\n self.assertEqual(doc_dicts[0][\"id\"][0], \"1\") # max inner product is reached with second doc\n self.assertEqual(doc_dicts[1][\"id\"][0], \"0\") # max inner product is reached with first doc\n self.assertListEqual(doc_ids.tolist(), [[1], [0]])\n",
"'''\npass_conv2d_full01.py\nCopyright (c) Seoul National University\nLicensed under the MIT license.\nAuthor: Woo Sung Song\n\nFull parameters in conv2d\n'''\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\na = torch.rand(10, 32, 28, 28)\nw = torch.rand(30, 32, 3, 4)\nb = torch.rand(30)\n\nx = F.conv2d(a, w, b, 3, 1, (1, 2))\n\n# shape assertion\nx + torch.rand(10, 30, 10, 8)\n",
"'''\nfail_flatten_start_dim01.py\nCopyright (c) Seoul National University\nLicensed under the MIT license.\nAuthor: Woo Sung Song\n\nOut-of-range start_dim for torch.flatten.\n'''\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\na = torch.rand(2, 3, 4, 5, 6, 7)\n\nb = torch.flatten(a, 6)",
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport unittest\n\nfrom transformers import is_torch_available\nfrom transformers.testing_utils import require_torch, slow, torch_device\n\nfrom .test_configuration_common import ConfigTester\nfrom .test_generation_utils import GenerationTesterMixin\nfrom .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask\n\n\nif is_torch_available():\n import torch\n\n from transformers import (\n MODEL_FOR_PRETRAINING_MAPPING,\n BertConfig,\n BertForMaskedLM,\n BertForMultipleChoice,\n BertForNextSentencePrediction,\n BertForPreTraining,\n BertForQuestionAnswering,\n BertForSequenceClassification,\n BertForTokenClassification,\n BertLMHeadModel,\n BertModel,\n )\n from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST\n\n\nclass BertModelTester:\n def __init__(\n self,\n parent,\n batch_size=13,\n seq_length=7,\n is_training=True,\n use_input_mask=True,\n use_token_type_ids=True,\n use_labels=True,\n vocab_size=99,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n intermediate_size=37,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n type_sequence_label_size=2,\n initializer_range=0.02,\n num_labels=3,\n num_choices=4,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_input_mask = use_input_mask\n self.use_token_type_ids = use_token_type_ids\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.type_sequence_label_size = type_sequence_label_size\n self.initializer_range = initializer_range\n self.num_labels = num_labels\n self.num_choices = num_choices\n self.scope = scope\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n\n token_type_ids = None\n if self.use_token_type_ids:\n token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)\n\n sequence_labels = None\n token_labels = None\n choice_labels = None\n if self.use_labels:\n sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)\n token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)\n choice_labels = ids_tensor([self.batch_size], self.num_choices)\n\n config = BertConfig(\n 
vocab_size=self.vocab_size,\n hidden_size=self.hidden_size,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n hidden_act=self.hidden_act,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n max_position_embeddings=self.max_position_embeddings,\n type_vocab_size=self.type_vocab_size,\n is_decoder=False,\n initializer_range=self.initializer_range,\n return_dict=True,\n )\n\n return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n\n def prepare_config_and_inputs_for_decoder(self):\n (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = self.prepare_config_and_inputs()\n\n config.is_decoder = True\n encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])\n encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)\n\n return (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n\n def create_and_check_model(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = BertModel(config=config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)\n result = model(input_ids, token_type_ids=token_type_ids)\n result = model(input_ids)\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))\n\n def create_and_check_model_as_decoder(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n ):\n config.add_cross_attention = True\n model = BertModel(config)\n model.to(torch_device)\n model.eval()\n result = model(\n input_ids,\n attention_mask=input_mask,\n token_type_ids=token_type_ids,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n )\n result = model(\n input_ids,\n attention_mask=input_mask,\n token_type_ids=token_type_ids,\n encoder_hidden_states=encoder_hidden_states,\n )\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))\n\n def create_and_check_for_causal_lm(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n ):\n model = BertLMHeadModel(config=config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_for_masked_lm(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = BertForMaskedLM(config=config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, 
token_type_ids=token_type_ids, labels=token_labels)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_model_for_causal_lm_as_decoder(\n self,\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n ):\n config.add_cross_attention = True\n model = BertLMHeadModel(config=config)\n model.to(torch_device)\n model.eval()\n result = model(\n input_ids,\n attention_mask=input_mask,\n token_type_ids=token_type_ids,\n labels=token_labels,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n )\n result = model(\n input_ids,\n attention_mask=input_mask,\n token_type_ids=token_type_ids,\n labels=token_labels,\n encoder_hidden_states=encoder_hidden_states,\n )\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n\n def create_and_check_for_next_sequence_prediction(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = BertForNextSentencePrediction(config=config)\n model.to(torch_device)\n model.eval()\n result = model(\n input_ids,\n attention_mask=input_mask,\n token_type_ids=token_type_ids,\n labels=sequence_labels,\n )\n self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))\n\n def create_and_check_for_pretraining(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = BertForPreTraining(config=config)\n model.to(torch_device)\n model.eval()\n result = model(\n input_ids,\n attention_mask=input_mask,\n token_type_ids=token_type_ids,\n labels=token_labels,\n next_sentence_label=sequence_labels,\n )\n self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))\n self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))\n\n def create_and_check_for_question_answering(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n model = BertForQuestionAnswering(config=config)\n model.to(torch_device)\n model.eval()\n result = model(\n input_ids,\n attention_mask=input_mask,\n token_type_ids=token_type_ids,\n start_positions=sequence_labels,\n end_positions=sequence_labels,\n )\n self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))\n self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))\n\n def create_and_check_for_sequence_classification(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n config.num_labels = self.num_labels\n model = BertForSequenceClassification(config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))\n\n def create_and_check_for_token_classification(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n config.num_labels = self.num_labels\n model = BertForTokenClassification(config=config)\n model.to(torch_device)\n model.eval()\n result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, 
self.num_labels))\n\n def create_and_check_for_multiple_choice(\n self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels\n ):\n config.num_choices = self.num_choices\n model = BertForMultipleChoice(config=config)\n model.to(torch_device)\n model.eval()\n multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()\n multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()\n multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()\n result = model(\n multiple_choice_inputs_ids,\n attention_mask=multiple_choice_input_mask,\n token_type_ids=multiple_choice_token_type_ids,\n labels=choice_labels,\n )\n self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n ) = config_and_inputs\n inputs_dict = {\"input_ids\": input_ids, \"token_type_ids\": token_type_ids, \"attention_mask\": input_mask}\n return config, inputs_dict\n\n\n@require_torch\nclass BertModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):\n\n all_model_classes = (\n (\n BertModel,\n BertLMHeadModel,\n BertForMaskedLM,\n BertForMultipleChoice,\n BertForNextSentencePrediction,\n BertForPreTraining,\n BertForQuestionAnswering,\n BertForSequenceClassification,\n BertForTokenClassification,\n )\n if is_torch_available()\n else ()\n )\n all_generative_model_classes = (BertLMHeadModel,) if is_torch_available() else ()\n\n # special case for ForPreTraining model\n def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):\n inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)\n\n if return_labels:\n if model_class in MODEL_FOR_PRETRAINING_MAPPING.values():\n inputs_dict[\"labels\"] = torch.zeros(\n (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device\n )\n inputs_dict[\"next_sentence_label\"] = torch.zeros(\n self.model_tester.batch_size, dtype=torch.long, device=torch_device\n )\n return inputs_dict\n\n def setUp(self):\n self.model_tester = BertModelTester(self)\n self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n def test_model_as_decoder(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()\n self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)\n\n def test_model_as_decoder_with_default_input_mask(self):\n # This regression test was failing with PyTorch < 1.3\n (\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n ) = self.model_tester.prepare_config_and_inputs_for_decoder()\n\n input_mask = None\n\n self.model_tester.create_and_check_model_as_decoder(\n config,\n input_ids,\n token_type_ids,\n input_mask,\n sequence_labels,\n token_labels,\n choice_labels,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n\n def test_for_causal_lm(self):\n config_and_inputs = 
self.model_tester.prepare_config_and_inputs_for_decoder()\n self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)\n\n def test_for_masked_lm(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)\n\n def test_for_causal_lm_decoder(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()\n self.model_tester.create_and_check_model_for_causal_lm_as_decoder(*config_and_inputs)\n\n def test_for_multiple_choice(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)\n\n def test_for_next_sequence_prediction(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)\n\n def test_for_pretraining(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_pretraining(*config_and_inputs)\n\n def test_for_question_answering(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_question_answering(*config_and_inputs)\n\n def test_for_sequence_classification(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)\n\n def test_for_token_classification(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_for_token_classification(*config_and_inputs)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = BertModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n"
] | [
[
"torch.rand"
],
[
"numpy.ones"
],
[
"torch.nn.functional.conv2d",
"torch.rand"
],
[
"torch.rand",
"torch.flatten"
],
[
"torch.zeros"
]
] |
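The pytea test files above all exercise tensor shape inference; `pass_conv2d_full01.py` is the densest example. A minimal sketch of its idiom, where adding a tensor of the expected shape doubles as a shape assertion:

```python
import torch
import torch.nn.functional as F

a = torch.rand(10, 32, 28, 28)   # input: N=10, C_in=32, 28x28
w = torch.rand(30, 32, 3, 4)     # weight: C_out=30, C_in=32, 3x4 kernel
b = torch.rand(30)
# stride=3, padding=1, dilation=(1, 2), matching the positional args above
x = F.conv2d(a, w, b, 3, 1, (1, 2))
x + torch.rand(10, 30, 10, 8)    # raises at runtime if the shape is wrong
```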
jgharris7/DocClass | [
"9ef62e655272cca8374187040eb3dd73f3f82b72",
"9ef62e655272cca8374187040eb3dd73f3f82b72"
] | [
"model/app/LearnTfidfCNB.py",
"model/app/learnmodel2.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 22 22:43:22 2021\r\n\r\n@author: jgharris\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 22 21:09:34 2021\r\n\r\n@author: jgharris\r\n\"\"\"\r\n\r\nroot='C:/Users/jgharris/DocClass/'\r\n\r\ndataFile='/data/shuffled-full-set-hashed.csv'\r\n\r\n\r\n\r\nimport statistics as stat\r\nimport pandas as pd\r\n \r\nfrom sklearn.model_selection import train_test_split\r\n \r\nfrom sklearn.metrics import accuracy_score\r\n \r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pickle\r\nfrom DocClfTfidfCNB import DocClfTfidfCNB\r\nfrom Documents import Documents\r\n\r\n\r\n#dataFile='/test/testshort.csv'\r\n\r\nmodelName=\"nbtfidfv0\"\r\nmaxlines=80000000\r\ntestsize=.3\r\nrandom_state=45\r\nMAXSTRINGLENGH=4000\r\nFIRSTSTRINGLENGTH=80\r\nconf_mat=[]\r\ndef main(): \r\n # Set up corpus for training \r\n corpus=Documents()\r\n corpus.readFromFile(root+dataFile,maxline=maxlines)\r\n ''' \r\n model1=DocClfComplNB(maxStringLength=MAXSTRINGLENGH, \\\r\n firstStringLength=FIRSTSTRINGLENGTH)\r\n '''\r\n model1=DocClfTfidfCNB(maxStringLength=MAXSTRINGLENGH, \\\r\n firstStringLength=FIRSTSTRINGLENGTH)\r\n print()\r\n # split into test and training sets\r\n xtrain,xtest,ytrain,ytest=\\\r\n train_test_split(corpus.words,corpus.y,test_size=testsize, \\\r\n random_state=random_state)\r\n ytrainpred=model1.fit(xtrain,ytrain)\r\n ytestpred=model1.predict(xtest)\r\n\r\n trainAccuracy=accuracy_score(ytrain,ytrainpred)\r\n testAccuracy=accuracy_score(ytest,ytestpred)\r\n controlAccuracy=accuracy_score(np.random.permutation(ytest),ytestpred)\r\n \r\n \r\n global conf_mat\r\n conf_mat =model1.confidence(ytest, ytestpred)\r\n print(model1.confidence)\r\n print()\r\n print( np.unique(ytestpred,return_counts=True))\r\n print()\r\n \r\n [print(\"%-20s\" % key +\" %5.3f\" % value) for key,value in model1.confidence.items()]\r\n for row in range(0,conf_mat.shape[0]):\r\n print( [\" %4d\" % conf_mat[row,col] for col in range(0,conf_mat.shape[1])])\r\n \r\n rowsum=conf_mat.sum(axis=0)\r\n colsum=conf_mat.sum(axis=1)\r\n labels=[]\r\n [labels.append(key) for key in model1.confidence.keys()]\r\n print(\"item rowsum colsum\")\r\n for ic in range(0,conf_mat.shape[0]):\r\n print(\"%-25s\" % labels[ic] + \" %5d\" % rowsum[ic]+ \" %5d\" % colsum[ic])\r\n \r\n print(\"\")\r\n print('train=%6.2f test=%6.2f control=%6.2f' % \r\n (trainAccuracy,testAccuracy,controlAccuracy))\r\n # compute accuracy given predicted value\r\n \r\n \r\n pickle.dump(model1,open(root+modelName+\".pckmdl\",\"wb\"))\r\n \r\n print(ytestpred[0])\r\n print(xtest[0][0:20])\r\n testfile=open(root+modelName+\"testdata.txt\",\"wt\")\r\n \r\n testfile.write(ytestpred[0])\r\n testfile.write(\"\\n\")\r\n testfile.write(xtest[0])\r\n testfile.write(\"\\n\")\r\n testfile.write(ytestpred[10])\r\n testfile.write(\"\\n\")\r\n testfile.write(xtest[10])\r\n testfile.write(\"\\n\")\r\n testfile.close()\r\n print( model1.message)\r\n \r\n \r\n \r\nif __name__=='__main__':\r\n main()\r\n ",
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 22 21:09:34 2021\r\n\r\n@author: jgharris\r\n\"\"\"\r\n\r\nroot='C:/Users/jgharris/DocClass/'\r\n\r\ndataFile='/data/shuffled-full-set-hashed.csv'\r\n\r\n\r\n\r\nimport statistics as stat\r\nimport pandas as pd\r\n \r\nfrom sklearn.model_selection import train_test_split\r\n \r\nfrom sklearn.metrics import accuracy_score\r\n \r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pickle\r\nfrom DocClfTfidfNb import DocClfTfidfNb\r\nfrom Documents import Documents\r\n\r\n\r\n#dataFile='/test/testshort.csv'\r\n\r\nmodelName=\"nbtfidfv0\"\r\nmaxlines=80000000\r\ntestsize=.3\r\nrandom_state=45\r\nMAXSTRINGLENGH=4000\r\nFIRSTSTRINGLENGTH=80\r\nconf_mat=[]\r\ndef main(): \r\n # Set up corpus for training \r\n corpus=Documents()\r\n corpus.readFromFile(root+dataFile,maxline=maxlines)\r\n ''' \r\n model1=DocClfComplNB(maxStringLength=MAXSTRINGLENGH, \\\r\n firstStringLength=FIRSTSTRINGLENGTH)\r\n '''\r\n model1=DocClfTfidfNb(maxStringLength=MAXSTRINGLENGH, \\\r\n firstStringLength=FIRSTSTRINGLENGTH)\r\n print()\r\n # split into test and training sets\r\n xtrain,xtest,ytrain,ytest=\\\r\n train_test_split(corpus.words,corpus.y,test_size=testsize, \\\r\n random_state=random_state)\r\n ytrainpred=model1.fit(xtrain,ytrain)\r\n ytestpred=model1.predict(xtest)\r\n\r\n trainAccuracy=accuracy_score(ytrain,ytrainpred)\r\n testAccuracy=accuracy_score(ytest,ytestpred)\r\n controlAccuracy=accuracy_score(np.random.permutation(ytest),ytestpred)\r\n \r\n \r\n global conf_mat\r\n conf_mat =model1.confidence(ytest, ytestpred)\r\n print(model1.confidence)\r\n print()\r\n print( np.unique(ytestpred,return_counts=True))\r\n print()\r\n \r\n [print(\"%-20s\" % key +\" %5.3f\" % value) for key,value in model1.confidence.items()]\r\n for row in range(0,conf_mat.shape[0]):\r\n print( [\" %4d\" % conf_mat[row,col] for col in range(0,conf_mat.shape[1])])\r\n \r\n rowsum=conf_mat.sum(axis=0)\r\n colsum=conf_mat.sum(axis=1)\r\n labels=[]\r\n [labels.append(key) for key in model1.confidence.keys()]\r\n print(\"item rowsum colsum\")\r\n for ic in range(0,conf_mat.shape[0]):\r\n print(\"%-25s\" % labels[ic] + \" %5d\" % rowsum[ic]+ \" %5d\" % colsum[ic])\r\n \r\n print(\"\")\r\n print('train=%6.2f test=%6.2f control=%6.2f' % \r\n (trainAccuracy,testAccuracy,controlAccuracy))\r\n # compute accuracy given predicted value\r\n \r\n \r\n pickle.dump(model1,open(root+modelName+\".pckmdl\",\"wb\"))\r\n \r\n print(ytestpred[0])\r\n print(xtest[0][0:20])\r\n testfile=open(root+modelName+\"testdata.txt\",\"wt\")\r\n \r\n testfile.write(ytestpred[0])\r\n testfile.write(\"\\n\")\r\n testfile.write(xtest[0])\r\n testfile.write(\"\\n\")\r\n testfile.write(ytestpred[10])\r\n testfile.write(\"\\n\")\r\n testfile.write(xtest[10])\r\n testfile.write(\"\\n\")\r\n testfile.close()\r\n print( model1.message)\r\n \r\n \r\n \r\nif __name__=='__main__':\r\n main()\r\n "
] | [
[
"numpy.unique",
"numpy.random.permutation",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score"
],
[
"numpy.unique",
"numpy.random.permutation",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score"
]
] |
dampierch/herv | [
"9f1ce0e676977b6c8d25fdf446c0807826b80bea"
] | [
"scripts/gdc_req_legacy.py"
] | [
"'''\nthis script queries the gdc legacy archive via the search and retrieve api and\nreturns msi_status object (from files endpoint on legacy)\n-- get uuids of xml files with the msi annotations from legacy server\n-- download each xml file\n-- parse xml files to extract msi annotations for each subject\n\nscript should be called from within gdc_ann_make, which itself should be called\nas part of snakemake pipeline\n-- usage: snakemake setup_tcga\n'''\n\n\nimport io\nimport json\nimport os\nimport pandas as pd\nimport requests\nimport re\nimport subprocess\nimport glob\nimport xml.etree.ElementTree as ET\n\n\nmodname = 'gdc_req_legacy'\n\n\ndef set_filters():\n '''\n set filters for gdc legacy files endpoint search\n -- json format\n -- for files.data_type, values for MSI status are 'Auxiliary test' and\n 'Microsatellite instability'\n -- here use 'Auxiliary test' per TCGAbiolinks examples\n '''\n filters = {\n 'op':'and',\n 'content':[\n {'op':'or',\n 'content':[\n {'op':'in',\n 'content':{\n 'field':'cases.project.project_id',\n 'value':'TCGA-COAD'\n }\n },\n {'op':'in',\n 'content':{\n 'field':'cases.project.project_id',\n 'value':'TCGA-READ'\n }\n }\n ]\n },\n {'op':'and',\n 'content':[\n {'op':'in',\n 'content':{\n 'field':'files.data_category',\n 'value':'Other'\n }\n },\n {'op':'in',\n 'content':{\n 'field':'files.data_type',\n 'value':'Auxiliary test'\n }\n },\n {'op':'in',\n 'content':{\n 'field':'files.access',\n 'value':'open'\n }\n }\n ]\n }\n ]\n }\n filters = json.dumps(filters)\n return filters\n\n\ndef set_fields():\n '''\n set fields for extraction from endpoint\n '''\n fields = [\n 'file_name',\n 'file_id',\n 'md5sum',\n 'file_size',\n 'state'\n ]\n fields = ','.join(fields)\n return fields\n\n\ndef set_params(filters,fields):\n '''\n set parameters for https get request to endpoint\n -- set size parameter empirically to a level greater than number of target\n cases to get all records at once\n '''\n params = {\n 'filters': filters,\n 'fields': fields,\n 'format': 'TSV',\n 'size': '1500'\n }\n return params\n\n\ndef get_results(endpoint,params):\n '''\n given an endpoint and parameters, execute https GET request for xml file_id\n entities and build results dataframe with msi results\n '''\n response = requests.get(endpoint, params=params)\n object = io.StringIO(response.content.decode('utf-8'))\n results = pd.read_table(object)\n return results\n\n\ndef download_xml_uuid(files_res,dest):\n '''\n download xml files one at a time by uuid\n '''\n file_count = 0\n for uuid in files_res.id:\n cmd = ' '.join(['gdc-client download',uuid,'-d',dest])\n subprocess.call(cmd, shell=True)\n print(' '.join([uuid,'downloaded']))\n file_count = file_count + 1\n print(' '.join([str(file_count),'files downloaded']))\n\n\ndef download_xml_manifest(files_res,dest):\n '''\n -- create manifest object\n -- write manifest to file\n -- use manifest for bulk download\n '''\n select = ['file_id', 'file_name', 'md5sum', 'file_size', 'state']\n manifest = files_res[select]\n manifest.columns = ['id', 'filename', 'md5', 'size', 'state']\n manifest = manifest.sort_values(by=['id'])\n out_file = dest + 'manifest.tsv'\n manifest.to_csv(out_file, sep='\\t', index=False)\n cmd = ' '.join(['gdc-client download','-m',out_file,'-d',dest])\n subprocess.call(cmd, shell=True)\n print('manifest downloaded')\n\n\ndef parse_xml(files_res,dest):\n '''\n parse xml files to extract msi status\n '''\n msi_dict = {}\n msi_dict['subject_id'] = []\n msi_dict['msi_status'] = []\n tag1 = 
'mononucleotide_and_dinucleotide_marker_panel_analysis_status'\n tag2 = 'mononucleotide_marker_panel_analysis_status'\n file_count = 0\n for uuid in files_res.id:\n pattern = dest + uuid + '/*.xml'\n fn = glob.glob(pattern)[0]\n tree = ET.parse(fn)\n for elem in tree.getiterator():\n if 'bcr_patient_barcode' in elem.tag:\n subject_id = elem.text\n if tag1 in elem.tag and elem.text != None:\n msi_status = elem.text\n elif tag2 in elem.tag and elem.text != None:\n msi_status = elem.text\n msi_dict['subject_id'].append(subject_id)\n msi_dict['msi_status'].append(msi_status)\n file_count = file_count + 1\n print(' '.join([str(file_count),'files parsed']))\n msi_res = pd.DataFrame.from_dict(msi_dict)\n return msi_res\n\n\ndef check_outpath(out_path):\n '''\n check for presence or absence of out_path and make directory if absent\n '''\n l = out_path.strip('/').split('/')\n d = ''\n for e in l:\n d = d + '/' + e\n if os.path.exists(d):\n print(d,'present')\n else:\n print(d,'absent')\n print('making',d,'now')\n os.mkdir(d)\n\n\ndef main():\n endpoint = 'https://api.gdc.cancer.gov/legacy/files/'\n filters = set_filters()\n fields = set_fields()\n params = set_params(filters, fields)\n files_res = get_results(endpoint, params)\n dest = os.environ['ann_dir'] + 'tcga/msi/'\n check_outpath(dest)\n download_xml_manifest(files_res, dest)\n msi_res = parse_xml(files_res, dest)\n return msi_res\n\n\nif __name__ == '__main__':\n print('This script is not meant to be run as main. See usage statement:')\n print('usage: snakemake setup_tcga')\nelse:\n msi_res = main()\n"
] | [
[
"pandas.read_table",
"pandas.DataFrame.from_dict"
]
] |
spacegoing/t2t_caps | [
"ded708b738fa8966eb7544708c4a785479da4c3c",
"ded708b738fa8966eb7544708c4a785479da4c3c",
"ded708b738fa8966eb7544708c4a785479da4c3c",
"ded708b738fa8966eb7544708c4a785479da4c3c",
"ded708b738fa8966eb7544708c4a785479da4c3c",
"ded708b738fa8966eb7544708c4a785479da4c3c"
] | [
"tensor2tensor/layers/discretization.py",
"tensor2tensor/data_generators/text_problems.py",
"tensor2tensor/data_generators/algorithmic_test.py",
"tensor2tensor/models/neural_gpu_test.py",
"tensor2tensor/data_generators/celeba.py",
"tensor2tensor/data_generators/translate_test.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Discretization bottlenecks used to train discrete latent variables.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom functools import partial\n# Dependency imports\nfrom tensor2tensor.layers import common_layers\nimport tensorflow as tf\nfrom tensorflow.python.training import moving_averages\n\n\ndef project_hidden(x, projection_tensors, hidden_size, num_blocks):\n \"\"\"Project encoder hidden state into block_dim using projection tensors.\n\n Args:\n x: Encoder hidden state of shape [-1, hidden_size].\n projection_tensors: Projection tensors used to project the hidden state.\n hidden_size: Dimension of the latent space.\n num_blocks: Number of blocks in DVQ.\n\n Returns:\n Projected states of shape [-1, num_blocks, block_dim].\n \"\"\"\n x = tf.reshape(x, shape=[1, -1, hidden_size])\n x_tiled = tf.reshape(\n tf.tile(x, multiples=[num_blocks, 1, 1]),\n shape=[num_blocks, -1, hidden_size])\n x_projected = tf.matmul(x_tiled, projection_tensors)\n x_projected = tf.transpose(x_projected, perm=[1, 0, 2])\n return x_projected\n\n\ndef slice_hidden(x, hidden_size, num_blocks):\n \"\"\"Slice encoder hidden state into block_dim.\n\n Args:\n x: Encoder hidden state of shape [-1, hidden_size].\n hidden_size: Dimension of the latent space.\n num_blocks: Number of blocks in DVQ.\n\n Returns:\n Sliced states of shape [-1, num_blocks, block_dim].\n \"\"\"\n block_dim = int(hidden_size // num_blocks)\n x_sliced = tf.reshape(x, shape=[-1, num_blocks, block_dim])\n return x_sliced\n\n\ndef nearest_neighbor(x,\n means,\n block_v_size,\n random_top_k=1,\n soft_em=False,\n num_samples=1):\n \"\"\"Find the nearest element in means to elements in x.\n\n Args:\n x: Batch of encoder continuous latent states sliced/projected into shape\n [-1, num_blocks, block_dim].\n means: Embedding table of shpae [num_blocks, block_v_size, block_dim].\n block_v_size: Number of table entries per block.\n random_top_k: Noisy top-k if this is bigger than 1 (Default: 1).\n soft_em: If True then use soft EM rather than hard EM (Default: False).\n num_samples: Number of samples to take in soft EM (Default: 1).\n\n Returns:\n Tensor with nearest element in mean encoded in one-hot notation\n and distances.\n \"\"\"\n x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True)\n means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True)\n scalar_prod = tf.matmul(\n tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1]))\n scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2])\n dist = x_norm_sq + tf.transpose(\n means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod\n\n # computing cluster probabilities\n if soft_em:\n num_blocks = common_layers.shape_list(dist)[1]\n nearest_idx = tf.stack(\n [\n tf.multinomial(-dist[:, i, :], num_samples=num_samples)\n for i in range(num_blocks)\n ],\n axis=1)\n nearest_hot = 
tf.one_hot(nearest_idx, depth=block_v_size)\n nearest_hot = tf.reduce_mean(nearest_hot, axis=-2)\n else:\n if random_top_k > 1:\n _, top_k_idx = tf.nn.top_k(-dist, k=random_top_k)\n nearest_idx = tf.gather(\n top_k_idx,\n tf.random_uniform(\n [1], minval=0, maxval=random_top_k - 1, dtype=tf.int32),\n axis=-1)\n else:\n nearest_idx = tf.argmax(-dist, axis=-1)\n nearest_hot = tf.one_hot(nearest_idx, block_v_size)\n return nearest_hot\n\n\ndef embedding_lookup(x,\n means,\n num_blocks,\n block_v_size,\n random_top_k=1,\n soft_em=False,\n num_samples=1):\n \"\"\"Compute nearest neighbors and loss for training the embeddings via DVQ.\n\n Args:\n x: Batch of encoder continuous latent states sliced/projected into shape\n [-1, num_blocks, block_dim].\n means: Embedding table of shape [num_blocks, block_v_size, block_dim].\n num_blocks: Number of blocks in DVQ.\n block_v_size: Number of table entries per block.\n random_top_k: Noisy top-k if this is bigger than 1 (Default: 1).\n soft_em: If True then use soft EM rather than hard EM (Default: False).\n num_samples: Number of samples to use for soft EM (Default: 1).\n\n Returns:\n The nearest neighbor in one hot form, the nearest neighbor itself, the\n commitment loss, embedding training loss and distances.\n \"\"\"\n x_means_hot = nearest_neighbor(\n x,\n means,\n block_v_size,\n random_top_k,\n soft_em=soft_em,\n num_samples=num_samples)\n x_means_hot_flat = tf.reshape(x_means_hot, [-1, num_blocks, block_v_size])\n x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means)\n x_means = tf.transpose(x_means, [1, 0, 2])\n q_loss = tf.reduce_mean(tf.square((tf.stop_gradient(x) - x_means)))\n e_loss = tf.reduce_mean(tf.square(x - tf.stop_gradient(x_means)))\n return x_means_hot, x_means, q_loss, e_loss\n\n\ndef bit_to_int(x_bit, num_bits, base=2):\n \"\"\"Turn x_bit representing numbers bitwise (lower-endian) to int tensor.\n\n Args:\n x_bit: Tensor containing numbers in a particular base to be converted to\n int.\n num_bits: Number of bits in the representation.\n base: Base of the representation.\n\n Returns:\n Integer representation of this number.\n \"\"\"\n x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits])))\n x_labels = []\n for i in range(num_bits):\n x_labels.append(x_l[:, i] * tf.to_int32(base)**tf.to_int32(i))\n res = sum(x_labels)\n return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1]))\n\n\ndef int_to_bit(x_int, num_bits, base=2):\n \"\"\"Turn x_int representing numbers into a bitwise (lower-endian) tensor.\n\n Args:\n x_int: Tensor containing integer to be converted into base notation.\n num_bits: Number of bits in the representation.\n base: Base of the representation.\n\n Returns:\n Corresponding number expressed in base.\n \"\"\"\n x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1))\n x_labels = []\n for i in range(num_bits):\n x_labels.append(\n tf.floormod(\n tf.floordiv(tf.to_int32(x_l),\n tf.to_int32(base)**i), tf.to_int32(base)))\n res = tf.concat(x_labels, axis=-1)\n return tf.to_float(res)\n\n\ndef int_to_bit_embed(x_int, num_bits, embedding_size, base=2):\n \"\"\"Turn x_int into a bitwise (lower-endian) tensor and embed densely.\"\"\"\n shape = common_layers.shape_list(x_int)\n inputs = int_to_bit(x_int, num_bits, base=base)\n inputs = tf.reshape(inputs, shape[:-1] + [shape[-1] * 8])\n inputs = 2.0 * tf.to_float(inputs) - 1.0 # Move from 0/1 to -1/1.\n return tf.layers.dense(inputs, embedding_size, name=\"int_to_bit_embed\")\n\n\ndef embed(x,\n hidden_size,\n z_size,\n 
filter_size,\n name,\n bottleneck_kind=\"dvq\",\n soft_em=False,\n num_blocks=2,\n num_residuals=1,\n block_v_size=None,\n means=None):\n \"\"\"Embedding function that takes discrete latent and returns embedding.\n\n Args:\n x: Input to the discretization bottleneck.\n hidden_size: Dimension of the latent state.\n z_size: Number of bits used to produce discrete code; discrete codes range\n from 1 to 2**z_size.\n filter_size: Filter size to be used for the embedding function.\n name: Name for the bottleneck scope.\n bottleneck_kind: Kind of discretization bottleneck to use; one of dvq,\n semhash, gumbel-softmax (Default: dvq).\n soft_em: If True then it uses a multi-sample version of EM (Default: False).\n num_blocks: Number of blocks in DVQ (Default: 2).\n num_residuals: Number of residuals (Default: 1).\n block_v_size: Number of embedding entries per block (Default: None).\n means: The embedding table for dvq (Default: None).\n\n Returns:\n Continuous embedding to be passed on to the decoder.\n\n Raises:\n ValueError: For unknown or missing arguments.\n \"\"\"\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n if bottleneck_kind == \"semhash\":\n c = int_to_bit(x, z_size)\n h1a = tf.layers.dense(c, filter_size, name=\"vch1a\")\n h1b = tf.layers.dense(1.0 - c, filter_size, name=\"vch1b\")\n h1 = h1a + h1b\n elif bottleneck_kind == \"gumbel-softmax\":\n hot = tf.one_hot(x, 2**z_size)\n h1 = tf.layers.dense(hot, hidden_size, name=\"dae_dense\")\n elif bottleneck_kind == \"dvq\":\n if block_v_size is None:\n raise ValueError(\"Bottleneck kind is dvq but block_v_size is None.\")\n\n if soft_em:\n assert num_residuals == 1\n x_hot_flat = tf.reshape(x, shape=[-1, num_blocks, block_v_size])\n h1 = tf.matmul(tf.transpose(x_hot_flat, perm=[1, 0, 2]), means[0])\n h1 = tf.transpose(h1, perm=[1, 0, 2])\n new_shape = common_layers.shape_list(x)\n new_shape[-1] = hidden_size\n h1 = tf.reshape(h1, shape=new_shape)\n else:\n shape_x = common_layers.shape_list(x)\n x_flat = tf.reshape(x, [-1, 1])\n c = int_to_bit(x_flat, num_bits=z_size, base=2)\n shape = common_layers.shape_list(c)\n new_shape = shape\n new_shape[-1] = num_residuals\n new_shape.append(num_blocks)\n new_shape.append(int(z_size / (num_residuals * num_blocks)))\n c = tf.to_int32(tf.reshape(c, shape=new_shape))\n h1_shape = shape_x\n h1_shape.append(hidden_size)\n h1 = tf.zeros(dtype=tf.float32, shape=h1_shape)\n for i in range(num_residuals):\n c_residual = bit_to_int(\n c[:, :, i, :, :],\n num_bits=int(z_size / (num_residuals * num_blocks)),\n base=2)\n c_hot = tf.one_hot(c_residual, depth=block_v_size, axis=-1)\n c_hot_flat = tf.reshape(c_hot, shape=[-1, num_blocks, block_v_size])\n h1_residual = tf.matmul(\n tf.transpose(c_hot_flat, perm=[1, 0, 2]), means[i])\n h1_residual = tf.transpose(h1_residual, perm=[1, 0, 2])\n h1_residual = tf.reshape(h1_residual, shape=h1_shape)\n h1 += h1_residual\n elif bottleneck_kind == \"rounding\":\n h1 = x\n else:\n raise ValueError(\"Unknown bottleneck kind.\")\n\n h2 = tf.layers.dense(tf.nn.relu(h1), filter_size, name=\"vch2\")\n return tf.layers.dense(tf.nn.relu(h2), hidden_size, name=\"vcfin\")\n\n\ndef vae(x, name, z_size):\n \"\"\"Simple variational autoencoder without discretization.\n\n Args:\n x: Input to the discretization bottleneck.\n name: Name for the bottleneck scope.\n z_size: Number of bits used to produce discrete code; discrete codes range\n from 1 to 2**z_size.\n\n Returns:\n Embedding function, latent, loss, mu and log_sigma.\n \"\"\"\n with tf.variable_scope(name):\n mu = 
tf.layers.dense(x, z_size, name=\"mu\")\n log_sigma = tf.layers.dense(x, z_size, name=\"log_sigma\")\n shape = common_layers.shape_list(x)\n epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])\n z = mu + tf.exp(log_sigma / 2) * epsilon\n kl = 0.5 * tf.reduce_mean(\n tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)\n free_bits = z_size // 4\n kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))\n return z, kl_loss, mu, log_sigma\n\n\ndef top_k_softmax(x, k):\n \"\"\"Calculate softmax(x), select top-k and rescale to sum to 1.\n\n Args:\n x: Input to softmax over.\n k: Number of top-k to select.\n\n Returns:\n softmax(x) and maximum item.\n \"\"\"\n x = tf.nn.softmax(x)\n top_x, _ = tf.nn.top_k(x, k=k + 1)\n min_top = tf.reduce_min(top_x, axis=-1, keep_dims=True)\n x = tf.nn.relu((x - min_top) + 1e-12)\n x /= tf.reduce_sum(x, axis=-1, keep_dims=True)\n return x, tf.reduce_max(top_x, axis=-1)\n\n\ndef gumbel_sample(shape):\n \"\"\"Sample from the Gumbel distribution, protect from overflows.\n\n Args:\n shape: Shape of Gumbel samples.\n\n Returns:\n Noise drawn from Gumbel distribution.\n \"\"\"\n uniform_samples = tf.random_uniform(shape, minval=0.00001, maxval=0.99998)\n return -tf.log(-tf.log(uniform_samples))\n\n\ndef gumbel_softmax(x,\n name,\n z_size,\n mode,\n softmax_k=0,\n kl_warmup_steps=150000,\n summary=True):\n \"\"\"Gumbel softmax discretization bottleneck.\n\n Args:\n x: Input to the discretization bottleneck.\n name: Name for the bottleneck scope.\n z_size: Number of bits used to produce discrete code; discrete codes range\n from 1 to 2**z_size.\n mode: Mode represents whether we are training or testing for bottlenecks\n that differ in behavior (Default: None).\n softmax_k: If > 1 then do top-k softmax (Default: 0).\n kl_warmup_steps: Number of steps for kl warmup (Default: 150000).\n summary: If True, then write summaries (Default: True).\n\n Returns:\n Embedding function, discrete code and loss.\n \"\"\"\n with tf.variable_scope(name):\n m = tf.layers.dense(x, 2**z_size, name=\"mask\")\n if softmax_k > 0:\n m, kl = top_k_softmax(m, softmax_k)\n return m, m, 1.0 - tf.reduce_mean(kl)\n logsm = tf.nn.log_softmax(m)\n\n # Gumbel-softmax sample.\n gumbel_samples = gumbel_sample(common_layers.shape_list(m))\n steps = kl_warmup_steps\n gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5\n temperature = 1.2 - common_layers.inverse_lin_decay(steps)\n\n # 10% of the time keep reasonably high temperature to keep learning.\n temperature = tf.cond(\n tf.less(tf.random_uniform([]), 0.9), lambda: temperature,\n lambda: tf.random_uniform([], minval=0.5, maxval=1.0))\n s = tf.nn.softmax((logsm + gumbel_samples) / temperature)\n m = tf.nn.softmax(m)\n kl = -tf.reduce_max(logsm, axis=-1)\n\n if summary:\n tf.summary.histogram(\"max-log\", tf.reshape(kl, [-1]))\n\n # Calculate the argmax and construct hot vectors.\n maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1])\n maxvhot = tf.stop_gradient(tf.one_hot(maxvec, 2**z_size))\n\n # Add losses that prevent too few being used.\n distrib = tf.reshape(logsm, [-1, 2**z_size]) * maxvhot\n d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True)\n d_variance = tf.reduce_mean(tf.square(distrib - d_mean), axis=[0])\n d_dev = -tf.reduce_mean(d_variance)\n ret = s\n\n if mode != tf.contrib.learn.ModeKeys.TRAIN:\n ret = tf.reshape(maxvhot, common_layers.shape_list(s)) # Just hot @eval.\n return m, ret, d_dev * 5.0 + tf.reduce_mean(kl) * 0.002\n\n\ndef discrete_bottleneck(x,\n hidden_size,\n z_size,\n filter_size,\n 
name,\n mode=None,\n startup_steps=50000,\n bottleneck_kind=\"dvq\",\n num_blocks=2,\n num_residuals=1,\n reshape_method=\"slice\",\n projection_tensors=None,\n means=None,\n beta=0.25,\n noise_dev=1.,\n decay=0.999,\n discrete_mix=0.5,\n random_top_k=1,\n soft_em=False,\n num_samples=1,\n epsilon=1e-5,\n softmax_k=0,\n kl_warmup_steps=150000,\n ema=True,\n ema_count=None,\n ema_means=None,\n summary=True):\n \"\"\"Discretization bottleneck for latent variables.\n\n Args:\n x: Input to the discretization bottleneck.\n hidden_size: Dimension of the latent state.\n z_size: Number of bits used to produce discrete code; discrete codes range\n from 1 to 2**z_size.\n filter_size: Filter size to be used for the embedding function.\n name: Name for the bottleneck scope.\n mode: Mode represents whether we are training or testing for bottlenecks\n that differ in behavior (Default: None).\n startup_steps: Number of steps after which latent predictor is trained\n (Default: 50000).\n bottleneck_kind: Kind of discretization bottleneck to use; one of dvq,\n semhash, gumbel-softmax (Default: dvq).\n num_blocks: Number of blocks to use for decomposed vector\n quantization (Default: 2).\n num_residuals: Number of residual units used to compute nearest\n neighbors (Default: 1).\n reshape_method: Method to reshape for DVQ (Default: slice).\n projection_tensors: If the reshape method is project, then these are the\n tensors used to project (Default: None).\n means: The embedding table for dvq (Default: None).\n beta: Beta factor for the DVQ loss (Default: 0.25).\n noise_dev: Stddev for noise added for semhash (Default: 0).\n decay: Decay factor for the exponential moving average (Default: 0.999).\n discrete_mix: Factor for mixing discrete and non-discrete input for semhash\n (Default: 0.5).\n random_top_k: Noisy top-k for DVQ (Default: 1).\n soft_em: If True then use soft EM rather than hard EM (Default: False).\n num_samples: Number of samples for soft EM (Default: 1).\n epsilon: Epsilon parameter for DVQ (Default: 1e-5).\n softmax_k: If > 1 then do top-k softmax (Default: 0).\n kl_warmup_steps: Number of steps for kl warmup (Default: 150000).\n ema: If True update embeddings using exponential moving averages (Default:\n True).\n ema_count: Table of counts for each embedding corresponding to how many\n examples in a batch it was the closest to (Default: None).\n ema_means: Exponentially averaged version of the embeddings (Default: None).\n summary: If True, then write summaries (Default: True).\n\n Returns:\n Embedding to pass to the decoder, discrete latent, loss, and the embedding\n function.\n\n Raises:\n ValueError: If projection_tensors is None for reshape_method project, or\n ema_count or ema_means is None if we are using ema, or unknown args.\n \"\"\"\n block_v_size = None\n if bottleneck_kind == \"dvq\":\n # Define the dvq parameters\n assert means is not None\n\n # Check block dimensions add up\n if hidden_size % num_blocks != 0:\n raise ValueError(\"num_blocks does not divide hidden size\")\n\n if z_size % num_residuals != 0:\n raise ValueError(\"num_residuals does not divide embedding table size\")\n\n z_size_per_residual = int(z_size / num_residuals)\n\n if z_size_per_residual % num_blocks != 0:\n raise ValueError(\"num_blocks does not divide embedding table size\")\n\n block_v_size = 2**(z_size_per_residual / num_blocks)\n block_v_size = int(block_v_size)\n\n # Set the reshape method corresponding to projections or slices\n if reshape_method == \"slice\":\n reshape_fn = partial(\n slice_hidden, 
hidden_size=hidden_size, num_blocks=num_blocks)\n elif reshape_method == \"project\":\n if projection_tensors is None:\n raise ValueError(\n \"Projection tensors is None for reshape_method project\")\n reshape_fn = partial(\n project_hidden,\n projection_tensors=projection_tensors,\n hidden_size=hidden_size,\n num_blocks=num_blocks)\n else:\n raise ValueError(\"Unknown reshape_method\")\n\n # Check if the ema settings make sense\n if ema:\n if ema_count is None:\n raise ValueError(\"ema_count is None but ema is True\")\n if ema_means is None:\n raise ValueError(\"ema_means is None but ema is True\")\n\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE):\n l = tf.constant(0.0)\n if bottleneck_kind == \"dense\":\n c = tf.layers.dense(x, z_size, name=\"vcc\")\n h1 = tf.layers.dense(c, filter_size, name=\"vch1\")\n elif bottleneck_kind == \"vae\":\n c, l, _, _ = vae(x, \"vae\", z_size)\n h1 = tf.layers.dense(c, filter_size, name=\"vch1\")\n elif bottleneck_kind == \"semhash\":\n c = tf.layers.dense(x, z_size, name=\"vcc\")\n y_clean = common_layers.saturating_sigmoid(c)\n if summary:\n tf.summary.histogram(\"y_clean\", tf.reshape(y_clean, [-1]))\n if noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:\n noise = tf.truncated_normal(\n common_layers.shape_list(c), mean=0.0, stddev=noise_dev)\n y = common_layers.saturating_sigmoid(c + noise)\n else:\n y = y_clean\n d = tf.to_float(tf.less(0.5, y))\n y_discrete = tf.stop_gradient(d) + y - tf.stop_gradient(y)\n pd = common_layers.inverse_exp_decay(startup_steps * 2)\n pd *= discrete_mix\n pd = pd if mode == tf.estimator.ModeKeys.TRAIN else 1.0\n c = tf.where(\n tf.less(tf.random_uniform([common_layers.shape_list(y)[0]]), pd),\n y_discrete, y)\n h1a = tf.layers.dense(c, filter_size, name=\"vch1a\")\n h1b = tf.layers.dense(1.0 - c, filter_size, name=\"vch1b\")\n h1 = h1a + h1b\n dx = tf.to_int32(tf.stop_gradient(d))\n c = bit_to_int(dx, z_size)\n elif bottleneck_kind == \"gumbel-softmax\":\n _, hot, l = gumbel_softmax(x, name, z_size, mode, softmax_k,\n kl_warmup_steps, summary)\n c = tf.argmax(hot, axis=-1)\n h1 = tf.layers.dense(hot, hidden_size, name=\"dae_dense\")\n elif bottleneck_kind == \"dvq\":\n x_reshaped = reshape_fn(x)\n x_res = x_reshaped\n x_means_hot = []\n x_means = 0\n l = 0\n for i in range(num_residuals):\n x_means_hot_res, x_means_res, q_loss_res, e_loss_res = embedding_lookup(\n x_res, means[i], num_blocks, block_v_size, random_top_k, soft_em,\n num_samples)\n # Update the ema variables\n if ema:\n tf.logging.info(\"Using EMA with beta = {}\".format(beta))\n updated_ema_count_res = moving_averages.assign_moving_average(\n ema_count[i],\n tf.reduce_sum(\n tf.reshape(\n x_means_hot_res, shape=[-1, num_blocks, block_v_size]),\n axis=0),\n decay,\n zero_debias=False)\n\n dw = tf.matmul(\n tf.transpose(x_means_hot_res, perm=[1, 2, 0]),\n tf.transpose(x_res, perm=[1, 0, 2]))\n\n updated_ema_means_res = moving_averages.assign_moving_average(\n ema_means[i], dw, decay, zero_debias=False)\n n = tf.reduce_sum(updated_ema_count_res, axis=-1, keep_dims=True)\n updated_ema_count_res = ((updated_ema_count_res + epsilon) /\n (n + 2**z_size * epsilon) * n)\n # pylint: disable=g-no-augmented-assignment\n updated_ema_means_res = updated_ema_means_res / tf.expand_dims(\n updated_ema_count_res, axis=-1)\n # pylint: enable=g-no-augmented-assignment\n\n with tf.control_dependencies([e_loss_res]):\n update_means_res = tf.assign(means[i], updated_ema_means_res)\n with tf.control_dependencies([update_means_res]):\n l += beta * e_loss_res\n else:\n l 
+= q_loss_res + beta * e_loss_res\n\n # Update the residuals\n x_res -= x_means_res\n x_means += x_means_res\n x_means_hot.append(x_means_hot_res)\n\n # Get the discrete latent representation\n x_means_hot = tf.stack(x_means_hot, axis=1)\n x_means_idx = tf.argmax(x_means_hot, axis=-1)\n\n # Get the binary representation\n x_means_bits = int_to_bit(\n x_means_idx,\n num_bits=int(z_size / (num_residuals * num_blocks)),\n base=2)\n shape = common_layers.shape_list(x_means_bits)\n new_shape = shape[:-2]\n new_shape[-1] = z_size\n x_means_bits = tf.reshape(x_means_bits, shape=new_shape)\n c = bit_to_int(tf.to_int32(x_means_bits), num_bits=z_size, base=2)\n\n # Adjust shape of c\n shape_x = common_layers.shape_list(x)\n new_shape = shape_x[:-1]\n c = tf.reshape(c, new_shape)\n\n # If we are doing soft EM then c is x_means_hot\n if soft_em:\n c = x_means_hot\n new_shape.append(block_v_size)\n c = tf.reshape(c, new_shape)\n\n x_means = tf.reshape(x_means, shape_x)\n x_reshaped = tf.reshape(x_reshaped, shape_x)\n h1 = x_reshaped + tf.stop_gradient(x_means - x_reshaped)\n else:\n raise ValueError(\"Unknown discretization method.\")\n\n h2 = tf.layers.dense(tf.nn.relu(h1), filter_size, name=\"vch2\")\n res = tf.layers.dense(tf.nn.relu(h2), hidden_size, name=\"vcfin\")\n\n embed_fn = partial(\n embed,\n hidden_size=hidden_size,\n z_size=z_size,\n filter_size=filter_size,\n name=name,\n bottleneck_kind=bottleneck_kind,\n soft_em=soft_em,\n num_blocks=num_blocks,\n num_residuals=num_residuals,\n block_v_size=block_v_size,\n means=means)\n return res, c, l, embed_fn\n\n\n# New API for discretization bottlenecks:\n# * Each method is separate and provides 2 functions:\n# * The [method]_bottleneck function returns discretized state.\n# * The [method]_unbottleneck function moves from discretized state to dense.\n\n\ndef tanh_discrete_bottleneck(x, bottleneck_size, bottleneck_noise,\n discretize_warmup_steps, mode):\n \"\"\"Simple discretization through tanh, flip bottleneck_noise many bits.\"\"\"\n x = tf.tanh(tf.layers.dense(x, bottleneck_size,\n name=\"tanh_discrete_bottleneck\"))\n d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)\n if mode == tf.estimator.ModeKeys.TRAIN:\n noise = tf.random_uniform(common_layers.shape_list(x))\n noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0\n d *= noise\n d = common_layers.mix(d, x, discretize_warmup_steps,\n mode == tf.estimator.ModeKeys.TRAIN)\n return d\n\n\ndef tanh_discrete_unbottleneck(x, hidden_size):\n \"\"\"Simple un-discretization from tanh.\"\"\"\n x = tf.layers.dense(x, hidden_size, name=\"tanh_discrete_unbottleneck\")\n return x\n\n\ndef isemhash_bottleneck(x, bottleneck_size, bottleneck_noise,\n discretize_warmup_steps, mode,\n isemhash_noise_dev=0.5, isemhash_mix_prob=0.5):\n \"\"\"Improved semantic hashing bottleneck.\"\"\"\n with tf.variable_scope(\"isemhash_bottleneck\"):\n x = tf.layers.dense(x, bottleneck_size, name=\"dense\")\n y = common_layers.saturating_sigmoid(x)\n if isemhash_noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:\n noise = tf.truncated_normal(\n common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev)\n y = common_layers.saturating_sigmoid(x + noise)\n d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y)\n d = 2.0 * d - 1.0 # Move from [0, 1] to [-1, 1].\n if mode == tf.estimator.ModeKeys.TRAIN: # Flip some bits.\n noise = tf.random_uniform(common_layers.shape_list(x))\n noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0\n d *= noise\n d = 
common_layers.mix(d, 2.0 * y - 1.0, discretize_warmup_steps,\n mode == tf.estimator.ModeKeys.TRAIN,\n max_prob=isemhash_mix_prob)\n return d\n\n\ndef isemhash_unbottleneck(x, hidden_size, isemhash_filter_size_multiplier=1.0):\n \"\"\"Improved semantic hashing un-bottleneck.\"\"\"\n filter_size = int(hidden_size * isemhash_filter_size_multiplier)\n x = 0.5 * (x - 1.0) # Move from [-1, 1] to [0, 1].\n with tf.variable_scope(\"isemhash_unbottleneck\"):\n h1a = tf.layers.dense(x, filter_size, name=\"hidden1a\")\n h1b = tf.layers.dense(1.0 - x, filter_size, name=\"hidden1b\")\n h2 = tf.layers.dense(tf.nn.relu(h1a + h1b), filter_size, name=\"hidden2\")\n return tf.layers.dense(tf.nn.relu(h2), hidden_size, name=\"final\")\n\n\ndef parametrized_bottleneck(x, hparams):\n \"\"\"Meta-function calling all the above bottlenecks with hparams.\"\"\"\n if hparams.bottleneck_kind == \"tanh_discrete\":\n return tanh_discrete_bottleneck(\n x, hparams.bottleneck_size, hparams.bottleneck_noise * 0.5,\n hparams.discretize_warmup_steps, hparams.mode)\n if hparams.bottleneck_kind == \"isemhash\":\n return isemhash_bottleneck(\n x, hparams.bottleneck_size, hparams.bottleneck_noise * 0.5,\n hparams.discretize_warmup_steps, hparams.mode,\n hparams.isemhash_noise_dev, hparams.isemhash_mix_prob)\n raise ValueError(\"Unsupported hparams.bottleneck_kind %s\"\n % hparams.bottleneck_kind)\n\n\ndef parametrized_unbottleneck(x, hidden_size, hparams):\n \"\"\"Meta-function calling all the above un-bottlenecks with hparams.\"\"\"\n if hparams.bottleneck_kind == \"tanh_discrete\":\n return tanh_discrete_unbottleneck(x, hidden_size)\n if hparams.bottleneck_kind == \"isemhash\":\n return isemhash_unbottleneck(\n x, hidden_size, hparams.isemhash_filter_size_multiplier)\n raise ValueError(\"Unsupported hparams.bottleneck_kind %s\"\n % hparams.bottleneck_kind)\n",
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Base classes for text-based Problems.\n\n* Text2TextProblem: input=text, target=text.\n* Text2ClassProblem: input=text, target=class.\n* Text2SelfProblem (for language modeling): target=text\n* QuestionAndContext2TextProblem: input=text, context=text, target=text.\n\nThe Text2TextTmpDir problem allows you to train without defining a problem. It\nexpects you to format your data in a particular way and put it in tmp_dir. See\nits docstring.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.utils import metrics\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\nclass VocabType(object):\n \"\"\"Available text vocabularies.\"\"\"\n CHARACTER = \"character\"\n SUBWORD = \"subwords\"\n TOKEN = \"tokens\"\n\n\nclass Text2TextProblem(problem.Problem):\n \"\"\"Base class for text-to-text problems.\n\n Subclasses only must override `generate_samples` and `is_generate_per_split`.\n See the \"Subclass interface\" code block below to see what else subclasses can\n override.\n \"\"\"\n\n # START: Subclass interface\n @property\n def dataset_splits(self):\n \"\"\"Splits of data to produce and number of output shards for each.\"\"\"\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 100,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]\n\n @property\n def is_generate_per_split(self):\n \"\"\"A single call to `generate_samples` generates for all `dataset_splits`.\n\n Set to True if you already have distinct subsets of data for each dataset\n split specified in `self.dataset_splits`. `self.generate_samples` will be\n called once for each split.\n\n Set to False if you have a unified dataset that you'd like to have split out\n into training and evaluation data automatically. `self.generate_samples`\n will be called only once and the data will be sharded across the dataset\n splits specified in `self.dataset_splits`.\n\n Returns:\n bool\n \"\"\"\n raise NotImplementedError()\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split):\n \"\"\"Generate samples of input text and target text pairs.\n\n Each yielded dict will be made into a single example. The values should be\n raw text. The Problem will generate a vocabulary and encode the raw text as\n integers as part of the data generation process.\n\n This method is typically called once per split in `self.dataset_splits`\n unless `self.is_generate_per_split=False`.\n\n Args:\n data_dir: final data directory. 
Typically only used in this method to copy\n over user-supplied vocab files (for example, if vocab_type ==\n VocabType.TOKEN).\n tmp_dir: temporary directory that you can use for downloading and scratch.\n dataset_split: problem.DatasetSplit, which data split to generate samples\n for (for example, training and evaluation).\n\n Yields:\n {\"inputs\": text, \"targets\": text}\n \"\"\"\n raise NotImplementedError()\n\n @property\n def vocab_type(self):\n \"\"\"What kind of vocabulary to use.\n\n `VocabType`s:\n * `SUBWORD`: `SubwordTextEncoder`, an invertible wordpiece vocabulary.\n Must provide `self.approx_vocab_size`. Generates the vocabulary based on\n the training data. To limit the number of samples the vocab generation\n looks at, override `self.max_samples_for_vocab`. Recommended and\n default.\n * `CHARACTER`: `ByteTextEncoder`, encode raw bytes.\n * `TOKEN`: `TokenTextEncoder`, vocabulary based on a file. Must provide a\n vocabulary file yourself (`TokenTextEncoder.store_to_file`) because one\n will not be generated for you. The vocab file should be stored in\n `data_dir/` with the name specified by `self.vocab_filename`.\n\n Returns:\n VocabType constant\n \"\"\"\n return VocabType.SUBWORD\n\n @property\n def approx_vocab_size(self):\n \"\"\"Approximate vocab size to generate. Only for VocabType.SUBWORD.\"\"\"\n return 2**15 # ~32k\n\n @property\n def additional_reserved_tokens(self):\n \"\"\"Additional reserved tokens. Only for VocabType.SUBWORD.\n\n Returns:\n List of str tokens that will get vocab ids 2+ (0 and 1 are reserved for\n padding and end-of-string).\n \"\"\"\n return []\n\n @property\n def oov_token(self):\n \"\"\"Out of vocabulary token. Only for VocabType.TOKEN.\"\"\"\n return None\n\n @property\n def max_samples_for_vocab(self):\n \"\"\"How many samples from `generate_samples` to look at for vocab generation.\n\n Only applies if self.vocab_type == VocabType.SUBWORD.\n\n If None, look at all training samples.\n\n Returns:\n None or int.\n \"\"\"\n return None\n\n @property\n def packed_length(self):\n \"\"\"Pack multiple examples into a single example of constant length.\n\n This is useful for TPU training to reduce the fraction of padding tokens.\n See generator_utils.pack_examples.\n\n Returns:\n None or int\n \"\"\"\n return None\n\n # END: Subclass interface\n\n @property\n def has_inputs(self):\n return True\n\n def max_length(self, model_hparams):\n return (self.packed_length or\n super(Text2TextProblem, self).max_length(model_hparams))\n\n def feature_encoders(self, data_dir):\n encoder = self.get_or_create_vocab(data_dir, None, force_get=True)\n encoders = {\"targets\": encoder}\n if self.has_inputs:\n encoders[\"inputs\"] = encoder\n return encoders\n\n def generate_text_for_vocab(self, data_dir, tmp_dir):\n for i, sample in enumerate(\n self.generate_samples(data_dir, tmp_dir, problem.DatasetSplit.TRAIN)):\n if self.has_inputs:\n yield sample[\"inputs\"]\n yield sample[\"targets\"]\n if self.max_samples_for_vocab and (i + 1) >= self.max_samples_for_vocab:\n break\n\n @property\n def vocab_filename(self):\n if self.vocab_type == VocabType.SUBWORD:\n return \"vocab.%s.%d.%s\" % (self.dataset_filename(),\n self.approx_vocab_size,\n VocabType.SUBWORD)\n else:\n return \"vocab.%s.%s\" % (self.dataset_filename(), VocabType.TOKEN)\n\n def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False):\n if self.vocab_type == VocabType.CHARACTER:\n encoder = text_encoder.ByteTextEncoder()\n elif self.vocab_type == VocabType.SUBWORD:\n if force_get:\n vocab_filepath 
= os.path.join(data_dir, self.vocab_filename)\n encoder = text_encoder.SubwordTextEncoder(vocab_filepath)\n else:\n encoder = generator_utils.get_or_generate_vocab_inner(\n data_dir, self.vocab_filename, self.approx_vocab_size,\n self.generate_text_for_vocab(data_dir, tmp_dir),\n max_subtoken_length=self.max_subtoken_length,\n reserved_tokens=(\n text_encoder.RESERVED_TOKENS + self.additional_reserved_tokens))\n elif self.vocab_type == VocabType.TOKEN:\n vocab_filename = os.path.join(data_dir, self.vocab_filename)\n encoder = text_encoder.TokenTextEncoder(vocab_filename,\n replace_oov=self.oov_token)\n else:\n raise ValueError(\"Unrecognized VocabType\")\n return encoder\n\n def _maybe_pack_examples(self, generator):\n \"\"\"Wraps generator with packer if self.packed_length.\"\"\"\n if not self.packed_length:\n return generator\n return generator_utils.pack_examples(\n generator,\n self.has_inputs,\n self.packed_length,\n chop_long_sequences=not self.has_inputs)\n\n def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):\n generator = self.generate_samples(data_dir, tmp_dir, dataset_split)\n encoder = self.get_or_create_vocab(data_dir, tmp_dir)\n return text2text_generate_encoded(generator, encoder,\n has_inputs=self.has_inputs)\n\n @property\n def max_subtoken_length(self):\n \"\"\"Maximum subtoken length when generating vocab.\n\n Override with a finite integer (e.g. 100) to avoid quadratic-time vocab\n building.\n\n Returns:\n an integer or None\n \"\"\"\n return None\n\n @property\n def batch_size_means_tokens(self):\n return True\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n\n filepath_fns = {\n problem.DatasetSplit.TRAIN: self.training_filepaths,\n problem.DatasetSplit.EVAL: self.dev_filepaths,\n problem.DatasetSplit.TEST: self.test_filepaths,\n }\n\n split_paths = [(split[\"split\"], filepath_fns[split[\"split\"]](\n data_dir, split[\"shards\"], shuffled=False))\n for split in self.dataset_splits]\n all_paths = []\n for _, paths in split_paths:\n all_paths.extend(paths)\n\n if self.is_generate_per_split:\n for split, paths in split_paths:\n generator_utils.generate_files(\n self._maybe_pack_examples(\n self.generate_encoded_samples(data_dir, tmp_dir, split)), paths)\n else:\n generator_utils.generate_files(\n self._maybe_pack_examples(\n self.generate_encoded_samples(\n data_dir, tmp_dir, problem.DatasetSplit.TRAIN)), all_paths)\n\n generator_utils.shuffle_dataset(all_paths)\n\n def hparams(self, defaults, unused_model_hparams):\n p = defaults\n p.stop_at_eos = int(True)\n\n if self.has_inputs:\n source_vocab_size = self._encoders[\"inputs\"].vocab_size\n p.input_modality = {\n \"inputs\": (registry.Modalities.SYMBOL, source_vocab_size)\n }\n target_vocab_size = self._encoders[\"targets\"].vocab_size\n p.target_modality = (registry.Modalities.SYMBOL, target_vocab_size)\n if self.vocab_type == VocabType.CHARACTER:\n p.loss_multiplier = 2.0\n\n if self.packed_length:\n identity = (registry.Modalities.GENERIC, None)\n if self.has_inputs:\n p.input_modality[\"inputs_segmentation\"] = identity\n p.input_modality[\"inputs_position\"] = identity\n p.input_modality[\"targets_segmentation\"] = identity\n p.input_modality[\"targets_position\"] = identity\n\n def example_reading_spec(self):\n data_fields = {\"targets\": tf.VarLenFeature(tf.int64)}\n if self.has_inputs:\n data_fields[\"inputs\"] = tf.VarLenFeature(tf.int64)\n\n if self.packed_length:\n if self.has_inputs:\n data_fields[\"inputs_segmentation\"] = tf.VarLenFeature(tf.int64)\n 
data_fields[\"inputs_position\"] = tf.VarLenFeature(tf.int64)\n data_fields[\"targets_segmentation\"] = tf.VarLenFeature(tf.int64)\n data_fields[\"targets_position\"] = tf.VarLenFeature(tf.int64)\n\n data_items_to_decoders = None\n return (data_fields, data_items_to_decoders)\n\n def eval_metrics(self):\n return [\n metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,\n metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY,\n metrics.Metrics.APPROX_BLEU, metrics.Metrics.ROUGE_2_F,\n metrics.Metrics.ROUGE_L_F\n ]\n\n\nclass QuestionAndContext2TextProblem(Text2TextProblem):\n \"\"\"Problems consisting of inputs, context, and a target.\n\n Variant of Text2TextProblem that includes a \"context\" feature in addition to\n \"inputs\" and \"targets.\"\n \"\"\"\n QUESTION_SEPARATOR = \"<EOQ>\"\n QUESTION_SEPARATOR_ID = 2\n\n @property\n def additional_reserved_tokens(self):\n return [self.QUESTION_SEPARATOR]\n\n def feature_encoders(self, data_dir):\n encoders = (super(QuestionAndContext2TextProblem, self)\n .feature_encoders(data_dir))\n encoders[\"context\"] = encoders[\"inputs\"]\n return encoders\n\n def generate_text_for_vocab(self, data_dir, tmp_dir):\n for i, sample in enumerate(\n self.generate_samples(data_dir, tmp_dir, problem.DatasetSplit.TRAIN)):\n yield sample[\"inputs\"]\n yield sample[\"context\"]\n yield sample[\"targets\"]\n if self.max_samples_for_vocab and (i + 1) >= self.max_samples_for_vocab:\n break\n\n def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):\n generator = super(\n QuestionAndContext2TextProblem, self).generate_encoded_samples(\n data_dir, tmp_dir, dataset_split)\n vocab = self.feature_encoders(data_dir)[\"context\"]\n for sample in generator:\n context = vocab.encode(sample[\"context\"])\n context.append(text_encoder.EOS_ID)\n sample[\"context\"] = context\n yield sample\n\n def hparams(self, defaults, unused_model_hparams):\n (super(QuestionAndContext2TextProblem, self)\n .hparams(defaults, unused_model_hparams))\n p = defaults\n source_vocab_size = self._encoders[\"context\"].vocab_size\n p.input_modality[\"context\"] = (registry.Modalities.SYMBOL,\n source_vocab_size)\n if self.packed_length:\n raise NotImplementedError(\"QuestionAndContext2Text does not \"\n \"support packed_length\")\n\n def example_reading_spec(self):\n data_fields, data_items_to_decoders = (super(QuestionAndContext2TextProblem,\n self)\n .example_reading_spec())\n data_fields[\"context\"] = tf.VarLenFeature(tf.int64)\n return (data_fields, data_items_to_decoders)\n\n\nclass Text2SelfProblem(Text2TextProblem):\n \"\"\"Language modeling problems base class.\n\n See Text2TextProblem for subclass interface.\n \"\"\"\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split):\n \"\"\"Generate samples of text.\n\n Args:\n data_dir: final data directory. Typically only used in this method to copy\n over user-supplied vocab files (for example, if vocab_type ==\n VocabType.TOKEN).\n tmp_dir: temporary directory that you can use for downloading and scratch.\n dataset_split: problem.DatasetSplit, which data split to generate samples\n for (for example, training and evaluation).\n\n Yields:\n Sample: dict<str feature_name, str text>: for language modeling problems\n (i.e. 
Text2SelfProblems), this generator should yield dicts with only\n the \"targets\" key.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def has_inputs(self):\n return False\n\n\nclass Text2ClassProblem(Text2TextProblem):\n \"\"\"Base class for text classification problems.\"\"\"\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split):\n \"\"\"Generate samples of text and label pairs.\n\n Each yielded dict will be a single example. The inputs should be raw text.\n The label should be an int in [0, self.num_classes).\n\n Args:\n data_dir: final data directory. Typically only used in this method to copy\n over user-supplied vocab files (for example, if vocab_type ==\n VocabType.TOKEN).\n tmp_dir: temporary directory that you can use for downloading and scratch.\n dataset_split: problem.DatasetSplit, which data split to generate samples\n for (for example, training and evaluation).\n\n Yields:\n {\"inputs\": text, \"label\": int}\n \"\"\"\n raise NotImplementedError()\n\n # START: Additional subclass interface\n @property\n def num_classes(self):\n \"\"\"The number of classes.\"\"\"\n raise NotImplementedError()\n\n def class_labels(self, data_dir):\n \"\"\"String representation of the classes.\"\"\"\n del data_dir\n return [\"ID_%d\" % i for i in range(self.num_classes)]\n\n # END: Additional subclass interface\n\n def generate_text_for_vocab(self, data_dir, tmp_dir):\n for i, sample in enumerate(\n self.generate_samples(data_dir, tmp_dir, problem.DatasetSplit.TRAIN)):\n yield sample[\"inputs\"]\n if self.max_samples_for_vocab and (i + 1) >= self.max_samples_for_vocab:\n break\n\n def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):\n generator = self.generate_samples(data_dir, tmp_dir, dataset_split)\n encoder = self.get_or_create_vocab(data_dir, tmp_dir)\n for sample in generator:\n inputs = encoder.encode(sample[\"inputs\"])\n inputs.append(text_encoder.EOS_ID)\n label = sample[\"label\"]\n yield {\"inputs\": inputs, \"targets\": [label]}\n\n def feature_encoders(self, data_dir):\n encoder = self.get_or_create_vocab(data_dir, None, force_get=True)\n\n return {\n \"inputs\": encoder,\n \"targets\": text_encoder.ClassLabelEncoder(self.class_labels(data_dir))\n }\n\n def hparams(self, defaults, unused_model_hparams):\n p = defaults\n source_vocab_size = self._encoders[\"inputs\"].vocab_size\n p.input_modality = {\n \"inputs\": (registry.Modalities.SYMBOL, source_vocab_size)\n }\n p.target_modality = (registry.Modalities.CLASS_LABEL, self.num_classes)\n\n def example_reading_spec(self):\n data_fields = {\n \"inputs\": tf.VarLenFeature(tf.int64),\n \"targets\": tf.FixedLenFeature([1], tf.int64),\n }\n data_items_to_decoders = None\n return (data_fields, data_items_to_decoders)\n\n\ndef txt_line_iterator(txt_path):\n \"\"\"Iterate through lines of file.\"\"\"\n with tf.gfile.Open(txt_path) as f:\n for line in f:\n yield line.strip()\n\n\ndef text2text_txt_iterator(source_txt_path, target_txt_path):\n \"\"\"Yield dicts for Text2TextProblem.generate_samples from lines of files.\"\"\"\n for inputs, targets in zip(\n txt_line_iterator(source_txt_path), txt_line_iterator(target_txt_path)):\n yield {\"inputs\": inputs, \"targets\": targets}\n\n\ndef text2text_distill_iterator(source_txt_path, target_txt_path,\n distill_txt_path):\n \"\"\"Yield dicts for Text2TextProblem.generate_samples from lines of files.\"\"\"\n for inputs, targets, dist_targets in zip(\n txt_line_iterator(source_txt_path), txt_line_iterator(target_txt_path),\n txt_line_iterator(distill_txt_path)):\n 
yield {\"inputs\": inputs, \"targets\": targets, \"dist_targets\": dist_targets}\n\n\ndef text2self_txt_iterator(txt_path):\n for line in txt_line_iterator(txt_path):\n yield {\"targets\": line}\n\n\ndef text2class_txt_iterator(source_txt_path, label_txt_path, class_strs=None):\n \"\"\"Yield dicts for Text2ClassProblem.generate_samples from lines of files.\n\n Args:\n source_txt_path: txt file with record per line.\n label_txt_path: txt file with label per line, either as int or str. If\n string, must provide class_strs.\n class_strs: list<str> of class label names. Must be in correct order (i.e.\n [\"a\", \"b\", \"c\"] means that \"a\" will get class ID 0, \"b\" ID 1, etc.).\n\n Yields:\n {\"inputs\": inputs, \"label\": label}\n \"\"\"\n if class_strs:\n class_strs = dict([(s, i) for i, s in enumerate(class_strs)])\n for inputs, label in zip(\n txt_line_iterator(source_txt_path), txt_line_iterator(label_txt_path)):\n label = label.strip()\n if class_strs:\n label = class_strs[label]\n else:\n label = int(label)\n yield {\"inputs\": inputs, \"label\": label}\n\n\ndef text2text_txt_tab_iterator(txt_path):\n \"\"\"Yield dicts for Text2TextProblem.generate_samples from lines of txt_path.\n\n Args:\n txt_path: path to txt file with a record per line, source and target\n are tab-separated.\n\n Yields:\n {\"inputs\": inputs, \"targets\": targets}\n \"\"\"\n for line in txt_line_iterator(txt_path):\n if line and \"\\t\" in line:\n parts = line.split(\"\\t\", 1)\n inputs, targets = parts[:2]\n yield {\"inputs\": inputs.strip(), \"targets\": targets.strip()}\n\n\ndef text2text_generate_encoded(sample_generator,\n vocab,\n targets_vocab=None,\n has_inputs=True):\n \"\"\"Encode Text2Text samples from the generator with the vocab.\"\"\"\n targets_vocab = targets_vocab or vocab\n for sample in sample_generator:\n if has_inputs:\n sample[\"inputs\"] = vocab.encode(sample[\"inputs\"])\n sample[\"inputs\"].append(text_encoder.EOS_ID)\n sample[\"targets\"] = targets_vocab.encode(sample[\"targets\"])\n sample[\"targets\"].append(text_encoder.EOS_ID)\n yield sample\n\n\[email protected]_problem\nclass Text2textTmpdir(Text2TextProblem):\n \"\"\"Allows training a Text2TextProblem without defining a subclass.\n\n Put your training and evaluation data into the following files in tmp_dir,\n with 1 record per line:\n\n * inputs.train.txt\n * targets.train.txt\n * inputs.eval.txt\n * targets.eval.txt\n \"\"\"\n TRAIN_FILES = (\"inputs.train.txt\", \"targets.train.txt\")\n EVAL_FILES = (\"inputs.eval.txt\", \"targets.eval.txt\")\n\n def is_generate_per_split(self):\n return True\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split):\n del data_dir\n is_training = dataset_split == problem.DatasetSplit.TRAIN\n files = self.TRAIN_FILES if is_training else self.EVAL_FILES\n files = [os.path.join(tmp_dir, f) for f in files]\n inputs_file, targets_file = files\n return text2text_txt_iterator(inputs_file, targets_file)\n\n\nclass ChoppedTextProblem(Text2SelfProblem):\n \"\"\"Tokenize and chop text files into fixed-length language-modeling examples.\n\n The input data is a set of text files, as specified by\n self.train_text_filepaths() and self.dev_text_filepaths().\n\n The text is tokenized using a SubwordTextEncoder, and\n then split into examples, each of length self.sequence_length().\n \"\"\"\n\n def train_text_filepaths(self, tmp_dir):\n \"\"\"Local filepaths of text files containing training data.\n\n This function may want to download the files if they do not exist.\n\n Args:\n tmp_dir: a string\n 
Returns:\n a list of strings.\n \"\"\"\n raise NotImplementedError()\n\n def dev_text_filepaths(self, tmp_dir):\n \"\"\"Local filepaths of text files containing dev data.\n\n This function may want to download the files if they do not exist.\n\n Args:\n tmp_dir: a string\n Returns:\n a list of strings.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def sequence_length(self):\n \"\"\"Length of each example (in tokens).\"\"\"\n raise NotImplementedError()\n\n def max_length(self, model_hparams):\n return model_hparams.split_to_length or self.sequence_length\n\n def text_filepaths_for_task(self, tmp_dir, task_id):\n \"\"\"List of input filepaths for a particular training or dev shard.\n\n Args:\n tmp_dir: a string\n task_id: an integer less than self.num_shards\n Returns:\n a list of tuples (filepath, start_pos, num_bytes)\n \"\"\"\n assert task_id >= 0\n assert task_id < self.num_train_shards + self.num_dev_shards\n if task_id < self.num_train_shards:\n return [\n f for i, f in enumerate(self.train_text_filepaths(tmp_dir))\n if i % self.num_train_shards == task_id\n ]\n else:\n return [\n f for i, f in enumerate(self.dev_text_filepaths(tmp_dir))\n if i % self.num_dev_shards == task_id - self.num_train_shards\n ]\n\n def filepath_to_unicode_strings(self, filepath):\n \"\"\"Read text out of an input file.\n\n The default just reads the text, converts to unicode and yields one\n unicode string.\n\n Subclasses can override this function in order to preprocess, and can\n yield any number of strings.\n\n Args:\n filepath: a string\n Yields:\n unicode strings.\n \"\"\"\n f = tf.gfile.Open(filepath)\n b = f.read()\n yield text_encoder.to_unicode_ignore_errors(b)\n\n def file_generator(self,\n filepaths,\n max_chars_per_file=None,\n max_chars_total=None):\n \"\"\"Read complete text of input files and yield unicode strings.\n\n By default, one unicode string is produced per file, but this is\n not guaranteed, since subclasses can override\n filepath_to_unicode_strings().\n\n max_chars_per_file and max_chars_total can also be specified, in which\n case some strings may be truncated or dropped to limit the total\n amount of output.\n\n Args:\n filepaths: a list of strings\n max_chars_per_file: an optional integer\n max_chars_total: an optional integer\n Yields:\n unicode strings\n \"\"\"\n chars_total = 0\n for fname in filepaths:\n chars_this_file = 0\n tf.logging.info(\"reading file %s\" % fname)\n for text in self.filepath_to_unicode_strings(fname):\n if (max_chars_per_file and\n chars_this_file + len(text) > max_chars_per_file):\n text = text[:max_chars_per_file - chars_this_file]\n if max_chars_total and chars_total + len(text) > max_chars_total:\n text = text[:max_chars_total - chars_total]\n chars_total += len(text)\n chars_this_file += len(text)\n if text:\n yield text\n if max_chars_total and chars_total >= max_chars_total:\n return\n if max_chars_per_file and chars_this_file >= max_chars_per_file:\n break\n\n def example_generator(self, encoder, tmp_dir, task_id):\n \"\"\"Generator for examples.\n\n Args:\n encoder: a TextEncoder\n tmp_dir: a string\n task_id: an integer\n Yields:\n feature dictionaries\n \"\"\"\n filepaths = self.text_filepaths_for_task(tmp_dir, task_id)\n if task_id >= self.num_train_shards:\n # this is dev data - limit the total length.\n max_chars_per_file = self.max_dev_chars // (\n self.num_dev_shards * len(filepaths))\n else:\n max_chars_per_file = None\n tokens = []\n for ftext in self.file_generator(\n filepaths, max_chars_per_file=max_chars_per_file):\n 
tokens.extend(encoder.encode(ftext))\n pos = 0\n while pos + self.sequence_length <= len(tokens):\n yield {\"targets\": tokens[pos:pos + self.sequence_length]}\n pos += self.sequence_length\n if pos > 0:\n tokens = tokens[pos:]\n if self.remainder_policy == \"pad\":\n if tokens:\n targets = tokens + [0] * (self.sequence_length - len(tokens))\n yield {\"targets\": targets}\n else:\n assert self.remainder_policy == \"drop\"\n\n @property\n def remainder_policy(self):\n \"\"\"What to do with leftover tokens.\n\n Returns:\n a string - either \"pad\" or \"drop\".\n \"\"\"\n return \"pad\"\n\n def prepare_to_generate(self, data_dir, tmp_dir):\n \"\"\"Make sure that the data is prepared and the vocab is generated.\"\"\"\n self.get_or_create_vocab(data_dir, tmp_dir)\n self.train_text_filepaths(tmp_dir)\n self.dev_text_filepaths(tmp_dir)\n\n def generate_text_for_vocab(self, data_dir, tmp_dir):\n return self.file_generator(\n self.train_text_filepaths(tmp_dir),\n max_chars_total=self.max_chars_for_vocab)\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n \"\"\"Generates training/dev data.\n\n Args:\n data_dir: a string\n tmp_dir: a string\n task_id: an optional integer\n Returns:\n shard or shards for which data was generated.\n \"\"\"\n tf.logging.info(\"generate_data task_id=%s\" % task_id)\n encoder = self.get_or_create_vocab(data_dir, tmp_dir)\n assert task_id >= 0 and task_id < self.num_generate_tasks\n if task_id < self.num_train_shards:\n out_file = self.training_filepaths(\n data_dir, self.num_train_shards, shuffled=False)[task_id]\n else:\n out_file = self.dev_filepaths(\n data_dir, self.num_dev_shards,\n shuffled=False)[task_id - self.num_train_shards]\n generator_utils.generate_files(\n self.example_generator(encoder, tmp_dir, task_id), [out_file])\n generator_utils.shuffle_dataset([out_file])\n\n @property\n def max_chars_for_vocab(self):\n \"\"\"Number of characters of training data to use for generating vocab.\"\"\"\n return 10**7\n\n @property\n def num_train_shards(self):\n return self.dataset_splits[0][\"shards\"]\n\n @property\n def num_dev_shards(self):\n return self.dataset_splits[1][\"shards\"]\n\n @property\n def max_dev_chars(self):\n \"\"\"Limit dev set to at most this many characters (default 10M).\"\"\"\n return 10**7\n\n @property\n def multiprocess_generate(self):\n return True\n\n @property\n def num_generate_tasks(self):\n return self.num_train_shards + self.num_dev_shards\n\n def eval_metrics(self):\n return [metrics.Metrics.ACC, metrics.Metrics.NEG_LOG_PERPLEXITY]\n",
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Algorithmic generators test.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nfrom six.moves import range # pylint: disable=redefined-builtin\n\nfrom tensor2tensor.data_generators import algorithmic\n\nimport tensorflow as tf\n\n\nclass AlgorithmicTest(tf.test.TestCase):\n\n def testIdentityGenerator(self):\n identity_problem = algorithmic.AlgorithmicIdentityBinary40()\n counter = 0\n for d in identity_problem.generator(3, 8, 10):\n counter += 1\n self.assertEqual(d[\"inputs\"], d[\"targets\"])\n self.assertEqual(counter, 10)\n\n def testReverseGenerator(self):\n reversing_problem = algorithmic.AlgorithmicReverseBinary40()\n counter = 0\n for d in reversing_problem.generator(3, 8, 10):\n counter += 1\n self.assertEqual(list(reversed(d[\"inputs\"])), d[\"targets\"])\n self.assertEqual(counter, 10)\n\n def testZipfDistribution(self):\n # Following Zipf's Law with alpha equals 1: the first in rank is two times\n # more probable/frequent that the second in rank, three times more prob/freq\n # that the third in rank and so on.\n d = algorithmic.zipf_distribution(10, 1.0001)\n for i in range(len(d[1:])-1):\n self.assertEqual(\"%.4f\" % (abs(d[i+1]-d[i+2])*(i+2)), \"%.4f\" % d[1])\n\n def testReverseGeneratorNlpLike(self):\n counter = 0\n for d in algorithmic.reverse_generator_nlplike(3, 8, 10):\n counter += 1\n self.assertEqual(list(reversed(d[\"inputs\"])), d[\"targets\"])\n self.assertEqual(counter, 10)\n\n def testLowerEndianToNumber(self):\n self.assertEqual(algorithmic.lower_endian_to_number([0], 2), 0)\n self.assertEqual(algorithmic.lower_endian_to_number([0], 7), 0)\n self.assertEqual(algorithmic.lower_endian_to_number([1], 2), 1)\n self.assertEqual(algorithmic.lower_endian_to_number([5], 8), 5)\n self.assertEqual(algorithmic.lower_endian_to_number([0, 1], 2), 2)\n self.assertEqual(algorithmic.lower_endian_to_number([0, 1, 1], 2), 6)\n self.assertEqual(algorithmic.lower_endian_to_number([7, 3, 1, 2], 10), 2137)\n\n def testNumberToLowerEndian(self):\n self.assertEqual(algorithmic.number_to_lower_endian(0, 2), [0])\n self.assertEqual(algorithmic.number_to_lower_endian(0, 7), [0])\n self.assertEqual(algorithmic.number_to_lower_endian(1, 2), [1])\n self.assertEqual(algorithmic.number_to_lower_endian(5, 8), [5])\n self.assertEqual(algorithmic.number_to_lower_endian(2, 2), [0, 1])\n self.assertEqual(algorithmic.number_to_lower_endian(6, 2), [0, 1, 1])\n self.assertEqual(algorithmic.number_to_lower_endian(2137, 10), [7, 3, 1, 2])\n\n def testAdditionGenerator(self):\n addition_problem = algorithmic.AlgorithmicAdditionBinary40()\n counter = 0\n for d in addition_problem.generator(4, 8, 10):\n counter += 1\n self.assertEqual(d[\"inputs\"].count(4), 1)\n self.assertEqual(d[\"inputs\"].count(5), 0)\n self.assertEqual(d[\"targets\"].count(4), 0)\n self.assertEqual(d[\"targets\"].count(5), 
0)\n self.assertEqual(counter, 10)\n\n def testMultiplicationGenerator(self):\n multiplication_problem = algorithmic.AlgorithmicMultiplicationBinary40()\n counter = 0\n for d in multiplication_problem.generator(4, 8, 10):\n counter += 1\n self.assertEqual(d[\"inputs\"].count(4), 1)\n self.assertEqual(d[\"inputs\"].count(5), 0)\n self.assertEqual(d[\"targets\"].count(4), 0)\n self.assertEqual(d[\"targets\"].count(5), 0)\n self.assertEqual(counter, 10)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for Neural GPU.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport numpy as np\n\nfrom tensor2tensor.data_generators import problem_hparams\nfrom tensor2tensor.layers import common_hparams\nfrom tensor2tensor.models import neural_gpu\n\nimport tensorflow as tf\n\n\nclass NeuralGPUTest(tf.test.TestCase):\n\n def testNeuralGPU(self):\n hparams = common_hparams.basic_params1()\n batch_size = 3\n input_length = 5\n target_length = input_length\n input_vocab_size = 9\n target_vocab_size = 11\n p_hparams = problem_hparams.test_problem_hparams(input_vocab_size,\n target_vocab_size)\n inputs = -1 + np.random.random_integers(\n input_vocab_size, size=(batch_size, input_length, 1, 1))\n targets = -1 + np.random.random_integers(\n target_vocab_size, size=(batch_size, target_length, 1, 1))\n with self.test_session() as session:\n features = {\n \"inputs\": tf.constant(inputs, dtype=tf.int32),\n \"targets\": tf.constant(targets, dtype=tf.int32)\n }\n model = neural_gpu.NeuralGPU(hparams, tf.estimator.ModeKeys.TRAIN,\n p_hparams)\n logits, _ = model(features)\n session.run(tf.global_variables_initializer())\n res = session.run(logits)\n self.assertEqual(res.shape, (batch_size, target_length, 1, 1,\n target_vocab_size))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"CelebA.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport zipfile\n\n# Dependency imports\n\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import image_utils\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\[email protected]_problem\nclass ImageCeleba(image_utils.ImageProblem):\n \"\"\"CelebA dataset, aligned and cropped images.\"\"\"\n IMG_DATA = (\"img_align_celeba.zip\",\n \"https://drive.google.com/uc?export=download&\"\n \"id=0B7EVK8r0v71pZjFTYXZWM3FlRnM\")\n LANDMARKS_DATA = (\"celeba_landmarks_align\",\n \"https://drive.google.com/uc?export=download&\"\n \"id=0B7EVK8r0v71pd0FJY3Blby1HUTQ\")\n ATTR_DATA = (\"celeba_attr\", \"https://drive.google.com/uc?export=download&\"\n \"id=0B7EVK8r0v71pblRyaVFSWGxPY0U\")\n\n LANDMARK_HEADINGS = (\"lefteye_x lefteye_y righteye_x righteye_y \"\n \"nose_x nose_y leftmouth_x leftmouth_y rightmouth_x \"\n \"rightmouth_y\").split()\n ATTR_HEADINGS = (\n \"5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs \"\n \"Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair \"\n \"Bushy_Eyebrows Chubby Double_Chin Eyeglasses Goatee Gray_Hair \"\n \"Heavy_Makeup High_Cheekbones Male Mouth_Slightly_Open Mustache \"\n \"Narrow_Eyes No_Beard Oval_Face Pale_Skin Pointy_Nose Receding_Hairline \"\n \"Rosy_Cheeks Sideburns Smiling Straight_Hair Wavy_Hair Wearing_Earrings \"\n \"Wearing_Hat Wearing_Lipstick Wearing_Necklace Wearing_Necktie Young\"\n ).split()\n\n def hparams(self, defaults, unused_model_hparams):\n p = defaults\n p.input_modality = {\"inputs\": (\"image:identity\", 256)}\n p.target_modality = (\"image:identity\", 256)\n p.batch_size_multiplier = 256\n p.input_space_id = 1\n p.target_space_id = 1\n\n def generator(self, tmp_dir, how_many, start_from=0):\n \"\"\"Image generator for CELEBA dataset.\n\n Args:\n tmp_dir: path to temporary storage directory.\n how_many: how many images and labels to generate.\n start_from: from which image to start.\n\n Yields:\n A dictionary representing the images with the following fields:\n * image/encoded: the string encoding the image as JPEG,\n * image/format: the string \"jpeg\" representing image format,\n \"\"\"\n out_paths = []\n for fname, url in [self.IMG_DATA, self.LANDMARKS_DATA, self.ATTR_DATA]:\n path = generator_utils.maybe_download_from_drive(tmp_dir, fname, url)\n out_paths.append(path)\n\n img_path, landmarks_path, attr_path = out_paths # pylint: disable=unbalanced-tuple-unpacking\n unzipped_folder = img_path[:-4]\n if not tf.gfile.Exists(unzipped_folder):\n zipfile.ZipFile(img_path, \"r\").extractall(tmp_dir)\n\n with tf.gfile.Open(landmarks_path) as f:\n landmarks_raw = f.read()\n\n with tf.gfile.Open(attr_path) as f:\n attr_raw = f.read()\n\n def process_landmarks(raw_data):\n landmarks = {}\n lines = 
raw_data.split(\"\\n\")\n headings = lines[1].strip().split()\n for line in lines[2:-1]:\n values = line.strip().split()\n img_name = values[0]\n landmark_values = [int(v) for v in values[1:]]\n landmarks[img_name] = landmark_values\n return landmarks, headings\n\n def process_attrs(raw_data):\n attrs = {}\n lines = raw_data.split(\"\\n\")\n headings = lines[1].strip().split()\n for line in lines[2:-1]:\n values = line.strip().split()\n img_name = values[0]\n attr_values = [int(v) for v in values[1:]]\n attrs[img_name] = attr_values\n return attrs, headings\n\n img_landmarks, _ = process_landmarks(landmarks_raw)\n img_attrs, _ = process_attrs(attr_raw)\n\n image_files = tf.gfile.Glob(unzipped_folder + \"/*.jpg\")\n for filename in image_files[start_from:start_from + how_many]:\n img_name = os.path.basename(filename)\n landmarks = img_landmarks[img_name]\n attrs = img_attrs[img_name]\n\n with tf.gfile.Open(filename, \"r\") as f:\n encoded_image_data = f.read()\n yield {\n \"image/encoded\": [encoded_image_data],\n \"image/format\": [\"jpeg\"],\n \"attributes\": attrs,\n \"landmarks\": landmarks,\n }\n\n @property\n def train_shards(self):\n return 100\n\n @property\n def dev_shards(self):\n return 10\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n generator_utils.generate_dataset_and_shuffle(\n self.generator(tmp_dir, 162770), # train\n self.training_filepaths(data_dir, self.train_shards, shuffled=False),\n self.generator(tmp_dir, 19867, 162770), # dev\n self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))\n\n\[email protected]_problem\nclass ImageCelebaMultiResolution(ImageCeleba):\n \"\"\"CelebA at multiple resolutions.\n\n The resolutions are specified as a hyperparameter during preprocessing.\n \"\"\"\n\n def dataset_filename(self):\n return \"image_celeba\"\n\n def preprocess_example(self, example, mode, hparams):\n image = example[\"inputs\"]\n if hasattr(hparams, \"resize_method\"):\n method = getattr(tf.image.ResizeMethod, hparams.resize_method)\n else: # default\n method = tf.image.ResizeMethod.BICUBIC\n\n # Remove boundaries in CelebA images. Remove 40 pixels each side\n # vertically and 20 pixels each side horizontally.\n image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)\n\n scaled_images = image_utils.make_multiscale(\n image, hparams.resolutions,\n resize_method=method, num_channels=self.num_channels)\n\n # Pack tuple of scaled images into one tensor. We do this by enforcing the\n # columns to match for every resolution.\n highest_res = hparams.resolutions[-1]\n example[\"inputs\"] = image\n example[\"targets\"] = tf.concat([\n tf.reshape(scaled_image,\n [res**2 // highest_res, highest_res, self.num_channels])\n for scaled_image, res in zip(scaled_images, hparams.resolutions)],\n axis=0)\n return example\n\n\[email protected]_problem\nclass Img2imgCeleba(ImageCeleba):\n \"\"\"8px to 32px problem.\"\"\"\n\n def dataset_filename(self):\n return \"image_celeba\"\n\n def preprocess_example(self, example, unused_mode, unused_hparams):\n image = example[\"inputs\"]\n # Remove boundaries in CelebA images. 
Remove 40 pixels each side\n # vertically and 20 pixels each side horizontally.\n image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)\n image_8 = image_utils.resize_by_area(image, 8)\n image_32 = image_utils.resize_by_area(image, 32)\n\n example[\"inputs\"] = image_8\n example[\"targets\"] = image_32\n return example\n\n\[email protected]_problem\nclass Img2imgCeleba64(Img2imgCeleba):\n \"\"\"8px to 64px problem.\"\"\"\n\n def preprocess_example(self, example, unused_mode, unused_hparams):\n image = example[\"inputs\"]\n # Remove boundaries in CelebA images. Remove 40 pixels each side\n # vertically and 20 pixels each side horizontally.\n image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40)\n image_8 = image_utils.resize_by_area(image, 8)\n image_64 = image_utils.resize_by_area(image, 64)\n\n example[\"inputs\"] = image_8\n example[\"targets\"] = image_64\n return example\n",
"# coding=utf-8\n# Copyright 2018 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Translate generators test.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport tarfile\n\n# Dependency imports\n\nfrom tensor2tensor.data_generators import text_problems\nfrom tensor2tensor.data_generators import translate\n\nimport tensorflow as tf\n\n\nclass TranslateTest(tf.test.TestCase):\n DATASETS = [\n [\"data1.tgz\", (\"train1.en\", \"train1.de\")],\n [\"data2.tgz\", (\"train2.en\", \"train2.de\")],\n [\"data3.tgz\", (\"train3.en\", \"train3.de\")],\n ]\n\n @classmethod\n def setUpClass(cls):\n tmp_dir = tf.test.get_temp_dir()\n compressed_dir = os.path.join(tmp_dir, \"compressed\")\n shutil.rmtree(tmp_dir)\n tf.gfile.MakeDirs(compressed_dir)\n\n en_data = [str(i) for i in range(10, 40)]\n de_data = [str(i) for i in range(100, 130)]\n data = list(zip(en_data, de_data))\n\n for i, dataset in enumerate(cls.DATASETS):\n tar_file = dataset[0]\n en_file, de_file = [\n os.path.join(compressed_dir, name) for name in dataset[1]\n ]\n with tf.gfile.Open(en_file, \"w\") as en_f:\n with tf.gfile.Open(de_file, \"w\") as de_f:\n start = i * 10\n end = start + 10\n for en_line, de_line in data[start:end]:\n en_f.write(en_line)\n en_f.write(\"\\n\")\n de_f.write(de_line)\n de_f.write(\"\\n\")\n\n with tarfile.open(os.path.join(tmp_dir, tar_file), \"w:gz\") as tar_f:\n tar_f.add(en_file, os.path.basename(en_file))\n tar_f.add(de_file, os.path.basename(de_file))\n\n cls.tmp_dir = tmp_dir\n cls.data = data\n\n def testCompileData(self):\n filename = \"out\"\n filepath = os.path.join(self.tmp_dir, filename)\n translate.compile_data(self.tmp_dir, self.DATASETS, filename)\n\n count = 0\n for i, example in enumerate(\n text_problems.text2text_txt_iterator(filepath + \".lang1\",\n filepath + \".lang2\")):\n expected = self.data[i]\n self.assertEqual(list(expected), [example[\"inputs\"], example[\"targets\"]])\n count += 1\n self.assertEqual(count, len(self.data))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] | [
[
"tensorflow.concat",
"tensorflow.nn.log_softmax",
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.stack",
"tensorflow.to_int32",
"tensorflow.python.training.moving_averages.assign_moving_average",
"tensorflow.layers.dense",
"tensorflow.stop_gradient",
"tensorflow.nn.top_k",
"tensorflow.to_float",
"tensorflow.square",
"tensorflow.argmax",
"tensorflow.tile",
"tensorflow.matmul",
"tensorflow.less",
"tensorflow.exp",
"tensorflow.multinomial",
"tensorflow.one_hot",
"tensorflow.nn.relu",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.assign",
"tensorflow.expand_dims",
"tensorflow.reduce_min",
"tensorflow.log",
"tensorflow.variable_scope",
"tensorflow.random_uniform",
"tensorflow.random_normal"
],
[
"tensorflow.logging.info",
"tensorflow.FixedLenFeature",
"tensorflow.gfile.Open",
"tensorflow.VarLenFeature"
],
[
"tensorflow.test.main"
],
[
"tensorflow.constant",
"tensorflow.global_variables_initializer",
"numpy.random.random_integers",
"tensorflow.test.main"
],
[
"tensorflow.image.crop_to_bounding_box",
"tensorflow.gfile.Open",
"tensorflow.gfile.Exists",
"tensorflow.reshape",
"tensorflow.gfile.Glob"
],
[
"tensorflow.gfile.MakeDirs",
"tensorflow.test.main",
"tensorflow.test.get_temp_dir",
"tensorflow.gfile.Open"
]
] |
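The ChoppedTextProblem.example_generator in the row above accumulates tokens from whole files and slices them into fixed-length language-modeling examples, padding or dropping the remainder. A minimal standalone sketch of that chopping logic (plain Python; chop_tokens and its argument names are illustrative, not tensor2tensor API):

def chop_tokens(token_chunks, sequence_length, remainder_policy="pad"):
    """Yield lists of exactly sequence_length token ids from a chunk stream."""
    tokens = []
    for chunk in token_chunks:                 # each chunk: a list of token ids
        tokens.extend(chunk)
        pos = 0
        while pos + sequence_length <= len(tokens):
            yield tokens[pos:pos + sequence_length]
            pos += sequence_length
        tokens = tokens[pos:]                  # keep the unfinished remainder
    if tokens and remainder_policy == "pad":   # "drop" silently discards it
        yield tokens + [0] * (sequence_length - len(tokens))

# Two "files" of token ids, window of 4 -> [1, 2, 3, 4] then [5, 6, 7, 0].
print(list(chop_tokens([[1, 2, 3, 4, 5], [6, 7]], 4)))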
amaarquadri/perfect-information-game | [
"6755f9633935be762d039ece9c0b646c64de6ab8"
] | [
"perfect_information_game/tablebases/symmetry_transform.py"
] | [
"import numpy as np\nfrom perfect_information_game.games import Chess\nfrom perfect_information_game.utils import iter_product\nfrom perfect_information_game.tablebases import get_verified_chess_subclass\n\n\nclass SymmetryTransform:\n # noinspection PyChainedComparisons\n PAWNLESS_UNIQUE_SQUARE_INDICES = [(i, j) for i, j in iter_product(Chess.BOARD_SHAPE)\n if i < 4 and j < 4 and i <= j]\n UNIQUE_SQUARE_INDICES = [(i, j) for i, j in iter_product(Chess.BOARD_SHAPE) if j < 4]\n\n def __init__(self, GameClass, state):\n self.GameClass = get_verified_chess_subclass(GameClass)\n self.flip_colors = self.flip_i = self.flip_j = self.flip_diagonal = False\n\n if self.should_swap_colours(state):\n # black is attacking, so switch white and black\n self.flip_colors = True\n i, j = self.GameClass.get_king_pos(state, self.GameClass.BLACK_SLICE)\n i = self.GameClass.ROWS - 1 - i\n else:\n i, j = self.GameClass.get_king_pos(state, self.GameClass.WHITE_SLICE)\n\n pawnless = np.all(state[:, :, self.GameClass.WHITE_PAWN] == 0) and \\\n np.all(state[:, :, self.GameClass.BLACK_PAWN] == 0)\n\n if pawnless and not (i < 4):\n self.flip_i = True\n i = self.GameClass.ROWS - 1 - i\n if not (j < 4): # horizontal flipping can be done, even with pawns\n self.flip_j = True\n j = self.GameClass.COLUMNS - 1 - j\n if pawnless and not (i <= j):\n self.flip_diagonal = True\n\n def should_swap_colours(self, state):\n heuristic = self.GameClass.heuristic(state)\n if heuristic > 0:\n # white is up in material, so don't swap colours\n return False\n if heuristic < 0:\n # black is up in material, so swap colours\n return True\n # compare the number of pawns on each rank, from most advanced to least advanced pawns\n # no need to check second rank pawns, because if everything else is equal they must be equal too\n for rank in range(7, 2, -1):\n if np.sum(state[rank - 1, :, self.GameClass.BLACK_PAWN]) > \\\n np.sum(state[8 - rank, :, self.GameClass.WHITE_PAWN]):\n # black has more pawns than white on this rank, so swap colours\n return True\n return False\n\n @staticmethod\n def identity(GameClass):\n identity = SymmetryTransform(GameClass, GameClass.STARTING_STATE)\n identity.flip_colors = identity.flip_i = identity.flip_j = identity.flip_diagonal = False\n return identity\n\n @staticmethod\n def random(GameClass, descriptor):\n \"\"\"\n Returns a random symmetry transform for the given descriptor.\n \"\"\"\n random = SymmetryTransform.identity(GameClass)\n pawnless = 'p' not in descriptor and 'P' not in descriptor\n\n random.flip_colors = np.random.random() < 0.5\n random.flip_j = np.random.random() < 0.5\n if pawnless:\n random.flip_i = np.random.random() < 0.5\n random.flip_diagonal = np.random.random() < 0.5\n return random\n\n def is_identity(self):\n return not self.flip_colors and not self.flip_i and not self.flip_j and not self.flip_diagonal\n\n def transform_state(self, state):\n if self.flip_colors:\n state = self.flip_state_colors(self.GameClass, state)\n if self.flip_i:\n state = self.flip_state_i(state)\n if self.flip_j:\n state = self.flip_state_j(state)\n if self.flip_diagonal:\n state = self.flip_state_diagonal(state)\n return state\n\n def untransform_state(self, state):\n # since all transform_funcs are their own inverses, we can just run through them in reverse\n if self.flip_diagonal:\n state = self.flip_state_diagonal(state)\n if self.flip_j:\n state = self.flip_state_j(state)\n if self.flip_i:\n state = self.flip_state_i(state)\n if self.flip_colors:\n state = self.flip_state_colors(self.GameClass, state)\n 
return state\n\n def transform_outcome(self, outcome):\n return -outcome if self.flip_colors else outcome\n\n @staticmethod\n def flip_state_colors(GameClass, state):\n special_layers = np.copy(state[..., -2:])\n special_layers[..., -1] = 1 - special_layers[..., -1] # flip whose turn it is\n new_state = np.concatenate((state[..., GameClass.BLACK_SLICE], state[..., GameClass.WHITE_SLICE],\n special_layers),\n axis=-1)\n # need to flip board vertically after flipping colours\n # this ensures that the pawns move in the correct directions\n return SymmetryTransform.flip_state_i(new_state)\n\n @staticmethod\n def flip_state_i(state):\n return np.flip(state, axis=0)\n\n @staticmethod\n def flip_state_j(state):\n return np.flip(state, axis=1)\n\n @staticmethod\n def flip_state_diagonal(state):\n return np.rot90(np.flip(state, axis=1), axes=(0, 1))\n"
] | [
[
"numpy.random.random",
"numpy.concatenate",
"numpy.all",
"numpy.copy",
"numpy.flip",
"numpy.sum"
]
] |
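SymmetryTransform.untransform_state in the row above relies on each board transform being its own inverse, as its comment notes. A quick standalone check of that property for the three spatial flips (NumPy only; the 8x8x14 shape is an illustrative stand-in for the chess state tensor):

import numpy as np

flip_i = lambda s: np.flip(s, axis=0)                            # vertical flip
flip_j = lambda s: np.flip(s, axis=1)                            # horizontal flip
flip_diag = lambda s: np.rot90(np.flip(s, axis=1), axes=(0, 1))  # main-diagonal flip

state = np.random.rand(8, 8, 14)            # dummy board: 8x8 squares, 14 planes
for f in (flip_i, flip_j, flip_diag):
    assert np.allclose(f(f(state)), state)  # applying twice restores the state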
tremblerz/enas | [
"329ee3f8beb5e715bf2dad1182cfb5120b3485f9"
] | [
"src/ptb/ptb_enas_controller.py"
] | [
"\n\n\n\nimport sys\nimport os\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom src.utils import get_train_ops\nfrom src.common_ops import stack_lstm\n\nfrom tensorflow.python.training import moving_averages\n\nclass PTBEnasController(object):\n def __init__(self,\n rhn_depth=5,\n lstm_size=32,\n lstm_num_layers=2,\n lstm_keep_prob=1.0,\n tanh_constant=None,\n temperature=None,\n num_funcs=2,\n lr_init=1e-3,\n lr_dec_start=0,\n lr_dec_every=100,\n lr_dec_rate=0.9,\n l2_reg=0,\n entropy_weight=None,\n clip_mode=None,\n grad_bound=None,\n bl_dec=0.999,\n optim_algo=\"adam\",\n sync_replicas=False,\n num_aggregate=None,\n num_replicas=None,\n name=\"controller\"):\n\n print(\"-\" * 80)\n print(\"Building PTBEnasController\")\n\n self.rhn_depth = rhn_depth\n self.lstm_size = lstm_size\n self.lstm_num_layers = lstm_num_layers \n self.lstm_keep_prob = lstm_keep_prob\n self.tanh_constant = tanh_constant\n self.temperature = temperature\n self.num_funcs = num_funcs\n self.lr_init = lr_init\n self.lr_dec_start = lr_dec_start\n self.lr_dec_every = lr_dec_every\n self.lr_dec_rate = lr_dec_rate\n self.l2_reg = l2_reg\n self.entropy_weight = entropy_weight\n self.clip_mode = clip_mode\n self.grad_bound = grad_bound\n self.bl_dec = bl_dec\n self.optim_algo = optim_algo\n self.sync_replicas = sync_replicas\n self.num_aggregate = num_aggregate\n self.num_replicas = num_replicas\n self.name = name\n\n self._create_params()\n self._build_sampler()\n\n def _create_params(self):\n initializer = tf.random_uniform_initializer(minval=-0.1, maxval=0.1)\n with tf.variable_scope(self.name, initializer=initializer):\n with tf.variable_scope(\"lstm\"):\n self.w_lstm = []\n for layer_id in range(self.lstm_num_layers):\n with tf.variable_scope(\"layer_{}\".format(layer_id)):\n w = tf.get_variable(\"w\", [2 * self.lstm_size, 4 * self.lstm_size])\n self.w_lstm.append(w)\n\n num_funcs = self.num_funcs\n with tf.variable_scope(\"embedding\"):\n self.g_emb = tf.get_variable(\"g_emb\", [1, self.lstm_size])\n self.w_emb = tf.get_variable(\"w\", [num_funcs, self.lstm_size])\n\n with tf.variable_scope(\"softmax\"):\n self.w_soft = tf.get_variable(\"w\", [self.lstm_size, num_funcs])\n\n with tf.variable_scope(\"attention\"):\n self.attn_w_1 = tf.get_variable(\"w_1\", [self.lstm_size, self.lstm_size])\n self.attn_w_2 = tf.get_variable(\"w_2\", [self.lstm_size, self.lstm_size])\n self.attn_v = tf.get_variable(\"v\", [self.lstm_size, 1])\n\n def _build_sampler(self):\n \"\"\"Build the sampler ops and the log_prob ops.\"\"\"\n\n arc_seq = []\n sample_log_probs = []\n sample_entropy = []\n all_h = []\n all_h_w = []\n\n # sampler ops\n inputs = self.g_emb\n prev_c, prev_h = [], []\n for _ in range(self.lstm_num_layers):\n prev_c.append(tf.zeros([1, self.lstm_size], dtype=tf.float32))\n prev_h.append(tf.zeros([1, self.lstm_size], dtype=tf.float32))\n\n # used = tf.zeros([self.rhn_depth, 2], dtype=tf.int32)\n for layer_id in range(self.rhn_depth):\n next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)\n prev_c, prev_h = next_c, next_h\n all_h.append(next_h[-1])\n all_h_w.append(tf.matmul(next_h[-1], self.attn_w_1))\n\n if layer_id > 0:\n query = tf.matmul(next_h[-1], self.attn_w_2)\n query = query + tf.concat(all_h_w[:-1], axis=0)\n query = tf.tanh(query)\n logits = tf.matmul(query, self.attn_v)\n logits = tf.reshape(logits, [1, layer_id])\n\n if self.temperature is not None:\n logits /= self.temperature\n if self.tanh_constant is not None:\n logits = self.tanh_constant * tf.tanh(logits)\n diff = 
tf.to_float(layer_id - tf.range(0, layer_id)) ** 2\n logits -= tf.reshape(diff, [1, layer_id]) / 6.0\n\n skip_index = tf.multinomial(logits, 1)\n skip_index = tf.to_int32(skip_index)\n skip_index = tf.reshape(skip_index, [1])\n arc_seq.append(skip_index)\n\n log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=skip_index)\n sample_log_probs.append(log_prob)\n\n entropy = log_prob * tf.exp(-log_prob)\n sample_entropy.append(tf.stop_gradient(entropy))\n\n inputs = tf.nn.embedding_lookup(\n tf.concat(all_h[:-1], axis=0), skip_index)\n inputs /= (0.1 + tf.to_float(layer_id - skip_index))\n else:\n inputs = self.g_emb\n\n next_c, next_h = stack_lstm(inputs, prev_c, prev_h, self.w_lstm)\n prev_c, prev_h = next_c, next_h\n logits = tf.matmul(next_h[-1], self.w_soft)\n if self.temperature is not None:\n logits /= self.temperature\n if self.tanh_constant is not None:\n logits = self.tanh_constant * tf.tanh(logits)\n func = tf.multinomial(logits, 1)\n func = tf.to_int32(func)\n func = tf.reshape(func, [1])\n arc_seq.append(func)\n log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=func)\n sample_log_probs.append(log_prob)\n entropy = log_prob * tf.exp(-log_prob)\n sample_entropy.append(tf.stop_gradient(entropy))\n inputs = tf.nn.embedding_lookup(self.w_emb, func)\n\n arc_seq = tf.concat(arc_seq, axis=0)\n self.sample_arc = arc_seq\n\n self.sample_log_probs = tf.concat(sample_log_probs, axis=0)\n self.ppl = tf.exp(tf.reduce_mean(self.sample_log_probs))\n\n sample_entropy = tf.concat(sample_entropy, axis=0)\n self.sample_entropy = tf.reduce_sum(sample_entropy)\n\n self.all_h = all_h\n\n def build_trainer(self, child_model):\n # actor\n self.valid_loss = tf.to_float(child_model.rl_loss)\n self.valid_loss = tf.stop_gradient(self.valid_loss)\n self.valid_ppl = tf.exp(self.valid_loss)\n self.reward = 80.0 / self.valid_ppl\n\n if self.entropy_weight is not None:\n self.reward += self.entropy_weight * self.sample_entropy\n\n # or baseline\n self.sample_log_probs = tf.reduce_sum(self.sample_log_probs)\n self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)\n baseline_update = tf.assign_sub(\n self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward))\n\n with tf.control_dependencies([baseline_update]):\n self.reward = tf.identity(self.reward)\n self.loss = self.sample_log_probs * (self.reward - self.baseline)\n\n self.train_step = tf.Variable(\n 0, dtype=tf.int32, trainable=False, name=\"train_step\")\n tf_variables = [var\n for var in tf.trainable_variables() if var.name.startswith(self.name)]\n\n self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops(\n self.loss,\n tf_variables,\n self.train_step,\n clip_mode=self.clip_mode,\n grad_bound=self.grad_bound,\n l2_reg=self.l2_reg,\n lr_init=self.lr_init,\n lr_dec_start=self.lr_dec_start,\n lr_dec_every=self.lr_dec_every,\n lr_dec_rate=self.lr_dec_rate,\n optim_algo=self.optim_algo,\n sync_replicas=self.sync_replicas,\n num_aggregate=self.num_aggregate,\n num_replicas=self.num_replicas)\n\n"
] | [
[
"tensorflow.get_variable",
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.tanh",
"tensorflow.to_int32",
"tensorflow.Variable",
"tensorflow.random_uniform_initializer",
"tensorflow.stop_gradient",
"tensorflow.to_float",
"tensorflow.trainable_variables",
"tensorflow.matmul",
"tensorflow.identity",
"tensorflow.exp",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.nn.embedding_lookup",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.assign_sub",
"tensorflow.variable_scope",
"tensorflow.multinomial"
]
] |
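Before sampling each architecture decision, the ENAS controller above divides the logits by a temperature, squashes them with tanh_constant * tf.tanh, and draws from the resulting softmax via tf.multinomial. A NumPy sketch of that logit shaping, with illustrative parameter values (sample_decision is not a name from the repo):

import numpy as np

def sample_decision(logits, temperature=5.0, tanh_constant=2.5,
                    rng=np.random.default_rng()):
    logits = np.asarray(logits, dtype=np.float64) / temperature
    logits = tanh_constant * np.tanh(logits)  # bounds logits to +/- tanh_constant
    probs = np.exp(logits - logits.max())     # numerically stable softmax
    probs /= probs.sum()
    return rng.choice(len(probs), p=probs)

print(sample_decision([1.0, 0.5, -0.2]))      # index of the sampled choice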
HuguesMoreau/Sensors_similariy | [
"4b8592049c83b03a11f5c57fab247290ee29b8f5",
"4b8592049c83b03a11f5c57fab247290ee29b8f5"
] | [
"models/SHL_2018/transforms.py",
"models/store_model_SHL.py"
] | [
"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis file contains diverse preprocessing functions (mostly norms ans spectrograms),\r\nand basic tests and visualizations.\r\nIf you are to work with any IPython console (ex: with Jupyter or spyder), is is advised\r\nto launch a '%matplotlib qt' ,to get clean widow\r\n\"\"\"\r\n\r\n\r\nif __name__ == '__main__': # this is used to launch the file from anywhere\r\n import sys\r\n sys.path.append(\"../..\")\r\n\r\nimport numpy as np\r\nimport torch\r\nimport scipy.signal, scipy.interpolate, scipy.ndimage\r\n\r\n\r\nfrom param import classes_names, fs, duration_window, duration_overlap, spectro_batch_size\r\nfrom models.SHL_2018 import Datasets\r\n\r\nif __name__ == \"__main__\":\r\n import matplotlib.pyplot as plt\r\n n_classes = len(classes_names)\r\n # We will need this for the tests\r\n DS = Datasets.SignalsDataSet(mode='train', transform=None)\r\n\r\n\r\n#%% transform functions\r\n\r\n\"\"\"In all following functions, the input parameter (data) is, by default,\r\n a dict of numpy arrays, containing signal names (eg. \"Gyr_z\") as keys, and 1-dimensional\r\n arrays as values\r\n\r\nMost of this part contains basic visualizations to make sure the preprocessing is correct\"\"\"\r\n\r\n\r\n\r\n\r\nclass TemporalTransform():\r\n \"\"\" create the base transform to use to each element of the data\r\n\r\n Parameters\r\n ----------\r\n signal_name: a string (ex: 'Gyr_y', 'Ori_x')\r\n If the string ends by \"_norm\" (ex: \"Mag_norm\"), the output will\r\n be the norm of the three (or four) axis of the signal.\r\n\r\n Returns\r\n -------\r\n a function with input: a dict of (_, 6000) arrays (key example: 'Gyr_y')\r\n and output: an array with the same shape.\r\n \"\"\"\r\n def __init__(self, signal_name):\r\n super(TemporalTransform, self).__init__()\r\n self.signal_name = signal_name\r\n\r\n def __call__(self, data):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n data: a dict of (B, 6000) arrays (key example: 'Gyr_y')\r\n\r\n Returns\r\n -------\r\n an array with shape (B, 6000), where B depends on the input shape.\r\n \"\"\"\r\n if self.signal_name[-2:] in ['_x', '_y', '_z', '_w'] or self.signal_name == \"Pressure\":\r\n processed_signal = data[self.signal_name]\r\n elif self.signal_name[-5:] == '_norm':\r\n suffix_location = self.signal_name.index(\"_\") # 4 if signal_name == \"LAcc\", 3 otherwise\r\n sensor = self.signal_name[:suffix_location] # ex: 'Acc', 'LAcc'\r\n if sensor == \"Ori\":\r\n # in that case, data[sensor+\"_x\"]**2 + data[sensor+\"_y\"]**2 + data[sensor+\"_z\"]**2 should be 1.0\r\n processed_signal = np.sqrt(data[sensor+\"_x\"]**2 + data[sensor+\"_y\"]**2 + data[sensor+\"_z\"]**2 \\\r\n + data[sensor+\"_w\"]**2)\r\n else :\r\n processed_signal = np.sqrt(data[sensor+\"_x\"]**2 + data[sensor+\"_y\"]**2 + data[sensor+\"_z\"]**2)\r\n else :\r\n raise ValueError(\"unknown signal name: '{}'. 
Signal names should end with either '_x', '_y', '_z', '_w', or '_norm'\".format(signal_name))\r\n return processed_signal\r\n\r\n\r\n\r\n def __str__(self):\r\n \"\"\"purely for visual purposes, so that we can print() the function\"\"\"\r\n str_to_return = \"Temporal_transform\"\r\n str_to_return += f\"\\n\\t Signal: {self.signal_name}\"\r\n return str_to_return\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # plot one figure per sensor\r\n # on each figure, one subplot per class,\r\n # to find one instance per each class, we start looking at index = index0\r\n index0 = 0\r\n\r\n for tested_signal_name in [\"Acc_norm\", \"Ori_norm\", \"Mag_norm\", \"LAcc_x\"]:\r\n # plot 1 segment from each class.\r\n plt.figure()\r\n\r\n if tested_signal_name != 'Pressure':\r\n suffix_location = tested_signal_name.index(\"_\")\r\n tested_sensor = tested_signal_name[:suffix_location] # ex: 'Acc', 'LAcc'\r\n else:\r\n tested_sensor = 'Pressure'\r\n\r\n sensor_axis = [tested_sensor + axis for axis in [\"_x\", \"_y\", \"_z\"]] if tested_sensor != 'Pressure' else ['Pressure']\r\n if tested_sensor == \"Ori\" : sensor_axis.append(tested_sensor+\"_w\")\r\n temporal_transform = TemporalTransform(tested_signal_name)\r\n remaining_classes = classes_names.copy()\r\n index = index0\r\n\r\n while len(remaining_classes)>0:\r\n data_tensor, class_tensor = DS[index] # data is a dict of 2D tensors (1,nb)\r\n data_cpu = {signal:data_tensor[signal].to(torch.device('cpu')).detach().numpy() for signal in data_tensor.keys()}\r\n class_index = int(class_tensor)\r\n class_name = classes_names[class_index-1]\r\n\r\n if class_name in remaining_classes:\r\n\r\n remaining_classes.remove(class_name)\r\n plt.subplot(2, 4, n_classes - len(remaining_classes))\r\n for k,signal in enumerate(sensor_axis):\r\n\r\n if k==0: # compute the temporal axis once\r\n nb = data_cpu[signal].shape[1]\r\n x_t = np.linspace(0, nb/fs, nb)\r\n\r\n plt.plot(x_t, data_cpu[signal][0,:])\r\n selected_signal = temporal_transform(data_cpu)\r\n error_message_dtype = \"One of the signals does not have the correct type: {}, {} \\n dtype should be float32, is actually {}\".format(tested_signal_name, str(temporal_transform), selected_signal.dtype)\r\n assert (selected_signal.dtype == 'float32'), error_message_dtype\r\n\r\n plt.plot(x_t, selected_signal[0,:], '--')\r\n plt.xlabel(\"t (s)\")\r\n legend = sensor_axis + [tested_signal_name+' (selected)']\r\n plt.legend(legend)\r\n plt.title(\"{} ({}, index={})\".format(tested_sensor, classes_names[class_index-1], index))\r\n index +=1\r\n plt.show()\r\n\r\n\r\n\r\n\r\n#%%\r\n\r\n# ---------------- Spectrogram transforms ---------------------\r\n\r\n\r\n# Interpolation functions\r\ndef interpol_log(f, t, spectrogram, out_size):\r\n \"\"\"interpolates the spectrogram in input using a linear axis for the timestamps and a LOG axis for the frequencies\r\n\r\n Parameters\r\n ----------\r\n f : numpy array, shape: (F_in,), frequencies of the spectrogram\r\n t : numpy array, shape: (T_in,), timestamps of the spectrogram\r\n spectrogram : (B, F_in, T_in), B is batch size; 3D numpy array\r\n\r\n out_size : couple of ints (F_out, T_out)\r\n\r\n Returns\r\n -------\r\n f_interpolated : numpy array, shape: (F_out,), frequencies of the spectrogram AFTER interpolation\r\n t_interpolated : numpy array, shape: (T_out,), timestamps of the spectrogram AFTER interpolation\r\n a spectrogram, where the f axis (second dimension) has been re-interpolated\r\n using a log axis\r\n\r\n \"\"\"\r\n B = spectrogram.shape[0]\r\n out_f, out_t 
= out_size\r\n\r\n log_f = np.log(f+f[1]) # log between 0.2 Hz and 50.2 Hz\r\n\r\n log_f_normalized = (log_f-log_f[0])/(log_f[-1]-log_f[0]) # between 0.0 and 1.0\r\n t_normalized = (t-t[0])/(t[-1]-t[0])\r\n\r\n rescaled_f = out_f*log_f_normalized # 0 and 48\r\n # rescaled_f = (out_f-1)*log_f_normalized ??\r\n rescaled_t = out_t*t_normalized\r\n\r\n spectrogram_interpolated = np.zeros( (B, out_f, out_t), dtype='float32')\r\n index_f, index_t = np.arange(out_f), np.arange(out_t) # between 0 and 47\r\n\r\n for i in range(B):\r\n spectrogram_fn = scipy.interpolate.interp2d(rescaled_t, rescaled_f, spectrogram[i,:,:], copy=False)\r\n # interp2d returns a 2D function\r\n spectrogram_interpolated[i,:,:] = spectrogram_fn(index_t, index_f) # care to the order\r\n\r\n f_fn = scipy.interpolate.interp1d(rescaled_f, f, copy=False)\r\n f_interpolated = f_fn(index_f)\r\n\r\n t_fn = scipy.interpolate.interp1d(rescaled_t, t, copy=False)\r\n t_interpolated = t_fn(index_t)\r\n\r\n\r\n return f_interpolated, t_interpolated, spectrogram_interpolated\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#%%\r\n# ---------------- The spectrogram class --------------\r\nclass SpectrogramTransform():\r\n \"\"\" create the transform to work with spectrograms. This class behaves\r\n essentially the same as TempralTransform, except the created transform\r\n returns a dict of 3d array instead of 2d\r\n\r\n\r\n Parameters\r\n ----------\r\n signal_name: a string signal (ex: 'Gyr_y', 'Ori_x')\r\n If the string ends by \"_norm\" (ex: \"Mag_norm\"), the output will\r\n be the norm of the three (or four) axis of the signal.\r\n\r\n Returns\r\n -------\r\n a function with input: data : a dict of (_, 6000) arrays (key example: 'Gyr_y')\r\n and output: a dictionnary of 2d arrays.\r\n\r\n \"\"\"\r\n def __init__(self, signal_name):\r\n super(SpectrogramTransform, self).__init__()\r\n\r\n self.temporal_transform = TemporalTransform(signal_name)\r\n self.fs = fs\r\n self.duration_window = duration_window\r\n self.duration_overlap = duration_overlap\r\n self.spectro_batch_size = spectro_batch_size # these values were loaded from the param file\r\n self.signal_name = signal_name\r\n self.out_size = (48, 48)\r\n\r\n def __call__(self, data):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n data : a dict of (B, 6000) arrays (key example: 'Gyr_y')\r\n\r\n Returns\r\n -------\r\n An array with shape (B, F, T), where B (dataset size) depends on the\r\n input shape, and F and T are equal to 48 here.\r\n \"\"\"\r\n temporal_signal = self.temporal_transform(data)\r\n del data # free some memory\r\n fs = self.fs\r\n nperseg = int(self.duration_window * fs)\r\n noverlap = int(self.duration_overlap * fs)\r\n\r\n spectro_batch_size = self.spectro_batch_size\r\n # turning 13,000 temporal signals into (550, 500) array\r\n # spectrograms at once is too much: a single (13000, 550, 500) array,\r\n # with simple precision requires 7.15 Go !\r\n # This is why we work with batches of 1000 instead. 
For each batch,\r\n # we compute the complete sectrogram (1000 x 550 x 500), then\r\n # interpolate it to smaller sizes, before working wit the following batch.\r\n\r\n current_spectro_batch_size = temporal_signal.shape[0]\r\n\r\n if current_spectro_batch_size < spectro_batch_size :\r\n f, t, spectrogram = scipy.signal.spectrogram(temporal_signal, fs=fs, nperseg=nperseg, noverlap=noverlap)\r\n f_interpolated, t_interpolated, interpolated_spectrogram = interpol_log(f, t, spectrogram, self.out_size)\r\n # f, t, and possibly out_size will be ignored when the function does not need them\r\n else :\r\n n_batches = (current_spectro_batch_size-1)//spectro_batch_size +1\r\n nb_interp_f, nb_interp_t = self.out_size\r\n interpolated_spectrogram = np.zeros((current_spectro_batch_size, nb_interp_f, nb_interp_t), dtype='float32')\r\n for i in range(n_batches):\r\n i_min = i * spectro_batch_size\r\n i_max = (i+1) * spectro_batch_size # does not matter if it goes beyond current_spectro_batch_size\r\n this_temporal_signal = temporal_signal[i_min:i_max,:]\r\n f, t, spectrogram = scipy.signal.spectrogram(this_temporal_signal, fs=fs, nperseg=nperseg, noverlap=noverlap)\r\n f_interpolated, t_interpolated, interpolated_spectrogram[i_min:i_max,:,:] = interpol_log(f, t, spectrogram, self.out_size)\r\n del temporal_signal\r\n np.log(interpolated_spectrogram + 1e-10, dtype='float32', out=interpolated_spectrogram) # in-place operation\r\n self.f_interpolated = f_interpolated\r\n self.t_interpolated = t_interpolated\r\n return interpolated_spectrogram\r\n\r\n\r\n\r\n def __str__(self):\r\n \"\"\"purely for visual purposes, so that we can print() the function\"\"\"\r\n str_to_return = \"Spectrogram transform\"\r\n str_to_return += f\"\\n\\t Signals: {self.signal_name}\"\r\n str_to_return += f\"\\n\\t Output size: {self.out_size}\"\r\n str_to_return += f\"\\n\\t Interpolation: log-interpolation\"\r\n str_to_return += \"\\n\\t Log-power\"\r\n return str_to_return\r\n\r\n# end of class SpectrogramTransform():\r\n\r\n\r\n\r\n#%%\r\nif __name__ == \"__main__\":\r\n fontdict = {'fontsize':10}\r\n n_ticks = 10\r\n\r\n # we plot the raw spectrogram and two interpolated spectrograms for the following classes\r\n selected_classes = [\"Run\", \"Walk\"]\r\n remaining_classes = selected_classes.copy()\r\n nsel = len(selected_classes)\r\n index = 3204 # where to tart the search\r\n plt.figure(figsize=(12,8))\r\n signal_name = \"Acc_norm\"\r\n temporal_transform = TemporalTransform(signal_name) # we will plot the result\r\n spectrogram_transform = SpectrogramTransform(signal_name)\r\n\r\n while len(remaining_classes)>0:\r\n data_tensor, class_tensor = DS[index]\r\n data_cpu = {signal:data_tensor[signal].cpu().detach().numpy() for signal in data_tensor.keys()}\r\n class_index = int(class_tensor)\r\n class_name = classes_names[class_index-1]\r\n\r\n if class_name in remaining_classes:\r\n remaining_classes.remove(class_name)\r\n i_class = nsel - len(remaining_classes) # between 1 and n\r\n\r\n temporal_signal = temporal_transform(data_cpu)\r\n nb = temporal_signal.shape[1]\r\n x_t = np.linspace(0, nb/fs, nb)\r\n plt.subplot(2,nsel,i_class)\r\n plt.plot(x_t, temporal_signal[0,:])\r\n plt.title(f'{class_name} (index={index})', fontdict)\r\n plt.xlabel(\"t (sec)\")\r\n plt.ylabel(signal_name)\r\n\r\n data_tensor, _ = DS[index] # we need to recreate data because the variable is deleted\r\n data_cpu = {signal:data_tensor[signal].to(torch.device('cpu')).detach().numpy() for signal in data_tensor.keys()}\r\n spectrogram_interpolated = 
spectrogram_transform(data_cpu)\r\n f_interpolated = spectrogram_transform.f_interpolated\r\n t_interpolated = spectrogram_transform.t_interpolated\r\n\r\n plt.subplot(2,nsel,i_class + nsel)\r\n t_interpolated = spectrogram_transform.t_interpolated\r\n f_interpolated = spectrogram_transform.f_interpolated\r\n matrix_shape = spectrogram_interpolated.shape\r\n time_list = [f'{t_interpolated[i]:.0f}' for i in np.round(np.linspace(0, matrix_shape[2]-1,n_ticks)).astype(int)]\r\n freq_list = [f'{f_interpolated[i]:.1f}' for i in np.round(np.linspace(0, matrix_shape[1]-1,n_ticks)).astype(int)]\r\n\r\n plt.xticks(np.linspace(0, matrix_shape[2]-1, n_ticks), time_list)\r\n plt.yticks(np.linspace(0, matrix_shape[1]-1, n_ticks), freq_list)\r\n plt.imshow(spectrogram_interpolated[0,:,:])\r\n\r\n plt.ylabel(\"f (Hz)\")\r\n plt.xlabel(\"t (s)\")\r\n plt.colorbar()\r\n\r\n index += 1\r\n\r\n plt.show()\r\n\r\n\r\n#%%\r\n\r\n\r\n\r\n\r\n",
"\"\"\"\nAuthor Hugues\n\nThis script initializes neural networks, trains them, and records the features\nin .pickle files\nThe naming syntax is defined in the 'create_filename' function\n\nMany functions are only defined to provide a signature that is homogeneous to\nother networks'\n\"\"\"\n\nimport random\nimport os\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\nfrom pathlib import Path\n\nif __name__ == '__main__': # this is used to launch the file from anywhere\n import sys\n sys.path.append(\"..\")\n\nfrom models.SHL_2018.Datasets import SignalsDataSet\nfrom models.SHL_2018.transforms import SpectrogramTransform\n#from models.SHL_2018.fusion import ConcatCollate\n#from architectures import basic_CNN, late_fusion\nfrom models.SHL_2018.CNN import CNN\nfrom param import device, classes_names, data_path\n\nn_classes = len(classes_names)\n\n\n\nclass Diagnostic_CNN(CNN):\n \"\"\" Used to assign new methods to the networks without raising warnings\n when we reload a CNN classs that is different from what already exists \"\"\"\n def __init__(self, *args, **kwargs):\n CNN.__init__(self, *args, **kwargs)\n self.classification_layer = self.FC1 # we need the classification layers\n #from all models to have the same name\n\n\n def train_process(self, train_dataloader, val_dataloader):\n \"\"\"\n Override the base train_process method to remove the unwanted argument\n \"\"\"\n return CNN.train_process(self, train_dataloader, val_dataloader, maxepochs=50)\n\n def forward_from_FC(self, X):\n \"\"\"\n This function is to replace the forward() method.\n\n Parameters\n ----------\n X: torch tensor wich size is (Batch, Features)\n\n Returns\n -------\n scores: torch tensor with shape (batch, n_classes) giving the scores for each class\n \"\"\"\n X = self.FC1(X)\n return X\n\n\n def record(self, dataloader):\n \"\"\"\n Records all the hidden features and feature maps corresponding the the\n input samples in the dataloader, put them into a single dictionnary, and return it.\n\n Parameters\n ----------\n dataloader: a Pytorch Dataloader instance\n\n Returns\n -------\n a triple of :\n features: afray of floats with shape (n_samples, n_features)\n predictions: array of ints with shape (n_samples,)\n ground_truth: array of ints with shape (n_samples,)\n \"\"\"\n features = np.zeros((len(dataloader.dataset), self.FC0.out_features))\n predictions = np.zeros((len(dataloader.dataset),))\n ground_truth = np.zeros((len(dataloader.dataset),))\n\n self.train(False)\n i_start = 0 # where to start inserting the data in the big arrays\n # i_end wil be updated every batch\n\n with torch.no_grad():\n for (X, Y) in dataloader:\n X_copy = X.clone()\n i_end = i_start + X.shape[0] # = i_start + batch_size (batch_size can change for the last batch)\n\n X = self.conv0(X)\n X = self.mp(self.relu(X))\n X = self.conv1(X)\n X = self.mp(self.relu(X))\n X = self.conv2(X)\n X = self.mp(self.relu(X))\n X = X.view(X.shape[0],-1)\n X = self.relu(self.FC0(X))\n features[i_start:i_end,:] = X.clone().detach().cpu().numpy()\n X = self.FC1(X)\n\n predictions_this_batch = torch.argmax(X, dim=1)\n predictions[i_start:i_end] = predictions_this_batch.cpu().detach().numpy()\n ground_truth[i_start:i_end] = Y.cpu().detach().numpy()\n result_detailed = X\n assert(self(X_copy) == result_detailed).all() # we check we did not forget any step\n\n self.optim.zero_grad()\n i_start = i_end\n\n return (features, predictions, ground_truth)\n\n\n\n def validate(self, dataloader):\n \"\"\"\n Parameter: a Pytorch DataLoader\n Returns:\n 
score_name (string)\n score_value (float between 0 and 1) \"\"\"\n _, _, _, f1_score = CNN.train_process(self, [], dataloader, maxepochs=1)\n return (\"f1_score\", f1_score)\n\n\n\ndef create_filename(dataset, sensor_name, trial_index):\n return f\"{dataset}-{sensor_name}-{trial_index}.pt\"\n\n\n\n #%%\nif __name__ == \"__main__\":\n if 'models' not in os.listdir(data_path):\n os.mkdir(data_path/Path('models'))\n\n\n possible_sensors = [\"Gyr_y\", \"Acc_norm\", \"Mag_norm\"]\n n_repetitions = 3 *2 # 3 couples = 6 networks\n\n\n for sensor in possible_sensors:\n transform = SpectrogramTransform(sensor)\n train_dataset = SignalsDataSet(mode='train', transform=transform)\n val_dataset = SignalsDataSet(mode='val', transform=transform)\n\n train_dataloader_with_shuffle = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)\n # it is usually better to train with shuffle (especially considering the data distribution),\n # but we do not want any shuffle when recording the data\n train_dataloader_no_shuffle = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=False)\n val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=64, shuffle=False)\n\n torch.save((train_dataloader_no_shuffle, val_dataloader),\n Path(data_path) / Path(\"models\") / Path(f'dataloaders-SHL_2018-{sensor}.pt'))\n\n for i_repetition in range(n_repetitions):\n model = Diagnostic_CNN()\n model.to(device)\n model.train_process(train_dataloader_with_shuffle, val_dataloader)\n filename = create_filename(\"SHL_2018\", sensor, trial_index=i_repetition)\n torch.save(model, Path(data_path) / Path(\"models\") / Path(\"model-\" + filename))\n\n features_pred_GT_train = model.record(train_dataloader_no_shuffle)\n features_pred_GT_val = model.record(val_dataloader)\n torch.save((features_pred_GT_train, features_pred_GT_val),\n Path(data_path) / Path(\"models\") /Path(\"features-\" + filename))\n # note: the file type is named \"feature\" even though the file also\n # contains predictions and ground truth.\n\n del train_dataset, val_dataset, train_dataloader_no_shuffle, train_dataloader_with_shuffle, val_dataloader\n\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.log",
"matplotlib.pyplot.imshow",
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"torch.device",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.figure"
],
[
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.argmax"
]
] |
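interpol_log in the row above resamples spectrogram rows onto a logarithmic frequency axis before interpolating down to the 48x48 output. A sketch of just the axis remapping (illustrative bin counts; the real code then feeds rescaled_f to a 2-D interpolator over the spectrogram):

import numpy as np

f = np.linspace(0.0, 50.0, 251)      # linear spectrogram bins, e.g. 0..50 Hz
log_f = np.log(f + f[1])             # shift by one bin width to avoid log(0)
log_f_norm = (log_f - log_f[0]) / (log_f[-1] - log_f[0])   # map onto [0, 1]
out_f = 48
rescaled_f = out_f * log_f_norm      # target row positions on the 48-bin log axis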
aditya2592/PoseCNN | [
"da9eaae850eed7521a2a48a4d27474d655caab42",
"da9eaae850eed7521a2a48a4d27474d655caab42"
] | [
"lib/rpn_layer/proposal_target_layer.py",
"lib/hard_label_layer/hard_label_op_grad.py"
] | [
"# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick, Sean Bell and Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport numpy.random as npr\nfrom fcn.config import cfg\nfrom utils.bbox_transform import bbox_transform\nfrom utils.cython_bbox import bbox_overlaps\n\ndef proposal_target_layer(rpn_rois, rpn_scores, gt_boxes, poses, _num_classes):\n \"\"\"\n Assign object detection proposals to ground-truth targets. Produces proposal\n classification labels and bounding-box regression targets.\n \"\"\"\n\n # Proposal ROIs (0, x1, y1, x2, y2) coming from RPN\n # (i.e., rpn.proposal_layer.ProposalLayer), or any other source\n all_rois = rpn_rois\n all_scores = rpn_scores\n\n # Include ground-truth boxes in the set of candidate rois\n if cfg.TRAIN.USE_GT:\n zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)\n all_rois = np.vstack(\n (all_rois, np.hstack((zeros, gt_boxes[:, :-1])))\n )\n # not sure if it a wise appending, but anyway i am not using it\n all_scores = np.vstack((all_scores, zeros))\n\n num_images = 1\n rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images\n fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)\n\n # Sample rois with classification labels and bounding box regression\n # targets\n labels, rois, roi_scores, bbox_targets, bbox_inside_weights, poses_target, poses_weight = _sample_rois(\n all_rois, all_scores, gt_boxes, poses, fg_rois_per_image,\n rois_per_image, _num_classes)\n\n rois = rois.reshape(-1, 5)\n roi_scores = roi_scores.reshape(-1)\n labels = labels.reshape(-1, 1)\n bbox_targets = bbox_targets.reshape(-1, _num_classes * 4)\n bbox_inside_weights = bbox_inside_weights.reshape(-1, _num_classes * 4)\n bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)\n\n return rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights, poses_target, poses_weight\n\n\ndef _get_bbox_regression_labels(bbox_target_data, num_classes):\n \"\"\"Bounding-box regression targets (bbox_target_data) are stored in a\n compact form N x (class, tx, ty, tw, th)\n\n This function expands those targets into the 4-of-4*K representation used\n by the network (i.e. 
only one class has non-zero targets).\n\n Returns:\n bbox_target (ndarray): N x 4K blob of regression targets\n bbox_inside_weights (ndarray): N x 4K blob of loss weights\n \"\"\"\n\n clss = bbox_target_data[:, 0]\n bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)\n bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)\n inds = np.where(clss > 0)[0]\n for ind in inds:\n cls = clss[ind]\n start = int(4 * cls)\n end = start + 4\n bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]\n bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS\n return bbox_targets, bbox_inside_weights\n\n\ndef _compute_targets(ex_rois, gt_rois, labels):\n \"\"\"Compute bounding-box regression targets for an image.\"\"\"\n\n assert ex_rois.shape[0] == gt_rois.shape[0]\n assert ex_rois.shape[1] == 4\n assert gt_rois.shape[1] == 4\n\n targets = bbox_transform(ex_rois, gt_rois)\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS))\n / np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))\n return np.hstack(\n (labels[:, np.newaxis], targets)).astype(np.float32, copy=False)\n\n\ndef _compute_pose_targets(quaternions, labels, num_classes):\n \"\"\"Compute pose regression targets for an image.\"\"\"\n\n num = quaternions.shape[0]\n poses_target = np.zeros((num, 4 * num_classes), dtype=np.float32)\n poses_weight = np.zeros((num, 4 * num_classes), dtype=np.float32)\n\n for i in xrange(num):\n cls = labels[i]\n if cls > 0:\n start = int(4 * cls)\n end = start + 4\n poses_target[i, start:end] = quaternions[i, :]\n poses_weight[i, start:end] = 1.0\n\n return poses_target, poses_weight\n\n\ndef _sample_rois(all_rois, all_scores, gt_boxes, poses, fg_rois_per_image, rois_per_image, num_classes):\n \"\"\"Generate a random sample of RoIs comprising foreground and background\n examples.\n \"\"\"\n # overlaps: (rois x gt_boxes)\n overlaps = bbox_overlaps(\n np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),\n np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))\n gt_assignment = overlaps.argmax(axis=1)\n max_overlaps = overlaps.max(axis=1)\n labels = gt_boxes[gt_assignment, 4]\n quaternions = poses[gt_assignment, 6:10]\n\n # Select foreground RoIs as those with >= FG_THRESH overlap\n fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]\n # Guard against the case when an image has fewer than fg_rois_per_image\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &\n (max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\n\n # Small modification to the original version where we ensure a fixed number of regions are sampled\n if fg_inds.size > 0 and bg_inds.size > 0:\n fg_rois_per_image = min(fg_rois_per_image, fg_inds.size)\n fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_image), replace=False)\n bg_rois_per_image = rois_per_image - fg_rois_per_image\n to_replace = bg_inds.size < bg_rois_per_image\n bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_image), replace=to_replace)\n elif fg_inds.size > 0:\n to_replace = fg_inds.size < rois_per_image\n fg_inds = npr.choice(fg_inds, size=int(rois_per_image), replace=to_replace)\n fg_rois_per_image = rois_per_image\n elif bg_inds.size > 0:\n to_replace = bg_inds.size < rois_per_image\n bg_inds = npr.choice(bg_inds, size=int(rois_per_image), replace=to_replace)\n fg_rois_per_image = 0\n else:\n import pdb\n pdb.set_trace()\n\n # The indices 
that we're selecting (both fg and bg)\n keep_inds = np.append(fg_inds, bg_inds)\n # Select sampled values from various arrays:\n labels = labels[keep_inds]\n # Clamp labels for the background RoIs to 0\n labels[int(fg_rois_per_image):] = 0\n rois = all_rois[keep_inds]\n roi_scores = all_scores[keep_inds]\n\n # pose regression targets and weights\n poses_target, poses_weight = _compute_pose_targets(quaternions[keep_inds], labels, num_classes)\n\n bbox_target_data = _compute_targets(\n rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)\n\n bbox_targets, bbox_inside_weights = \\\n _get_bbox_regression_labels(bbox_target_data, num_classes)\n\n return labels, rois, roi_scores, bbox_targets, bbox_inside_weights, poses_target, poses_weight\n",
"import tensorflow as tf\nfrom tensorflow.python.framework import ops\nimport hard_label_op\n\[email protected](\"Hardlabel\")\ndef _hard_label_shape(op):\n\n output_shape = op.inputs[0].get_shape()\n return [output_shape]\n\[email protected](\"Hardlabel\")\ndef _hard_label_grad(op, grad):\n\n bottom_prob = op.inputs[0]\n bottom_gt = op.inputs[1]\n threshold = op.get_attr('threshold')\n\n # compute gradient\n data_grad_prob, data_grad_gt = hard_label_op.hard_label_grad(bottom_prob, bottom_gt, grad, threshold)\n\n return [data_grad_prob, data_grad_gt]\n"
] | [
[
"numpy.hstack",
"numpy.ascontiguousarray",
"numpy.round",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.vstack"
],
[
"tensorflow.python.framework.ops.RegisterShape",
"tensorflow.python.framework.ops.RegisterGradient"
]
] |
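The proposal-target code in the record above expands per-RoI regression targets into the class-specific N x 4K layout used by Fast R-CNN-style heads: only the four columns belonging to a RoI's assigned class are populated, and background RoIs stay all-zero. A minimal standalone sketch of that expansion, with an illustrative stand-in for the repo's `cfg.TRAIN.BBOX_INSIDE_WEIGHTS` constant:

```python
import numpy as np

# Illustrative stand-in for cfg.TRAIN.BBOX_INSIDE_WEIGHTS (not taken from the repo).
BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)

def expand_bbox_targets(bbox_target_data, num_classes):
    """Expand N x 5 rows of [class, tx, ty, tw, th] into N x 4K targets.

    Mirrors _get_bbox_regression_labels in the record above: for each
    foreground RoI, write its 4 regression values into the slice owned
    by its class; background RoIs (class 0) remain zero.
    """
    clss = bbox_target_data[:, 0]
    bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
    bbox_inside_weights = np.zeros_like(bbox_targets)
    for ind in np.where(clss > 0)[0]:
        start = int(4 * clss[ind])
        bbox_targets[ind, start:start + 4] = bbox_target_data[ind, 1:]
        bbox_inside_weights[ind, start:start + 4] = BBOX_INSIDE_WEIGHTS
    return bbox_targets, bbox_inside_weights

# Two RoIs (class 1 and background), three classes total -> N x 12 output.
data = np.array([[1, 0.1, 0.2, 0.3, 0.4],
                 [0, 0.5, 0.5, 0.5, 0.5]], dtype=np.float32)
targets, weights = expand_bbox_targets(data, num_classes=3)
assert targets.shape == (2, 12) and not targets[1].any()
```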
dendisuhubdy/Vitis-AI | [
"524f65224c52314155dafc011d488ed30e458fcb",
"524f65224c52314155dafc011d488ed30e458fcb"
] | [
"alveo/apps/whole_app_acceleration/classification/test_classify_pp.py",
"alveo/apps/face_detect/detect_util.py"
] | [
"# Copyright 2019 Xilinx Inc.\n# Copyright 2019 Xilinx Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nfrom six import itervalues, iteritems\nfrom ctypes import *\nimport numpy as np\n\nimport os, sys\nfrom vai.dpuv1.rt import xdnn, xdnn_io\nfrom vai.dpuv1.rt.vitis.python.dpu.runner import Runner\nimport waa_rt\n\nimport multiprocessing as mp\nimport ctypes\n\n\ndef pre_process(q,args):\n\n xclbin_p=str(args['xclbin']+\"/xdnn_v3_96x16_2pe_8b_9mb_bank03.xclbin\")\n kernelName_p=\"pp_pipeline_accel\"\n deviceIdx_p=args['deviceid']\n fpga_pp = waa_rt.PreProcess(xclbin_p,kernelName_p,deviceIdx_p, 0)\n batch_sz = args['batch_sz']\n img_paths = xdnn_io.getFilePaths(args['images'])\n print(\"Pre-processing handle created. Populating Queue\")\n for i in range(0, len(img_paths), batch_sz):\n for j, p in enumerate(img_paths[i:i + batch_sz]):\n arr, ht = fpga_pp.preprocess_input(p)\n q.put(arr)\n print(\"Queue populated\")\n\n\ndef process_xdnn(q,args):\n runner = Runner(args['vitis_rundir'])\n inTensors = runner.get_input_tensors()\n outTensors = runner.get_output_tensors()\n batch_sz = args['batch_sz']\n if batch_sz == -1:\n # use Runner's suggested batch size\n batch_sz = inTensors[0].dims[0]\n\n if args['golden']:\n goldenMap = xdnn_io.getGoldenMap(args['golden'])\n top5Count = 0\n top1Count = 0\n\n fpgaBlobs = []\n for io in [inTensors, outTensors]:\n blobs = []\n for t in io:\n shape = (batch_sz,) + tuple([t.dims[i] for i in range(t.ndims)][1:])\n blobs.append(np.empty((shape), dtype=np.float32, order='C'))\n fpgaBlobs.append(blobs)\n\n img_paths = xdnn_io.getFilePaths(args['images'])\n labels = xdnn_io.get_labels(args['labels'])\n xdnnCPUOp = xdnn.XDNNCPUOp(\"%s/weights.h5\" % args['vitis_rundir'])\n fcOutput = np.empty((batch_sz, args['outsz'],), dtype=np.float32, order='C')\n\n fpgaInput = fpgaBlobs[0][0]\n for i in range(0, len(img_paths), batch_sz):\n pl = []\n # fill tensor input data from image file\n for j, p in enumerate(img_paths[i:i + batch_sz]):\n\n img, _ = q.get(), None\n pl.append(p)\n np.copyto(fpgaInput[j], img)\n\n jid = runner.execute_async(fpgaBlobs[0], fpgaBlobs[1])\n runner.wait(jid)\n\n xdnnCPUOp.computeFC(fpgaBlobs[1][0], fcOutput)\n softmaxOut = xdnnCPUOp.computeSoftmax(fcOutput)\n if args['golden']:\n for j,p in enumerate(img_paths[i:i + batch_sz]):\n top1Count += xdnn_io.isTopK(softmaxOut[j], goldenMap, p, labels, 1)\n top5Count += xdnn_io.isTopK(softmaxOut[j], goldenMap, p, labels, 5)\n else:\n xdnn_io.printClassification(softmaxOut, pl, labels)\n\n if args['golden']:\n print ( (\"\\nAverage accuracy (n=%d) Top-1: %.1f%%, Top-5: %.1f%%\\n\") % (len(img_paths), float(top1Count)/float(len(img_paths))*100., float(top5Count)/float(len(img_paths))*100.) 
)\n\nif __name__ == '__main__':\n print(\"\\n\\n\\n\\n\\n\\n\\n\\n\" + '\\33[32m' + \"Running Inference with HW Pre-processing\" + '\\33[0m') \n\n args = xdnn_io.processCommandLine()\n\t\t#Create a queue for passing the pre-processed data\n q = mp.Queue()\n\t\t#Creating a process to run HW pre-processing kernel\n p_preprocess = mp.Process(target=pre_process,args=(q,args))\n\t\t#Process to run XDNN\n p_xdnn = mp.Process(target=process_xdnn,args=(q,args))\n\n p_preprocess.start()\n p_xdnn.start()\n p_preprocess.join()\n p_xdnn.join()\n",
"# Copyright 2019 Xilinx Inc.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nimport ctypes\nimport os\n\ndef GSTilingLayer_forward_py(bottom, stride):\n stride_sq = stride**2;\n\n input_batch = bottom.shape[0]\n input_channels = bottom.shape[1]\n input_height = bottom.shape[2]\n input_width = bottom.shape[3]\n\n output_batch = input_batch\n output_channels = input_channels/stride_sq\n output_height = input_height*stride\n output_width = input_width*stride\n\n \n top = np.zeros([int(output_batch), int(output_channels), int(output_height), int(output_width)],dtype=np.float32)\n\n #return top\n\n for n in range(input_batch): \n for ic in range(input_channels):\n off_div = (ic / output_channels) / stride;\n off_mod = (ic / output_channels) % stride;\n oc = ic % output_channels;\n for iy in range(input_height): \n oy = iy * stride + off_div;\n ox = off_mod - stride\n top[n,oc,oy,off_mod::stride] = bottom[n,ic,iy,:input_width]\n #for ox in range(input_width):\n #top[n,oc,oy,off_mod + ox*stride] = bottom[n,ic,iy,ox] \n \n return top\n\n\ndef GSTilingLayer_forward_c(bottom, stride):\n global top_dim\n global top\n \n moddir = os.path.dirname(os.path.abspath(__file__))\n clib = ctypes.cdll.LoadLibrary('{}/detect_util_c/detect_util_c.so'.format(moddir))\n\n stride_sq = stride**2;\n\n input_batch = bottom.shape[0]\n input_channels = bottom.shape[1]\n input_height = bottom.shape[2]\n input_width = bottom.shape[3]\n\n output_batch = input_batch\n output_channels = input_channels/stride_sq\n output_height = input_height*stride\n output_width = input_width*stride\n \n top = np.zeros([int(output_batch), int(output_channels), int(output_height), int(output_width)],dtype=np.float32)\n\n clib.GSTilingLayer_forward_c(ctypes.c_void_p(top.ctypes.data), ctypes.c_void_p(bottom.ctypes.data),\n ctypes.c_int(input_batch),\n ctypes.c_int(input_channels),\n ctypes.c_int(input_height),\n ctypes.c_int(input_width),\n ctypes.c_int(stride))\n return top\n\n\ndef GSTilingLayer_forward(bottom, stride):\n\n #return GSTilingLayer_forward_py(bottom, stride)\n return GSTilingLayer_forward_c(bottom, stride)\n\n\ndef SoftmaxLayer_forward(bottom): \n input_batch = bottom.shape[0]\n input_channels = bottom.shape[1]\n input_height = bottom.shape[2]\n input_width = bottom.shape[3]\n\n top = np.zeros([input_batch, input_channels, input_height, input_width],dtype=np.float32)\n\n #return top\n\n for n in range(input_batch):\n\n scale_data = np.zeros([input_height,input_width],dtype=np.float32)\n scale_data = bottom[n,0,...]\n\n for c in range(1,input_channels):\n scale_data = np.maximum(scale_data, bottom[n,c,...])\n \n tmp_bottom = bottom[n,...] - scale_data\n tmp_bottom = np.exp(tmp_bottom)\n\n scale_data = np.sum(tmp_bottom, axis=0)\n tmp_bottom = tmp_bottom / scale_data\n top[n] = tmp_bottom\n\n return top\n"
] | [
[
"numpy.copyto",
"numpy.empty"
],
[
"numpy.maximum",
"numpy.exp",
"numpy.zeros",
"numpy.sum"
]
] |
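`GSTilingLayer_forward_py` in the Vitis-AI record above rearranges channels into a stride x stride spatial tiling, but its shape arithmetic (`input_channels/stride_sq`, `ic / output_channels`) relies on Python 2 integer division and would produce floats under Python 3. A sketch of the same channel-to-space tiling using `//` so all indices stay integral; the batch loop is vectorized but the offset arithmetic is unchanged:

```python
import numpy as np

def gs_tiling_forward(bottom, stride):
    """Rearrange (N, C, H, W) -> (N, C // stride**2, H*stride, W*stride)."""
    n, c, h, w = bottom.shape
    oc = c // (stride * stride)  # output channels, integer division
    top = np.zeros((n, oc, h * stride, w * stride), dtype=bottom.dtype)
    for ic in range(c):
        off_div = (ic // oc) // stride  # vertical offset inside a tile
        off_mod = (ic // oc) % stride   # horizontal offset inside a tile
        for iy in range(h):
            # strided write fills every `stride`-th column, as in the record
            top[:, ic % oc, iy * stride + off_div, off_mod::stride] = bottom[:, ic, iy, :]
    return top

x = np.arange(1 * 8 * 2 * 2, dtype=np.float32).reshape(1, 8, 2, 2)
assert gs_tiling_forward(x, 2).shape == (1, 2, 4, 4)
```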
loveredcarrot/ssl_multi_seg | [
"5315dbcc2c44e8effab28699c1491dd67b7ce00b"
] | [
"code/networks/Unet.py"
] | [
"# -*- coding: utf-8 -*- \n# @Time : 2021/4/8 15:52\n# @Author : aurorazeng\n# @File : Unet.py \n# @license: (C) Copyright 2021-2026, aurorazeng; No reprobaiction without permission.\n\n\n\"\"\"\nThe implementation is borrowed from: https://github.com/HiLab-git/PyMIC\n\"\"\"\nfrom __future__ import division, print_function\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.distributions.uniform import Uniform\n\n\nclass ConvBlock(nn.Module):\n \"\"\"two convolution layers with batch norm and leaky relu\"\"\"\n\n def __init__(self, in_channels, out_channels, dropout_p):\n super(ConvBlock, self).__init__()\n self.conv_conv = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),\n # nn.LeakyReLU(),\n nn.ReLU(),\n nn.Dropout(dropout_p),\n nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),\n # nn.LeakyReLU()\n nn.ReLU()\n )\n\n def forward(self, x):\n return self.conv_conv(x)\n\n\nclass DownBlock(nn.Module):\n \"\"\"Downsampling followed by ConvBlock\"\"\"\n\n def __init__(self, in_channels, out_channels, dropout_p):\n super(DownBlock, self).__init__()\n self.maxpool_conv = nn.Sequential(\n nn.MaxPool2d(2),\n ConvBlock(in_channels, out_channels, dropout_p)\n\n )\n\n def forward(self, x):\n return self.maxpool_conv(x)\n\n\nclass UpBlock(nn.Module):\n \"\"\"Upssampling followed by ConvBlock\"\"\"\n\n def __init__(self, in_channels1, in_channels2, out_channels, dropout_p,\n bilinear=True):\n super(UpBlock, self).__init__()\n self.bilinear = bilinear\n if bilinear:\n self.conv1x1 = nn.Conv2d(in_channels1, in_channels2, kernel_size=1)\n self.up = nn.Upsample(\n scale_factor=2, mode='bilinear', align_corners=True)\n else:\n self.up = nn.ConvTranspose2d(\n in_channels1, in_channels2, kernel_size=2, stride=2)\n self.conv = ConvBlock(in_channels2 * 2, out_channels, dropout_p)\n\n def forward(self, x1, x2):\n if self.bilinear:\n x1 = self.conv1x1(x1)\n x1 = self.up(x1)\n x = torch.cat([x2, x1], dim=1)\n return self.conv(x)\n\n\nclass Encoder(nn.Module):\n def __init__(self, params):\n super(Encoder, self).__init__()\n self.params = params\n self.in_chns = self.params['in_chns']\n self.ft_chns = self.params['feature_chns']\n self.n_class = self.params['class_num']\n self.bilinear = self.params['bilinear']\n self.dropout = self.params['dropout']\n assert (len(self.ft_chns) == 5)\n self.in_conv = ConvBlock(\n self.in_chns, self.ft_chns[0], self.dropout[0])\n self.down1 = DownBlock(\n self.ft_chns[0], self.ft_chns[1], self.dropout[1])\n self.down2 = DownBlock(\n self.ft_chns[1], self.ft_chns[2], self.dropout[2])\n self.down3 = DownBlock(\n self.ft_chns[2], self.ft_chns[3], self.dropout[3])\n self.down4 = DownBlock(\n self.ft_chns[3], self.ft_chns[4], self.dropout[4])\n\n def forward(self, x):\n x0 = self.in_conv(x)\n x1 = self.down1(x0)\n x2 = self.down2(x1)\n x3 = self.down3(x2)\n x4 = self.down4(x3)\n return [x0, x1, x2, x3, x4]\n\n\nclass Decoder(nn.Module):\n def __init__(self, params):\n super(Decoder, self).__init__()\n self.params = params\n self.in_chns = self.params['in_chns']\n self.ft_chns = self.params['feature_chns']\n self.n_class = self.params['class_num']\n self.bilinear = self.params['bilinear']\n assert (len(self.ft_chns) == 5)\n\n self.up1 = UpBlock(\n self.ft_chns[4], self.ft_chns[3], self.ft_chns[3], dropout_p=0.0)\n self.up2 = UpBlock(\n self.ft_chns[3], self.ft_chns[2], self.ft_chns[2], dropout_p=0.0)\n self.up3 = UpBlock(\n self.ft_chns[2], self.ft_chns[1], 
self.ft_chns[1], dropout_p=0.0)\n self.up4 = UpBlock(\n self.ft_chns[1], self.ft_chns[0], self.ft_chns[0], dropout_p=0.0)\n\n self.out_conv = nn.Conv2d(self.ft_chns[0], self.n_class,\n kernel_size=1, padding=0)\n\n def forward(self, feature):\n x0 = feature[0]\n x1 = feature[1]\n x2 = feature[2]\n x3 = feature[3]\n x4 = feature[4]\n\n x = self.up1(x4, x3)\n x = self.up2(x, x2)\n x = self.up3(x, x1)\n x = self.up4(x, x0)\n output = self.out_conv(x)\n return output\n\n\nclass UNet(nn.Module):\n def __init__(self, in_chns, class_num):\n super(UNet, self).__init__()\n\n params = {'in_chns': in_chns,\n # 'feature_chns': [16, 32, 64, 128, 256],\n 'feature_chns': [32, 64, 128, 256, 512],\n 'dropout': [0, 0, 0, 0, 0],\n 'class_num': class_num,\n 'bilinear': False,\n 'acti_func': 'relu'}\n\n self.encoder = Encoder(params)\n self.decoder = Decoder(params)\n\n def forward(self, x):\n feature = self.encoder(x)\n output = self.decoder(feature)\n return output\n\n\nclass UNetWithDrop(nn.Module):\n def __init__(self, in_chns, class_num):\n super(UNetWithDrop, self).__init__()\n\n params = {'in_chns': in_chns,\n # 'feature_chns': [16, 32, 64, 128, 256],\n 'feature_chns': [32, 64, 128, 256, 512],\n 'dropout': [0.05, 0.1, 0.2, 0.3, 0.5],\n 'class_num': class_num,\n 'bilinear': False,\n 'acti_func': 'relu'}\n\n self.encoder = Encoder(params)\n self.decoder = Decoder(params)\n\n def forward(self, x):\n feature = self.encoder(x)\n output = self.decoder(feature)\n return output\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Upsample",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
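The `UpBlock` in the U-Net record above has two upsampling paths; in the bilinear one, the deeper feature map is first reduced with a 1x1 convolution, upsampled 2x, and then concatenated with the skip connection before the double convolution. A minimal standalone sketch of just that bilinear path (channel sizes are illustrative):

```python
import torch
import torch.nn as nn

class BilinearUp(nn.Module):
    """1x1 conv to match channels, bilinear 2x upsample, then concat skip."""
    def __init__(self, in_ch, skip_ch):
        super().__init__()
        self.reduce = nn.Conv2d(in_ch, skip_ch, kernel_size=1)
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)

    def forward(self, deep, skip):
        deep = self.up(self.reduce(deep))
        # skip first, upsampled deep features second, as in the record's UpBlock
        return torch.cat([skip, deep], dim=1)  # (N, 2*skip_ch, H, W)

up = BilinearUp(in_ch=64, skip_ch=32)
out = up(torch.randn(1, 64, 8, 8), torch.randn(1, 32, 16, 16))
assert out.shape == (1, 64, 16, 16)
```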
Atica57/DALLE-pytorch | [
"4fa108271aeb1972fcb118390ec15b656f2c328a"
] | [
"train_dalle.py"
] | [
"import argparse\nfrom random import choice\nfrom pathlib import Path\n\n# torch\n\nimport torch\nfrom torch.optim import Adam\nfrom torch.nn.utils import clip_grad_norm_\n\n# vision imports\n\nfrom PIL import Image\nfrom torchvision import transforms as T\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision.datasets import ImageFolder\nfrom torchvision.utils import make_grid, save_image\n\n# dalle related classes and utils\n\nfrom dalle_pytorch import OpenAIDiscreteVAE, DiscreteVAE, DALLE\nfrom dalle_pytorch.simple_tokenizer import tokenize, tokenizer, VOCAB_SIZE\n\n# argument parsing\n\nparser = argparse.ArgumentParser()\n\ngroup = parser.add_mutually_exclusive_group(required = False)\n\ngroup.add_argument('--vae_path', type = str,\n help='path to your trained discrete VAE')\n\ngroup.add_argument('--dalle_path', type = str,\n help='path to your partially trained DALL-E')\n\nparser.add_argument('--image_text_folder', type = str, required = True,\n help='path to your folder of images and text for learning the DALL-E')\n\nargs = parser.parse_args()\n\n# helpers\n\ndef exists(val):\n return val is not None\n\n# constants\n\nVAE_PATH = args.vae_path\nDALLE_PATH = args.dalle_path\nRESUME = exists(DALLE_PATH)\n\nEPOCHS = 20\nBATCH_SIZE = 4\nLEARNING_RATE = 3e-4\nGRAD_CLIP_NORM = 0.5\n\nMODEL_DIM = 512\nTEXT_SEQ_LEN = 256\nDEPTH = 2\nHEADS = 4\nDIM_HEAD = 64\n\n# reconstitute vae\n\nif RESUME:\n dalle_path = Path(DALLE_PATH)\n assert dalle_path.exists(), 'DALL-E model file does not exist'\n\n loaded_obj = torch.load(str(dalle_path))\n\n dalle_params, vae_params, weights = loaded_obj['hparams'], loaded_obj['vae_params'], loaded_obj['weights']\n\n vae = DiscreteVAE(**vae_params)\n\n dalle_params = dict(\n vae = vae,\n **dalle_params\n )\n\n IMAGE_SIZE = vae_params['image_size']\n\nelse:\n if exists(VAE_PATH):\n vae_path = Path(VAE_PATH)\n assert vae_path.exists(), 'VAE model file does not exist'\n\n loaded_obj = torch.load(str(vae_path))\n\n vae_params, weights = loaded_obj['hparams'], loaded_obj['weights']\n\n vae = DiscreteVAE(**vae_params)\n vae.load_state_dict(weights)\n else:\n print('using OpenAIs pretrained VAE for encoding images to tokens')\n vae_params = None\n\n vae = OpenAIDiscreteVAE()\n\n IMAGE_SIZE = vae.image_size\n\n dalle_params = dict(\n vae = vae,\n num_text_tokens = VOCAB_SIZE,\n text_seq_len = TEXT_SEQ_LEN,\n dim = MODEL_DIM,\n depth = DEPTH,\n heads = HEADS,\n dim_head = DIM_HEAD\n )\n\n# helpers\n\ndef save_model(path):\n save_obj = {\n 'hparams': dalle_params,\n 'vae_params': vae_params,\n 'weights': dalle.state_dict()\n }\n\n torch.save(save_obj, path)\n\n# dataset loading\n\nclass TextImageDataset(Dataset):\n def __init__(self, folder, text_len = 256, image_size = 128):\n super().__init__()\n path = Path(folder)\n\n text_files = [*path.glob('**/*.txt')]\n\n image_files = [\n *path.glob('**/*.png'),\n *path.glob('**/*.jpg'),\n *path.glob('**/*.jpeg')\n ]\n\n text_files = {t.stem: t for t in text_files}\n image_files = {i.stem: i for i in image_files}\n\n keys = (image_files.keys() & text_files.keys())\n\n self.keys = list(keys)\n self.text_files = {k: v for k, v in text_files.items() if k in keys}\n self.image_files = {k: v for k, v in image_files.items() if k in keys}\n\n self.image_tranform = T.Compose([\n T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),\n T.CenterCrop(image_size),\n T.Resize(image_size),\n T.ToTensor(),\n T.Lambda(lambda t: t.expand(3, -1, -1)),\n T.Normalize((0.5,) * 3, (0.5,) * 3)\n ])\n\n def __len__(self):\n 
return len(self.keys)\n\n def __getitem__(self, ind):\n key = self.keys[ind]\n text_file = self.text_files[key]\n image_file = self.image_files[key]\n\n image = Image.open(image_file)\n descriptions = text_file.read_text().split('\\n')\n descriptions = list(filter(lambda t: len(t) > 0, descriptions))\n description = choice(descriptions)\n\n tokenized_text = tokenize(description).squeeze(0)\n mask = tokenized_text != 0\n\n image_tensor = self.image_tranform(image)\n return tokenized_text, image_tensor, mask\n\n# create dataset and dataloader\n\nds = TextImageDataset(\n args.image_text_folder,\n text_len = TEXT_SEQ_LEN,\n image_size = IMAGE_SIZE\n)\n\nassert len(ds) > 0, 'dataset is empty'\nprint(f'{len(ds)} image-text pairs found for training')\n\ndl = DataLoader(ds, batch_size = BATCH_SIZE, shuffle = True, drop_last = True)\n\n# initialize DALL-E\n\ndalle = DALLE(**dalle_params).cuda()\n\nif RESUME:\n dalle.load_state_dict(weights)\n\n# optimizer\n\nopt = Adam(dalle.parameters(), lr = LEARNING_RATE)\n\n# experiment tracker\n\nimport wandb\n\nwandb.config.depth = DEPTH\nwandb.config.heads = HEADS\nwandb.config.dim_head = DIM_HEAD\n\nwandb.init(project = 'dalle_train_transformer', resume = RESUME)\n\n# training\n\nfor epoch in range(EPOCHS):\n for i, (text, images, mask) in enumerate(dl):\n text, images, mask = map(lambda t: t.cuda(), (text, images, mask))\n\n loss = dalle(text, images, mask = mask, return_loss = True)\n\n loss.backward()\n clip_grad_norm_(dalle.parameters(), GRAD_CLIP_NORM)\n\n opt.step()\n opt.zero_grad()\n\n log = {}\n\n if i % 10 == 0:\n print(epoch, i, f'loss - {loss.item()}')\n\n log = {\n **log,\n 'epoch': epoch,\n 'iter': i,\n 'loss': loss.item()\n }\n\n if i % 100 == 0:\n sample_text = text[:1]\n token_list = sample_text.masked_select(sample_text != 0).tolist()\n decoded_text = tokenizer.decode(token_list)\n\n image = dalle.generate_images(\n text[:1],\n mask = mask[:1],\n filter_thres = 0.9 # topk sampling at 0.9\n )\n\n save_model(f'./dalle.pt')\n wandb.save(f'./dalle.pt')\n\n log = {\n **log,\n 'image': wandb.Image(image, caption = decoded_text)\n }\n\n wandb.log(log)\n\nsave_model(f'./dalle-final.pt')\nwandb.save('./dalle-final.pt')\nwandb.finish()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.save"
]
] |
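`TextImageDataset` in the DALL-E record above pairs images with captions by intersecting file stems: a sample exists only when both `name.txt` and a matching `name.png/jpg/jpeg` are present. A condensed sketch of that pairing logic (the directory layout in the usage comment is hypothetical):

```python
from pathlib import Path

def pair_images_with_text(folder):
    """Return {stem: (text_path, image_path)} for stems present in both sets."""
    path = Path(folder)
    text_files = {p.stem: p for p in path.glob('**/*.txt')}
    image_files = {p.stem: p
                   for ext in ('*.png', '*.jpg', '*.jpeg')
                   for p in path.glob(f'**/{ext}')}
    # only keys with both a caption and an image survive, as in the record
    keys = text_files.keys() & image_files.keys()
    return {k: (text_files[k], image_files[k]) for k in sorted(keys)}

# pairs = pair_images_with_text('./data')  # e.g. {'cat': (cat.txt, cat.jpg)}
```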
jhuebotter/CartpoleSNNdemo | [
"d18a85cbc45bff48295c46c9cd8c9fc00192318c"
] | [
"CartPole/_CartPole_mathematical_helpers.py"
] | [
"\"\"\"\nSmall general mathematical functions.\nThis file was necessary to make CartPole module self-contained.\n\"\"\"\n\nfrom math import fmod\nimport numpy as np\n\n\n# Wraps the angle into range [-π, π]\ndef wrap_angle_rad(angle: float) -> float:\n Modulo = fmod(angle, 2 * np.pi) # positive modulo\n if Modulo < -np.pi:\n angle = Modulo + 2 * np.pi\n elif Modulo > np.pi:\n angle = Modulo - 2 * np.pi\n else:\n angle = Modulo\n return angle\n\n\ndef wrap_angle_rad_inplace(angle: np.ndarray) -> None:\n Modulo = np.fmod(angle, 2 * np.pi) # positive modulo\n neg_wrap, pos_wrap = Modulo < -np.pi, Modulo > np.pi\n angle[neg_wrap] = Modulo[neg_wrap] + 2 * np.pi\n angle[pos_wrap] = Modulo[pos_wrap] - 2 * np.pi\n angle[~(neg_wrap | pos_wrap)] = Modulo[~(neg_wrap | pos_wrap)]\n\n\ndef conditional_decorator(dec, cond):\n def decorator(func):\n return dec(func) if cond else func\n return decorator\n\n\n"
] | [
[
"numpy.fmod"
]
] |
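One caveat about the CartPole record above: despite the inline `# positive modulo` comment, `fmod` returns a remainder with the sign of its first argument, which is exactly why `wrap_angle_rad` needs both overflow branches (below -π and above π). A quick standalone check of the wrapping behavior:

```python
from math import fmod, pi

def wrap_angle_rad(angle):
    m = fmod(angle, 2 * pi)  # in (-2*pi, 2*pi), same sign as `angle`
    if m < -pi:
        return m + 2 * pi
    if m > pi:
        return m - 2 * pi
    return m

assert abs(wrap_angle_rad(2.5 * pi) - 0.5 * pi) < 1e-9
assert abs(wrap_angle_rad(-2.5 * pi) + 0.5 * pi) < 1e-9
assert -pi <= wrap_angle_rad(12345.678) <= pi
```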
augustehirth/Cirq | [
"e616710a0fa243524a9f6d7bc0d35e6b952fe3d0",
"e616710a0fa243524a9f6d7bc0d35e6b952fe3d0",
"e616710a0fa243524a9f6d7bc0d35e6b952fe3d0",
"e616710a0fa243524a9f6d7bc0d35e6b952fe3d0",
"e616710a0fa243524a9f6d7bc0d35e6b952fe3d0"
] | [
"cirq-google/cirq_google/serialization/op_serializer_test.py",
"cirq-core/cirq/testing/consistent_phase_by_test.py",
"cirq-core/cirq/ops/controlled_gate.py",
"cirq-core/cirq/transformers/analytical_decompositions/two_qubit_to_cz_test.py",
"cirq-core/cirq/testing/consistent_protocols.py"
] | [
"# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, List\n\nimport copy\nimport numpy as np\nimport pytest\nimport sympy\n\nfrom google.protobuf import json_format\n\nimport cirq\nimport cirq_google as cg\nfrom cirq_google.api import v2\n\n\nDEFAULT_TOKEN = 'test_tag'\n\n\ndef op_proto(json: Dict) -> v2.program_pb2.Operation:\n op = v2.program_pb2.Operation()\n json_format.ParseDict(json, op)\n return op\n\n\nclass GateWithAttribute(cirq.SingleQubitGate):\n def __init__(self, val):\n self.val = val\n\n\nclass GateWithProperty(cirq.SingleQubitGate):\n def __init__(self, val, not_req=None):\n self._val = val\n self._not_req = not_req\n\n @property\n def val(self):\n return self._val\n\n\nclass GateWithMethod(cirq.SingleQubitGate):\n def __init__(self, val):\n self._val = val\n\n def get_val(self):\n return self._val\n\n\nclass SubclassGate(GateWithAttribute):\n\n pass\n\n\ndef get_val(op):\n return op.gate.get_val()\n\n\nTEST_CASES = (\n (float, 1.0, {'arg_value': {'float_value': 1.0}}),\n (str, 'abc', {'arg_value': {'string_value': 'abc'}}),\n (float, 1, {'arg_value': {'float_value': 1.0}}),\n (List[bool], [True, False], {'arg_value': {'bool_values': {'values': [True, False]}}}),\n (List[bool], (True, False), {'arg_value': {'bool_values': {'values': [True, False]}}}),\n (\n List[bool],\n np.array([True, False], dtype=bool),\n {'arg_value': {'bool_values': {'values': [True, False]}}},\n ),\n (sympy.Symbol, sympy.Symbol('x'), {'symbol': 'x'}),\n (float, sympy.Symbol('x'), {'symbol': 'x'}),\n (\n float,\n sympy.Symbol('x') - sympy.Symbol('y'),\n {\n 'func': {\n 'type': 'add',\n 'args': [\n {'symbol': 'x'},\n {\n 'func': {\n 'type': 'mul',\n 'args': [{'arg_value': {'float_value': -1.0}}, {'symbol': 'y'}],\n }\n },\n ],\n }\n },\n ),\n)\n\n\[email protected](('val_type', 'val', 'arg_value'), TEST_CASES)\ndef test_to_proto_attribute(val_type, val, arg_value):\n serializer = cg.GateOpSerializer(\n gate_type=GateWithAttribute,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(serialized_name='my_val', serialized_type=val_type, op_getter='val')\n ],\n )\n q = cirq.GridQubit(1, 2)\n result = serializer.to_proto(GateWithAttribute(val)(q), arg_function_language='linear')\n expected = op_proto(\n {'gate': {'id': 'my_gate'}, 'args': {'my_val': arg_value}, 'qubits': [{'id': '1_2'}]}\n )\n assert result == expected\n\n\[email protected](('val_type', 'val', 'arg_value'), TEST_CASES)\ndef test_to_proto_property(val_type, val, arg_value):\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(serialized_name='my_val', serialized_type=val_type, op_getter='val')\n ],\n )\n q = cirq.GridQubit(1, 2)\n result = serializer.to_proto(GateWithProperty(val)(q), arg_function_language='linear')\n expected = op_proto(\n {'gate': {'id': 'my_gate'}, 'args': {'my_val': arg_value}, 'qubits': [{'id': '1_2'}]}\n )\n assert result == expected\n\n\[email protected](('val_type', 'val', 
'arg_value'), TEST_CASES)\ndef test_to_proto_callable(val_type, val, arg_value):\n serializer = cg.GateOpSerializer(\n gate_type=GateWithMethod,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(serialized_name='my_val', serialized_type=val_type, op_getter=get_val)\n ],\n )\n q = cirq.GridQubit(1, 2)\n result = serializer.to_proto(GateWithMethod(val)(q), arg_function_language='linear')\n expected = op_proto(\n {'gate': {'id': 'my_gate'}, 'args': {'my_val': arg_value}, 'qubits': [{'id': '1_2'}]}\n )\n assert result == expected\n\n\ndef test_to_proto_gate_predicate():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithAttribute,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n can_serialize_predicate=lambda x: x.gate.val == 1,\n )\n q = cirq.GridQubit(1, 2)\n assert serializer.to_proto(GateWithAttribute(0)(q)) is None\n assert serializer.to_proto(GateWithAttribute(1)(q)) is not None\n assert not serializer.can_serialize_operation(GateWithAttribute(0)(q))\n assert serializer.can_serialize_operation(GateWithAttribute(1)(q))\n\n\ndef test_to_proto_gate_mismatch():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n )\n q = cirq.GridQubit(1, 2)\n with pytest.raises(ValueError, match='GateWithAttribute.*GateWithProperty'):\n serializer.to_proto(GateWithAttribute(1.0)(q))\n\n\ndef test_to_proto_unsupported_type():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=bytes, op_getter='val')],\n )\n q = cirq.GridQubit(1, 2)\n with pytest.raises(ValueError, match='bytes'):\n serializer.to_proto(GateWithProperty(b's')(q))\n\n\ndef test_to_proto_named_qubit_supported():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n )\n q = cirq.NamedQubit('a')\n arg_value = 1.0\n result = serializer.to_proto(GateWithProperty(arg_value)(q))\n\n expected = op_proto(\n {\n 'gate': {'id': 'my_gate'},\n 'args': {'my_val': {'arg_value': {'float_value': arg_value}}},\n 'qubits': [{'id': 'a'}],\n }\n )\n assert result == expected\n\n\ndef test_to_proto_line_qubit_supported():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n )\n q = cirq.LineQubit('10')\n arg_value = 1.0\n result = serializer.to_proto(GateWithProperty(arg_value)(q))\n\n expected = op_proto(\n {\n 'gate': {'id': 'my_gate'},\n 'args': {'my_val': {'arg_value': {'float_value': arg_value}}},\n 'qubits': [{'id': '10'}],\n }\n )\n assert result == expected\n\n\ndef test_to_proto_required_but_not_present():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(\n serialized_name='my_val', serialized_type=float, op_getter=lambda x: None\n )\n ],\n )\n q = cirq.GridQubit(1, 2)\n with pytest.raises(ValueError, match='required'):\n serializer.to_proto(GateWithProperty(1.0)(q))\n\n\ndef test_to_proto_no_getattr():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', 
serialized_type=float, op_getter='nope')],\n )\n q = cirq.GridQubit(1, 2)\n with pytest.raises(ValueError, match='does not have'):\n serializer.to_proto(GateWithProperty(1.0)(q))\n\n\ndef test_to_proto_not_required_ok():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val'),\n cg.SerializingArg(\n serialized_name='not_req',\n serialized_type=float,\n op_getter='not_req',\n required=False,\n ),\n ],\n )\n expected = op_proto(\n {\n 'gate': {'id': 'my_gate'},\n 'args': {'my_val': {'arg_value': {'float_value': 0.125}}},\n 'qubits': [{'id': '1_2'}],\n }\n )\n\n q = cirq.GridQubit(1, 2)\n assert serializer.to_proto(GateWithProperty(0.125)(q)) == expected\n\n\[email protected](\n ('val_type', 'val'),\n (\n (float, 's'),\n (str, 1.0),\n (sympy.Symbol, 1.0),\n (List[bool], [1.0]),\n (List[bool], 'a'),\n (List[bool], (1.0,)),\n ),\n)\ndef test_to_proto_type_mismatch(val_type, val):\n serializer = cg.GateOpSerializer(\n gate_type=GateWithProperty,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(serialized_name='my_val', serialized_type=val_type, op_getter='val')\n ],\n )\n q = cirq.GridQubit(1, 2)\n with pytest.raises(ValueError, match=str(type(val))):\n serializer.to_proto(GateWithProperty(val)(q))\n\n\ndef test_can_serialize_operation_subclass():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithAttribute,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n can_serialize_predicate=lambda x: x.gate.val == 1,\n )\n q = cirq.GridQubit(1, 1)\n assert serializer.can_serialize_operation(SubclassGate(1)(q))\n assert not serializer.can_serialize_operation(SubclassGate(0)(q))\n\n\ndef test_defaults_not_serialized():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithAttribute,\n serialized_gate_id='my_gate',\n args=[\n cg.SerializingArg(\n serialized_name='my_val', serialized_type=float, default=1.0, op_getter='val'\n )\n ],\n )\n q = cirq.GridQubit(1, 2)\n no_default = op_proto(\n {\n 'gate': {'id': 'my_gate'},\n 'args': {'my_val': {'arg_value': {'float_value': 0.125}}},\n 'qubits': [{'id': '1_2'}],\n }\n )\n assert no_default == serializer.to_proto(GateWithAttribute(0.125)(q))\n with_default = op_proto({'gate': {'id': 'my_gate'}, 'qubits': [{'id': '1_2'}]})\n assert with_default == serializer.to_proto(GateWithAttribute(1.0)(q))\n\n\ndef test_token_serialization():\n serializer = cg.GateOpSerializer(\n gate_type=GateWithAttribute,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n )\n q = cirq.GridQubit(1, 2)\n tag = cg.CalibrationTag('my_token')\n expected = op_proto(\n {\n 'gate': {'id': 'my_gate'},\n 'args': {'my_val': {'arg_value': {'float_value': 0.125}}},\n 'qubits': [{'id': '1_2'}],\n 'token_value': 'my_token',\n }\n )\n assert expected == serializer.to_proto(GateWithAttribute(0.125)(q).with_tags(tag))\n\n\nONE_CONSTANT = [v2.program_pb2.Constant(string_value='my_token')]\nTWO_CONSTANTS = [\n v2.program_pb2.Constant(string_value='other_token'),\n v2.program_pb2.Constant(string_value='my_token'),\n]\n\n\[email protected](\n ('constants', 'expected_index', 'expected_constants'),\n (\n ([], 0, ONE_CONSTANT),\n (ONE_CONSTANT, 0, ONE_CONSTANT),\n (TWO_CONSTANTS, 1, TWO_CONSTANTS),\n ),\n)\ndef test_token_serialization_with_constant_reference(constants, expected_index, expected_constants):\n 
serializer = cg.GateOpSerializer(\n gate_type=GateWithAttribute,\n serialized_gate_id='my_gate',\n args=[cg.SerializingArg(serialized_name='my_val', serialized_type=float, op_getter='val')],\n )\n # Make a local copy since we are modifying the array in-place.\n constants = copy.copy(constants)\n q = cirq.GridQubit(1, 2)\n tag = cg.CalibrationTag('my_token')\n expected = op_proto(\n {\n 'gate': {'id': 'my_gate'},\n 'args': {'my_val': {'arg_value': {'float_value': 0.125}}},\n 'qubits': [{'id': '1_2'}],\n 'token_constant_index': expected_index,\n }\n )\n assert expected == serializer.to_proto(\n GateWithAttribute(0.125)(q).with_tags(tag), constants=constants\n )\n assert constants == expected_constants\n\n\ndef default_circuit_proto():\n op1 = v2.program_pb2.Operation()\n op1.gate.id = 'x_pow'\n op1.args['half_turns'].arg_value.string_value = 'k'\n op1.qubits.add().id = '1_1'\n\n op2 = v2.program_pb2.Operation()\n op2.gate.id = 'x_pow'\n op2.args['half_turns'].arg_value.float_value = 1.0\n op2.qubits.add().id = '1_2'\n op2.token_constant_index = 0\n\n return v2.program_pb2.Circuit(\n scheduling_strategy=v2.program_pb2.Circuit.MOMENT_BY_MOMENT,\n moments=[\n v2.program_pb2.Moment(\n operations=[op1, op2],\n ),\n ],\n )\n\n\ndef default_circuit():\n return cirq.FrozenCircuit(\n cirq.X(cirq.GridQubit(1, 1)) ** sympy.Symbol('k'),\n cirq.X(cirq.GridQubit(1, 2)).with_tags(DEFAULT_TOKEN),\n cirq.measure(cirq.GridQubit(1, 1), key='m'),\n )\n\n\ndef test_circuit_op_serializer_properties():\n serializer = cg.CircuitOpSerializer()\n assert serializer.internal_type == cirq.FrozenCircuit\n assert serializer.serialized_id == 'circuit'\n\n\ndef test_can_serialize_circuit_op():\n serializer = cg.CircuitOpSerializer()\n assert serializer.can_serialize_operation(cirq.CircuitOperation(default_circuit()))\n assert not serializer.can_serialize_operation(cirq.X(cirq.GridQubit(1, 1)))\n\n\ndef test_circuit_op_to_proto_errors():\n serializer = cg.CircuitOpSerializer()\n to_serialize = cirq.CircuitOperation(default_circuit())\n\n constants = [\n v2.program_pb2.Constant(string_value=DEFAULT_TOKEN),\n v2.program_pb2.Constant(circuit_value=default_circuit_proto()),\n ]\n raw_constants = {\n DEFAULT_TOKEN: 0,\n default_circuit(): 1,\n }\n\n with pytest.raises(ValueError, match='CircuitOp serialization requires a constants list'):\n serializer.to_proto(to_serialize)\n\n with pytest.raises(ValueError, match='CircuitOp serialization requires a constants list'):\n serializer.to_proto(to_serialize, constants=constants)\n\n with pytest.raises(ValueError, match='CircuitOp serialization requires a constants list'):\n serializer.to_proto(to_serialize, raw_constants=raw_constants)\n\n with pytest.raises(ValueError, match='Serializer expected CircuitOperation'):\n serializer.to_proto(\n v2.program_pb2.Operation(), constants=constants, raw_constants=raw_constants\n )\n\n bad_raw_constants = {cirq.FrozenCircuit(): 0}\n with pytest.raises(ValueError, match='Encountered a circuit not in the constants table'):\n serializer.to_proto(to_serialize, constants=constants, raw_constants=bad_raw_constants)\n\n with pytest.raises(ValueError, match='Cannot serialize repetitions of type'):\n serializer.to_proto(\n to_serialize ** sympy.Symbol('a'), constants=constants, raw_constants=raw_constants\n )\n\n\[email protected]('repetitions', [1, 5, ['a', 'b', 'c']])\ndef test_circuit_op_to_proto(repetitions):\n serializer = cg.CircuitOpSerializer()\n if isinstance(repetitions, int):\n repetition_ids = None\n else:\n repetition_ids = repetitions\n 
repetitions = len(repetition_ids)\n to_serialize = cirq.CircuitOperation(\n circuit=default_circuit(),\n qubit_map={cirq.GridQubit(1, 1): cirq.GridQubit(1, 2)},\n measurement_key_map={'m': 'results'},\n param_resolver={'k': 1.0},\n repetitions=repetitions,\n repetition_ids=repetition_ids,\n )\n\n constants = [\n v2.program_pb2.Constant(string_value=DEFAULT_TOKEN),\n v2.program_pb2.Constant(circuit_value=default_circuit_proto()),\n ]\n raw_constants = {\n DEFAULT_TOKEN: 0,\n default_circuit(): 1,\n }\n\n repetition_spec = v2.program_pb2.RepetitionSpecification()\n if repetition_ids is None:\n repetition_spec.repetition_count = repetitions\n else:\n for rep_id in repetition_ids:\n repetition_spec.repetition_ids.ids.append(rep_id)\n\n qubit_map = v2.program_pb2.QubitMapping()\n q_p1 = qubit_map.entries.add()\n q_p1.key.id = '1_1'\n q_p1.value.id = '1_2'\n\n measurement_key_map = v2.program_pb2.MeasurementKeyMapping()\n meas_p1 = measurement_key_map.entries.add()\n meas_p1.key.string_key = 'm'\n meas_p1.value.string_key = 'results'\n\n arg_map = v2.program_pb2.ArgMapping()\n arg_p1 = arg_map.entries.add()\n arg_p1.key.arg_value.string_value = 'k'\n arg_p1.value.arg_value.float_value = 1.0\n\n expected = v2.program_pb2.CircuitOperation(\n circuit_constant_index=1,\n repetition_specification=repetition_spec,\n qubit_map=qubit_map,\n measurement_key_map=measurement_key_map,\n arg_map=arg_map,\n )\n actual = serializer.to_proto(to_serialize, constants=constants, raw_constants=raw_constants)\n assert actual == expected\n",
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nimport numpy as np\n\nimport cirq\n\n\nclass GoodPhaser:\n def __init__(self, e):\n self.e = e\n\n def _unitary_(self):\n return np.array([[0, 1j**-self.e], [1j**self.e, 0]])\n\n def _phase_by_(self, phase_turns: float, qubit_index: int):\n return GoodPhaser(self.e + phase_turns * 4)\n\n def _resolve_parameters_(self, resolver, recursive):\n return GoodPhaser(resolver.value_of(self.e, recursive))\n\n\nclass GoodQuditPhaser:\n def __init__(self, e):\n self.e = e\n\n def _qid_shape_(self):\n return (3,)\n\n def _unitary_(self):\n return np.array(\n [\n [0, 1j**-self.e, 0],\n [0, 0, 1j**self.e],\n [1, 0, 0],\n ]\n )\n\n def _phase_by_(self, phase_turns: float, qubit_index: int):\n return GoodQuditPhaser(self.e + phase_turns * 4)\n\n def _resolve_parameters_(self, resolver, recursive):\n return GoodQuditPhaser(resolver.value_of(self.e, recursive))\n\n\nclass BadPhaser:\n def __init__(self, e):\n self.e = e\n\n def _unitary_(self):\n return np.array([[0, 1j ** -(self.e * 2)], [1j**self.e, 0]])\n\n def _phase_by_(self, phase_turns: float, qubit_index: int):\n return BadPhaser(self.e + phase_turns * 4)\n\n def _resolve_parameters_(self, resolver, recursive):\n return BadPhaser(resolver.value_of(self.e, recursive))\n\n\nclass NotPhaser:\n def _unitary_(self):\n return np.array([[0, 1], [1, 0]])\n\n def _phase_by_(self, phase_turns: float, qubit_index: int):\n return NotImplemented\n\n\nclass SemiBadPhaser:\n def __init__(self, e):\n self.e = e\n\n def _unitary_(self):\n a1 = cirq.unitary(GoodPhaser(self.e[0]))\n a2 = cirq.unitary(BadPhaser(self.e[1]))\n return np.kron(a1, a2)\n\n def _phase_by_(self, phase_turns: float, qubit_index: int):\n r = list(self.e)\n r[qubit_index] += phase_turns * 4\n return SemiBadPhaser(r)\n\n def _resolve_parameters_(self, resolver, recursive):\n return SemiBadPhaser([resolver.value_of(val, recursive) for val in self.e])\n\n\ndef test_assert_phase_by_is_consistent_with_unitary():\n cirq.testing.assert_phase_by_is_consistent_with_unitary(GoodPhaser(0.5))\n\n cirq.testing.assert_phase_by_is_consistent_with_unitary(GoodQuditPhaser(0.5))\n\n with pytest.raises(AssertionError, match='Phased unitary was incorrect for index #0'):\n cirq.testing.assert_phase_by_is_consistent_with_unitary(BadPhaser(0.5))\n\n with pytest.raises(AssertionError, match='Phased unitary was incorrect for index #1'):\n cirq.testing.assert_phase_by_is_consistent_with_unitary(SemiBadPhaser([0.5, 0.25]))\n\n # Vacuous success.\n cirq.testing.assert_phase_by_is_consistent_with_unitary(NotPhaser())\n",
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import (\n AbstractSet,\n Any,\n cast,\n Collection,\n Dict,\n List,\n Optional,\n Sequence,\n Tuple,\n Union,\n TYPE_CHECKING,\n)\n\nimport numpy as np\n\nfrom cirq import protocols, value, _import\nfrom cirq._compat import deprecated\nfrom cirq.ops import raw_types, controlled_operation as cop, matrix_gates\nfrom cirq.type_workarounds import NotImplementedType\n\nif TYPE_CHECKING:\n import cirq\n\ncontrolled_gate_decomposition = _import.LazyLoader(\n 'controlled_gate_decomposition', globals(), 'cirq.transformers.analytical_decompositions'\n)\ncommon_gates = _import.LazyLoader('common_gates', globals(), 'cirq.ops')\nline_qubit = _import.LazyLoader('line_qubit', globals(), 'cirq.devices')\n\n\[email protected]_equality\nclass ControlledGate(raw_types.Gate):\n \"\"\"Augments existing gates to have one or more control qubits.\n\n This object is typically created via `gate.controlled()`.\n \"\"\"\n\n def __init__(\n self,\n sub_gate: 'cirq.Gate',\n num_controls: int = None,\n control_values: Optional[Sequence[Union[int, Collection[int]]]] = None,\n control_qid_shape: Optional[Sequence[int]] = None,\n ) -> None:\n \"\"\"Initializes the controlled gate. If no arguments are specified for\n the controls, defaults to a single qubit control.\n\n Args:\n sub_gate: The gate to add a control qubit to.\n num_controls: Total number of control qubits.\n control_values: For which control qubit values to apply the sub\n gate. A sequence of length `num_controls` where each\n entry is an integer (or set of integers) corresponding to the\n qubit value (or set of possible values) where that control is\n enabled. When all controls are enabled, the sub gate is\n applied. If unspecified, control values default to 1.\n control_qid_shape: The qid shape of the controls. A tuple of the\n expected dimension of each control qid. Defaults to\n `(2,) * num_controls`. 
Specify this argument when using qudits.\n\n Raises:\n ValueError: If the `control_values` or `control_qid_shape` does not\n match with `num_conrols`, or if the `control_values` are out of\n bounds.\n \"\"\"\n if num_controls is None:\n if control_values is not None:\n num_controls = len(control_values)\n elif control_qid_shape is not None:\n num_controls = len(control_qid_shape)\n else:\n num_controls = 1\n if control_values is None:\n control_values = ((1,),) * num_controls\n if num_controls != len(control_values):\n raise ValueError('len(control_values) != num_controls')\n\n if control_qid_shape is None:\n control_qid_shape = (2,) * num_controls\n if num_controls != len(control_qid_shape):\n raise ValueError('len(control_qid_shape) != num_controls')\n self._control_qid_shape = tuple(control_qid_shape)\n\n # Convert to sorted tuples\n self._control_values = cast(\n Tuple[Tuple[int, ...], ...],\n tuple((val,) if isinstance(val, int) else tuple(sorted(val)) for val in control_values),\n )\n # Verify control values not out of bounds\n for i, (val, dimension) in enumerate(zip(self.control_values, self.control_qid_shape)):\n if not all(0 <= v < dimension for v in val):\n raise ValueError(\n 'Control values <{!r}> outside of range for control qubit '\n 'number <{!r}>.'.format(val, i)\n )\n\n # Flatten nested ControlledGates.\n if isinstance(sub_gate, ControlledGate):\n self._sub_gate = sub_gate.sub_gate # type: ignore\n self._control_values += sub_gate.control_values\n self._control_qid_shape += sub_gate.control_qid_shape\n else:\n self._sub_gate = sub_gate\n\n @property\n def control_qid_shape(self) -> Tuple[int, ...]:\n return self._control_qid_shape\n\n @control_qid_shape.setter # type: ignore\n @deprecated(\n deadline=\"v0.15\",\n fix=\"The mutators of this class are deprecated, instantiate a new object instead.\",\n )\n def control_qid_shape(self, control_qid_shape: Tuple[int, ...]):\n self._control_qid_shape = control_qid_shape\n\n @property\n def control_values(self) -> Tuple[Tuple[int, ...], ...]:\n return self._control_values\n\n @control_values.setter # type: ignore\n @deprecated(\n deadline=\"v0.15\",\n fix=\"The mutators of this class are deprecated, instantiate a new object instead.\",\n )\n def control_values(self, control_values: Tuple[Tuple[int, ...], ...]):\n self._control_values = control_values\n\n @property\n def sub_gate(self) -> 'cirq.Gate':\n return self._sub_gate\n\n @sub_gate.setter # type: ignore\n @deprecated(\n deadline=\"v0.15\",\n fix=\"The mutators of this class are deprecated, instantiate a new object instead.\",\n )\n def sub_gate(self, sub_gate: 'cirq.Gate'):\n self._sub_gate = sub_gate\n\n def num_controls(self) -> int:\n return len(self.control_qid_shape)\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return self.control_qid_shape + protocols.qid_shape(self.sub_gate)\n\n def _decompose_(self, qubits):\n if (\n protocols.has_unitary(self.sub_gate)\n and protocols.num_qubits(self.sub_gate) == 1\n and self._qid_shape_() == (2,) * len(self._qid_shape_())\n ):\n control_qubits = list(qubits[: self.num_controls()])\n invert_ops: List['cirq.Operation'] = []\n for cvals, cqbit in zip(self.control_values, qubits[: self.num_controls()]):\n if set(cvals) == {0}:\n invert_ops.append(common_gates.X(cqbit))\n elif set(cvals) == {0, 1}:\n control_qubits.remove(cqbit)\n decomposed_ops = controlled_gate_decomposition.decompose_multi_controlled_rotation(\n protocols.unitary(self.sub_gate), control_qubits, qubits[-1]\n )\n return invert_ops + decomposed_ops + invert_ops\n\n 
if isinstance(self.sub_gate, common_gates.CZPowGate):\n z_sub_gate = common_gates.ZPowGate(\n exponent=self.sub_gate.exponent, global_shift=self.sub_gate.global_shift\n )\n kwargs = {\n 'num_controls': self.num_controls() + 1,\n 'control_values': self.control_values + (1,),\n 'control_qid_shape': self.control_qid_shape + (2,),\n }\n controlled_z = (\n z_sub_gate.controlled(**kwargs)\n if protocols.is_parameterized(self)\n else ControlledGate(z_sub_gate, **kwargs)\n )\n if self != controlled_z:\n return protocols.decompose_once_with_qubits(controlled_z, qubits, NotImplemented)\n\n if isinstance(self.sub_gate, matrix_gates.MatrixGate):\n # Default decompositions of 2/3 qubit `cirq.MatrixGate` ignores global phase, which is\n # local phase in the controlled variant and hence cannot be ignored.\n return NotImplemented\n\n result = protocols.decompose_once_with_qubits(\n self.sub_gate, qubits[self.num_controls() :], NotImplemented\n )\n if result is NotImplemented:\n return NotImplemented\n\n decomposed: List['cirq.Operation'] = []\n for op in result:\n decomposed.append(\n op.controlled_by(*qubits[: self.num_controls()], control_values=self.control_values)\n )\n return decomposed\n\n def on(self, *qubits: 'cirq.Qid') -> cop.ControlledOperation:\n if len(qubits) == 0:\n raise ValueError(f\"Applied a gate to an empty set of qubits. Gate: {self!r}\")\n self.validate_args(qubits)\n return cop.ControlledOperation(\n qubits[: self.num_controls()],\n self.sub_gate.on(*qubits[self.num_controls() :]),\n self.control_values,\n )\n\n def _value_equality_values_(self):\n return (\n self.sub_gate,\n self.num_controls(),\n self.control_values,\n self.control_qid_shape,\n )\n\n def _apply_unitary_(self, args: 'protocols.ApplyUnitaryArgs') -> np.ndarray:\n qubits = line_qubit.LineQid.for_gate(self)\n op = self.sub_gate.on(*qubits[self.num_controls() :])\n c_op = cop.ControlledOperation(qubits[: self.num_controls()], op, self.control_values)\n return protocols.apply_unitary(c_op, args, default=NotImplemented)\n\n def _has_unitary_(self) -> bool:\n return protocols.has_unitary(self.sub_gate)\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n qubits = line_qubit.LineQid.for_gate(self)\n op = self.sub_gate.on(*qubits[self.num_controls() :])\n c_op = cop.ControlledOperation(qubits[: self.num_controls()], op, self.control_values)\n\n return protocols.unitary(c_op, default=NotImplemented)\n\n def _has_mixture_(self) -> bool:\n return protocols.has_mixture(self.sub_gate)\n\n def _mixture_(self) -> Union[np.ndarray, NotImplementedType]:\n qubits = line_qubit.LineQid.for_gate(self)\n op = self.sub_gate.on(*qubits[self.num_controls() :])\n c_op = cop.ControlledOperation(qubits[: self.num_controls()], op, self.control_values)\n return protocols.mixture(c_op, default=NotImplemented)\n\n def __pow__(self, exponent: Any) -> 'ControlledGate':\n new_sub_gate = protocols.pow(self.sub_gate, exponent, NotImplemented)\n if new_sub_gate is NotImplemented:\n return NotImplemented\n return ControlledGate(\n new_sub_gate,\n self.num_controls(),\n control_values=self.control_values,\n control_qid_shape=self.control_qid_shape,\n )\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self.sub_gate)\n\n def _parameter_names_(self) -> AbstractSet[str]:\n return protocols.parameter_names(self.sub_gate)\n\n def _resolve_parameters_(\n self, resolver: 'cirq.ParamResolver', recursive: bool\n ) -> 'ControlledGate':\n new_sub_gate = protocols.resolve_parameters(self.sub_gate, resolver, recursive)\n 
return ControlledGate(\n new_sub_gate,\n self.num_controls(),\n control_values=self.control_values,\n control_qid_shape=self.control_qid_shape,\n )\n\n def _trace_distance_bound_(self) -> Optional[float]:\n if self._is_parameterized_():\n return None\n u = protocols.unitary(self.sub_gate, default=None)\n if u is None:\n return NotImplemented\n angle_list = np.append(np.angle(np.linalg.eigvals(u)), 0)\n return protocols.trace_distance_from_angle_list(angle_list)\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n sub_args = protocols.CircuitDiagramInfoArgs(\n known_qubit_count=(\n args.known_qubit_count - self.num_controls()\n if args.known_qubit_count is not None\n else None\n ),\n known_qubits=(\n args.known_qubits[self.num_controls() :] if args.known_qubits is not None else None\n ),\n use_unicode_characters=args.use_unicode_characters,\n precision=args.precision,\n label_map=args.label_map,\n )\n sub_info = protocols.circuit_diagram_info(self.sub_gate, sub_args, None)\n if sub_info is None:\n return NotImplemented\n\n def get_symbol(vals):\n if tuple(vals) == (1,):\n return '@'\n return f\"({','.join(map(str, vals))})\"\n\n return protocols.CircuitDiagramInfo(\n wire_symbols=(\n *(get_symbol(vals) for vals in self.control_values),\n *sub_info.wire_symbols,\n ),\n exponent=sub_info.exponent,\n )\n\n def __str__(self) -> str:\n if set(self.control_values) == {(1,)}:\n\n def get_prefix(control_vals):\n return 'C'\n\n else:\n\n def get_prefix(control_vals):\n control_vals_str = ''.join(map(str, sorted(control_vals)))\n return f'C{control_vals_str}'\n\n return ''.join(map(get_prefix, self.control_values)) + str(self.sub_gate)\n\n def __repr__(self) -> str:\n if self.num_controls() == 1 and self.control_values == ((1,),):\n return f'cirq.ControlledGate(sub_gate={self.sub_gate!r})'\n\n if all(vals == (1,) for vals in self.control_values) and set(self.control_qid_shape) == {2}:\n return (\n f'cirq.ControlledGate(sub_gate={self.sub_gate!r}, '\n f'num_controls={self.num_controls()!r})'\n )\n return (\n f'cirq.ControlledGate(sub_gate={self.sub_gate!r}, '\n f'control_values={self.control_values!r},'\n f'control_qid_shape={self.control_qid_shape!r})'\n )\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {\n 'control_values': self.control_values,\n 'control_qid_shape': self.control_qid_shape,\n 'sub_gate': self.sub_gate,\n }\n",
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cmath\nimport random\n\nimport numpy as np\nimport pytest\n\nimport cirq\nfrom cirq import value\nfrom cirq.transformers.analytical_decompositions.two_qubit_to_cz import (\n _parity_interaction,\n _is_trivial_angle,\n two_qubit_matrix_to_diagonal_and_cz_operations,\n)\nfrom cirq.testing import random_two_qubit_circuit_with_czs\n\nALLOW_DEPRECATION_IN_TEST = 'ALLOW_DEPRECATION_IN_TEST'\n\n\ndef test_deprecated_submodule():\n with cirq.testing.assert_deprecated(\n \"Use cirq.transformers.analytical_decompositions.two_qubit_to_cz instead\", deadline=\"v0.16\"\n ):\n _ = cirq.optimizers.two_qubit_decompositions.two_qubit_matrix_to_cz_operations\n\n\[email protected](\n 'rad,expected',\n (\n lambda err, largeErr: [\n (np.pi / 4, True),\n (np.pi / 4 + err, True),\n (np.pi / 4 + largeErr, False),\n (np.pi / 4 - err, True),\n (np.pi / 4 - largeErr, False),\n (-np.pi / 4, True),\n (-np.pi / 4 + err, True),\n (-np.pi / 4 + largeErr, False),\n (-np.pi / 4 - err, True),\n (-np.pi / 4 - largeErr, False),\n (0, True),\n (err, True),\n (largeErr, False),\n (-err, True),\n (-largeErr, False),\n (np.pi / 8, False),\n (-np.pi / 8, False),\n ]\n )(1e-8 * 2 / 3, 1e-8 * 4 / 3),\n)\ndef test_is_trivial_angle(rad, expected):\n tolerance = 1e-8\n out = _is_trivial_angle(rad, tolerance)\n assert out == expected, f'rad = {rad}'\n\n\ndef _operations_to_matrix(operations, qubits):\n return cirq.Circuit(operations).unitary(\n qubit_order=cirq.QubitOrder.explicit(qubits), qubits_that_should_be_present=qubits\n )\n\n\ndef _random_single_partial_cz_effect():\n return cirq.dot(\n cirq.kron(cirq.testing.random_unitary(2), cirq.testing.random_unitary(2)),\n np.diag([1, 1, 1, cmath.exp(2j * random.random() * np.pi)]),\n cirq.kron(cirq.testing.random_unitary(2), cirq.testing.random_unitary(2)),\n )\n\n\ndef _random_double_partial_cz_effect():\n return cirq.dot(\n cirq.kron(cirq.testing.random_unitary(2), cirq.testing.random_unitary(2)),\n np.diag([1, 1, 1, cmath.exp(2j * random.random() * np.pi)]),\n cirq.kron(cirq.testing.random_unitary(2), cirq.testing.random_unitary(2)),\n np.diag([1, 1, 1, cmath.exp(2j * random.random() * np.pi)]),\n cirq.kron(cirq.testing.random_unitary(2), cirq.testing.random_unitary(2)),\n )\n\n\ndef _random_double_full_cz_effect():\n return cirq.dot(\n cirq.kron(cirq.testing.random_unitary(2), cirq.testing.random_unitary(2)),\n cirq.unitary(cirq.CZ),\n cirq.kron(cirq.testing.random_unitary(2), cirq.testing.random_unitary(2)),\n cirq.unitary(cirq.CZ),\n cirq.kron(cirq.testing.random_unitary(2), cirq.testing.random_unitary(2)),\n )\n\n\ndef assert_cz_depth_below(operations, threshold, must_be_full):\n total_cz = 0\n\n for op in operations:\n assert len(op.qubits) <= 2\n if len(op.qubits) == 2:\n assert isinstance(op.gate, cirq.CZPowGate)\n e = value.canonicalize_half_turns(op.gate.exponent)\n if must_be_full:\n assert e == 1\n total_cz += abs(e)\n\n assert total_cz <= threshold\n\n\ndef 
assert_ops_implement_unitary(q0, q1, operations, intended_effect, atol=0.01):\n actual_effect = _operations_to_matrix(operations, (q0, q1))\n assert cirq.allclose_up_to_global_phase(actual_effect, intended_effect, atol=atol)\n\n\[email protected](\n 'max_partial_cz_depth,max_full_cz_depth,effect',\n [\n (0, 0, np.eye(4)),\n (\n 0,\n 0,\n np.array(\n [\n [0, 0, 0, 1],\n [0, 0, 1, 0],\n [0, 1, 0, 0],\n [1, 0, 0, 0j],\n ]\n ),\n ),\n (0, 0, cirq.unitary(cirq.CZ**0.00000001)),\n (0.5, 2, cirq.unitary(cirq.CZ**0.5)),\n (1, 1, cirq.unitary(cirq.CZ)),\n (1, 1, cirq.unitary(cirq.CNOT)),\n (\n 1,\n 1,\n np.array(\n [\n [1, 0, 0, 1j],\n [0, 1, 1j, 0],\n [0, 1j, 1, 0],\n [1j, 0, 0, 1],\n ]\n )\n * np.sqrt(0.5),\n ),\n (\n 1,\n 1,\n np.array(\n [\n [1, 0, 0, -1j],\n [0, 1, -1j, 0],\n [0, -1j, 1, 0],\n [-1j, 0, 0, 1],\n ]\n )\n * np.sqrt(0.5),\n ),\n (\n 1,\n 1,\n np.array(\n [\n [1, 0, 0, 1j],\n [0, 1, -1j, 0],\n [0, -1j, 1, 0],\n [1j, 0, 0, 1],\n ]\n )\n * np.sqrt(0.5),\n ),\n (1.5, 3, cirq.map_eigenvalues(cirq.unitary(cirq.SWAP), lambda e: e**0.5)),\n (2, 2, cirq.unitary(cirq.SWAP).dot(cirq.unitary(cirq.CZ))),\n (3, 3, cirq.unitary(cirq.SWAP)),\n (\n 3,\n 3,\n np.array(\n [\n [0, 0, 0, 1],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [1, 0, 0, 0j],\n ]\n ),\n ),\n ]\n + [(1, 2, _random_single_partial_cz_effect()) for _ in range(10)]\n + [(2, 2, _random_double_full_cz_effect()) for _ in range(10)]\n + [(2, 3, _random_double_partial_cz_effect()) for _ in range(10)]\n + [(3, 3, cirq.testing.random_unitary(4)) for _ in range(10)],\n)\ndef test_two_to_ops_equivalent_and_bounded_for_known_and_random(\n max_partial_cz_depth, max_full_cz_depth, effect\n):\n q0 = cirq.NamedQubit('q0')\n q1 = cirq.NamedQubit('q1')\n\n operations_with_partial = cirq.two_qubit_matrix_to_cz_operations(q0, q1, effect, True)\n operations_with_full = cirq.two_qubit_matrix_to_cz_operations(q0, q1, effect, False)\n\n assert_ops_implement_unitary(q0, q1, operations_with_partial, effect)\n assert_ops_implement_unitary(q0, q1, operations_with_full, effect)\n\n assert_cz_depth_below(operations_with_partial, max_partial_cz_depth, False)\n assert_cz_depth_below(operations_with_full, max_full_cz_depth, True)\n\n\ndef test_trivial_parity_interaction_corner_case():\n q0 = cirq.NamedQubit('q0')\n q1 = cirq.NamedQubit('q1')\n nearPi4 = np.pi / 4 * 0.99\n tolerance = 1e-2\n circuit = cirq.Circuit(_parity_interaction(q0, q1, -nearPi4, tolerance))\n assert len(circuit) == 2\n\n\ndef test_kak_decomposition_depth_full_cz():\n a, b = cirq.LineQubit.range(2)\n\n # Random.\n u = cirq.testing.random_unitary(4)\n operations_with_full = cirq.two_qubit_matrix_to_cz_operations(a, b, u, False)\n c = cirq.Circuit(operations_with_full)\n # 3 CZ, 3+1 PhasedX, 1 Z\n assert len(c) <= 8\n\n # Double-axis interaction.\n u = cirq.unitary(cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a)))\n operations_with_part = cirq.two_qubit_matrix_to_cz_operations(a, b, u, False)\n c = cirq.Circuit(operations_with_part)\n # 2 CZ, 2+1 PhasedX, 1 Z\n assert len(c) <= 6\n\n # Test unoptimized/un-cleaned length of Double-axis interaction.\n u = cirq.unitary(cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a)))\n operations_with_part = cirq.two_qubit_matrix_to_cz_operations(a, b, u, False, 1e-8, False)\n c = cirq.Circuit(operations_with_part)\n assert len(c) > 6 # Length should be 13 with extra Pauli gates\n\n # Partial single-axis interaction.\n u = cirq.unitary(cirq.CNOT**0.1)\n operations_with_part = cirq.two_qubit_matrix_to_cz_operations(a, b, u, False)\n c = cirq.Circuit(operations_with_part)\n # 2 
CZ, 2+1 PhasedX, 1 Z\n assert len(c) <= 6\n\n # Full single-axis interaction.\n u = cirq.unitary(cirq.ControlledGate(cirq.Y))\n operations_with_part = cirq.two_qubit_matrix_to_cz_operations(a, b, u, False)\n c = cirq.Circuit(operations_with_part)\n # 1 CZ, 1+1 PhasedX, 1 Z\n assert len(c) <= 4\n\n\ndef test_kak_decomposition_depth_partial_cz():\n a, b = cirq.LineQubit.range(2)\n\n # Random.\n u = cirq.testing.random_unitary(4)\n operations_with_full = cirq.two_qubit_matrix_to_cz_operations(a, b, u, True)\n c = cirq.Circuit(operations_with_full)\n # 3 CP, 3+1 PhasedX, 1 Z\n assert len(c) <= 8\n\n # Double-axis interaction.\n u = cirq.unitary(cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a)))\n operations_with_part = cirq.two_qubit_matrix_to_cz_operations(a, b, u, True)\n c = cirq.Circuit(operations_with_part)\n # 2 CP, 2+1 PhasedX, 1 Z\n assert len(c) <= 6\n\n # Partial single-axis interaction.\n u = cirq.unitary(cirq.CNOT**0.1)\n operations_with_part = cirq.two_qubit_matrix_to_cz_operations(a, b, u, True)\n c = cirq.Circuit(operations_with_part)\n # 1 CP, 1+1 PhasedX, 1 Z\n assert len(c) <= 4\n\n # Full single-axis interaction.\n u = cirq.unitary(cirq.ControlledGate(cirq.Y))\n operations_with_part = cirq.two_qubit_matrix_to_cz_operations(a, b, u, True)\n c = cirq.Circuit(operations_with_part)\n # 1 CP, 1+1 PhasedX, 1 Z\n assert len(c) <= 4\n\n\[email protected](\n \"v\",\n [\n cirq.unitary(random_two_qubit_circuit_with_czs(3)),\n cirq.unitary(random_two_qubit_circuit_with_czs(2)),\n np.diag(np.exp(1j * np.pi * np.random.random(4))),\n ],\n)\ndef test_decompose_to_diagonal_and_circuit(v):\n b, c = cirq.LineQubit.range(2)\n diagonal, ops = two_qubit_matrix_to_diagonal_and_cz_operations(b, c, v)\n assert cirq.is_diagonal(diagonal)\n combined_circuit = cirq.Circuit(cirq.MatrixGate(diagonal)(b, c), ops)\n circuit_unitary = combined_circuit.unitary(qubits_that_should_be_present=[b, c])\n cirq.testing.assert_allclose_up_to_global_phase(circuit_unitary, v, atol=1e-14)\n",
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nfrom typing import Any, Dict, Optional, Sequence, Type, Union\n\nimport numpy as np\nimport sympy\n\nfrom cirq import ops, protocols, value\nfrom cirq.testing.consistent_act_on import assert_all_implemented_act_on_effects_match_unitary\nfrom cirq.testing.circuit_compare import (\n assert_has_consistent_apply_unitary,\n assert_has_consistent_qid_shape,\n)\nfrom cirq.testing.consistent_decomposition import (\n assert_decompose_is_consistent_with_unitary,\n assert_decompose_ends_at_default_gateset,\n)\nfrom cirq.testing.consistent_phase_by import (\n assert_phase_by_is_consistent_with_unitary,\n)\nfrom cirq.testing.consistent_qasm import (\n assert_qasm_is_consistent_with_unitary,\n)\nfrom cirq.testing.consistent_pauli_expansion import (\n assert_pauli_expansion_is_consistent_with_unitary,\n)\nfrom cirq.testing.consistent_resolve_parameters import (\n assert_consistent_resolve_parameters,\n)\nfrom cirq.testing.consistent_specified_has_unitary import (\n assert_specifies_has_unitary_if_unitary,\n)\nfrom cirq.testing.equivalent_repr_eval import assert_equivalent_repr\nfrom cirq.testing.consistent_controlled_gate_op import assert_controlled_and_controlled_by_identical\n\n\ndef assert_implements_consistent_protocols(\n val: Any,\n *,\n exponents: Sequence[Any] = (0, 1, -1, 0.25, -0.5, 0.1, sympy.Symbol('s')),\n qubit_count: Optional[int] = None,\n ignoring_global_phase: bool = False,\n setup_code: str = 'import cirq\\nimport numpy as np\\nimport sympy',\n global_vals: Optional[Dict[str, Any]] = None,\n local_vals: Optional[Dict[str, Any]] = None,\n ignore_decompose_to_default_gateset: bool = False,\n) -> None:\n \"\"\"Checks that a value is internally consistent and has a good __repr__.\"\"\"\n global_vals = global_vals or {}\n local_vals = local_vals or {}\n\n _assert_meets_standards_helper(\n val,\n ignoring_global_phase=ignoring_global_phase,\n setup_code=setup_code,\n global_vals=global_vals,\n local_vals=local_vals,\n ignore_decompose_to_default_gateset=ignore_decompose_to_default_gateset,\n )\n\n for exponent in exponents:\n p = protocols.pow(val, exponent, None)\n if p is not None:\n _assert_meets_standards_helper(\n val**exponent,\n ignoring_global_phase=ignoring_global_phase,\n setup_code=setup_code,\n global_vals=global_vals,\n local_vals=local_vals,\n ignore_decompose_to_default_gateset=ignore_decompose_to_default_gateset,\n )\n\n\ndef assert_eigengate_implements_consistent_protocols(\n eigen_gate_type: Type[ops.EigenGate],\n *,\n exponents: Sequence[value.TParamVal] = (0, 0.5, 1, -1, 0.25, -0.5, 0.1, sympy.Symbol('s')),\n global_shifts: Sequence[float] = (0, -0.5, 0.1),\n qubit_count: Optional[int] = None,\n ignoring_global_phase: bool = False,\n setup_code: str = 'import cirq\\nimport numpy as np\\nimport sympy',\n global_vals: Optional[Dict[str, Any]] = None,\n local_vals: Optional[Dict[str, Any]] = None,\n ignore_decompose_to_default_gateset: bool = False,\n) -> None:\n 
\"\"\"Checks that an EigenGate subclass is internally consistent and has a\n good __repr__.\"\"\"\n # pylint: disable=unused-variable\n __tracebackhide__ = True\n # pylint: enable=unused-variable\n\n for exponent in exponents:\n for shift in global_shifts:\n _assert_meets_standards_helper(\n eigen_gate_type(exponent=exponent, global_shift=shift),\n ignoring_global_phase=ignoring_global_phase,\n setup_code=setup_code,\n global_vals=global_vals,\n local_vals=local_vals,\n ignore_decompose_to_default_gateset=ignore_decompose_to_default_gateset,\n )\n\n\ndef assert_eigen_shifts_is_consistent_with_eigen_components(val: ops.EigenGate) -> None:\n # pylint: disable=unused-variable\n __tracebackhide__ = True\n # pylint: enable=unused-variable\n if not protocols.is_parameterized(val):\n assert val._eigen_shifts() == [\n e[0] for e in val._eigen_components()\n ], \"_eigen_shifts not consistent with _eigen_components\"\n\n\ndef assert_has_consistent_trace_distance_bound(val: Any) -> None:\n # pylint: disable=unused-variable\n __tracebackhide__ = True\n # pylint: enable=unused-variable\n u = protocols.unitary(val, default=None)\n val_from_trace = protocols.trace_distance_bound(val)\n assert 0.0 <= val_from_trace <= 1.0\n if u is not None:\n\n class Unitary:\n def _unitary_(self):\n return u\n\n val_from_unitary = protocols.trace_distance_bound(Unitary())\n\n assert val_from_trace >= val_from_unitary or np.isclose(val_from_trace, val_from_unitary)\n\n\ndef _assert_meets_standards_helper(\n val: Any,\n *,\n ignoring_global_phase: bool,\n setup_code: str,\n global_vals: Optional[Dict[str, Any]],\n local_vals: Optional[Dict[str, Any]],\n ignore_decompose_to_default_gateset: bool,\n) -> None:\n __tracebackhide__ = True # pylint: disable=unused-variable\n\n assert_consistent_resolve_parameters(val)\n assert_specifies_has_unitary_if_unitary(val)\n assert_has_consistent_qid_shape(val)\n assert_has_consistent_apply_unitary(val)\n assert_all_implemented_act_on_effects_match_unitary(val)\n assert_qasm_is_consistent_with_unitary(val)\n assert_has_consistent_trace_distance_bound(val)\n assert_decompose_is_consistent_with_unitary(val, ignoring_global_phase=ignoring_global_phase)\n if not ignore_decompose_to_default_gateset:\n assert_decompose_ends_at_default_gateset(val)\n assert_phase_by_is_consistent_with_unitary(val)\n assert_pauli_expansion_is_consistent_with_unitary(val)\n assert_equivalent_repr(\n val, setup_code=setup_code, global_vals=global_vals, local_vals=local_vals\n )\n assert protocols.measurement_key_objs(val) == protocols.measurement_key_names(val)\n if isinstance(val, ops.EigenGate):\n assert_eigen_shifts_is_consistent_with_eigen_components(val)\n if isinstance(val, ops.Gate):\n assert_controlled_and_controlled_by_identical(val)\n\n\ndef assert_commutes_magic_method_consistent_with_unitaries(\n *vals: Sequence[Any], atol: Union[int, float] = 1e-8\n) -> None:\n if any(isinstance(val, ops.Operation) for val in vals):\n raise TypeError('`_commutes_` need not be consistent with unitaries for `Operation`.')\n unitaries = [protocols.unitary(val, None) for val in vals]\n pairs = itertools.permutations(zip(vals, unitaries), 2)\n for (left_val, left_unitary), (right_val, right_unitary) in pairs:\n if left_unitary is None or right_unitary is None:\n continue\n commutes = protocols.commutes(left_val, right_val, atol=atol, default=None)\n if commutes is None:\n continue\n assert commutes == protocols.commutes(left_unitary, right_unitary)\n"
] | [
[
"numpy.array"
],
[
"numpy.array",
"numpy.kron"
],
[
"numpy.linalg.eigvals"
],
[
"numpy.eye",
"numpy.array",
"numpy.random.random",
"numpy.sqrt"
],
[
"numpy.isclose"
]
] |
LiyrAstroph/CDNest | [
"afb6b869ce1c4ebd76662b20310f1d9d3db4e26e"
] | [
"tests/rastrigin_accept_action.py"
] | [
"#\n# sample from a Rastrigin test function\n# this is to illustrate how to use accept_action in CDNest to avoid repeat calculations.\n#\n# A 2D Rastrigin function looks\n# \n# logL=-(10.0*2 + (coords[0]**2 - 10*np.cos(2.0*np.pi*coords[0])) + (coords[1]**2 - 10*np.cos(2.0*np.pi*coords[1])) ) \n#\n# Every perturb, only one parameter is updated, so that the terms related to the rest parameters \n# do not need to recalculate, just use the values in the previous step.\n#\n# In this example, we use an array to record values of the term \"(coords[0]**2 - 10*np.cos(2.0*np.pi*coords[0]))\"\n# in every accepted perturb.\n#\n\nfrom mpi4py import MPI\nimport numpy as np\nimport cydnest\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\n\ndef randh():\n \"\"\"\n generate from the heavy-tailed distribution.\n \"\"\"\n return 10.0**(1.5 - 3*np.abs(np.random.randn()/np.sqrt(-np.log(np.random.rand()))))*np.random.randn()\n\ndef wrap(x, a, b):\n assert b > a\n return (x - a)%(b - a) + a\n\nclass Model(object):\n\n def __init__(self, num_params=1, num_particles=1):\n \"\"\"\n intialize the model\n \"\"\"\n # number of particles each core holds\n self.num_particles = num_particles\n\n # number of parameters\n self.num_params = num_params \n\n # parameter ranges, a list\n self.param_range = [[-5.12, 5.12]]*num_params\n\n # parameter prior type.\n # three types: Uniform, Gaussian, Log \n self.prior_type = [\"Uniform\"]*num_params\n\n # parameter prior information. used when the prior is Gaussian\n # indicate the mean and standard deviation of the Gaussian prior\n self.prior_info = [[0.0, 1.0]]*num_params\n \n # which parameter being perturbed \n # which particle being perturbed\n self.which_param_update = 0\n self.which_particle_update = 0\n\n # perturbed values and accepted values for all particles\n self.value_perturb = [0.0]*self.num_particles\n self.value_accept = [0.0]*self.num_particles\n\n def accept_action(self):\n \"\"\"\n action taken when a perturb is accepted\n record the accepted values from the perturbed values\n \"\"\"\n\n # note \"which_particle_update\" is updated and \"which_param_update\" is updated\n if self.which_param_update < 1:\n self.value_accept[self.which_particle_update] = self.value_perturb[self.which_particle_update]\n \n def kill_action(self, i, i_copy):\n \"\"\"\n cdnest kill a particle when it is not updated for a long time.\n action taken when a particle is killed: i particle is killed,\n copy i_copy particle's values to i particle's values\n this function is needed, since we record some accepted values \n \"\"\"\n self.value_accept[i] = self.value_accept[i_copy]\n return\n \n # users can define their own functions to generate \n # the initial parameter values \n # this is optinal. if not defined, cydnest will use the internal \n # function. 
\n    def from_prior(self):\n        \"\"\"\n        generate initial values of model parameters from priors\n        \"\"\"\n        coords = np.zeros(self.num_params)\n        for i in range(self.num_params):\n            if self.prior_type[i] == \"Uniform\":\n                coords[i] = np.random.uniform(self.param_range[i][0], self.param_range[i][1])\n            elif self.prior_type[i] == \"Gaussian\":\n                coords[i] = np.random.randn() * self.prior_info[i][1] + self.prior_info[i][0]\n                coords[i] = wrap(coords[i], self.param_range[i][0], self.param_range[i][1])\n            elif self.prior_type[i] == \"Log\": # LOG prior\n                coords[i] = np.random.uniform(np.log(self.param_range[i][0]), np.log(self.param_range[i][1]))\n                coords[i] = np.exp(coords[i])\n\n        return coords\n\n    # users can define their own functions to perturb \n    # parameter values for sampling \n    # this is optional. if not defined, cydnest will use the internal \n    # function.  \n    def perturb(self, coords):\n        \"\"\"\n        perturb the parameters\n        \"\"\"\n        i = np.random.randint(self.num_params)\n        \n        # record which parameter is updated\n        self.which_param_update = i\n\n        LogH = 0.0 # prior ratio: ln(prior(new)/prior(old)) = ln(prior(new)) - ln(prior(old))\n        width = (self.param_range[i][1]-self.param_range[i][0])\n        if self.prior_type[i] == \"Uniform\":\n            coords[i] += width*randh()\n            coords[i] = wrap(coords[i], self.param_range[i][0], self.param_range[i][1])\n        elif self.prior_type[i] == \"Gaussian\": \n            LogH -= ( -0.5* (coords[i] - self.prior_info[i][0])**2/self.prior_info[i][1]**2 ) # ln(Gaussian)\n            coords[i] += width*randh()\n            coords[i] = wrap(coords[i], self.param_range[i][0], self.param_range[i][1])\n            LogH += ( -0.5* (coords[i] - self.prior_info[i][0])**2/self.prior_info[i][1]**2 )\n        elif self.prior_type[i] == \"Log\":\n            LogH -= ( -np.log(coords[i]) ) # ln(1/x) = -ln(x)\n            coords[i] += width*randh()\n            coords[i] = wrap(coords[i], self.param_range[i][0], self.param_range[i][1])\n            LogH += ( -np.log(coords[i]) )\n        return LogH \n    \n    def log_likelihood_initial(self, coords):\n        \"\"\"\n        calculate likelihood at initial start\n        \"\"\" \n        self.which_particle_update = cydnest.get_which_particle_update()\n        self.value_accept[self.which_particle_update] = coords[0]**2 - 10*np.cos(2.0*np.pi*coords[0])\n        value = self.value_accept[self.which_particle_update]\n        return -(10.0*2 + (value) + (coords[1]**2 - 10*np.cos(2.0*np.pi*coords[1])) )\n\n    def log_likelihood(self, coords):\n        \"\"\"\n        calculate likelihood\n        \"\"\"\n        # get which particle is being updated, and save it to the model\n\n        self.which_particle_update = cydnest.get_which_particle_update()\n        \n        value = 0.0\n        if self.which_param_update < 1: # when the 0-th parameter is updated, recalculate\n            self.value_perturb[self.which_particle_update] = coords[0]**2 - 10*np.cos(2.0*np.pi*coords[0])\n            value = self.value_perturb[self.which_particle_update]\n        else: # otherwise, use the accepted value\n            value = self.value_accept[self.which_particle_update]\n\n        return -(10.0*2 + (value) + (coords[1]**2 - 10*np.cos(2.0*np.pi*coords[1])) )\n\n# create a model\nmodel = Model(num_params=2, num_particles=2)\n\n# create a dnest sampler\n# max_num_saves is the number of samples to generate\n# max_num_levels is the number of levels \n# ptol is the likelihood tolerance in ln (natural log) units\nsampler = cydnest.sampler(model, sample_dir=\"./\", max_num_saves = 10000, ptol=0.1, num_particles=model.num_particles)\n#\n# The full argument list looks like:\n# sampler = cydnest.sampler(model, sample_dir=\"./\", max_num_saves = 10000, ptol=0.1, \n#                  num_particles=1, thread_steps_factor = 10, \n#                  max_num_levels = 0, lam = 10, beta = 100\n# 
new_level_interval_factor = 2, save_interval_factor = 2)\n#\n\n\n# run sampler\nlogz = sampler.run()\ncomm.Barrier()\n\n# output evidence\nif rank == 0:\n    print(\"Evidence:\", logz)\n\n    psample = np.loadtxt(sampler.get_sample_dir() +\"/posterior_sample\" + sampler.get_sample_tag() + \".txt\")\n    psample_info = np.loadtxt(sampler.get_sample_dir() +\"/posterior_sample_info\" + sampler.get_sample_tag() + \".txt\")\n\n    fig = plt.figure(figsize=(15, 12))\n    ax = fig.add_subplot(111, projection='3d')\n    \n\n    X = np.arange(-1.5, 1.5, 0.01)\n    Y = np.arange(-1.5, 1.5, 0.01)\n    X, Y = np.meshgrid(X, Y)\n    Z = -(10.0*2 + (X**2 - 10*np.cos(2.0*np.pi*X)) + (Y**2 - 10*np.cos(2.0*np.pi*Y)) )\n    ax.plot_surface(X, Y, Z, cmap=cm.ocean, rstride=2, cstride=2, linewidth=0, antialiased=False, zorder=0)\n\n    idx = np.where((np.abs(psample[:, 0]) <1.4) & (np.abs(psample[:, 1]) <1.4))\n    ax.plot(psample[idx[0], 0], psample[idx[0], 1], psample_info[idx[0]], ls='none', marker='+', zorder=10)\n    ax.set_xlim(-1.5, 1.5)\n    ax.set_ylim(-1.5, 1.5)\n    ax.set_xlabel(r'$\\theta_1$')\n    ax.set_ylabel(r'$\\theta_2$')\n    ax.set_zlabel(r'$\\log L$')\n    fig.savefig(\"fig_rastrigin.jpg\", bbox_inches='tight')\n    plt.show()\n\n    # do postprocessing; plot and show the properties of the sampling \n    cydnest.postprocess(sampler.get_sample_dir(), sampler.get_sample_tag(), temperature=1.0, doplot=True)"
] | [
[
"numpy.log",
"numpy.abs",
"numpy.arange",
"numpy.cos",
"numpy.random.randn",
"numpy.random.randint",
"numpy.random.rand",
"numpy.exp",
"numpy.random.uniform",
"numpy.meshgrid",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
echo-ray/catalyst | [
"8b4274d17f0a42ee4d1d5e09d30fb0919aea2a51"
] | [
"catalyst/marketplace/marketplace.py"
] | [
"from __future__ import print_function\n\nimport glob\nimport json\nimport os\nimport re\nimport shutil\nimport sys\nimport time\nimport webbrowser\n\nimport bcolz\nimport logbook\nimport pandas as pd\nimport requests\nfrom requests_toolbelt import MultipartDecoder\nfrom requests_toolbelt.multipart.decoder import \\\n NonMultipartContentTypeException\n\nfrom catalyst.constants import (\n LOG_LEVEL, AUTH_SERVER, ETH_REMOTE_NODE, MARKETPLACE_CONTRACT,\n MARKETPLACE_CONTRACT_ABI, ENIGMA_CONTRACT, ENIGMA_CONTRACT_ABI)\nfrom catalyst.exchange.utils.stats_utils import set_print_settings\nfrom catalyst.marketplace.marketplace_errors import (\n MarketplacePubAddressEmpty, MarketplaceDatasetNotFound,\n MarketplaceNoAddressMatch, MarketplaceHTTPRequest,\n MarketplaceNoCSVFiles, MarketplaceRequiresPython3)\nfrom catalyst.marketplace.utils.auth_utils import get_key_secret, \\\n get_signed_headers\nfrom catalyst.marketplace.utils.bundle_utils import merge_bundles\nfrom catalyst.marketplace.utils.eth_utils import bin_hex, from_grains, \\\n to_grains\nfrom catalyst.marketplace.utils.path_utils import get_bundle_folder, \\\n get_data_source_folder, get_marketplace_folder, \\\n get_user_pubaddr, get_temp_bundles_folder, extract_bundle\nfrom catalyst.utils.paths import ensure_directory\n\nif sys.version_info.major < 3:\n import urllib\nelse:\n import urllib.request as urllib\n\nlog = logbook.Logger('Marketplace', level=LOG_LEVEL)\n\n\nclass Marketplace:\n def __init__(self):\n global Web3\n try:\n from web3 import Web3, HTTPProvider\n except ImportError:\n raise MarketplaceRequiresPython3()\n\n self.addresses = get_user_pubaddr()\n\n if self.addresses[0]['pubAddr'] == '':\n raise MarketplacePubAddressEmpty(\n filename=os.path.join(\n get_marketplace_folder(), 'addresses.json')\n )\n self.default_account = self.addresses[0]['pubAddr']\n\n self.web3 = Web3(HTTPProvider(ETH_REMOTE_NODE))\n\n contract_url = urllib.urlopen(MARKETPLACE_CONTRACT)\n\n self.mkt_contract_address = Web3.toChecksumAddress(\n contract_url.readline().decode(\n contract_url.info().get_content_charset()).strip())\n\n abi_url = urllib.urlopen(MARKETPLACE_CONTRACT_ABI)\n abi = json.load(abi_url)\n\n self.mkt_contract = self.web3.eth.contract(\n self.mkt_contract_address,\n abi=abi,\n )\n\n contract_url = urllib.urlopen(ENIGMA_CONTRACT)\n\n self.eng_contract_address = Web3.toChecksumAddress(\n contract_url.readline().decode(\n contract_url.info().get_content_charset()).strip())\n\n abi_url = urllib.urlopen(ENIGMA_CONTRACT_ABI)\n abi = json.load(abi_url)\n\n self.eng_contract = self.web3.eth.contract(\n self.eng_contract_address,\n abi=abi,\n )\n\n # def get_data_sources_map(self):\n # return [\n # dict(\n # name='Marketcap',\n # desc='The marketcap value in USD.',\n # start_date=pd.to_datetime('2017-01-01'),\n # end_date=pd.to_datetime('2018-01-15'),\n # data_frequencies=['daily'],\n # ),\n # dict(\n # name='GitHub',\n # desc='The rate of development activity on GitHub.',\n # start_date=pd.to_datetime('2017-01-01'),\n # end_date=pd.to_datetime('2018-01-15'),\n # data_frequencies=['daily', 'hour'],\n # ),\n # dict(\n # name='Influencers',\n # desc='Tweets & related sentiments by selected influencers.',\n # start_date=pd.to_datetime('2017-01-01'),\n # end_date=pd.to_datetime('2018-01-15'),\n # data_frequencies=['daily', 'hour', 'minute'],\n # ),\n # ]\n\n def to_text(self, hex):\n return Web3.toText(hex).rstrip('\\0')\n\n def choose_pubaddr(self):\n if len(self.addresses) == 1:\n address = self.addresses[0]['pubAddr']\n address_i = 0\n 
print('Using {} for this transaction.'.format(address))\n else:\n while True:\n for i in range(0, len(self.addresses)):\n print('{}\\t{}\\t{}'.format(\n i,\n self.addresses[i]['pubAddr'],\n self.addresses[i]['desc'])\n )\n address_i = int(input('Choose your address associated with '\n 'this transaction: [default: 0] ') or 0)\n if not (0 <= address_i < len(self.addresses)):\n print('Please choose a number between 0 and {}\\n'.format(\n len(self.addresses) - 1))\n else:\n address = Web3.toChecksumAddress(\n self.addresses[address_i]['pubAddr'])\n break\n\n return address, address_i\n\n def sign_transaction(self, tx):\n\n url = 'https://www.myetherwallet.com/#offline-transaction'\n print('\\nVisit {url} and enter the following parameters:\\n\\n'\n 'From Address:\\t\\t{_from}\\n'\n '\\n\\tClick the \"Generate Information\" button\\n\\n'\n 'To Address:\\t\\t{to}\\n'\n 'Value / Amount to Send:\\t{value}\\n'\n 'Gas Limit:\\t\\t{gas}\\n'\n 'Gas Price:\\t\\t[Accept the default value]\\n'\n 'Nonce:\\t\\t\\t{nonce}\\n'\n 'Data:\\t\\t\\t{data}\\n'.format(\n url=url,\n _from=tx['from'],\n to=tx['to'],\n value=tx['value'],\n gas=tx['gas'],\n nonce=tx['nonce'],\n data=tx['data'], )\n )\n\n webbrowser.open_new(url)\n\n signed_tx = input('Copy and Paste the \"Signed Transaction\" '\n 'field here:\\n')\n\n if signed_tx.startswith('0x'):\n signed_tx = signed_tx[2:]\n\n return signed_tx\n\n def check_transaction(self, tx_hash):\n\n if 'ropsten' in ETH_REMOTE_NODE:\n etherscan = 'https://ropsten.etherscan.io/tx/'\n elif 'rinkeby' in ETH_REMOTE_NODE:\n etherscan = 'https://rinkeby.etherscan.io/tx/'\n else:\n etherscan = 'https://etherscan.io/tx/'\n etherscan = '{}{}'.format(etherscan, tx_hash)\n\n print('\\nYou can check the outcome of your transaction here:\\n'\n '{}\\n\\n'.format(etherscan))\n\n def _list(self):\n data_sources = self.mkt_contract.functions.getAllProviders().call()\n\n data = []\n for index, data_source in enumerate(data_sources):\n if index > 0:\n if 'test' not in Web3.toText(data_source).lower():\n data.append(\n dict(\n dataset=self.to_text(data_source)\n )\n )\n return pd.DataFrame(data)\n\n def list(self):\n df = self._list()\n\n set_print_settings()\n if df.empty:\n print('There are no datasets available yet.')\n else:\n print(df)\n\n def subscribe(self, dataset=None):\n\n if dataset is None:\n\n df_sets = self._list()\n if df_sets.empty:\n print('There are no datasets available yet.')\n return\n\n set_print_settings()\n while True:\n print(df_sets)\n dataset_num = input('Choose the dataset you want to '\n 'subscribe to [0..{}]: '.format(\n df_sets.size - 1))\n try:\n dataset_num = int(dataset_num)\n except ValueError:\n print('Enter a number between 0 and {}'.format(\n df_sets.size - 1))\n else:\n if dataset_num not in range(0, df_sets.size):\n print('Enter a number between 0 and {}'.format(\n df_sets.size - 1))\n else:\n dataset = df_sets.iloc[dataset_num]['dataset']\n break\n\n dataset = dataset.lower()\n\n address = self.choose_pubaddr()[0]\n provider_info = self.mkt_contract.functions.getDataProviderInfo(\n Web3.toHex(dataset)\n ).call()\n\n if not provider_info[4]:\n print('The requested \"{}\" dataset is not registered in '\n 'the Data Marketplace.'.format(dataset))\n return\n\n grains = provider_info[1]\n price = from_grains(grains)\n\n subscribed = self.mkt_contract.functions.checkAddressSubscription(\n address, Web3.toHex(dataset)\n ).call()\n\n if subscribed[5]:\n print(\n '\\nYou are already subscribed to the \"{}\" dataset.\\n'\n 'Your subscription started on {} UTC, and is 
valid until '\n '{} UTC.'.format(\n dataset,\n pd.to_datetime(subscribed[3], unit='s', utc=True),\n pd.to_datetime(subscribed[4], unit='s', utc=True)\n )\n )\n return\n\n print('\\nThe price for a monthly subscription to this dataset is'\n ' {} ENG'.format(price))\n\n print(\n 'Checking that the ENG balance in {} is greater than {} '\n 'ENG... '.format(address, price), end=''\n )\n\n wallet_address = address[2:]\n balance = self.web3.eth.call({\n 'from': address,\n 'to': self.eng_contract_address,\n 'data': '0x70a08231000000000000000000000000{}'.format(\n wallet_address\n )\n })\n\n try:\n balance = Web3.toInt(balance) # web3 >= 4.0.0b7\n except TypeError:\n balance = Web3.toInt(hexstr=balance) # web3 <= 4.0.0b6\n\n if balance > grains:\n print('OK.')\n else:\n print('FAIL.\\n\\nAddress {} balance is {} ENG,\\nwhich is lower '\n 'than the price of the dataset that you are trying to\\n'\n 'buy: {} ENG. Get enough ENG to cover the costs of the '\n 'monthly\\nsubscription for what you are trying to buy, '\n 'and try again.'.format(\n address, from_grains(balance), price))\n return\n\n while True:\n agree_pay = input('Please confirm that you agree to pay {} ENG '\n 'for a monthly subscription to the dataset \"{}\" '\n 'starting today. [default: Y] '.format(\n price, dataset)) or 'y'\n if agree_pay.lower() not in ('y', 'n'):\n print(\"Please answer Y or N.\")\n else:\n if agree_pay.lower() == 'y':\n break\n else:\n return\n\n print('Ready to subscribe to dataset {}.\\n'.format(dataset))\n print('In order to execute the subscription, you will need to sign '\n 'two different transactions:\\n'\n '1. First transaction is to authorize the Marketplace contract '\n 'to spend {} ENG on your behalf.\\n'\n '2. Second transaction is the actual subscription for the '\n 'desired dataset'.format(price))\n\n tx = self.eng_contract.functions.approve(\n self.mkt_contract_address,\n grains,\n ).buildTransaction(\n {'from': address,\n 'nonce': self.web3.eth.getTransactionCount(address)}\n )\n\n signed_tx = self.sign_transaction(tx)\n try:\n tx_hash = '0x{}'.format(\n bin_hex(self.web3.eth.sendRawTransaction(signed_tx))\n )\n print(\n '\\nThis is the TxHash for this transaction: {}'.format(tx_hash)\n )\n\n except Exception as e:\n print('Unable to subscribe to data source: {}'.format(e))\n return\n\n self.check_transaction(tx_hash)\n\n print('Waiting for the first transaction to succeed...')\n\n while True:\n try:\n if self.web3.eth.getTransactionReceipt(tx_hash).status:\n break\n else:\n print('\\nTransaction failed. Aborting...')\n return\n except AttributeError:\n pass\n for i in range(0, 10):\n print('.', end='', flush=True)\n time.sleep(1)\n\n print('\\nFirst transaction successful!\\n'\n 'Now processing second transaction.')\n\n tx = self.mkt_contract.functions.subscribe(\n Web3.toHex(dataset),\n ).buildTransaction({\n 'from': address,\n 'nonce': self.web3.eth.getTransactionCount(address)})\n\n signed_tx = self.sign_transaction(tx)\n\n try:\n tx_hash = '0x{}'.format(bin_hex(\n self.web3.eth.sendRawTransaction(signed_tx)))\n print('\\nThis is the TxHash for this transaction: '\n '{}'.format(tx_hash))\n\n except Exception as e:\n print('Unable to subscribe to data source: {}'.format(e))\n return\n\n self.check_transaction(tx_hash)\n\n print('Waiting for the second transaction to succeed...')\n\n while True:\n try:\n if self.web3.eth.getTransactionReceipt(tx_hash).status:\n break\n else:\n print('\\nTransaction failed. 
Aborting...')\n                    return\n            except AttributeError:\n                pass\n            for i in range(0, 10):\n                print('.', end='', flush=True)\n                time.sleep(1)\n\n        print('\\nSecond transaction successful!\\n'\n              'You have successfully subscribed to dataset {} with '\n              'address {}.\\n'\n              'You can now ingest this dataset anytime during the '\n              'next month by running the following command:\\n'\n              'catalyst marketplace ingest --dataset={}'.format(\n                dataset, address, dataset))\n\n    def process_temp_bundle(self, ds_name, path):\n        \"\"\"\n        Merge the temp bundle into the main bundle for the specified\n        data source.\n\n        Parameters\n        ----------\n        ds_name\n        path\n\n        Returns\n        -------\n\n        \"\"\"\n        tmp_bundle = extract_bundle(path)\n        bundle_folder = get_data_source_folder(ds_name)\n        ensure_directory(bundle_folder)\n        if os.listdir(bundle_folder):\n            zsource = bcolz.ctable(rootdir=tmp_bundle, mode='r')\n            ztarget = bcolz.ctable(rootdir=bundle_folder, mode='r')\n            merge_bundles(zsource, ztarget)\n\n        else:\n            os.rename(tmp_bundle, bundle_folder)\n\n        pass\n\n    def ingest(self, ds_name=None, start=None, end=None, force_download=False):\n\n        if ds_name is None:\n\n            df_sets = self._list()\n            if df_sets.empty:\n                print('There are no datasets available yet.')\n                return\n\n            set_print_settings()\n            while True:\n                print(df_sets)\n                dataset_num = input('Choose the dataset you want to '\n                                    'ingest [0..{}]: '.format(\n                                        df_sets.size - 1))\n                try:\n                    dataset_num = int(dataset_num)\n                except ValueError:\n                    print('Enter a number between 0 and {}'.format(\n                        df_sets.size - 1))\n                else:\n                    if dataset_num not in range(0, df_sets.size):\n                        print('Enter a number between 0 and {}'.format(\n                            df_sets.size - 1))\n                    else:\n                        ds_name = df_sets.iloc[dataset_num]['dataset']\n                        break\n\n        # ds_name = ds_name.lower()\n\n        # TODO: catch error conditions\n        provider_info = self.mkt_contract.functions.getDataProviderInfo(\n            Web3.toHex(ds_name)\n        ).call()\n\n        if not provider_info[4]:\n            print('The requested \"{}\" dataset is not registered in '\n                  'the Data Marketplace.'.format(ds_name))\n            return\n\n        address, address_i = self.choose_pubaddr()\n        fns = self.mkt_contract.functions\n        check_sub = fns.checkAddressSubscription(\n            address, Web3.toHex(ds_name)\n        ).call()\n\n        if check_sub[0] != address or self.to_text(check_sub[1]) != ds_name:\n            print('You are not subscribed to dataset \"{}\" with address {}. 
'\n                  'Please subscribe first.'.format(ds_name, address))\n            return\n\n        if not check_sub[5]:\n            print('Your subscription to dataset \"{}\" expired on {} UTC. '\n                  'Please renew your subscription by running:\\n'\n                  'catalyst marketplace subscribe --dataset={}'.format(\n                    ds_name,\n                    pd.to_datetime(check_sub[4], unit='s', utc=True),\n                    ds_name)\n            )\n\n        if 'key' in self.addresses[address_i]:\n            key = self.addresses[address_i]['key']\n            secret = self.addresses[address_i]['secret']\n        else:\n            key, secret = get_key_secret(address)\n\n        headers = get_signed_headers(ds_name, key, secret)\n        log.debug('Starting download of dataset for ingestion...')\n        r = requests.post(\n            '{}/marketplace/ingest'.format(AUTH_SERVER),\n            headers=headers,\n            stream=True,\n        )\n        if r.status_code == 200:\n            target_path = get_temp_bundles_folder()\n            try:\n                decoder = MultipartDecoder.from_response(r)\n                for part in decoder.parts:\n                    h = part.headers[b'Content-Disposition'].decode('utf-8')\n                    # Extracting the filename from the header\n                    name = re.search(r'filename=\"(.*)\"', h).group(1)\n\n                    filename = os.path.join(target_path, name)\n                    with open(filename, 'wb') as f:\n                        # for chunk in part.content.iter_content(\n                        #         chunk_size=1024):\n                        #     if chunk: # filter out keep-alive new chunks\n                        #         f.write(chunk)\n                        f.write(part.content)\n\n                    self.process_temp_bundle(ds_name, filename)\n\n            except NonMultipartContentTypeException:\n                response = r.json()\n                raise MarketplaceHTTPRequest(\n                    request='ingest dataset',\n                    error=response,\n                )\n        else:\n            raise MarketplaceHTTPRequest(\n                request='ingest dataset',\n                error=r.status_code,\n            )\n\n        log.info('{} ingested successfully'.format(ds_name))\n\n    def get_dataset(self, ds_name, start=None, end=None):\n        ds_name = ds_name.lower()\n\n        # TODO: filter ctable by start and end date\n        bundle_folder = get_data_source_folder(ds_name)\n        z = bcolz.ctable(rootdir=bundle_folder, mode='r')\n\n        df = z.todataframe() # type: pd.DataFrame\n        df.set_index(['date', 'symbol'], drop=True, inplace=True)\n\n        # TODO: implement the filter more carefully\n        # if start and end is None:\n        #     df = df.xs(start, level=0)\n\n        return df\n\n    def clean(self, ds_name=None, data_frequency=None):\n\n        if ds_name is None:\n            mktplace_root = get_marketplace_folder()\n            folders = [os.path.basename(f.rstrip('/'))\n                       for f in glob.glob('{}/*/'.format(mktplace_root))\n                       if 'temp_bundles' not in f]\n\n            while True:\n                for idx, f in enumerate(folders):\n                    print('{}\\t{}'.format(idx, f))\n                dataset_num = input('Choose the dataset you want to '\n                                    'clean [0..{}]: '.format(\n                                        len(folders) - 1))\n                try:\n                    dataset_num = int(dataset_num)\n                except ValueError:\n                    print('Enter a number between 0 and {}'.format(\n                        len(folders) - 1))\n                else:\n                    if dataset_num not in range(0, len(folders)):\n                        print('Enter a number between 0 and {}'.format(\n                            len(folders) - 1))\n                    else:\n                        ds_name = folders[dataset_num]\n                        break\n\n        ds_name = ds_name.lower()\n\n        if data_frequency is None:\n            folder = get_data_source_folder(ds_name)\n\n        else:\n            folder = get_bundle_folder(ds_name, data_frequency)\n\n        shutil.rmtree(folder)\n        pass\n\n    def create_metadata(self, key, secret, ds_name, data_frequency, desc,\n                        has_history=True, has_live=True):\n        \"\"\"\n\n        Returns\n        -------\n\n        \"\"\"\n        headers = get_signed_headers(ds_name, key, secret)\n        r = requests.post(\n            '{}/marketplace/register'.format(AUTH_SERVER),\n            json=dict(\n                ds_name=ds_name,\n                desc=desc,\n                data_frequency=data_frequency,\n                has_history=has_history,\n                has_live=has_live,\n            ),\n            headers=headers,\n        )\n\n        if r.status_code != 200:\n            raise MarketplaceHTTPRequest(\n                
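# a non-200 response from the auth server is surfaced as an exception\n                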
request='register', error=r.status_code\n            )\n\n        if 'error' in r.json():\n            raise MarketplaceHTTPRequest(\n                request='upload file', error=r.json()['error']\n            )\n\n    def register(self):\n        while True:\n            desc = input('Enter the name of the dataset to register: ')\n            dataset = desc.lower()\n            provider_info = self.mkt_contract.functions.getDataProviderInfo(\n                Web3.toHex(dataset)\n            ).call()\n\n            if provider_info[4]:\n                print('There is already a dataset registered under '\n                      'the name \"{}\". Please choose a different '\n                      'name.'.format(dataset))\n            else:\n                break\n\n        price = int(\n            input(\n                'Enter the price for a monthly subscription to '\n                'this dataset in ENG: '\n            )\n        )\n        while True:\n            freq = input('Enter the data frequency [daily, hourly, minute]: ')\n            if freq.lower() not in ('daily', 'hourly', 'minute'):\n                print('Not a valid frequency.')\n            else:\n                break\n\n        while True:\n            reg_pub = input(\n                'Does it include historical data? [default: Y]: '\n            ) or 'y'\n            if reg_pub.lower() not in ('y', 'n'):\n                print('Please answer Y or N.')\n            else:\n                if reg_pub.lower() == 'y':\n                    has_history = True\n                else:\n                    has_history = False\n                break\n\n        while True:\n            reg_pub = input(\n                'Does it include live data? [default: Y]: '\n            ) or 'y'\n            if reg_pub.lower() not in ('y', 'n'):\n                print('Please answer Y or N.')\n            else:\n                if reg_pub.lower() == 'y':\n                    has_live = True\n                else:\n                    has_live = False\n                break\n\n        address, address_i = self.choose_pubaddr()\n        if 'key' in self.addresses[address_i]:\n            key = self.addresses[address_i]['key']\n            secret = self.addresses[address_i]['secret']\n        else:\n            key, secret = get_key_secret(address)\n\n        grains = to_grains(price)\n\n        tx = self.mkt_contract.functions.register(\n            Web3.toHex(dataset),\n            grains,\n            address,\n        ).buildTransaction(\n            {'from': address,\n             'nonce': self.web3.eth.getTransactionCount(address)}\n        )\n\n        signed_tx = self.sign_transaction(tx)\n\n        try:\n            tx_hash = '0x{}'.format(\n                bin_hex(self.web3.eth.sendRawTransaction(signed_tx))\n            )\n            print(\n                '\\nThis is the TxHash for this transaction: {}'.format(tx_hash)\n            )\n\n        except Exception as e:\n            print('Unable to register the requested dataset: {}'.format(e))\n            return\n\n        self.check_transaction(tx_hash)\n\n        print('Waiting for the transaction to succeed...')\n\n        while True:\n            try:\n                if self.web3.eth.getTransactionReceipt(tx_hash).status:\n                    break\n                else:\n                    print('\\nTransaction failed. 
Aborting...')\n return\n except AttributeError:\n pass\n for i in range(0, 10):\n print('.', end='', flush=True)\n time.sleep(1)\n\n print('\\nWarming up the {} dataset'.format(dataset))\n self.create_metadata(\n key=key,\n secret=secret,\n ds_name=dataset,\n data_frequency=freq,\n desc=desc,\n has_history=has_history,\n has_live=has_live,\n )\n print('\\n{} registered successfully'.format(dataset))\n\n def publish(self, dataset, datadir, watch):\n dataset = dataset.lower()\n provider_info = self.mkt_contract.functions.getDataProviderInfo(\n Web3.toHex(dataset)\n ).call()\n\n if not provider_info[4]:\n raise MarketplaceDatasetNotFound(dataset=dataset)\n\n match = next(\n (l for l in self.addresses if l['pubAddr'] == provider_info[0]),\n None\n )\n if not match:\n raise MarketplaceNoAddressMatch(\n dataset=dataset,\n address=provider_info[0])\n\n print('Using address: {} to publish this dataset.'.format(\n provider_info[0]))\n\n if 'key' in match:\n key = match['key']\n secret = match['secret']\n else:\n key, secret = get_key_secret(provider_info[0])\n\n headers = get_signed_headers(dataset, key, secret)\n filenames = glob.glob(os.path.join(datadir, '*.csv'))\n\n if not filenames:\n raise MarketplaceNoCSVFiles(datadir=datadir)\n\n files = []\n for file in filenames:\n files.append(('file', open(file, 'rb')))\n\n r = requests.post('{}/marketplace/publish'.format(AUTH_SERVER),\n files=files,\n headers=headers)\n\n if r.status_code != 200:\n raise MarketplaceHTTPRequest(request='upload file',\n error=r.status_code)\n\n if 'error' in r.json():\n raise MarketplaceHTTPRequest(request='upload file',\n error=r.json()['error'])\n\n print('Dataset {} uploaded successfully.'.format(dataset))\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
microsoft/Protein-Folding | [
"f534b2dd1e3f192fbcdadf234f25828c7f458a58"
] | [
"coevolution_transformer/model/msa_embeddings.py"
] | [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\nimport math\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model, max_len=1 << 13):\n super(PositionalEncoding, self).__init__()\n self.ninp = d_model\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term) # (L, C)\n self.register_buffer(\"pe\", pe)\n\n def forward(self, idx):\n \"\"\"\n idx: (B, L)\n return: (B, L, C)\n \"\"\"\n return self.pe[idx]\n\n\nclass MSAEmbeddings(nn.Module):\n def __init__(self, msa_gap, embed_dim, dropout):\n super(MSAEmbeddings, self).__init__()\n self.embed_dim = embed_dim\n self.onehot = nn.Embedding(24, 24)\n self.onehot.weight.data = torch.eye(24)\n self.onehot.weight.requires_grad = False\n self.msa_embeddings = nn.Linear((msa_gap * 2 + 2) * 24 + 2, embed_dim)\n self.position_embeddings = PositionalEncoding(embed_dim)\n self.layer_norm = nn.LayerNorm(embed_dim)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, seq_ids, msa_ids, position_ids):\n \"\"\"\n seq_ids: (B, L)\n msa_ids: (B, K, *, L)\n position_ids: (B, L)\n return: (B, K, L, C)\n \"\"\"\n B, K, _, L = msa_ids.shape\n seq = self.onehot(seq_ids)\n msa_ids = msa_ids.transpose(-2, -1)\n boundary = msa_ids[..., -2:].float()\n msa = self.onehot(msa_ids[..., :-2]).reshape(B, K, L, -1)\n msa = torch.cat([seq[:, None].repeat(1, msa.shape[1], 1, 1), msa, boundary], dim=-1)\n msa_emb = self.msa_embeddings(msa)\n pos_emb = self.position_embeddings(position_ids)\n embeddings = msa_emb * math.sqrt(self.embed_dim) + pos_emb[:, None]\n embeddings = self.layer_norm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n"
] | [
[
"torch.nn.Dropout",
"torch.sin",
"torch.zeros",
"torch.eye",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.arange",
"torch.cos"
]
] |
sunhuaibo/HLA-HED | [
"bb0672e62a20baad80f5f154c9220bf8e5b8b28c"
] | [
"hla_hed.py"
] | [
"#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n\n# =====================================\n# Author: Huaibo Sun\n# E-mail: [email protected]\n# date: 2022-03-31\n# =====================================\n\nimport os\nimport pandas as pd\nfrom Bio import SeqIO\nfrom pathlib import Path\nfrom itertools import combinations\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter\n\ndef get_opt():\n \"\"\"\n Input HLA file format\n \n Sample A1 A2 B1 B2 C1 C2\n p1 A*01:01 A*01:03 B*07:01 B*07:02 C*01:01 C*01:02\n p2 A*01:01 A*01:03 B*07:01 B*07:02 C*01:01 C*01:02\n \n If you use this tool, please cite the following three papers.\n \n Grantham R. Amino acid difference formula to help explain protein evolution. Science. 1974 Sep 6;185(4154):862-4. doi: 10.1126/science.185.4154.862. PMID: 4843792.\n Pierini F, Lenz TL. Divergent Allele Advantage at Human MHC Genes: Signatures of Past and Ongoing Selection. Mol Biol Evol. 2018 Sep 1;35(9):2145-2158. doi: 10.1093/molbev/msy116. PMID: 29893875; PMCID: PMC6106954.\n Chowell D, Krishna C, Pierini F, Makarov V, Rizvi NA, Kuo F, Morris LGT, Riaz N, Lenz TL, Chan TA. Evolutionary divergence of HLA class I genotype impacts efficacy of cancer immunotherapy. Nat Med. 2019 Nov;25(11):1715-1720. doi: 10.1038/s41591-019-0639-4. Epub 2019 Nov 7. PMID: 31700181; PMCID: PMC7938381.\n \n \"\"\"\n \n script = os.path.dirname(os.path.abspath(__file__))\n parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter, epilog=get_opt.__doc__)\n parser.add_argument(\"-d\", default=f\"{script}/database/grantham_matrix.txt\", help=\"Distance matrix for all amino acids, default: database/grantham_matrix.txt. (reference: DOI: 10.1126/science.185.4154.862)\")\n parser.add_argument(\"-f\", default=f\"{script}/database/ABC_prot.fa\", help=\"Amino acid sequences in fasta format, default: database/ABC_prot.fa.\")\n parser.add_argument(\"-i\", required=True, help=\"Input file of tab-delimited with individual HLA typing.\")\n parser.add_argument(\"-p\", action=\"store_true\", help=\"Paired HED score.\")\n parser.add_argument(\"-o\", required=True, help=\"Output file name.\")\n\n parse = parser.parse_args()\n return(parse)\n\ndef check_file(infile):\n if not infile.exists:\n raise Exception(f\"{str(infile)} file is not exist\")\n\ndef read_fasta(infile):\n infile = Path(infile)\n check_file(infile)\n record = SeqIO.parse(infile, \"fasta\")\n seq_array = {seq.id: str(seq.seq) for seq in record}\n seq_len = [len(value) for value in seq_array.values()]\n if len(set(seq_len)) != 1:\n raise Exception(\"Input sequences length is not equality\")\n return(seq_array)\n\ndef read_aa(infile):\n infile = Path(infile)\n check_file(infile)\n df = pd.read_csv(infile, header=0, sep=\"\\t\", index_col=0)\n aa_pairwise_dis = df.to_dict()\n return(aa_pairwise_dis)\n\ndef calculate_distange(hla1, hla2, sequence, distance):\n seq_hla1 = sequence.get(hla1, False)\n seq_hla2 = sequence.get(hla2, False)\n if not seq_hla1 or not seq_hla2:\n return(\"NA\")\n else:\n seq_len = len(seq_hla1)\n dis = 0\n for i in range(seq_len):\n aa1 = seq_hla1[i]\n aa2 = seq_hla2[i]\n dis += distance[aa1][aa2]\n dis = dis / seq_len\n return(dis)\n\n\ndef main():\n opt = get_opt()\n seq_array = read_fasta(opt.f)\n aa_pairwise_dis = read_aa(opt.d)\n\n infile = Path(opt.i)\n outfile = Path(opt.o)\n check_file(infile)\n\n df = pd.read_csv(infile, header=0, sep=\"\\t\")\n \n\n if opt.p:\n df2 = pd.melt(df, id_vars=[\"Sample\"], value_vars=[\"A1\", \"A2\", \"B1\",\"B2\", \"C1\",\"C2\"])\n alleles = 
set(df2[\"value\"].values.tolist())\n        alleles_pair = combinations(alleles, 2)\n        \n        outheader = [\"Allele1\",\"Allele2\",\"HED\"]\n        with open(outfile, \"w\") as fw:\n            fw.write(\"\\t\".join(outheader) + \"\\n\")\n            for allele1, allele2 in alleles_pair:\n                dis_hla_pair = calculate_distance(allele1, allele2, seq_array, aa_pairwise_dis)\n                outline = [allele1, allele2, dis_hla_pair]\n                outline = [str(x) for x in outline]\n\n                fw.write(\"\\t\".join(outline) + \"\\n\")\n    else:\n        outheader = [\"Sample\",\"HED_A\",\"HED_B\",\"HED_C\",\"Mean_HED\"]\n        with open(outfile, \"w\") as fw:\n            fw.write(\"\\t\".join(outheader) + \"\\n\")\n            for _, line in df.iterrows():\n                hla_a1 = line[\"A1\"]\n                hla_a2 = line[\"A2\"]\n                dis_hla_a = calculate_distance(hla_a1, hla_a2, seq_array, aa_pairwise_dis)\n\n                hla_b1 = line[\"B1\"]\n                hla_b2 = line[\"B2\"]\n                dis_hla_b = calculate_distance(hla_b1, hla_b2, seq_array, aa_pairwise_dis)\n                \n                hla_c1 = line[\"C1\"]\n                hla_c2 = line[\"C2\"]\n                dis_hla_c = calculate_distance(hla_c1, hla_c2, seq_array, aa_pairwise_dis)\n\n                if dis_hla_a == \"NA\" or dis_hla_b == \"NA\" or dis_hla_c == \"NA\":\n                    dis_mean = \"NA\"\n                else:\n                    dis_mean = (dis_hla_a + dis_hla_b + dis_hla_c) / 3\n\n                outline = [line[\"Sample\"], dis_hla_a, dis_hla_b, dis_hla_c, dis_mean]\n                outline = [str(x) for x in outline]\n\n                fw.write(\"\\t\".join(outline) + \"\\n\")\n\nif __name__ == \"__main__\":\n    main()"
] | [
[
"pandas.read_csv",
"pandas.melt"
]
] |
LeiShi/Synthetic-Diagnostics-Platform | [
"5f1cb5c29d182490acbd4f3c167f0e09ec211236"
] | [
"src/python3/sdp/math/interpolation.py"
] | [
"\"\"\"This module contains some useful interpolation methods\n\"\"\"\n\nimport numpy as np\nfrom scipy.interpolate import BarycentricInterpolator\n\nclass InterpolationError(Exception):\n def __init__(self,value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\nclass OutofBoundError(InterpolationError, ValueError):\n def __init__(self, value):\n self.value = value\n def __str__(self):\n return repr(self.value)\n\ndef linear_3d_3point(X,Y,Z,x,y,tol = 1e-8):\n \"\"\"3D interpolation method\n Linearly interpolate the value of z for given x,y.\n By using 3 points data, the unknown value of z is assumed on the same plane.\n The method used here is the cross product method. From P(x1,y1,z1),Q(x2,y2,z2),and R(x3,y3,z3), construct 2 vectors on the plane, PQ(x2-x1,y2-y1,z2-z1) and PR(x3-x1,y3-y1,z3-z1). Then do the cross product, PQ*PR = N. This gives the normal vector of the plane. The plane's equation is then 'N dot X = d', where X is an arbitary point and d to be determined. d can be easily gotten from any one of the given points, say P. d = N dot P. Then the equation of the plane is found. The equation can be written as 'ax+by+cz = d', then z can be solved for given x and y.\n \n Arguments:\n x1,y1,z1: coordinates of the first point\n x2,y2,z2: the second point\n x3,y3,z3: the third point\n x,y: the x,y coordinates for the wanted\n\n return value:\n interpolated z value on given (x,y)\n \"\"\"\n x1,x2,x3 = X[0],X[1],X[2]\n y1,y2,y3 = Y[0],Y[1],Y[2]\n z0 = np.max(Z)\n z1,z2,z3 = Z[0]/z0,Z[1]/z0,Z[2]/z0\n\n\n Nx = (y2-y1)*(z3-z1)-(y3-y1)*(z2-z1)\n Ny = (x3-x1)*(z2-z1)-(x2-x1)*(z3-z1)\n Nz = (x2-x1)*(y3-y1)-(x3-x1)*(y2-y1)\n\n z_base = (x2-x1)*(y3-y1)\n\n print(Nx,Ny,Nz,z_base)\n\n if(np.absolute(Nz/z_base) <= tol ):\n raise InterpolationError('3 points interpolation failed: given points are on a plane vertical to XY plane, no z value being able to interpolated.')\n\n d = Nx*x1 + Ny*y1 + Nz*z1\n print(d, d-Nx*x-Ny*y)\n\n return (d - Nx*x - Ny*y)/float(Nz)*z0\n\n\ndef trilinear_interp(X,Y,Z,F,x, fill_value=0.0):\n \"\"\" Trilinear interpolation (3D) for 1 point on a cubic mesh\n See Wikipedia for a better description than the following:\n First choose a direction and interpolate all the corners along this \n direction (so 8pts -> 4pts) at the value of the wanted point.\n Choose a second direction and interpolate the 4pts at the wanted point\n (4pts -> 2pts).\n Finish with the interpolation along the last line\n \n Arguments:\n X -- 1D array containing the X coordinate of F\n Y -- 1D array containing the Y coordinate of F\n Z -- 1D array containing the Z coordinate of F\n F -- 3D array containing the data\n x -- position (3D) where the interpolation is wanted\n\n return value:\n interpolated z value on given (x,y)\n \"\"\"\n raise NameError('Does not work, should use RegularGridInterpolator')\n if len(x.shape) == 1:\n # if outside the box, put the value to fill_value\n if x[0] < X[0] or x[1] < Y[0] or x[2] < Z[0]\\\n or x[0] > X[-1] or x[1] > Y[-1] or x[2] > Z[-1]:\n return fill_value\n else:\n # First find the x,y,z coordinate of the corner of the cube\n indx = np.where(X < x[0])[0].max()\n indy = np.where(Y < x[1])[0].max()\n indz = np.where(Z < x[2])[0].max()\n\n # relative coordinates\n rx = (x[0]-X[indx])/(X[indx+1]-X[indx])\n ry = (x[1]-Y[indy])/(Y[indy+1]-Y[indy])\n rz = (x[2]-Z[indz])/(Z[indz+1]-Z[indz])\n \n # compute the first linear interpolation\n temp = 1-rx\n c00 = F[indx,indy,indz]*temp + F[indx+1,indy,indz]*rx\n c10 = F[indx,indy+1,indz]*temp + 
F[indx+1,indy+1,indz]*rx\n            c01 = F[indx,indy,indz+1]*temp + F[indx+1,indy,indz+1]*rx\n            c11 = F[indx,indy+1,indz+1]*temp + F[indx+1,indy+1,indz+1]*rx\n            \n            # compute the second linear interpolation\n            temp = 1-ry\n            c0 = c00*temp + c10*ry\n            c1 = c01*temp + c11*ry\n            \n            # compute the last linear interpolation\n            return c0*(1-rz) + c1*rz\n    elif len(x.shape) == 2:\n        \"\"\"this part is the same as before but with a mesh (not only one point).\n        the comments only cover tricks due to the shape of the positions\n        and not the method (see the first part for those)\n        \"\"\"\n        G = np.zeros(len(x[:,0]))\n        # First find the x,y,z coordinate of the corner of the cube\n        ind = ~((x[:,0] < X[0]) | (x[:,1] < Y[0]) | (x[:,2] < Z[0]) |\n                (x[:,0] > X[-1]) | (x[:,1] > Y[-1]) | (x[:,2] > Z[-1]))\n\n        G[~ind] = fill_value\n        indx = np.where(X <= x[ind,0])[0].max()\n        indy = np.where(Y <= x[ind,1])[0].max()\n        indz = np.where(Z <= x[ind,2])[0].max()\n        \n        # relative coordinates\n        rx = (x[ind,0]-X[indx])/(X[indx+1]-X[indx])\n        ry = (x[ind,1]-Y[indy])/(Y[indy+1]-Y[indy])\n        rz = (x[ind,2]-Z[indz])/(Z[indz+1]-Z[indz])\n        \n        # compute the first linear interpolation\n        temp = 1-rx\n        c00 = F[indx,indy,indz]*temp + F[indx+1,indy,indz]*rx\n        c10 = F[indx,indy+1,indz]*temp + F[indx+1,indy+1,indz]*rx\n        c01 = F[indx,indy,indz+1]*temp + F[indx+1,indy,indz+1]*rx\n        c11 = F[indx,indy+1,indz+1]*temp + F[indx+1,indy+1,indz+1]*rx\n        \n        # compute the second linear interpolation\n        temp = 1-ry\n        c0 = c00*temp + c10*ry\n        c1 = c01*temp + c11*ry\n        \n        # compute the last linear interpolation\n        G[ind] = c0*(1-rz) + c1*rz\n        return G\n    else:\n        raise ValueError('Error: wrong shape of the position to interpolate')\n    \n\n# BarycentricInterpolator with boundary check\nclass BoundaryWarnBarycentricInterpolator(BarycentricInterpolator):\n    \"\"\"Barycentric Interpolator with Boundary Check. Based on \n    :py:class:`scipy.interpolate.BarycentricInterpolator`.\n    \n    The boundary is set as minimum x and maximum x. If called with x outside \n    the available range, an OutofBoundError will be raised.\n    \n    __init__(xi, yi=None, axis=0, bound_error=True, fill_value=0)\n    \n    :param xi: x coordinates for interpolation\n    :type xi: array of float\n    :param yi: Optional, y values on each xi location. If not given, need to be\n        provided later using the :py:meth:`set_yi` method.\n    :type yi: array of float\n    :param int axis: the axis of yi along which the interpolator will be \n        created.\n    :param bool bound_error: If True, out of bound interpolation will result in an\n        OutofBoundError. Otherwise fill_value will be used.\n        Defaults to True\n    :param float fill_value: If bound_error is False, out of bound values will\n        be automatically filled with fill_value.\n    \n    see :py:class:`scipy.interpolate.BarycentricInterpolator` for further \n    information. \n    \"\"\"\n    \n    def __init__(self, xi, yi=None, axis=0, bound_error=True, fill_value=0):\n        \n        self._xmin = np.min(xi)\n        self._xmax = np.max(xi)\n        self._bound_error = bound_error\n        self._fill_value = fill_value\n        \n        super(BoundaryWarnBarycentricInterpolator, self).__init__(xi, yi, axis)\n        \n        \n        \n    def __call__(self, x):\n        if (self._bound_error):\n            if np.any(x < self._xmin) or np.any(x > self._xmax):\n                raise OutofBoundError('x out of bound! 
xmin: {}, xmax: {}'.\\\n format(self._xmin, self._xmax))\n return super(BoundaryWarnBarycentricInterpolator, self).__call__(x)\n else:\n outbound_idx = np.logical_or(x < self._xmin, x > self._xmax)\n result = np.empty_like(x)\n result[~outbound_idx] = super(BoundaryWarnBarycentricInterpolator, \n self).__call__(x[~outbound_idx]) \n result[outbound_idx] = self._fill_value\n return result\n \n\n def add_xi(self, xi, yi=None):\n super(BoundaryWarnBarycentricInterpolator, self).add_xi(xi, yi)\n self._xmin = np.min( [np.min(xi), self._xmin] )\n self._xmax = np.max( [np.max(xi), self._xmax] )\n \n \n def set_yi(self, yi, axis=None):\n yi = np.array(yi)\n if not self._bound_error:\n assert yi.ndim == 1\n super(BoundaryWarnBarycentricInterpolator, self).set_yi(yi, axis)\n \n\n "
] | [
[
"numpy.absolute",
"numpy.min",
"numpy.empty_like",
"numpy.logical_or",
"numpy.max",
"numpy.any",
"numpy.array",
"numpy.where"
]
] |
mayureeb/fakenews | [
"c47a72c8bbe4d413b309da0c662da784c002fe3f"
] | [
"Code/sentiment_analysis.py"
] | [
"import pandas as pd\nfrom textblob import TextBlob\n\npd.options.mode.chained_assignment = None # ignores the SettingWithCopy Warning\ndf = pd.read_csv('INPUT.csv', encoding = 'utf8')\ndf['polarity'] = 0.0\ndf['subjectivity'] = 0.0\nfor i in range(0, len(df.index)):\n print(i)\n blob = TextBlob(str(df['text'][i]))\n df['subjectivity'][i] = blob.sentiment.subjectivity\n df['polarity'][i] = blob.sentiment.polarity\n\nprint(df.head())\ndf.to_csv('OUTPUT.csv', encoding = 'utf8')\n"
] | [
[
"pandas.read_csv"
]
] |
LutzGross/fingal | [
"4b6fcc02871e7ba1a98f37ffd18f1a16a5fe6a48"
] | [
"bin/specsim3d/spectralsim.py"
] | [
"#-------------------------------------------------------------------------------\r\n# Name: Spectralsim\r\n# Purpose: Simulation of standard normal random fields\r\n#\r\n# Author: Dr.-Ing. S. Hoerning\r\n#\r\n# Created: 02.05.2018, Centre for Natural Gas, EAIT,\r\n# The University of Queensland, Brisbane, QLD, Australia\r\n#-------------------------------------------------------------------------------\r\n\r\nimport numpy as np\r\nfrom . import covariancefunction as covfun\r\n\r\n\r\nclass spectral_random_field(object):\r\n def __init__(self,\r\n domainsize = (100,100),\r\n covmod = '1.0 Exp(2.)',\r\n periodic = False,\r\n ):\r\n\r\n self.counter = 0\r\n self.periodic = periodic\r\n # create self.xyz for plotting 3d\r\n if len(domainsize) == 3:\r\n self.xyz = np.mgrid[[slice(0,n,1) for n in domainsize]].reshape(3,-1).T\r\n # adjust domainsize by cutoff for non-perjodic output\r\n self.cutoff = 0\r\n if not self.periodic:\r\n cutoff = covfun.find_maximum_range(covmod)\r\n cutoffs = []\r\n for dim in domainsize:\r\n tsize = dim + cutoff\r\n # find closest multiple of 8 that is larger than tsize\r\n m8 = np.int(np.ceil(tsize/8.)*8.)\r\n cutoffs.append(m8 - dim)\r\n\r\n self.cutoff = np.array(cutoffs)\r\n\r\n\r\n self.domainsize = np.array(domainsize)+self.cutoff\r\n self.covmod = covmod\r\n self.ndim = len(self.domainsize)\r\n self.npoints = np.prod(self.domainsize)\r\n\r\n self.grid = np.mgrid[[slice(0,n,1) for n in self.domainsize]]\r\n\r\n # ensure periodicity of domain\r\n for i in range(self.ndim):\r\n self.domainsize = self.domainsize[:,np.newaxis]\r\n self.grid = np.min((self.grid,np.array(self.domainsize)-self.grid),axis=0)\r\n\r\n # compute distances from origin (--> wavenumbers in fourier space)\r\n self.h = ((self.grid**2).sum(axis=0))**0.5\r\n # covariances (in fourier space!!!)\r\n self.Q = covfun.Covariogram(self.h, self.covmod)\r\n\r\n # FFT of covariances\r\n self.FFTQ = np.abs(np.fft.fftn(self.Q))\r\n\r\n # eigenvalues of decomposition\r\n self.sqrtFFTQ = np.sqrt(self.FFTQ / self.npoints)\r\n\r\n self.Y = self.simnew()\r\n\r\n\r\n\r\n def simnew(self):\r\n self.counter += 1\r\n # compute random field via inverse fourier transform\r\n real = np.random.standard_normal(size=self.sqrtFFTQ.shape)\r\n imag = np.random.standard_normal(size=self.sqrtFFTQ.shape)\r\n epsilon = real + 1j*imag\r\n rand = epsilon * self.sqrtFFTQ\r\n self.Y = np.real(np.fft.ifftn(rand))*self.npoints\r\n\r\n if not self.periodic:\r\n # readjust domainsize to correct size (--> no boundary effects...)\r\n gridslice = [slice(0,(self.domainsize.squeeze()-self.cutoff)[i],1)\r\n for i in range(self.ndim)]\r\n self.Y = self.Y[tuple(gridslice)]\r\n self.Y = self.Y.reshape(self.domainsize.squeeze()-self.cutoff)\r\n\r\n return self.Y\r\n\r\n\r\n# TEST CASE\r\nif __name__ == \"__main__\":\r\n from mpl_toolkits.mplot3d import Axes3D\r\n import matplotlib.pyplot as plt\r\n domain = (30, 30, 30)\r\n covmod = '1.0 Exp(4.)'\r\n spec = spectral_random_field(domainsize = domain, covmod = covmod)\r\n field3d = spec.simnew()\r\n\r\n xyz = np.mgrid[[slice(0 , n, 1) for n in domain]].reshape(3,-1).T\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n ax.scatter(xyz[:,0], xyz[:,1], xyz[:,2], c=field3d.flatten())\r\n plt.show()\r\n\r\n"
] | [
[
"numpy.sqrt",
"numpy.random.standard_normal",
"numpy.fft.fftn",
"numpy.fft.ifftn",
"numpy.ceil",
"numpy.prod",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
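spectralsim.py above samples stationary Gaussian fields by colouring complex white noise with the square root of the FFT of the covariance and inverse-transforming. Below is a one-dimensional sketch of that core step; the Exp(4.) covariance and the grid size are illustrative choices, not taken from the file.

import numpy as np

n = 256
lag = np.minimum(np.arange(n), n - np.arange(n))   # periodic distances from origin
cov = np.exp(-lag / 4.0)                           # exponential covariance, unit sill
sqrt_spec = np.sqrt(np.abs(np.fft.fft(cov)) / n)   # eigenvalues of the decomposition
eps = np.random.standard_normal(n) + 1j * np.random.standard_normal(n)
field = np.real(np.fft.ifft(eps * sqrt_spec)) * n  # one periodic sample
print(field.std())                                 # ~1 for a unit-sill model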
echaussidon/redrock | [
"9a3d4f0aed8c0792f2cc731dbdf04a99018083bf"
] | [
"py/redrock/templates.py"
] | [
"\"\"\"\nClasses and functions for templates.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\nfrom glob import glob\nimport os\nimport traceback\n\nimport numpy as np\nfrom astropy.io import fits\n\nfrom .utils import native_endian, elapsed, transmission_Lyman\n\nfrom .rebin import rebin_template, trapz_rebin\n\n\nclass Template(object):\n \"\"\"A spectral Template PCA object.\n\n The template data is read from a redrock-format template file.\n Alternatively, the data can be specified in the constructor.\n\n Args:\n filename (str): the path to the template file, either absolute or\n relative to the RR_TEMPLATE_DIR environment variable.\n\n \"\"\"\n def __init__(self, filename=None, spectype=None, redshifts=None,\n wave=None, flux=None, subtype=None):\n\n if filename is not None:\n fx = None\n if os.path.exists(filename):\n fx = fits.open(filename, memmap=False)\n else:\n xfilename = os.path.join(os.getenv('RR_TEMPLATE_DIR'), filename)\n if os.path.exists(xfilename):\n fx = fits.open(xfilename, memmap=False)\n else:\n raise IOError('unable to find '+filename)\n\n hdr = fx['BASIS_VECTORS'].header\n if 'VERSION' in hdr:\n self._version = hdr['VERSION']\n else:\n self._version = 'unknown'\n\n self.wave = np.asarray(hdr['CRVAL1'] + \\\n hdr['CDELT1']*np.arange(hdr['NAXIS1']), dtype=np.float64)\n if 'LOGLAM' in hdr and hdr['LOGLAM'] != 0:\n self.wave = 10**self.wave\n\n self.flux = np.asarray(native_endian(fx['BASIS_VECTORS'].data),\n dtype=np.float64)\n\n self._redshifts = None\n\n ## find out if redshift info is present in the file\n old_style_templates = True\n try:\n self._redshifts = native_endian(fx['REDSHIFTS'].data)\n old_style_templates = False\n except KeyError:\n pass\n\n fx.close()\n\n self._rrtype = hdr['RRTYPE'].strip().upper()\n if old_style_templates:\n if self._rrtype == 'GALAXY':\n # redshifts = 10**np.arange(np.log10(1+0.005),\n # np.log10(1+2.0), 1.5e-4) - 1\n self._redshifts = 10**np.arange(np.log10(1-0.005),\n np.log10(1+1.7), 3e-4) - 1\n elif self._rrtype == 'STAR':\n self._redshifts = np.arange(-0.002, 0.00201, 4e-5)\n elif self._rrtype == 'QSO':\n self._redshifts = 10**np.arange(np.log10(1+0.05),\n np.log10(1+6.0), 5e-4) - 1\n else:\n raise ValueError(\"Unknown redshift range to use for \"\n \"template type {}\".format(self._rrtype))\n zmin = self._redshifts[0]\n zmax = self._redshifts[-1]\n print(\"DEBUG: Using default redshift range {:.4f}-{:.4f} for \"\n \"{}\".format(zmin, zmax, os.path.basename(filename)))\n else:\n zmin = self._redshifts[0]\n zmax = self._redshifts[-1]\n print(\"DEBUG: Using redshift range {:.4f}-{:.4f} for \"\n \"{}\".format(zmin, zmax, os.path.basename(filename)))\n\n self._subtype = None\n if 'RRSUBTYP' in hdr:\n self._subtype = hdr['RRSUBTYP'].strip().upper()\n else:\n self._subtype = ''\n\n else:\n self._rrtype = spectype\n self._redshifts = redshifts\n self.wave = wave\n self.flux = flux\n self._subtype = subtype\n\n self._nbasis = self.flux.shape[0]\n self._nwave = self.flux.shape[1]\n\n\n @property\n def nbasis(self):\n return self._nbasis\n\n @property\n def nwave(self):\n return self._nwave\n\n @property\n def template_type(self):\n return self._rrtype\n\n @property\n def sub_type(self):\n return self._subtype\n\n @property\n def full_type(self):\n \"\"\"Return formatted type:subtype string.\n \"\"\"\n if self._subtype != '':\n return '{}:::{}'.format(self._rrtype, self._subtype)\n else:\n return self._rrtype\n\n @property\n def redshifts(self):\n return self._redshifts\n\n\n def eval(self, 
coeff, wave, z):\n \"\"\"Return template for given coefficients, wavelengths, and redshift\n\n Args:\n coeff : array of coefficients length self.nbasis\n wave : wavelengths at which to evaluate template flux\n z : redshift at which to evaluate template flux\n\n Returns:\n template flux array\n\n Notes:\n A single factor of (1+z)^-1 is applied to the resampled flux\n to conserve integrated flux after redshifting.\n\n \"\"\"\n assert len(coeff) == self.nbasis\n flux = self.flux.T.dot(coeff).T / (1+z)\n return trapz_rebin(self.wave*(1+z), flux, wave)\n\n\n\n\ndef find_templates(template_dir=None):\n \"\"\"Return list of redrock-\\*.fits template files\n\n Search directories in this order, returning results from first one found:\n - template_dir\n - $RR_TEMPLATE_DIR\n - <redrock_code>/templates/\n\n Args:\n template_dir (str): optional directory containing the templates.\n\n Returns:\n list: a list of template files.\n\n \"\"\"\n if template_dir is None:\n if 'RR_TEMPLATE_DIR' in os.environ:\n template_dir = os.environ['RR_TEMPLATE_DIR']\n else:\n thisdir = os.path.dirname(__file__)\n tempdir = os.path.join(os.path.abspath(thisdir), 'templates')\n if os.path.exists(tempdir):\n template_dir = tempdir\n\n if template_dir is None:\n raise IOError(\"ERROR: can't find template_dir, $RR_TEMPLATE_DIR, or {rrcode}/templates/\")\n else:\n print('DEBUG: Read templates from {}'.format(template_dir) )\n\n return sorted(glob(os.path.join(template_dir, 'rrtemplate-*.fits')))\n\n\nclass DistTemplatePiece(object):\n \"\"\"One piece of the distributed template data.\n\n This is a simple container for storing interpolated templates for a set of\n redshift values. It is used for communicating the interpolated templates\n between processes.\n\n In the MPI case, each process will store at most two of these\n simultaneously. This is the data that is computed on a single process and\n passed between processes.\n\n Args:\n index (int): the chunk index of this piece- this corresponds to\n the process rank that originally computed this piece.\n redshifts (array): the redshift range contained in this piece.\n data (list): a list of dictionaries, one for each redshift, and\n each containing the 2D interpolated template values for all\n \"wavehash\" keys.\n\n \"\"\"\n def __init__(self, index, redshifts, data):\n self.index = index\n self.redshifts = redshifts\n self.data = data\n\n\ndef _mp_rebin_template(template, dwave, zlist, qout):\n \"\"\"Function for multiprocessing version of rebinning.\n \"\"\"\n try:\n results = dict()\n for z in zlist:\n binned = rebin_template(template, z, dwave)\n results[z] = binned\n qout.put(results)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n lines = [ \"MP rebin: {}\".format(x) for x in lines ]\n print(\"\".join(lines))\n sys.stdout.flush()\n return\n\n\nclass DistTemplate(object):\n \"\"\"Distributed template data interpolated to all redshifts.\n\n For a given template, the redshifts are distributed among the\n processes in the communicator. 
Then each process will rebin the\n template to those redshifts for the wavelength grids specified by\n dwave.\n\n Args:\n template (Template): the template to distribute\n dwave (dict): the keys are the \"wavehash\" and the values\n are a 1D array containing the wavelength grid.\n mp_procs (int): if not using MPI, restrict the number of\n multiprocesses to this.\n comm (mpi4py.MPI.Comm): (optional) the MPI communicator.\n\n \"\"\"\n def __init__(self, template, dwave, mp_procs=1, comm=None):\n self._comm = comm\n self._template = template\n self._dwave = dwave\n\n self._comm_rank = 0\n self._comm_size = 1\n if self._comm is not None:\n self._comm_rank = self._comm.rank\n self._comm_size = self._comm.size\n\n self._distredshifts = np.array_split(self._template.redshifts,\n self._comm_size)\n\n myz = self._distredshifts[self._comm_rank]\n nz = len(myz)\n\n data = list()\n\n # In the case of not using MPI (comm == None), one process is rebinning\n # all the templates. In that scenario, use multiprocessing\n # workers to do the rebinning.\n\n if self._comm is not None:\n # MPI case- compute our local redshifts\n for z in myz:\n binned = rebin_template(self._template, z, self._dwave)\n data.append(binned)\n else:\n # We don't have MPI, so use multiprocessing\n import multiprocessing as mp\n\n qout = mp.Queue()\n work = np.array_split(myz, mp_procs)\n procs = list()\n for i in range(mp_procs):\n p = mp.Process(target=_mp_rebin_template,\n args=(self._template, self._dwave, work[i], qout))\n procs.append(p)\n p.start()\n\n # Extract the output into a single list\n results = dict()\n for i in range(mp_procs):\n res = qout.get()\n results.update(res)\n for z in myz:\n data.append(results[z])\n\n # Correct spectra for Lyman-series\n for i, z in enumerate(myz):\n for k in list(self._dwave.keys()):\n T = transmission_Lyman(z,self._dwave[k])\n for vect in range(data[i][k].shape[1]):\n data[i][k][:,vect] *= T\n\n self._piece = DistTemplatePiece(self._comm_rank, myz, data)\n\n\n @property\n def comm(self):\n return self._comm\n\n @property\n def template(self):\n return self._template\n\n @property\n def local(self):\n return self._piece\n\n\n def cycle(self):\n \"\"\"Pass our piece of data to the next process.\n\n If we have returned to our original data, then return True, otherwise\n return False.\n\n Args:\n Nothing\n\n Returns (bool):\n Whether we have finished (True) else False.\n\n \"\"\"\n # If we are not using MPI, this function is a no-op, so just return.\n if self._comm is None:\n return True\n\n rank = self._comm_rank\n nproc = self._comm_size\n\n to_proc = rank + 1\n if to_proc >= nproc:\n to_proc = 0\n\n from_proc = rank - 1\n if from_proc < 0:\n from_proc = nproc - 1\n\n # Send our data and get a request handle for later checking.\n\n req = self._comm.isend(self._piece, to_proc)\n\n # Receive our data\n\n incoming = self._comm.recv(source=from_proc)\n\n # Wait for send to finish\n\n req.wait()\n\n # Now replace our local piece with the new one\n\n self._piece = incoming\n\n # Are we done?\n\n done = False\n if self._piece.index == rank:\n done = True\n\n return done\n\n\ndef load_dist_templates(dwave, templates=None, comm=None, mp_procs=1):\n \"\"\"Read and distribute templates from disk.\n\n This reads one or more template files from disk and distributes them among\n an MPI communicator. Each process will locally store interpolated data\n for a redshift slice of each template. 
For a single redshift, the template\n is interpolated to the wavelength grids specified by \"dwave\".\n\n As an example, imagine 3 templates with independent redshift ranges. Also\n imagine that the communicator has 2 processes. This function would return\n a list of 3 DistTemplate objects. Within each of those objects, the 2\n processes store the interpolated data for a subset of the redshift range:\n\n DistTemplate #1: zmin1 <---- p0 ----> | <---- p1 ----> zmax1\n DistTemplate #2: zmin2 <-- p0 --> | <-- p1 --> zmax2\n DistTemplate #3: zmin3 <--- p0 ---> | <--- p1 ---> zmax3\n\n Args:\n dwave (dict): the dictionary of wavelength grids. Keys are the\n \"wavehash\" and values are an array of wavelengths.\n templates (str or None): if None, find all templates from the\n redrock template directory. If a path to a file is specified,\n load that single template. If a path to a directory is given,\n load all templates in that directory.\n comm (mpi4py.MPI.Comm): (optional) the MPI communicator.\n mp_procs (int): if not using MPI, restrict the number of\n multiprocesses to this.\n\n Returns:\n list: a list of DistTemplate objects.\n\n \"\"\"\n timer = elapsed(None, \"\", comm=comm)\n\n template_files = None\n\n if (comm is None) or (comm.rank == 0):\n # Only one process needs to do this\n if templates is not None:\n if os.path.isfile(templates):\n # we are using just a single file\n template_files = [ templates ]\n elif os.path.isdir(templates):\n # this is a template dir\n template_files = find_templates(template_dir=templates)\n else:\n print(\"{} is neither a file nor a directory\"\\\n .format(templates))\n sys.stdout.flush()\n if comm is not None:\n comm.Abort()\n else:\n template_files = find_templates()\n\n if comm is not None:\n template_files = comm.bcast(template_files, root=0)\n\n template_data = list()\n if (comm is None) or (comm.rank == 0):\n for t in template_files:\n template_data.append(Template(filename=t))\n\n if comm is not None:\n template_data = comm.bcast(template_data, root=0)\n\n timer = elapsed(timer, \"Read and broadcast of {} templates\"\\\n .format(len(template_files)), comm=comm)\n\n # Compute the interpolated templates in a distributed way with every\n # process generating a slice of the redshift range.\n\n dtemplates = list()\n for t in template_data:\n dtemplates.append(DistTemplate(t, dwave, mp_procs=mp_procs, comm=comm))\n\n timer = elapsed(timer, \"Rebinning templates\", comm=comm)\n\n return dtemplates\n"
] | [
[
"numpy.arange",
"numpy.log10",
"numpy.array_split"
]
] |
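DistTemplate.cycle() above rotates each rank's rebinned slice to rank+1 (wrapping at nproc) until every slice has visited every rank. Below is a pure-Python simulation of that ring so the indexing is visible without mpi4py; the slice labels are illustrative.

def ring_states(pieces):
    # Each step, rank r takes the piece held by rank r-1 (mod nproc),
    # mirroring the to_proc/from_proc arithmetic in cycle().
    nproc = len(pieces)
    for _ in range(nproc):
        yield list(pieces)
        pieces = [pieces[(r - 1) % nproc] for r in range(nproc)]

for state in ring_states(['z-slice-0', 'z-slice-1', 'z-slice-2']):
    print(state)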
guissy/StockRecommendSystem | [
"2e8694d0bb2ceaa42585ee7414564d921cc5a854"
] | [
"Source/FetchData/Fetch_Data_Stock_CHN_Daily.py"
] | [
"import sys, os, time, datetime, warnings, configparser\nimport pandas as pd\nimport numpy as np\nimport tushare as ts\nimport concurrent.futures\nfrom tqdm import tqdm\n\ncur_path = os.path.dirname(os.path.abspath(__file__))\nfor _ in range(2):\n root_path = cur_path[0:cur_path.rfind('/', 0, len(cur_path))]\n cur_path = root_path\nsys.path.append(root_path + \"/\" + 'Source/DataBase/')\nfrom Source.DataBase.DB_API import queryStock, storeStock, queryStockList, storeStockList, queryStockPublishDay, storePublishDay\n\ndef getStocksList(root_path):\n try:\n df = queryStockList(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\")\n df.index = df.index.astype(str).str.zfill(6)\n except Exception as e:\n df = pd.DataFrame()\n\n if df.empty == False: return df\n import subprocess\n subprocess.Popen('brew services restart mongodb'.split())\n stock_info = ts.get_stock_basics()\n listData = pd.DataFrame(stock_info)\n #listData.index.name = 'symbol'\n #listData.index = listData.index.astype(str).str.zfill(6) #[str(symbol).zfill(6) for symbol in listData.index] #listData.index.astype(str).str.zfill(6)\n #print(listData.index)\n #listData['symbol'] = listData['symbol'].str.strip()\n storeStockList(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\", listData)\n df = queryStockList(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\")\n df.index = df.index.astype(str).str.zfill(6)\n return df\n\ndef getSingleStock(symbol):\n repeat_times = 1\n message = \"\"\n df = pd.DataFrame()\n\n for _ in range(repeat_times): \n try:\n data = ts.get_hist_data(symbol)\n data.sort_index(ascending=True, inplace=True)\n return data, \"\"\n except Exception as e:\n message = symbol + \" fetch exception: \" + str(e)\n continue \n return df, message\n\ndef getSingleStockByTime(symbol, from_date, till_date):\n start = from_date.split('-')\n start_y, start_m, start_d = start[0], start[1], start[2] # starting date\n\n end = till_date.split('-')\n end_y, end_m, end_d = end[0], end[1], end[2] # until now\n \n repeat_times = 1\n message = \"\"\n df = pd.DataFrame()\n\n for _ in range(repeat_times): \n try:\n data = ts.get_hist_data(symbol, from_date, till_date)\n data.sort_index(ascending=True, inplace=True)\n return data, \"\"\n except Exception as e:\n message = symbol + \" fetch exception: \" + str(e)\n continue \n return df, message\n\ndef judgeOpenDaysInRange(from_date, to_date):\n holidays=[\"2017-01-01\", \"2017-01-02\",\n \"2017-01-27\", \"2017-01-28\", \"2017-01-29\", \"2017-01-30\", \"2017-01-31\", \"2017-02-01\", \"2017-02-02\",\n \"2017-04-02\", \"2017-04-03\", \"2017-04-04\",\n \"2017-05-01\",\n \"2017-05-28\", \"2017-05-29\", \"2017-05-30\",\n \"2017-10-01\", \"2017-10-02\", \"2017-10-03\", \"2017-10-04\", \"2017-10-05\",\"2017-10-06\",\"2017-10-07\",\"2017-10-08\"]\n\n #holidays = cal.holidays(from_date, to_date)\n duedays = pd.bdate_range(from_date, to_date)\n df = pd.DataFrame()\n df['date'] = duedays\n df['holiday'] = duedays.isin(holidays)\n opendays = df[df['holiday'] == False]\n return opendays\n\ndef judgeNeedPostDownload(from_date, to_date):\n today = datetime.datetime.now()\n start_date = pd.Timestamp(from_date)\n end_date = pd.Timestamp(to_date)\n\n if start_date > today: return False \n if end_date > today: to_date = today.strftime(\"%Y-%m-%d\")\n dateList = judgeOpenDaysInRange(from_date, to_date)\n if len(dateList) > 0: return True\n return False\n\n\ndef updateSingleStockData(root_path, symbol, force_check):\n startTime = time.time()\n message = \"\"\n\n if len(symbol) == 0: return startTime, message\n\n till_date = 
(datetime.datetime.now()).strftime(\"%Y-%m-%d\")\n end_date = pd.Timestamp(till_date)\n \n stockData, lastUpdateTime = queryStock(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\", symbol)\n\n if stockData.empty:\n stockData, message = getSingleStock(symbol)\n if stockData.empty == False:\n storeStock(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\", symbol, stockData)\n return startTime, message\n\n modified = False\n first_date = pd.Timestamp(stockData.index[0])\n last_date = pd.Timestamp(stockData.index[-1])\n updateOnce = end_date > lastUpdateTime\n \n if end_date > last_date and (updateOnce or force_check):\n to_date = (last_date + datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\")\n if judgeNeedPostDownload(to_date, till_date):\n message = message + \", download post data from \" + to_date + \" to \" + till_date\n moreStockData, tempMessage = getSingleStockByTime(symbol, to_date, till_date)\n message = message + tempMessage\n if len(moreStockData) > 0:\n if isinstance(moreStockData.index, pd.DatetimeIndex):\n moreStockData.index = moreStockData.index.strftime(\"%Y-%m-%d\")\n modified = True\n stockData = pd.concat([stockData, moreStockData])\n stockData.index.name = 'date'\n \n if modified:\n stockData = stockData[~stockData.index.duplicated(keep='first')]\n storeStock(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\", symbol, stockData)\n elif updateOnce:\n stockData = stockData[~stockData.index.duplicated(keep='first')]\n storeStock(root_path, \"DB_STOCK\", \"SHEET_CHN_DAILY\", symbol, stockData)\n message = message + \", nothing updated\"\n else:\n message = \"\"\n\n return startTime, message\n\ndef updateStockData_CHN(root_path, storeType, force_check = False):\n\n symbols = getStocksList(root_path).index.values.tolist()\n\n pbar = tqdm(total=len(symbols))\n\n\n if storeType == 2:\n for symbol in symbols:\n startTime, message = updateSingleStockData(root_path, symbol, force_check)\n outMessage = '%-*s fetched in: %.4s seconds' % (6, symbol, (time.time() - startTime))\n pbar.set_description(outMessage)\n pbar.update(1)\n\n if storeType == 1:\n log_errors = []\n log_update = []\n with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:\n # Start the load operations and mark each future with its URL\n future_to_stock = {executor.submit(updateSingleStockData, root_path, symbol, force_check): symbol for symbol in symbols}\n for future in concurrent.futures.as_completed(future_to_stock):\n stock = future_to_stock[future]\n try:\n startTime, message = future.result()\n except Exception as exc:\n startTime = time.time()\n log_errors.append('%r generated an exception: %s' % (stock, exc))\n else:\n if len(message) > 0: log_update.append(message)\n outMessage = '%-*s fetched in: %.4s seconds' % (6, stock, (time.time() - startTime))\n pbar.set_description(outMessage)\n pbar.update(1)\n if len(log_errors) > 0: print(log_errors)\n # if len(log_update) > 0: print(log_update)\n\n pbar.close()\n return symbols\n\nif __name__ == \"__main__\":\n pd.set_option('precision', 3)\n pd.set_option('display.width',1000)\n warnings.filterwarnings('ignore', category=pd.io.pytables.PerformanceWarning)\n\n config = configparser.ConfigParser()\n config.read(root_path + \"/\" + \"config.ini\")\n storeType = int(config.get('Setting', 'StoreType'))\n\n if storeType == 1:\n from Start_DB_Server import StartServer, ShutdownServer\n # start database server (async)\n thread = StartServer(root_path)\n \n # wait for db start, the standard procedure should listen to \n # the completed event of function \"StartServer\"\n 
time.sleep(5)\n \n updateStockData_CHN(root_path, storeType)\n\n if storeType == 1:\n # stop database server (sync)\n time.sleep(5)\n ShutdownServer()\n"
] | [
[
"pandas.concat",
"pandas.bdate_range",
"pandas.DataFrame",
"pandas.set_option",
"pandas.Timestamp"
]
] |
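judgeOpenDaysInRange/judgeNeedPostDownload above decide whether new data can exist by intersecting pandas business days with a hand-maintained holiday list. Below is a condensed sketch of that check; the dates are a subset of the holidays hard-coded in the row above.

import pandas as pd

holidays = ['2017-10-02', '2017-10-03', '2017-10-04']
days = pd.bdate_range('2017-10-01', '2017-10-08')
open_days = days[~days.isin(pd.to_datetime(holidays))]
print(len(open_days) > 0)  # True -> at least one trading day, so fetch new data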
ambareeshravi/TrafficSignClassifier_API | [
"8628057439ee70f6d827abf931071e9b6539bd5b"
] | [
"utils.py"
] | [
"'''\nAuthor: Ambareesh Ravi\nDate: Jul 31, 2021\nTitle: utils.py\nDescription:\n Contains utility and helper functions for the project\n'''\n\n# Libraries imports\nimport numpy as np\nimport pandas as pd\nimport os\nfrom tqdm import tqdm\nfrom time import time\nfrom glob import glob\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport argparse\nimport cv2\n\n# Global variables\nMANUAL_SEED = 42\nnp.random.seed(42)\n\ndef INFO(s):\n '''\n Prints information in a particular format\n\n Args:\n s - string <str> to be printed\n\n Returns:\n -\n\n Exception:\n -\n '''\n print(\"-\"*40)\n print(\"INFO:\", s)\n print(\"-\"*40)\n\ndef read_directory_content(path):\n '''\n Reads all files in a directory given a path\n \n Args:\n path - path for the directory as <str>\n \n Returns:\n sorted list of files in the directory\n \n Exception:\n -\n '''\n if \"*\" not in path: path = os.path.join(path, \"*\")\n return sorted(glob(path))\n \ndef create_directory(path):\n '''\n Creates a directory given a path if the path does not exist\n \n Args:\n path - path for the directory as <str>\n \n Returns:\n -\n \n Exception:\n -\n '''\n # Create a directory\n if not os.path.exists(path): os.mkdir(path)\n\ndef save_image(array, path, resize = False, extension = \".png\"):\n '''\n Saves an array into an image file\n \n Args:\n array - image as a <np.array>\n path - path for the image as <str>\n resize - [optional] to resize image to given size - <tuple> of <int> (w,h)\n extension - [optional] type of image file as <str>\n \n Returns:\n -\n \n Exception:\n -\n '''\n # Add image extension\n if extension not in path:\n path = path.split(\".\")[0] + extension\n \n # Save image into a file using PIL Image handle\n img = Image.fromarray(array)\n # Resize image if reaquired\n if resize: img = img.resize(resize)\n # Save image\n img.save(path)\n \ndef read_image(image_path):\n '''\n Reads an image from the given path as a PIL.Image handle\n \n Args:\n image_path - path for the image as <str>\n \n Returns:\n -\n \n Exception:\n -\n '''\n return Image.open(image_path)\n\nclass Visualizer:\n def __init__(self,):\n '''\n Initializes the class to visualize results in comparison with the inputs\n \n Args:\n -\n \n Returns:\n -\n \n Exception:\n -\n '''\n pass\n \n def gray2color(self, x):\n '''\n Converts a single channel grayscale image to coloured 3 channel format\n \n Args:\n x - input as <np.array>\n \n Returns:\n -\n \n Exception:\n -\n '''\n return np.repeat(np.expand_dims(x, axis = -1), 3, axis = -1)\n \n def visualize_composite(self, input_image, label, prediction, margin = 8, save_path = None):\n '''\n Function to visualize input, label, prediction together in an image\n \n Args:\n input_image - input RGB image as <np.array>\n label - label binary mask Grayscale image as <np.array>\n prediction - predicted binary mask Grayscale image as <np.array>\n margin - margin between images in terms of pixels in <int>\n save_path - path to save the file <str>\n \n Returns:\n -\n \n Exception:\n -\n '''\n rounded_pred = np.round(prediction)\n margin = np.ones((label.shape[0], margin, 3))\n composite = np.hstack((input_image, margin, self.gray2color(label), margin, self.gray2color(rounded_pred)))\n img = Image.fromarray((composite*255).astype(np.uint8))\n if save_path: save_image()\n return img\n"
] | [
[
"numpy.round",
"numpy.expand_dims",
"numpy.random.seed",
"numpy.ones"
]
] |
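Visualizer.gray2color above lifts an (H, W) grayscale mask to (H, W, 3) with expand_dims plus repeat so it can be stacked next to RGB panels by np.hstack. Below is a tiny shape check of that broadcast, with toy sizes.

import numpy as np

mask = np.random.rand(4, 5)
rgb = np.repeat(np.expand_dims(mask, axis=-1), 3, axis=-1)
assert rgb.shape == (4, 5, 3)
assert np.allclose(rgb[..., 0], mask)  # every channel equals the original mask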
kadeng/tensorflow_project_workspace | [
"dee284fb2d1796329895130a075cd57a62ea873f"
] | [
"tensorflow/contrib/learn/python/learn/estimators/dnn.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Deep Neural Network estimators.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib.framework import deprecated\nfrom tensorflow.contrib.framework import deprecated_arg_values\nfrom tensorflow.contrib.framework.python.framework import experimental\nfrom tensorflow.contrib.framework.python.ops import variables as contrib_variables\nfrom tensorflow.contrib.layers.python.layers import optimizers\nfrom tensorflow.contrib.learn.python.learn import evaluable\nfrom tensorflow.contrib.learn.python.learn import metric_spec\nfrom tensorflow.contrib.learn.python.learn import monitors as monitor_lib\nfrom tensorflow.contrib.learn.python.learn import trainable\nfrom tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined\nfrom tensorflow.contrib.learn.python.learn.estimators import estimator\nfrom tensorflow.contrib.learn.python.learn.estimators import head as head_lib\nfrom tensorflow.contrib.learn.python.learn.estimators import model_fn\nfrom tensorflow.contrib.learn.python.learn.estimators import prediction_key\nfrom tensorflow.contrib.learn.python.learn.utils import export\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import partitioned_variables\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.summary import summary\n\n_CENTERED_BIAS_WEIGHT = \"centered_bias_weight\"\n\n# The default learning rate of 0.05 is a historical artifact of the initial\n# implementation, but seems a reasonable choice.\n_LEARNING_RATE = 0.05\n\n\ndef _get_feature_dict(features):\n if isinstance(features, dict):\n return features\n return {\"\": features}\n\n\ndef _get_optimizer(optimizer):\n if callable(optimizer):\n return optimizer()\n else:\n return optimizer\n\n\ndef _add_hidden_layer_summary(value, tag):\n summary.scalar(\"%s_fraction_of_zero_values\" % tag, nn.zero_fraction(value))\n summary.histogram(\"%s_activation\" % tag, value)\n\n\ndef _dnn_model_fn(features, labels, mode, params, config=None):\n \"\"\"Deep Neural Net model_fn.\n\n Args:\n features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).\n labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of\n dtype `int32` or `int64` in the range `[0, n_classes)`.\n mode: Defines whether this is training, evaluation or prediction.\n See `ModeKeys`.\n params: A dict of hyperparameters.\n The following hyperparameters are expected:\n * head: A `_Head` instance.\n * hidden_units: List of hidden units per layer.\n * feature_columns: An iterable containing all the feature columns used by\n the model.\n * optimizer: string, `Optimizer` object, or callable that defines the\n optimizer to use for training. 
If `None`, will use the Adagrad\n optimizer with a default learning rate of 0.05.\n * activation_fn: Activation function applied to each layer. If `None`,\n will use `tf.nn.relu`.\n * dropout: When not `None`, the probability we will drop out a given\n coordinate.\n * gradient_clip_norm: A float > 0. If provided, gradients are\n clipped to their global norm with this clipping ratio.\n * embedding_lr_multipliers: Optional. A dictionary from\n `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to\n multiply with learning rate for the embedding variables.\n config: `RunConfig` object to configure the runtime settings.\n\n Returns:\n predictions: A dict of `Tensor` objects.\n loss: A scalar containing the loss of the step.\n train_op: The op for training.\n \"\"\"\n head = params[\"head\"]\n hidden_units = params[\"hidden_units\"]\n feature_columns = params[\"feature_columns\"]\n optimizer = params.get(\"optimizer\") or \"Adagrad\"\n activation_fn = params.get(\"activation_fn\")\n dropout = params.get(\"dropout\")\n gradient_clip_norm = params.get(\"gradient_clip_norm\")\n num_ps_replicas = config.num_ps_replicas if config else 0\n embedding_lr_multipliers = params.get(\"embedding_lr_multipliers\", {})\n\n features = _get_feature_dict(features)\n parent_scope = \"dnn\"\n\n input_layer_partitioner = (partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas, min_slice_size=64 << 20))\n input_layer_scope = parent_scope + \"/input_from_feature_columns\"\n with variable_scope.variable_scope(\n input_layer_scope,\n values=list(six.itervalues(features)),\n partitioner=input_layer_partitioner) as scope:\n net = layers.input_from_feature_columns(\n columns_to_tensors=features,\n feature_columns=feature_columns,\n weight_collections=[parent_scope],\n scope=scope)\n\n hidden_layer_partitioner = (\n partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas))\n for layer_id, num_hidden_units in enumerate(hidden_units):\n with variable_scope.variable_scope(\n parent_scope + \"/hiddenlayer_%d\" % layer_id,\n values=[net],\n partitioner=hidden_layer_partitioner) as scope:\n net = layers.fully_connected(\n net,\n num_hidden_units,\n activation_fn=activation_fn,\n variables_collections=[parent_scope],\n scope=scope)\n if dropout is not None and mode == model_fn.ModeKeys.TRAIN:\n net = layers.dropout(net, keep_prob=(1.0 - dropout))\n _add_hidden_layer_summary(net, scope.name)\n\n with variable_scope.variable_scope(\n parent_scope + \"/logits\",\n values=[net],\n partitioner=hidden_layer_partitioner) as scope:\n logits = layers.fully_connected(\n net,\n head.logits_dimension,\n activation_fn=None,\n variables_collections=[parent_scope],\n scope=scope)\n _add_hidden_layer_summary(logits, scope.name)\n\n def _train_op_fn(loss):\n \"\"\"Returns the op to optimize the loss.\"\"\"\n return optimizers.optimize_loss(\n loss=loss,\n global_step=contrib_variables.get_global_step(),\n learning_rate=_LEARNING_RATE,\n optimizer=_get_optimizer(optimizer),\n gradient_multipliers=(\n dnn_linear_combined._extract_embedding_lr_multipliers( # pylint: disable=protected-access\n embedding_lr_multipliers, parent_scope, input_layer_scope)),\n clip_gradients=gradient_clip_norm,\n name=parent_scope,\n # Empty summaries to prevent optimizers from logging the training_loss.\n summaries=[])\n\n return head.head_ops(features, labels, mode, _train_op_fn, logits)\n\n\nclass DNNClassifier(evaluable.Evaluable, trainable.Trainable):\n \"\"\"A classifier for TensorFlow DNN 
models.\n\n Example:\n\n ```python\n sparse_feature_a = sparse_column_with_hash_bucket(...)\n sparse_feature_b = sparse_column_with_hash_bucket(...)\n\n sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,\n ...)\n sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,\n ...)\n\n estimator = DNNClassifier(\n feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],\n hidden_units=[1024, 512, 256])\n\n # Or estimator using the ProximalAdagradOptimizer optimizer with\n # regularization.\n estimator = DNNClassifier(\n feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],\n hidden_units=[1024, 512, 256],\n optimizer=tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001\n ))\n\n # Input builders\n def input_fn_train: # returns x, y (where y represents label's class index).\n pass\n estimator.fit(input_fn=input_fn_train)\n\n def input_fn_eval: # returns x, y (where y represents label's class index).\n pass\n estimator.evaluate(input_fn=input_fn_eval)\n estimator.predict(x=x) # returns predicted labels (i.e. label's class index).\n ```\n\n Input of `fit` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * if `weight_column_name` is not `None`, a feature with\n `key=weight_column_name` whose value is a `Tensor`.\n * for each `column` in `feature_columns`:\n - if `column` is a `SparseColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `WeightedSparseColumn`, two features: the first with\n `key` the id column name, the second with `key` the weight column name.\n Both features' `value` must be a `SparseTensor`.\n - if `column` is a `RealValuedColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n \"\"\"\n\n def __init__(self,\n hidden_units,\n feature_columns,\n model_dir=None,\n n_classes=2,\n weight_column_name=None,\n optimizer=None,\n activation_fn=nn.relu,\n dropout=None,\n gradient_clip_norm=None,\n enable_centered_bias=False,\n config=None,\n feature_engineering_fn=None,\n embedding_lr_multipliers=None):\n \"\"\"Initializes a DNNClassifier instance.\n\n Args:\n hidden_units: List of hidden units per layer. All layers are fully\n connected. Ex. `[64, 32]` means first layer has 64 nodes and second one\n has 32.\n feature_columns: An iterable containing all the feature columns used by\n the model. All items in the set should be instances of classes derived\n from `FeatureColumn`.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into a estimator to\n continue training a previously saved model.\n n_classes: number of label classes. Default is binary classification.\n It must be greater than 1. Note: Class labels are integers representing\n the class index (i.e. values from 0 to n_classes-1). For arbitrary\n label values (e.g. string labels), convert to class indices first.\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n optimizer: An instance of `tf.Optimizer` used to train the model. If\n `None`, will use an Adagrad optimizer.\n activation_fn: Activation function applied to each layer. If `None`, will\n use `tf.nn.relu`.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n gradient_clip_norm: A float > 0. 
If provided, gradients are\n clipped to their global norm with this clipping ratio. See\n `tf.clip_by_global_norm` for more details.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n config: `RunConfig` object to configure the runtime settings.\n feature_engineering_fn: Feature engineering function. Takes features and\n labels which are the output of `input_fn` and\n returns features and labels which will be fed\n into the model.\n embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to\n a `float` multiplier. Multiplier will be used to multiply with\n learning rate for the embedding variables.\n\n Returns:\n A `DNNClassifier` estimator.\n\n Raises:\n ValueError: If `n_classes` < 2.\n \"\"\"\n self._hidden_units = hidden_units\n self._feature_columns = tuple(feature_columns or [])\n self._enable_centered_bias = enable_centered_bias\n self._estimator = estimator.Estimator(\n model_fn=_dnn_model_fn,\n model_dir=model_dir,\n config=config,\n params={\n \"head\":\n head_lib._multi_class_head( # pylint: disable=protected-access\n n_classes,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias),\n \"hidden_units\":\n hidden_units,\n \"feature_columns\":\n self._feature_columns,\n \"optimizer\":\n optimizer,\n \"activation_fn\":\n activation_fn,\n \"dropout\":\n dropout,\n \"gradient_clip_norm\":\n gradient_clip_norm,\n \"embedding_lr_multipliers\":\n embedding_lr_multipliers,\n },\n feature_engineering_fn=feature_engineering_fn)\n\n def fit(self,\n x=None,\n y=None,\n input_fn=None,\n steps=None,\n batch_size=None,\n monitors=None,\n max_steps=None):\n \"\"\"See trainable.Trainable. Note: Labels must be integer class indices.\"\"\"\n # TODO(roumposg): Remove when deprecated monitors are removed.\n hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)\n self._estimator.fit(x=x,\n y=y,\n input_fn=input_fn,\n steps=steps,\n batch_size=batch_size,\n monitors=hooks,\n max_steps=max_steps)\n return self\n\n def evaluate(self,\n x=None,\n y=None,\n input_fn=None,\n feed_fn=None,\n batch_size=None,\n steps=None,\n metrics=None,\n name=None,\n checkpoint_path=None,\n hooks=None):\n \"\"\"See evaluable.Evaluable. Note: Labels must be integer class indices.\"\"\"\n return self._estimator.evaluate(\n x=x,\n y=y,\n input_fn=input_fn,\n feed_fn=feed_fn,\n batch_size=batch_size,\n steps=steps,\n metrics=metrics,\n name=name,\n checkpoint_path=checkpoint_path,\n hooks=hooks)\n\n @deprecated_arg_values(\n estimator.AS_ITERABLE_DATE,\n estimator.AS_ITERABLE_INSTRUCTIONS,\n as_iterable=False)\n def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):\n \"\"\"Returns predicted classes for given features.\n\n Args:\n x: features.\n input_fn: Input function. If set, x must be None.\n batch_size: Override default batch size.\n as_iterable: If True, return an iterable which keeps yielding predictions\n for each example until inputs are exhausted. Note: The inputs must\n terminate if you want the iterable to terminate (e.g. be sure to pass\n num_epochs=1 if you are using something like read_batch_features).\n\n Returns:\n Numpy array of predicted classes with shape [batch_size] (or an iterable\n of predicted classes if as_iterable is True). Each predicted class is\n represented by its class index (i.e. 
integer from 0 to n_classes-1).\n \"\"\"\n key = prediction_key.PredictionKey.CLASSES\n preds = self._estimator.predict(\n x=x,\n input_fn=input_fn,\n batch_size=batch_size,\n outputs=[key],\n as_iterable=as_iterable)\n if as_iterable:\n return (pred[key] for pred in preds)\n return preds[key].reshape(-1)\n\n @deprecated_arg_values(\n estimator.AS_ITERABLE_DATE,\n estimator.AS_ITERABLE_INSTRUCTIONS,\n as_iterable=False)\n def predict_proba(self,\n x=None,\n input_fn=None,\n batch_size=None,\n as_iterable=True):\n \"\"\"Returns prediction probabilities for given features.\n\n Args:\n x: features.\n input_fn: Input function. If set, x and y must be None.\n batch_size: Override default batch size.\n as_iterable: If True, return an iterable which keeps yielding predictions\n for each example until inputs are exhausted. Note: The inputs must\n terminate if you want the iterable to terminate (e.g. be sure to pass\n num_epochs=1 if you are using something like read_batch_features).\n\n Returns:\n Numpy array of predicted probabilities with shape [batch_size, n_classes]\n (or an iterable of predicted probabilities if as_iterable is True).\n \"\"\"\n key = prediction_key.PredictionKey.PROBABILITIES\n preds = self._estimator.predict(\n x=x,\n input_fn=input_fn,\n batch_size=batch_size,\n outputs=[key],\n as_iterable=as_iterable)\n if as_iterable:\n return (pred[key] for pred in preds)\n return preds[key]\n\n def _get_predict_ops(self, features):\n \"\"\"See `Estimator` class.\"\"\"\n # This method exists to support some models that use the legacy interface.\n # pylint: disable=protected-access\n return self._estimator._get_predict_ops(features)\n\n def get_variable_names(self):\n \"\"\"Returns list of all variable names in this model.\n\n Returns:\n List of names.\n \"\"\"\n return self._estimator.get_variable_names()\n\n def get_variable_value(self, name):\n \"\"\"Returns value of the variable given by name.\n\n Args:\n name: string, name of the tensor.\n\n Returns:\n `Tensor` object.\n \"\"\"\n return self._estimator.get_variable_value(name)\n\n def export(self,\n export_dir,\n input_fn=None,\n input_feature_key=None,\n use_deprecated_input_fn=True,\n signature_fn=None,\n default_batch_size=1,\n exports_to_keep=None):\n \"\"\"See BaseEstimator.export.\"\"\"\n\n def default_input_fn(unused_estimator, examples):\n return layers.parse_feature_columns_from_examples(examples,\n self._feature_columns)\n\n return self._estimator.export(\n export_dir=export_dir,\n input_fn=input_fn or default_input_fn,\n input_feature_key=input_feature_key,\n use_deprecated_input_fn=use_deprecated_input_fn,\n signature_fn=(signature_fn or\n export.classification_signature_fn_with_prob),\n prediction_key=prediction_key.PredictionKey.PROBABILITIES,\n default_batch_size=default_batch_size,\n exports_to_keep=exports_to_keep)\n\n @experimental\n def export_savedmodel(self,\n export_dir_base,\n input_fn,\n default_output_alternative_key=None,\n assets_extra=None,\n as_text=False,\n exports_to_keep=None):\n return self._estimator.export_savedmodel(\n export_dir_base,\n input_fn,\n default_output_alternative_key=default_output_alternative_key,\n assets_extra=assets_extra,\n as_text=as_text,\n exports_to_keep=exports_to_keep)\n\n @property\n def model_dir(self):\n return self._estimator.model_dir\n\n @property\n @deprecated(\"2016-10-30\",\n \"This method will be removed after the deprecation date. 
\"\n \"To inspect variables, use get_variable_names() and \"\n \"get_variable_value().\")\n def weights_(self):\n hiddenlayer_weights = [\n self.get_variable_value(\"dnn/hiddenlayer_%d/weights\" % i)\n for i, _ in enumerate(self._hidden_units)\n ]\n logits_weights = [self.get_variable_value(\"dnn/logits/weights\")]\n return hiddenlayer_weights + logits_weights\n\n @property\n @deprecated(\"2016-10-30\",\n \"This method will be removed after the deprecation date. \"\n \"To inspect variables, use get_variable_names() and \"\n \"get_variable_value().\")\n def bias_(self):\n hiddenlayer_bias = [\n self.get_variable_value(\"dnn/hiddenlayer_%d/biases\" % i)\n for i, _ in enumerate(self._hidden_units)\n ]\n logits_bias = [self.get_variable_value(\"dnn/logits/biases\")]\n if self._enable_centered_bias:\n centered_bias = [self.get_variable_value(_CENTERED_BIAS_WEIGHT)]\n else:\n centered_bias = []\n return hiddenlayer_bias + logits_bias + centered_bias\n\n @property\n def config(self):\n return self._estimator.config\n\n\nclass DNNRegressor(evaluable.Evaluable, trainable.Trainable):\n \"\"\"A regressor for TensorFlow DNN models.\n\n Example:\n\n ```python\n sparse_feature_a = sparse_column_with_hash_bucket(...)\n sparse_feature_b = sparse_column_with_hash_bucket(...)\n\n sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,\n ...)\n sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,\n ...)\n\n estimator = DNNRegressor(\n feature_columns=[sparse_feature_a, sparse_feature_b],\n hidden_units=[1024, 512, 256])\n\n # Or estimator using the ProximalAdagradOptimizer optimizer with\n # regularization.\n estimator = DNNRegressor(\n feature_columns=[sparse_feature_a, sparse_feature_b],\n hidden_units=[1024, 512, 256],\n optimizer=tf.train.ProximalAdagradOptimizer(\n learning_rate=0.1,\n l1_regularization_strength=0.001\n ))\n\n # Input builders\n def input_fn_train: # returns x, y\n pass\n estimator.fit(input_fn=input_fn_train)\n\n def input_fn_eval: # returns x, y\n pass\n estimator.evaluate(input_fn=input_fn_eval)\n estimator.predict(x=x)\n ```\n\n Input of `fit` and `evaluate` should have following features,\n otherwise there will be a `KeyError`:\n\n * if `weight_column_name` is not `None`, a feature with\n `key=weight_column_name` whose value is a `Tensor`.\n * for each `column` in `feature_columns`:\n - if `column` is a `SparseColumn`, a feature with `key=column.name`\n whose `value` is a `SparseTensor`.\n - if `column` is a `WeightedSparseColumn`, two features: the first with\n `key` the id column name, the second with `key` the weight column name.\n Both features' `value` must be a `SparseTensor`.\n - if `column` is a `RealValuedColumn`, a feature with `key=column.name`\n whose `value` is a `Tensor`.\n \"\"\"\n\n def __init__(self,\n hidden_units,\n feature_columns,\n model_dir=None,\n weight_column_name=None,\n optimizer=None,\n activation_fn=nn.relu,\n dropout=None,\n gradient_clip_norm=None,\n enable_centered_bias=False,\n config=None,\n feature_engineering_fn=None,\n label_dimension=1,\n embedding_lr_multipliers=None):\n \"\"\"Initializes a `DNNRegressor` instance.\n\n Args:\n hidden_units: List of hidden units per layer. All layers are fully\n connected. Ex. `[64, 32]` means first layer has 64 nodes and second one\n has 32.\n feature_columns: An iterable containing all the feature columns used by\n the model. 
All items in the set should be instances of classes derived\n from `FeatureColumn`.\n model_dir: Directory to save model parameters, graph and etc. This can\n also be used to load checkpoints from the directory into an estimator to\n continue training a previously saved model.\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n optimizer: An instance of `tf.Optimizer` used to train the model. If\n `None`, will use an Adagrad optimizer.\n activation_fn: Activation function applied to each layer. If `None`, will\n use `tf.nn.relu`.\n dropout: When not `None`, the probability we will drop out a given\n coordinate.\n gradient_clip_norm: A `float` > 0. If provided, gradients are clipped\n to their global norm with this clipping ratio. See\n `tf.clip_by_global_norm` for more details.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n config: `RunConfig` object to configure the runtime settings.\n feature_engineering_fn: Feature engineering function. Takes features and\n labels which are the output of `input_fn` and\n returns features and labels which will be fed\n into the model.\n label_dimension: Dimension of the label for multilabels. Defaults to 1.\n embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to\n a `float` multiplier. Multiplier will be used to multiply with\n learning rate for the embedding variables.\n\n Returns:\n A `DNNRegressor` estimator.\n \"\"\"\n self._feature_columns = tuple(feature_columns or [])\n self._estimator = estimator.Estimator(\n model_fn=_dnn_model_fn,\n model_dir=model_dir,\n config=config,\n params={\n \"head\":\n head_lib._regression_head( # pylint: disable=protected-access\n label_dimension=label_dimension,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias),\n \"hidden_units\":\n hidden_units,\n \"feature_columns\":\n self._feature_columns,\n \"optimizer\":\n optimizer,\n \"activation_fn\":\n activation_fn,\n \"dropout\":\n dropout,\n \"gradient_clip_norm\":\n gradient_clip_norm,\n \"embedding_lr_multipliers\":\n embedding_lr_multipliers,\n },\n feature_engineering_fn=feature_engineering_fn)\n\n def fit(self,\n x=None,\n y=None,\n input_fn=None,\n steps=None,\n batch_size=None,\n monitors=None,\n max_steps=None):\n \"\"\"See trainable.Trainable.\"\"\"\n # TODO(roumposg): Remove when deprecated monitors are removed.\n hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)\n self._estimator.fit(x=x,\n y=y,\n input_fn=input_fn,\n steps=steps,\n batch_size=batch_size,\n monitors=hooks,\n max_steps=max_steps)\n return self\n\n def evaluate(self,\n x=None,\n y=None,\n input_fn=None,\n feed_fn=None,\n batch_size=None,\n steps=None,\n metrics=None,\n name=None,\n checkpoint_path=None,\n hooks=None):\n \"\"\"See evaluable.Evaluable.\"\"\"\n # TODO(zakaria): remove once deprecation is finished (b/31229024)\n custom_metrics = {}\n if metrics:\n for key, metric in six.iteritems(metrics):\n if (not isinstance(metric, metric_spec.MetricSpec) and\n not isinstance(key, tuple)):\n custom_metrics[(key, prediction_key.PredictionKey.SCORES)] = metric\n else:\n custom_metrics[key] = metric\n\n return self._estimator.evaluate(\n x=x,\n y=y,\n input_fn=input_fn,\n feed_fn=feed_fn,\n batch_size=batch_size,\n steps=steps,\n metrics=custom_metrics,\n 
name=name,\n checkpoint_path=checkpoint_path,\n hooks=hooks)\n\n @deprecated_arg_values(\n estimator.AS_ITERABLE_DATE,\n estimator.AS_ITERABLE_INSTRUCTIONS,\n as_iterable=False)\n def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):\n \"\"\"Returns predicted scores for given features.\n\n Args:\n x: features.\n input_fn: Input function. If set, x must be None.\n batch_size: Override default batch size.\n as_iterable: If True, return an iterable which keeps yielding predictions\n for each example until inputs are exhausted. Note: The inputs must\n terminate if you want the iterable to terminate (e.g. be sure to pass\n num_epochs=1 if you are using something like read_batch_features).\n\n Returns:\n Numpy array of predicted scores (or an iterable of predicted scores if\n as_iterable is True). If `label_dimension == 1`, the shape of the output\n is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.\n \"\"\"\n key = prediction_key.PredictionKey.SCORES\n preds = self._estimator.predict(\n x=x,\n input_fn=input_fn,\n batch_size=batch_size,\n outputs=[key],\n as_iterable=as_iterable)\n if as_iterable:\n return (pred[key] for pred in preds)\n return preds[key]\n\n def _get_predict_ops(self, features):\n \"\"\"See `Estimator` class.\"\"\"\n # This method exists to support some models that use the legacy interface.\n # pylint: disable=protected-access\n return self._estimator._get_predict_ops(features)\n\n def get_variable_names(self):\n \"\"\"Returns list of all variable names in this model.\n\n Returns:\n List of names.\n \"\"\"\n return self._estimator.get_variable_names()\n\n def get_variable_value(self, name):\n \"\"\"Returns value of the variable given by name.\n\n Args:\n name: string, name of the tensor.\n\n Returns:\n `Tensor` object.\n \"\"\"\n return self._estimator.get_variable_value(name)\n\n def export(self,\n export_dir,\n input_fn=None,\n input_feature_key=None,\n use_deprecated_input_fn=True,\n signature_fn=None,\n default_batch_size=1,\n exports_to_keep=None):\n \"\"\"See BaseEstimator.export.\"\"\"\n\n def default_input_fn(unused_estimator, examples):\n return layers.parse_feature_columns_from_examples(examples,\n self._feature_columns)\n\n return self._estimator.export(\n export_dir=export_dir,\n input_fn=input_fn or default_input_fn,\n input_feature_key=input_feature_key,\n use_deprecated_input_fn=use_deprecated_input_fn,\n signature_fn=signature_fn or export.regression_signature_fn,\n prediction_key=prediction_key.PredictionKey.SCORES,\n default_batch_size=default_batch_size,\n exports_to_keep=exports_to_keep)\n\n @property\n def model_dir(self):\n return self._estimator.model_dir\n\n @property\n def config(self):\n return self._estimator.config\n"
] | [
[
"tensorflow.contrib.layers.parse_feature_columns_from_examples",
"tensorflow.contrib.learn.python.learn.estimators.head._multi_class_head",
"tensorflow.contrib.framework.python.ops.variables.get_global_step",
"tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined._extract_embedding_lr_multipliers",
"tensorflow.contrib.layers.input_from_feature_columns",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.python.summary.summary.histogram",
"tensorflow.contrib.layers.dropout",
"tensorflow.contrib.learn.python.learn.estimators.head._regression_head",
"tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner",
"tensorflow.contrib.framework.deprecated_arg_values",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.nn.zero_fraction",
"tensorflow.contrib.learn.python.learn.monitors.replace_monitors_with_hooks",
"tensorflow.contrib.framework.deprecated"
]
] |
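_dnn_model_fn above assembles the network as a loop over hidden_units, each iteration adding a fully connected layer plus activation before a final linear logits layer. Below is a shape-only NumPy sketch of that loop, since the tf.contrib calls it relies on are TF 1.x-only; the layer sizes and class count are illustrative.

import numpy as np

rng = np.random.default_rng(0)
net = rng.standard_normal((8, 20))        # batch of 8 examples, 20 input features
for num_hidden_units in [64, 32]:         # plays the role of params['hidden_units']
    w = 0.1 * rng.standard_normal((net.shape[1], num_hidden_units))
    net = np.maximum(net @ w, 0.0)        # fully_connected followed by ReLU
logits = net @ (0.1 * rng.standard_normal((net.shape[1], 3)))
print(logits.shape)                       # (8, 3): logits for 3 classes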
EmbeddedML-EDAGroup/Q-PPG | [
"ed42829d0a456db4f0b31d63ba8b22ba483c7b08",
"ed42829d0a456db4f0b31d63ba8b22ba483c7b08"
] | [
"precision_search/model/TEMPONet_float.py",
"precision_search/base/base_trainer.py"
] | [
"#*----------------------------------------------------------------------------*\n#* Copyright (C) 2021 Politecnico di Torino, Italy *\n#* SPDX-License-Identifier: Apache-2.0 *\n#* *\n#* Licensed under the Apache License, Version 2.0 (the \"License\"); *\n#* you may not use this file except in compliance with the License. *\n#* You may obtain a copy of the License at *\n#* *\n#* http://www.apache.org/licenses/LICENSE-2.0 *\n#* *\n#* Unless required by applicable law or agreed to in writing, software *\n#* distributed under the License is distributed on an \"AS IS\" BASIS, *\n#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *\n#* See the License for the specific language governing permissions and *\n#* limitations under the License. *\n#* *\n#* Author: Alessio Burrello *\n#*----------------------------------------------------------------------------*\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom base import BaseModel\nfrom math import ceil\nimport sys\nsys.path.append(\"..\")\nfrom models import quant_module_1d as qm\n\n__all__ = ['TempoNetfloat']\n\n\ndef TempoNetfloat(**kwargs):\n return TEMPONet(**kwargs)\n\n\nclass TEMPONet(BaseModel):\n \"\"\"\n TEMPONet architecture:\n Three repeated instances of TemporalConvBlock and ConvBlock organized as follows:\n - TemporalConvBlock\n - ConvBlock\n Two instances of Regressor followed by a final Linear layer with a single neuron.\n \"\"\"\n\n def __init__(self, dataset_name='PPG_Dalia', dataset_args={}):\n super(TEMPONet, self).__init__()\n\n self.dil = [\n 2, 2, 1,\n 4, 4,\n 8, 8\n ]\n self.rf = [\n 5, 5, 5,\n 9, 9,\n 17, 17\n ]\n self.ch = [\n 32, 32, 64,\n 64, 64, 128,\n 128, 128, 128,\n 256, 128\n ]\n\n # 1st instance of two TempConvBlocks and ConvBlock\n k_tcb00 = ceil(self.rf[0] / self.dil[0])\n self.tcb00 = TempConvBlock(\n ch_in=4,\n ch_out=self.ch[0],\n k_size=k_tcb00,\n dil=self.dil[0],\n pad=((k_tcb00 - 1) * self.dil[0] + 1) // 2\n )\n k_tcb01 = ceil(self.rf[1] / self.dil[1])\n self.tcb01 = TempConvBlock(\n ch_in=self.ch[0],\n ch_out=self.ch[1],\n k_size=k_tcb01,\n dil=self.dil[1],\n pad=((k_tcb01 - 1) * self.dil[1] + 1) // 2\n )\n k_cb0 = ceil(self.rf[2] / self.dil[2])\n self.cb0 = ConvBlock(\n ch_in=self.ch[1],\n ch_out=self.ch[2],\n k_size=k_cb0,\n strd=1,\n pad=((k_cb0 - 1) * self.dil[2] + 1) // 2,\n dilation=self.dil[2]\n )\n\n # 2nd instance of two TempConvBlocks and ConvBlock\n k_tcb10 = ceil(self.rf[3] / self.dil[3])\n self.tcb10 = TempConvBlock(\n ch_in=self.ch[2],\n ch_out=self.ch[3],\n k_size=k_tcb10,\n dil=self.dil[3],\n pad=((k_tcb10 - 1) * self.dil[3] + 1) // 2\n )\n k_tcb11 = ceil(self.rf[4] / self.dil[4])\n self.tcb11 = TempConvBlock(\n ch_in=self.ch[3],\n ch_out=self.ch[4],\n k_size=k_tcb11,\n dil=self.dil[4],\n pad=((k_tcb11 - 1) * self.dil[4] + 1) // 2\n )\n self.cb1 = ConvBlock(\n ch_in=self.ch[4],\n ch_out=self.ch[5],\n k_size=5,\n strd=2,\n pad=2\n )\n\n # 3td instance of TempConvBlock and ConvBlock\n k_tcb20 = ceil(self.rf[5] / self.dil[5])\n self.tcb20 = TempConvBlock(\n ch_in=self.ch[5],\n ch_out=self.ch[6],\n k_size=k_tcb20,\n dil=self.dil[5],\n pad=((k_tcb20 - 1) * self.dil[5] + 1) // 2\n )\n k_tcb21 = ceil(self.rf[6] / self.dil[6])\n self.tcb21 = TempConvBlock(\n ch_in=self.ch[6],\n ch_out=self.ch[7],\n k_size=k_tcb21,\n dil=self.dil[6],\n pad=((k_tcb21 - 1) * self.dil[6] + 1) // 2\n )\n self.cb2 = ConvBlock(\n ch_in=self.ch[7],\n ch_out=self.ch[8],\n k_size=5,\n strd=4,\n pad=4\n )\n\n # 1st instance of regressor\n self.regr0 = Regressor(\n ft_in=self.ch[8] * 4,\n 
ft_out=self.ch[9]\n )\n\n # 2nd instance of regressor\n self.regr1 = Regressor(\n ft_in=self.ch[9],\n ft_out=self.ch[10]\n )\n\n self.out_neuron = nn.Linear(\n in_features=self.ch[10],\n out_features=1\n )\n\n def forward(self, x):\n x = self.cb0(\n self.tcb01(\n self.tcb00(\n x\n )\n )\n )\n x = self.cb1(\n self.tcb11(\n self.tcb10(\n x\n )\n )\n )\n x = self.cb2(\n self.tcb21(\n self.tcb20(\n x\n )\n )\n )\n\n x = x.flatten(1)\n x = self.regr0(\n x\n )\n x = self.regr1(\n x\n )\n\n x = self.out_neuron(\n x\n )\n return x\n\n\nclass TempConvBlock(BaseModel):\n \"\"\"\n Temporal Convolutional Block composed of one temporal convolutional layer.\n The block is composed of :\n - Conv1d layer\n - Chomp1d layer\n - ReLU layer\n - BatchNorm1d layer\n\n :param ch_in: Number of input channels\n :param ch_out: Number of output channels\n :param k_size: Kernel size\n :param dil: Amount of dilation\n :param pad: Amount of padding\n \"\"\"\n\n def __init__(self, ch_in, ch_out, k_size, dil, pad):\n super(TempConvBlock, self).__init__()\n\n self.tcn0 = nn.Conv1d(\n in_channels=ch_in,\n out_channels=ch_out,\n kernel_size=k_size,\n dilation=dil,\n bias = False,\n padding=pad\n )\n self.relu0 = nn.ReLU6()\n self.bn0 = nn.BatchNorm1d(\n num_features=ch_out\n )\n\n def forward(self, x):\n x = self.relu0(self.bn0(self.tcn0(x)))\n return x\n\n\nclass ConvBlock(BaseModel):\n \"\"\"\n Convolutional Block composed of:\n - Conv1d layer\n - AvgPool1d layer\n - ReLU layer\n - BatchNorm1d layer\n\n :param ch_in: Number of input channels\n :param ch_out: Number of output channels\n :param k_size: Kernel size\n :param strd: Amount of stride\n :param pad: Amount of padding\n \"\"\"\n\n def __init__(self, ch_in, ch_out, k_size, strd, pad, dilation=1):\n super(ConvBlock, self).__init__()\n\n self.conv0 = nn.Conv1d(\n in_channels=ch_in,\n out_channels=ch_out,\n kernel_size=k_size,\n stride=strd,\n dilation=dilation,\n bias = False,\n padding=pad\n )\n self.pool0 = nn.AvgPool1d(\n kernel_size=2,\n stride=2,\n padding=0\n )\n self.relu0 = nn.ReLU6()\n self.bn0 = nn.BatchNorm1d(ch_out)\n\n def forward(self, x):\n x = self.relu0(self.bn0(self.pool0(self.conv0(x))))\n return x\n\n\nclass Regressor(BaseModel):\n \"\"\"\n Regressor block composed of :\n - Linear layer\n - ReLU layer\n - BatchNorm1d layer\n\n :param ft_in: Number of input channels\n :param ft_out: Number of output channels\n \"\"\"\n\n def __init__(self, ft_in, ft_out):\n super(Regressor, self).__init__()\n self.ft_in = ft_in\n self.ft_out = ft_out\n\n self.fc0 = nn.Linear(\n in_features=ft_in,\n out_features=ft_out,\n bias = False\n )\n\n self.relu0 = nn.ReLU6()\n self.bn0 = nn.BatchNorm1d(\n num_features=ft_out\n )\n\n def forward(self, x):\n x = self.relu0(self.bn0(self.fc0(x)))\n return x\n\n\nclass Chomp1d(BaseModel):\n \"\"\"\n Module that perform a chomping operation on the input tensor.\n It is used to chomp the amount of zero-padding added on the right of the input tensor; this operation is necessary to compute causal convolutions.\n :param chomp_size: amount of padding 0s to be removed\n \"\"\"\n\n def __init__(self, chomp_size):\n super(Chomp1d, self).__init__()\n self.chomp_size = chomp_size\n\n def forward(self, x):\n return x[:, :, :-self.chomp_size].contiguous()\n\n",
"#*----------------------------------------------------------------------------*\n#* Copyright (C) 2021 Politecnico di Torino, Italy *\n#* SPDX-License-Identifier: Apache-2.0 *\n#* *\n#* Licensed under the Apache License, Version 2.0 (the \"License\"); *\n#* you may not use this file except in compliance with the License. *\n#* You may obtain a copy of the License at *\n#* *\n#* http://www.apache.org/licenses/LICENSE-2.0 *\n#* *\n#* Unless required by applicable law or agreed to in writing, software *\n#* distributed under the License is distributed on an \"AS IS\" BASIS, *\n#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *\n#* See the License for the specific language governing permissions and *\n#* limitations under the License. *\n#* *\n#* Author: Alessio Burrello *\n#*----------------------------------------------------------------------------*\n\nimport torch\nfrom abc import abstractmethod\nfrom numpy import inf \nfrom logger import TensorboardWriter\nimport pdb\n\nclass BaseTrainer:\n \"\"\"\n Base class for all trainers\n \"\"\"\n def __init__(self, model, model_float, criterion, metric_ftns, optimizer, optimizer_float, arch_optimizer, finetuning, config, args_input):\n self.config = config\n self.args = args_input\n self.logger = config.get_logger('trainer', config['trainer']['verbosity'])\n \n self.model = model\n self.model_float = model_float\n self.criterion = criterion\n self.metric_ftns = metric_ftns\n self.optimizer = optimizer\n self.optimizer_float = optimizer_float\n self.arch_optimizer = arch_optimizer\n self.finetuning = finetuning\n\n cfg_trainer = config['trainer']\n self.epochs = cfg_trainer['epochs']\n self.save_period = cfg_trainer['save_period']\n self.monitor = cfg_trainer.get('monitor', 'off')\n\n # configuration to monitor model performance and save best\n if self.monitor == 'off':\n self.mnt_mode = 'off'\n self.mnt_best = 0\n else:\n self.mnt_mode, self.mnt_metric = self.monitor.split()\n assert self.mnt_mode in ['min', 'max']\n\n self.mnt_best = inf if self.mnt_mode == 'min' else -inf\n self.early_stop = cfg_trainer.get('early_stop', inf)\n if self.early_stop <= 0:\n self.early_stop = inf\n\n self.start_epoch = 1\n\n self.checkpoint_dir = config.save_dir\n\n # setup visualization writer instance\n self.writer = TensorboardWriter(config.log_dir, self.logger, cfg_trainer['tensorboard'])\n\n if config.resume is not None:\n self._resume_checkpoint(config.resume)\n\n @abstractmethod\n def _train_epoch(self, epoch, prec_float):\n \"\"\"\n Training logic for an epoch\n\n :param epoch: Current epoch number\n \"\"\"\n raise NotImplementedError\n\n def train(self):\n \"\"\"\n Full training logic\n \"\"\"\n not_improved_count = 0\n if self.finetuning == 'True':\n print(\"Float Training\")\n not_improved_count = 0\n for epoch in range(self.start_epoch, self.epochs + 1):\n result = self._train_epoch(epoch, self.finetuning)\n\n # save logged informations into log dicts\n log = {'epoch': epoch}\n log.update(result)\n # print logged information to the screen\n for key, value in log.items():\n self.logger.info('{:15s}: {}'.format(str(key), value))\n\n # evaluate model performance according to configured metric, save best checkpoint as model_best\n best = False\n if self.mnt_mode != 'off':\n try:\n # check whether the model performance improved or not, according to specified metric (mnt_metric)\n improved = (self.mnt_mode == 'min' and log[self.mnt_metric] <= self.mnt_best) or (\n self.mnt_mode == 'max' and log[self.mnt_metric] >= self.mnt_best)\n 
except KeyError:\n self.logger.warning(\"Warning: Metric '{}' is not found.\"\n \"Model performance monitoring is disabled\".format(self.mnt_metric))\n self.mnt_mode = 'off'\n improved = False\n\n if improved:\n self.mnt_best = log[self.mnt_metric]\n not_improved_count = 0\n best = True\n else:\n not_improved_count += 1\n if not_improved_count > self.early_stop:\n self.logger.info(\"Validation performance didn\\'t improve for {} epochs.\"\n \"Training Stops.\".format(self.early_stop))\n break\n #for name_float, param_float, name, param in zip(self.model_float.named_parameters(), self.model.named_parameters()):\n state_dict = self.model.state_dict()\n for name_float, param_float in self.model_float.named_parameters():\n for name, param in self.model.named_parameters():\n name_float_s = name_float.split('.')\n if name == name_float or name == ''.join(name_float_s[:-1])+'.linear.'+name_float_s[-1] or name == ''.join(name_float_s[:-1])+'.quantized_weight.'+name_float_s[-1]:\n state_dict[name] = param_float\n self.model.load_state_dict(state_dict)\n\n not_improved_count = 0\n print(\"Quantized Training\")\n for epoch in range(self.start_epoch, self.epochs + 1):\n result = self._train_epoch(epoch, 'False')\n\n if self.arch_optimizer != False:\n print('========= architecture =========')\n if hasattr(self.model, 'module'):\n best_arch, bitops, bita, bitw, mixbitops, mixbita, mixbitw = self.model.module.fetch_best_arch()\n else:\n best_arch, bitops, bita, bitw, mixbitops, mixbita, mixbitw = self.model.fetch_best_arch()\n print('best model with bitops: {:.3f}M, bita: {:.3f}K, bitw: {:.3f}M'.format(\n bitops, bita, bitw))\n print('expected model with bitops: {:.3f}M, bita: {:.3f}K, bitw: {:.3f}M'.format(\n mixbitops, mixbita, mixbitw))\n for key, value in best_arch.items():\n print('{}: {}'.format(key, value))\n\n # save logged informations into log dicts\n log = {'epoch' : epoch}\n log.update(result)\n\n # print logged information to the screen\n for key, value in log.items():\n self.logger.info('{:15s}: {}'.format(str(key), value))\n\n # evaluate model performance according to configured metric, save best checkpoint as model_best\n best = False\n if self.mnt_mode != 'off':\n try:\n # check whether the model performance improved or not, according to specified metric (mnt_metric)\n improved = (self.mnt_mode == 'min' and log[self.mnt_metric] <= self.mnt_best) or (self.mnt_mode == 'max' and log[self.mnt_metric] >= self.mnt_best)\n except KeyError:\n self.logger.warning(\"Warning: Metric '{}' is not found.\"\n \"Model performance monitoring is disabled\".format(self.mnt_metric))\n self.mnt_mode = 'off'\n improved = False\n\n if improved:\n self.mnt_best = log[self.mnt_metric]\n not_improved_count = 0\n best = True\n else:\n not_improved_count += 1\n if not_improved_count > self.early_stop:\n self.logger.info(\"Validation performance didn\\'t improve for {} epochs.\"\n \"Training Stops.\".format(self.early_stop))\n if self.arch_optimizer != False:\n import json\n complexity = str(int(self.args.complexity_decay*1000000))\n a_file = open(\"mix_archs/architecture_\"+self.args.arch+complexity+\".json\", \"w\")\n best_arch['best_weight'] = [array_weights.astype('int').tolist() for array_weights in best_arch['best_weight']]\n import numpy as np\n best_arch['best_activ'] = np.asarray(best_arch['best_activ']).astype('int').tolist()\n json.dump(best_arch, a_file)\n break\n\n if epoch % self.save_period == 0 or best==True:\n self._save_checkpoint(epoch, save_best=best)\n \n def _save_checkpoint(self, epoch, 
save_best=False):\n \"\"\"\n Saving checkpoints\n\n :param epoch: current epoch number\n :param save_best: if True, rename the saved checkpoint to 'model_best.pth'\n \"\"\"\n arch = type(self.model).__name__\n if self.arch_optimizer != False:\n state = {\n 'arch': arch,\n 'epoch': epoch,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'arch_optimizer': self.arch_optimizer.state_dict(),\n 'monitor_best': self.mnt_best,\n 'config': self.config\n }\n else:\n state = {\n 'arch': arch,\n 'epoch': epoch,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'monitor_best': self.mnt_best,\n 'config': self.config\n }\n filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))\n torch.save(state, filename)\n self.logger.info(\"Saving checkpoint: {} ...\".format(filename))\n if save_best:\n best_path = str(self.checkpoint_dir / 'model_best.pth')\n torch.save(state, best_path)\n self.logger.info(\"Saving current best: model_best.pth ...\")\n \n def _resume_checkpoint(self, resume_path):\n \"\"\"\n Resume from saved checkpoints\n\n :param resume_path: Checkpoint path to be resumed\n \"\"\"\n resume_path = str(resume_path)\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n self.mnt_best = checkpoint['monitor_best']\n\n # load architecture params from checkpoint\n if checkpoint['config']['arch'] != self.config['arch']:\n self.logger.warning(\"Warning: Optimizer type given in config file is different from that of checkpoint. \"\n \"Optimizer parameters not being resumed.\")\n else:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.arch_optimizer.load_state_dict(checkpoint['arch_optimizer'])\n\n self.logger.info(\"Checkpoint loaded. Resume training from epoch {}\".format(self.start_epoch))\n\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.ReLU6",
"torch.nn.Linear",
"torch.nn.Conv1d",
"torch.nn.AvgPool1d"
],
[
"numpy.asarray",
"torch.load",
"torch.save"
]
] |
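The TEMPONet code in the row above derives each kernel size from a target receptive field `rf` and dilation `dil` as `k = ceil(rf / dil)`, then pads with `((k - 1) * dil + 1) // 2` so stride-1 temporal convolutions preserve sequence length. A minimal standalone sketch of that arithmetic (the values `rf=17, dil=8, ch=4` are illustrative, not taken from the row):

```python
# Sketch: verify the TEMPONet-style kernel/padding arithmetic for a
# dilated Conv1d. pad = ((k - 1) * dil + 1) // 2 gives "same"-style
# padding because the effective kernel span is (k - 1) * dil + 1.
from math import ceil
import torch
import torch.nn as nn

rf, dil, ch = 17, 8, 4          # target receptive field, dilation, channels
k = ceil(rf / dil)              # kernel size covering the receptive field
pad = ((k - 1) * dil + 1) // 2  # same padding used by TempConvBlock

conv = nn.Conv1d(ch, ch, kernel_size=k, dilation=dil, padding=pad, bias=False)
x = torch.randn(2, ch, 256)     # (batch, channels, time)
print(conv(x).shape)            # torch.Size([2, 4, 256]) -> length preserved
```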
ai-systems/crossmodal_embedding | [
"5c61775531fd350c48a965450ab5e99b28deec5e"
] | [
"crossmodal_embedding/tasks/crossmodal/training_star_task.py"
] | [
"from prefect import Task\nfrom loguru import logger\nfrom tqdm import tqdm\nfrom crossmodal_embedding.models import CrossModalEmbedding, SiameseNet\nfrom crossmodal_embedding.models import InputData, InputDataTest\nfrom sklearn.metrics import precision_recall_fscore_support, f1_score\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch\nimport torch.nn as nn\nfrom crossmodal_embedding.util.evaluation import (\n compute_map_basic,\n compute_map_with_unification,\n)\nfrom torch.utils.data import WeightedRandomSampler\nimport sys\nimport json\n\nfrom torch.utils.tensorboard import SummaryWriter\n\n\nclass TrainingTaskStar(Task):\n def create_weights(self, df):\n positives = 0\n negatives = 0\n weights = list()\n for index, row in df.iterrows():\n if row[\"score\"] == 0:\n negatives = negatives + 1\n else:\n positives = positives + 1\n\n weight_positive = 1.0 / float(positives)\n weight_negative = 1.0 / float(negatives)\n\n for index, row in df.iterrows():\n if row[\"score\"] == 0:\n weights.append(weight_negative)\n else:\n weights.append(weight_positive)\n return torch.tensor(weights)\n\n def run(\n self,\n train,\n test,\n dev,\n num_negatives,\n output_log,\n output_model,\n vocab_size,\n batch_size=10,\n num_epochs=5,\n learning_rate=0.0001,\n max_sequence_len=100,\n hidden_size=10,\n out_embedding=128,\n attention_heads=5,\n word_embedding=50,\n decay=0.01,\n ):\n\n logger.info(f\" Negative Examples: {num_negatives}\")\n logger.info(\"Let's train the Cross-Modal Embedding ! (^・ω・^ )\")\n # Device configuration\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # Check for multi_GPUS\n multiple_gpus = 0\n\n train_class_weight = self.create_weights(train)\n\n train_dataset = InputData(train)\n logger.info(f\"TRAIN: {len(train_dataset)}\")\n dev_dataset = InputData(dev)\n logger.info(f\"DEV: {len(dev_dataset)}\")\n test_dataset = InputDataTest(test, vocab_size)\n logger.info(f\"TEST: {len(test_dataset)}\")\n sampler_train = WeightedRandomSampler(\n train_class_weight, len(train_class_weight)\n )\n\n # Data loader\n\n train_loader = torch.utils.data.DataLoader(\n dataset=train_dataset, batch_size=batch_size, sampler=sampler_train,\n )\n\n dev_loader = torch.utils.data.DataLoader(\n dataset=dev_dataset, batch_size=batch_size, shuffle=False\n )\n\n test_loader = torch.utils.data.DataLoader(\n dataset=test_dataset, batch_size=batch_size, shuffle=False\n )\n\n model = SiameseNet(\n out_embedding,\n batch_size,\n vocab_size,\n max_len=max_sequence_len,\n hidden_size=hidden_size,\n out_embedding=out_embedding,\n device=device,\n attention_heads=attention_heads,\n word_embedding=word_embedding,\n )\n\n if torch.cuda.device_count() > 1:\n logger.info(\n f\"**********Let's use {torch.cuda.device_count()} GPUs!********\"\n )\n multiple_gpus = 1\n model = nn.DataParallel(model)\n else:\n logger.info(\"********* Only one GPU *******\")\n\n model = model.to(device)\n\n # Loss and optimizer\n criterion = nn.NLLLoss()\n\n optimizer = torch.optim.AdamW(\n model.parameters(), lr=learning_rate, weight_decay=decay\n )\n\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, \"min\", verbose=True, patience=1, cooldown=3\n )\n\n # Train the model\n best_value = 0\n all_best = dict()\n result_dict = dict()\n total_step = len(train_loader)\n for epoch in tqdm(range(num_epochs), desc=f\"Epoch\"):\n epoch_loss = 0.0\n running_loss = 0.0\n\n model.train()\n t = tqdm(iter(train_loader), leave=False, total=len(train_loader))\n for (\n i,\n (statement1, st1_mask, 
st1_len, statement2, st2_mask, st2_len, score),\n ) in enumerate(t):\n\n # Move tensors to the configured device\n statement1 = statement1.to(device)\n st1_mask = st1_mask.to(device)\n st1_len = st1_len.to(device)\n statement2 = statement2.to(device)\n st2_mask = st2_mask.to(device)\n st2_len = st2_len.to(device)\n\n score = score.to(device)\n optimizer.zero_grad()\n sim = model(\n statement1, st1_mask, st1_len, statement2, st2_mask, st2_len\n )\n\n loss = criterion(sim, score)\n\n loss.backward()\n optimizer.step()\n\n epoch_loss += loss.item()\n\n # print statistics\n running_loss += loss.item()\n if i % 10 == 0: \n t.set_description(\"loss: {:.4f}\".format(running_loss / 10))\n running_loss = 0\n\n logger.info(\n f\"********Epoch: {epoch+1} *****Loss: {epoch_loss / len(train_loader)}\"\n )\n result_dict[epoch] = dict()\n result_dict[epoch][\"train_loss\"] = epoch_loss / len(train_loader)\n\n scheduler.step(epoch_loss / len(train_loader))\n if (epoch + 1) % 1 == 0:\n model.eval()\n with torch.no_grad():\n\n logger.info(\"Evaluating on Train set!\")\n t = tqdm(iter(train_loader), leave=False, total=len(train_loader))\n y_pred_list = []\n y_real_list = []\n for (\n i,\n (\n statement1,\n st1_mask,\n st1_len,\n statement2,\n st2_mask,\n st2_len,\n score,\n ),\n ) in enumerate(t):\n\n # Move tensors to the configured device\n statement1 = statement1.to(device)\n st1_mask = st1_mask.to(device)\n st1_len = st1_len.to(device)\n statement2 = statement2.to(device)\n st2_mask = st2_mask.to(device)\n st2_len = st2_len.to(device)\n\n y_real_list.extend(score.cpu().tolist())\n score = score.to(device)\n\n sim = model(\n statement1, st1_mask, st1_len, statement2, st2_mask, st2_len\n )\n y_dev_pred = torch.argmax(sim, dim=1)\n # y_dev_pred = torch.argmax(sim, dim=1)\n y_pred_list.extend(y_dev_pred.cpu().tolist())\n\n f1_value = f1_score(y_real_list, y_pred_list)\n (precision, recall, _, _,) = precision_recall_fscore_support(\n y_real_list, y_pred_list, average=\"binary\"\n )\n # logger.info(\"**** TRAINING SET **** \")\n # logger.info(f\"F1-value: {f1_value}\")\n # logger.info(f\"Precision: {precision}\")\n # logger.info(f\"Recall: {recall}\")\n\n logger.info(\"Evaluating on Dev set!\")\n\n t = tqdm(iter(dev_loader), leave=False, total=len(dev_loader))\n y_pred_list = []\n y_real_list = []\n epoch_test_loss = 0.0\n for (\n i,\n (\n statement1,\n st1_mask,\n st1_len,\n statement2,\n st2_mask,\n st2_len,\n score,\n ),\n ) in enumerate(t):\n\n statement1 = statement1.to(device)\n st1_mask = st1_mask.to(device)\n st1_len = st1_len.to(device)\n statement2 = statement2.to(device)\n st2_mask = st2_mask.to(device)\n st2_len = st2_len.to(device)\n\n y_real_list.extend(score.cpu().tolist())\n score = score.to(device)\n\n sim = model(\n statement1, st1_mask, st2_len, statement2, st2_mask, st2_len\n )\n loss_test = criterion(sim, score)\n epoch_test_loss += loss_test.item()\n y_dev_pred = torch.argmax(sim, dim=1)\n y_pred_list.extend(y_dev_pred.cpu().tolist())\n\n logger.info(f\"DEV LOSS: {epoch_test_loss / len(dev_loader)}\")\n # scheduler.step(epoch_test_loss / len(dev_loader))\n f1_value = f1_score(y_real_list, y_pred_list)\n (precision, recall, _, _,) = precision_recall_fscore_support(\n y_real_list, y_pred_list, average=\"binary\"\n )\n # logger.info(\"**** DEV SET **** \")\n # logger.info(f\"F1-value: {f1_value}\")\n # logger.info(f\"Precision: {precision.tolist()}\")\n # logger.info(f\"Recall: {recall.tolist()}\")\n result_dict[epoch][\"f1\"] = f1_value\n result_dict[epoch][\"precision\"] = 
precision.tolist()\n result_dict[epoch][\"recall\"] = recall.tolist()\n\n if f1_value > best_value:\n best_value = f1_value\n model = model.to(\"cpu\")\n if multiple_gpus:\n torch.save(\n model.module.state_dict(), f\"./models/{output_model}\",\n )\n else:\n torch.save(\n model.state_dict(), f\"./models/{output_model}\",\n )\n\n all_best[\"f1\"] = f1_value\n all_best[\"precision\"] = precision.tolist()\n all_best[\"recall\"] = recall.tolist()\n model = model.to(device)\n best_model = model\n\n with torch.no_grad():\n best_model.eval()\n logger.info(\"Evaluating on Test set!\")\n all_embeddings = dict()\n t = tqdm(iter(test_loader), leave=False, total=len(test_loader))\n y_pred_list = []\n y_real_list = []\n for (\n i,\n (statement1, st1_mask, st1_len, statement2, st2_mask, st2_len, score),\n ) in enumerate(t):\n\n # Move tensors to the configured device\n statement1 = statement1.to(device)\n st1_mask = st1_mask.to(device)\n st1_len = st1_len.to(device)\n statement2 = statement2.to(device)\n st2_mask = st2_mask.to(device)\n st2_len = st2_len.to(device)\n\n y_real_list.extend(score.cpu().tolist())\n score = score.to(device)\n\n sim = best_model(\n statement1, st1_mask, st1_len, statement2, st2_mask, st2_len\n )\n # y_dev_pred = torch.round(sim)\n y_dev_pred = torch.argmax(sim, dim=1)\n y_pred_list.extend(y_dev_pred.cpu().tolist())\n\n f1_value = f1_score(y_real_list, y_pred_list)\n (precision, recall, _, _,) = precision_recall_fscore_support(\n y_real_list, y_pred_list, average=\"binary\"\n )\n\n logger.info(\"****** PARAMETERS ********\")\n logger.info(f\"Num negatives: {num_negatives}\")\n logger.info(f\"Batch_size: {batch_size}\")\n logger.info(f\"Max len: {max_sequence_len}\")\n logger.info(f\"Word embedding: {word_embedding}\")\n logger.info(f\"Out embedding: {out_embedding}\")\n logger.info(f\"Hidden Size: {hidden_size}\")\n logger.info(f\"Decay: {decay}\")\n logger.info(f\"ATT heads: {attention_heads}\")\n logger.info(f\"Learning rate: {learning_rate}\")\n logger.info(\"****** BEST RESULTS TEST******\")\n logger.info(f\"F1 SCORE {f1_value}\")\n logger.info(f\"PRECISION: {precision}\")\n logger.info(f\"RECALL: {recall}\")\n all_best[\"f1_test\"] = f1_value\n all_best[\"precision_test\"] = precision.tolist()\n all_best[\"recall_test\"] = recall.tolist()\n\n logger.info(\"******** BEST RESULTS DEV **********\")\n logger.info(all_best)\n\n with open(f\"./logs/{output_log}\", \"w\") as f:\n json.dump(result_dict, f)\n with open(f\"./logs/best_{output_log}\", \"w\") as f:\n json.dump(result_dict, f)\n"
] | [
[
"torch.nn.NLLLoss",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.utils.data.DataLoader",
"torch.tensor",
"sklearn.metrics.precision_recall_fscore_support",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.cuda.is_available",
"sklearn.metrics.f1_score",
"torch.cuda.device_count",
"torch.argmax"
]
] |
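The `create_weights` method in the row above implements inverse-class-frequency sample weights, which are then fed to `WeightedRandomSampler` so the training loader draws a roughly balanced stream from an imbalanced dataset. A self-contained sketch of the same idea (the toy labels and tensor dataset are hypothetical, not from the repository):

```python
# Sketch: class-balanced sampling via per-sample inverse-frequency weights,
# as done by TrainingTaskStar.create_weights + WeightedRandomSampler.
import torch
from torch.utils.data import DataLoader, TensorDataset, WeightedRandomSampler

labels = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1])   # imbalanced binary labels
features = torch.randn(len(labels), 3)
counts = torch.bincount(labels).float()
weights = 1.0 / counts[labels]                    # inverse class frequency per sample

sampler = WeightedRandomSampler(weights, num_samples=len(weights))
loader = DataLoader(TensorDataset(features, labels), batch_size=4, sampler=sampler)

for xb, yb in loader:
    print(yb.tolist())  # classes now appear with roughly equal probability
```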
YetheYe/Mask_RCNN | [
"6895c617af13ecbf0bb27790e29a6271725cb34f"
] | [
"config.py"
] | [
"\"\"\"\nMask R-CNN\nBase Configurations class.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport math\nimport numpy as np\n\n\n# Base Configuration Class\n# Don't use this class directly. Instead, sub-class it and override\n# the configurations you need to change.\n\nclass Config(object):\n \"\"\"Base configuration class. For custom configurations, create a\n sub-class that inherits from this one and override properties\n that need to be changed.\n \"\"\"\n # Name the configurations. For example, 'COCO', 'Experiment 3', ...etc.\n # Useful if your code needs to do things differently depending on which\n # experiment is running.\n NAME = None # Override in sub-classes\n\n # NUMBER OF GPUs to use. For CPU training, use 1\n GPU_COUNT = 1\n\n # Number of images to train with on each GPU. A 12GB GPU can typically\n # handle 2 images of 1024x1024px.\n # Adjust based on your GPU memory and image sizes. Use the highest\n # number that your GPU can handle for best performance.\n IMAGES_PER_GPU = 2\n\n # Number of training steps per epoch\n # This doesn't need to match the size of the training set. Tensorboard\n # updates are saved at the end of each epoch, so setting this to a\n # smaller number means getting more frequent TensorBoard updates.\n # Validation stats are also calculated at each epoch end and they\n # might take a while, so don't set this too small to avoid spending\n # a lot of time on validation stats.\n STEPS_PER_EPOCH = 1000\n\n # Number of validation steps to run at the end of every training epoch.\n # A bigger number improves accuracy of validation stats, but slows\n # down the training.\n VALIDATION_STEPS = 50\n\n # Backbone network architecture\n # Supported values are: resnet50, resnet101\n BACKBONE = \"resnet101\"\n\n # The strides of each layer of the FPN Pyramid. These values\n # are based on a Resnet101 backbone.\n BACKBONE_STRIDES = [4, 8, 16, 32, 64]\n\n # Number of classification classes (including background)\n NUM_CLASSES = 1 # Override in sub-classes\n\n # Length of square anchor side in pixels\n RPN_ANCHOR_SCALES = (128, 256, 512)\n\n # Ratios of anchors at each cell (width/height)\n # A value of 1 represents a square anchor, and 0.5 is a wide anchor\n RPN_ANCHOR_RATIOS = [0.5, 1, 2]\n\n # Anchor stride\n # If 1 then anchors are created for each cell in the backbone feature map.\n # If 2, then anchors are created for every other cell, and so on.\n RPN_ANCHOR_STRIDE = 1\n\n # Non-max suppression threshold to filter RPN proposals.\n # You can increase this during training to generate more propsals.\n RPN_NMS_THRESHOLD = 0.7\n\n # How many anchors per image to use for RPN training\n RPN_TRAIN_ANCHORS_PER_IMAGE = 256\n\n # ROIs kept after non-maximum supression (training and inference)\n POST_NMS_ROIS_TRAINING = 2000\n POST_NMS_ROIS_INFERENCE = 1000\n\n # If enabled, resizes instance masks to a smaller size to reduce\n # memory load. Recommended when using high-resolution images.\n USE_MINI_MASK = True\n MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask\n\n # Input image resizing\n # Images are resized such that the small side is IMAGE_MIN_DIM and\n # the long side is <= IMAGE_MAX_DIM. 
If both conditions can't be\n # satisfied at the same time then IMAGE_MAX_DIM is enforced.\n # Resizing modes:\n # none: No resizing\n # square: Pad with zeros to make it a square (MAX_DIM, MAX_DIM)\n # TODO: currently, only 'square' mode is supported\n IMAGE_RESIZE_MODE = \"square\"\n IMAGE_MIN_DIM = 800\n IMAGE_MAX_DIM = 1024\n\n # Image mean (RGB)\n MEAN_PIXEL = np.array([123.7, 116.8, 103.9])\n\n # Number of ROIs per image to feed to classifier/mask heads\n # The Mask RCNN paper uses 512 but often the RPN doesn't generate\n # enough positive proposals to fill this and keep a positive:negative\n # ratio of 1:3. You can increase the number of proposals by adjusting\n # the RPN NMS threshold.\n TRAIN_ROIS_PER_IMAGE = 200\n\n # Percent of positive ROIs used to train classifier/mask heads\n ROI_POSITIVE_RATIO = 0.33\n\n # Pooled ROIs\n POOL_SIZE = 7\n MASK_POOL_SIZE = 14\n MASK_SHAPE = [28, 28]\n\n # Maximum number of ground truth instances to use in one image\n MAX_GT_INSTANCES = 100\n\n # Bounding box refinement standard deviation for RPN and final detections.\n RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])\n BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2])\n\n # Max number of final detections\n DETECTION_MAX_INSTANCES = 100\n\n # Minimum probability value to accept a detected instance\n # ROIs below this threshold are skipped\n DETECTION_MIN_CONFIDENCE = 0.5\n\n # Non-maximum suppression threshold for detection\n DETECTION_NMS_THRESHOLD = 0.3\n\n # Learning rate and momentum\n # The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes\n # weights to explode. Likely due to differences in optimzer\n # implementation.\n LEARNING_RATE = 0.001\n LEARNING_MOMENTUM = 0.9\n\n # Weight decay regularization\n WEIGHT_DECAY = 0.0001\n\n # Use RPN ROIs or externally generated ROIs for training\n # Keep this True for most situations. Set to False if you want to train\n # the head branches on ROI generated by code rather than the ROIs from\n # the RPN. For example, to debug the classifier head without having to\n # train the RPN.\n USE_RPN_ROIS = True\n\n # Train or freeze batch normalization layers\n # None: Train BN layers. This is the normal mode\n # False: Freeze BN layers. Good when using a small batch size\n # True: (don't use). Set layer in training mode even when inferencing\n TRAIN_BN = False # Defaulting to False since batch size is often small\n\n # Gradient norm clipping\n GRADIENT_CLIP_NORM = 5.0\n\n def __init__(self):\n \"\"\"Set values of computed attributes.\"\"\"\n # Effective batch size\n self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT\n\n # Input image size\n if self.IMAGE_RESIZE_MODE == \"crop\":\n self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM, 3])\n else:\n self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM, 3])\n\n # Image meta data length\n # See compose_image_meta() for details\n self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES\n\n def display(self):\n \"\"\"Display Configuration values.\"\"\"\n print(\"\\nConfigurations:\")\n for a in dir(self):\n if not a.startswith(\"__\") and not callable(getattr(self, a)):\n print(\"{:30} {}\".format(a, getattr(self, a)))\n print(\"\\n\")\n"
] | [
[
"numpy.array"
]
] |
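The `Config` class in the row above is explicitly meant to be subclassed rather than used directly: override only the attributes that change, and `__init__` derives `BATCH_SIZE` and `IMAGE_SHAPE` from them. An illustrative subclass (assuming the file above is importable as `config`; the class name and values are made up):

```python
# Sketch: subclass the Mask R-CNN base Config and let it compute the
# derived attributes (BATCH_SIZE = IMAGES_PER_GPU * GPU_COUNT, IMAGE_SHAPE).
from config import Config

class ShapesConfig(Config):
    NAME = "shapes"          # experiment name (Config requires an override)
    GPU_COUNT = 1
    IMAGES_PER_GPU = 8       # small images -> more fit per GPU
    NUM_CLASSES = 1 + 3      # background + 3 object classes
    IMAGE_MIN_DIM = 128
    IMAGE_MAX_DIM = 128

cfg = ShapesConfig()
cfg.display()                # prints all settings, incl. computed BATCH_SIZE = 8
```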
ogrenenmakine/VCL-PL-Semi-Supervised-Learning-from-Noisy-Web-Data-with-Variational-Contrastive-Learning | [
"baef25837ce7e073d03f69a095d1992aa18dd2d5",
"baef25837ce7e073d03f69a095d1992aa18dd2d5"
] | [
"recognition/alexnet_PD_finetuning.py",
"data/custom_dataset.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\nimport math\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nimport numpy as np\nfrom PIL import Image\nimport os\nimport matplotlib.pyplot as plt\nimport time\nfrom torchsummary import summary\nimport config\nfrom facenet_pytorch import training\nfrom torch.utils.data import DataLoader, SubsetRandomSampler\nfrom torch import optim\nfrom torch.optim.lr_scheduler import MultiStepLR\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torchvision import datasets, transforms\nfrom PIL import Image\nimport glob\nfrom utils.collate import collate_custom\nimport torchvision.models as models\nfrom util import AverageMeter, learning_rate_decay, Logger\nimport collections\n\n# In[ ]:\n\ntransform = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomApply([\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)\n ], p=0.8),\n transforms.RandomGrayscale(0.2),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n ])\n\n\n# Root directory for dataset\ndata_root = \"/home/mehmetyavuz/datasets/CelebA128/\"\nattr_root = \"/home/mehmetyavuz/datasets/list_attr_celeba.txt\"\n# Number of workers for dataloader\nworkers = 8\n\n# Batch size during training\nbatch_size = 64\n\n# Spatial size of training images. All images will be resized to this\n# size using a transformer.\nimage_size = (128,128)\nepochs = 100\n\n\n# In[ ]:\n\n\nclass CelebA(data.Dataset):\n def __init__(self, data_path, attr_path, image_size, mode, selected_attrs):\n super(CelebA, self).__init__()\n self.data_path = data_path\n att_list = open(attr_path, 'r', encoding='utf-8').readlines()[1].split()\n atts = [att_list.index(att) + 1 for att in selected_attrs]\n images = np.loadtxt(attr_path, skiprows=2, usecols=[0], dtype=np.str)\n labels = np.loadtxt(attr_path, skiprows=2, usecols=atts, dtype=np.int)\n \n self.tf = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n self.tf_a = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomApply([\n transforms.ColorJitter(hue=.05, saturation=.05),\n ], p=0.8),\n transforms.RandomGrayscale(0.2),\n ]) \n if mode == 'train':\n self.images = images[:1627]\n self.labels = labels[:1627]\n\n if mode == 'valid':\n self.images = images[162770:182637]\n self.labels = labels[162770:182637]\n\n if mode == 'test':\n self.images = images[182637:]\n self.labels = labels[182637:]\n \n self.length = len(self.images)\n def __getitem__(self, index):\n if index < 16277:\n img = self.tf(self.tf_a(Image.open(os.path.join(self.data_path, self.images[index]))))\n else:\n img = self.tf(Image.open(os.path.join(self.data_path, self.images[index])))\n att = torch.tensor((self.labels[index] + 1) // 2)\n return img, att.to(torch.float32)\n def __len__(self):\n return self.length\n\n\n# In[ ]:\n\n\nattrs_default = [\"5_o_Clock_Shadow\", \"Arched_Eyebrows\", \"Attractive\", \"Bags_Under_Eyes\", \"Bald\", \"Bangs\", \"Big_Lips\", \"Big_Nose\", \"Black_Hair\", \"Blond_Hair\", \"Blurry\", \"Brown_Hair\", \"Bushy_Eyebrows\", \"Chubby\", \"Double_Chin\", \"Eyeglasses\", \"Goatee\", \"Gray_Hair\", \"Heavy_Makeup\", \"High_Cheekbones\", \"Male\", \"Mouth_Slightly_Open\", \"Mustache\", \"Narrow_Eyes\", \"No_Beard\", 
\"Oval_Face\", \"Pale_Skin\", \"Pointy_Nose\", \"Receding_Hairline\", \"Rosy_Cheeks\", \"Sideburns\", \"Smiling\", \"Straight_Hair\", \"Wavy_Hair\", \"Wearing_Earrings\", \"Wearing_Hat\", \"Wearing_Lipstick\", \"Wearing_Necklace\", \"Wearing_Necktie\", \"Young\"]\n\n\n# In[ ]:\n\n\ndataset = CelebA(data_root, attr_root, image_size, 'train', attrs_default)\ntrain_loader = torch.utils.data.DataLoader(dataset, num_workers=workers, \n batch_size=batch_size, pin_memory=True, collate_fn=collate_custom,\n drop_last=True, shuffle=True)\ndataset = CelebA(data_root, attr_root, image_size, 'valid', attrs_default)\nval_loader = torch.utils.data.DataLoader(dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=workers)\ndataset = CelebA(data_root, attr_root, image_size, 'test', attrs_default)\ntest_loader = torch.utils.data.DataLoader(dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=workers)\n\n\n# In[ ]:\n\n\n# Decide which device we want to run on\ndevice = torch.device(\"cuda:0\")\n\n\n# In[ ]:\n\nresnet = models.__dict__['alexnet'](pretrained=True)\nresnet.classifier[6] = nn.Linear(4096,40,bias=True)\nresnet = torch.nn.DataParallel(resnet)\nresnet.cuda()\n\nresnet.load_state_dict(torch.load('alexnet_pseudolabeling_001_0_normal.pth'))\n\n\n# In[ ]:\n\n\noptimizer = optim.Adam(resnet.parameters(), lr=0.00001)\nscheduler = None\n\n\n# In[ ]:\n\n\nloss_fn = torch.nn.BCEWithLogitsLoss()\nmetrics = {\n 'acc': training.accuracy_ml\n} \n\n\n# In[ ]:\n\n\nprint('\\n\\nInitial')\nprint('-' * 10)\n\nval_loss = 1\nfor epoch in range(epochs):\n print('\\nEpoch {}/{}'.format(epoch + 1, epochs))\n print('-' * 10)\n\n resnet.train() \n training.pass_epoch(\n resnet, loss_fn, train_loader, optimizer, scheduler,\n batch_metrics=metrics, show_running=True, device=device,\n #writer=writer\n )\n \n #if epoch + 1 >= 30:\n resnet.eval()\n val_metrics = training.pass_epoch(\n resnet, loss_fn, val_loader,\n batch_metrics=metrics, show_running=True, device=device,\n #writer=writer\n )\n\n if val_metrics[0].item() < val_loss:\n val_loss = val_metrics[0].item()\n print('Test set Accuracy Lowest Validation Loss:')\n training.pass_epoch(\n resnet, loss_fn, test_loader,\n batch_metrics=metrics, show_running=True, device=device,\n #writer=writer\n )\n torch.save(resnet.state_dict(), \"alexnet_PD_001_0_normal.pth\")\n\n#writer.close()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n",
"\"\"\"\nAuthor: Wouter Van Gansbeke, Simon Vandenhende\nLicensed under the CC BY-NC 4.0 license (https://creativecommons.org/licenses/by-nc/4.0/)\n\"\"\"\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\n \nclass AugmentedDataset(Dataset):\n def __init__(self, dataset, transform):\n super(AugmentedDataset, self).__init__()\n self.dataset = dataset\n \n if isinstance(transform, dict):\n self.image_transform = transform['standard']\n try:\n self.waugmentation_transform = transform['waugment']\n except:\n self.waugmentation_transform = transform['standard']\n else:\n print('Transform is not a dictionary!')\n \n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n sample = {}\n image, label = self.dataset.__getitem__(index)\n \n sample['image'] = self.image_transform(image)\n sample['image_waugmented'] = self.waugmentation_transform(image)\n sample['target'] = label\n\n return sample\n \nclass NeighborsDataset(Dataset):\n def __init__(self, dataset, indices, num_neighbors=None, transform=None):\n super(NeighborsDataset, self).__init__()\n \n self.anchor_transform = transform['standard']\n self.neighbor_transform = transform['augment']\n \n dataset.transform = None\n self.dataset = dataset\n self.indices = indices # Nearest neighbor indices (np.array [len(dataset) x k])\n if num_neighbors is not None:\n self.indices = self.indices[:, :num_neighbors+1]\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n output = {}\n anchor = self.dataset.__getitem__(index)\n \n neighbor_index = np.random.choice(self.indices[index], 1)[0]\n neighbor = self.dataset.__getitem__(neighbor_index)\n\n output['anchor'] = self.anchor_transform(anchor['image'])\n output['neighbor'] = self.anchor_transform(neighbor['image'])\n output['target'] = anchor['target']\n \n return output"
] | [
[
"torch.load",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.nn.Linear",
"torch.nn.BCEWithLogitsLoss",
"torch.device",
"torch.nn.DataParallel",
"numpy.loadtxt"
],
[
"numpy.random.choice"
]
] |
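`NeighborsDataset` in the row above pairs each anchor sample with one neighbor drawn uniformly from a precomputed nearest-neighbor index matrix via `np.random.choice(self.indices[index], 1)[0]`. A minimal standalone sketch of that sampling step (the random stand-in for the kNN matrix and the helper name are hypothetical):

```python
# Sketch: given an (N x k) nearest-neighbor index matrix, pick one random
# neighbor per anchor, mirroring NeighborsDataset.__getitem__.
import numpy as np

rng = np.random.default_rng(0)
num_samples, k = 5, 3
indices = rng.integers(0, num_samples, size=(num_samples, k + 1))  # stand-in kNN indices

def sample_pair(index):
    neighbor_index = np.random.choice(indices[index], 1)[0]  # same call as above
    return index, neighbor_index

print([sample_pair(i) for i in range(num_samples)])  # (anchor, neighbor) pairs
```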
banboooo044/optimization | [
"a15614b367712d6046311eac311214d27999fc7c"
] | [
"module/LP.py"
] | [
"# date : 2/11/2019\n# author : takeshi\nimport pandas as pd\nimport numpy as np\nfrom IPython.display import display\n\ndef linprog(c,A,comp,b,maximize=True):\n '''\n Maximize(or Minimize) a linear objective function subject to linear equality and inequality constraints.\n\n Linear Programming is intended to solve the following problem form:\n\n Maximize: c * x\n\n Subject to: A * x [comp] b , (x >= 0)\n\n Parameters\n ----------\n c : array_like\n Coefficients of the linear objective function to be maximized.\n A : array_like\n 2-D array which, when matrix-multiplied by x, \n gives the values of constraints at x.\n comp : array_like\n 1-D array of values representing a sign of equality in each constraint (row).\n if value is -1 , it means (<=)\n if value is 0 , it means (=)\n if value is 1 , it means (=>)\n b : array_like\n 1-D array of values representing the RHS of each constraint (row).\n\n maximize : bool, optional\n If True, the linear objective function is to be maximized.\n If False, the linear objective function is to be minimized.\n (the default is True)\n \n Returns\n -------\n pandas.DataFrame\n final simplex table. \n Optimal solution is table['Values'] , and Optimal value is table['z','Values'].\n if x is (1 * n) matrix , x_i ( i >= n ) is Slack Variable.\n '''\n\n # optimize\n def optimize(table,target):\n if not __debug__:\n if target == 'w':\n print(\"Phase 1 : find initial solution\")\n else:\n if maximize:\n print(\"Phase 2 : Maximize the liner objective function\")\n else:\n print(\"Phase 2 : Minimize the liner objective function\")\n baseIndex = table.index.values\n nonBaseIndex = np.setdiff1d(np.vectorize(lambda i : 'x' + str(i))(np.arange(len(table.columns)-1)) ,baseIndex)\n for i in range(100000):\n if not __debug__:\n print(\"roop {0}\".foramt(i))\n display(table)\n nonBaseTable = table.loc[target,nonBaseIndex]\n if ((nonBaseTable < -1e-8).values.sum()) == 0:\n return table\n # 新たな基底変数\n nextIndex = (nonBaseTable.map(lambda x: -x)).idxmax(axis=1)\n # 取り替えられる基底変数\n idx = table.index.get_loc(target)\n tmpLine = (table['Value'].iloc[:idx] / table.loc[ : ,nextIndex].iloc[:idx] )\n prevIndex = str(tmpLine.map(lambda x: float('inf') if x < 0 else x ).idxmin())\n nonBaseIndex[np.where(nonBaseIndex == nextIndex)] = prevIndex\n table = table.rename(index={prevIndex : nextIndex})\n table.loc[nextIndex] /= table.at[nextIndex,nextIndex]\n pivotLine = table.loc[nextIndex]\n unPivotIndex = list(table.index.drop(nextIndex))\n table.loc[unPivotIndex] = table.loc[unPivotIndex].apply(lambda x: x - (x.at[nextIndex]*pivotLine) ,axis=1)\n\n print(\"cannot find base solutions\")\n\n if not maximize: \n c = (-c)\n n,m = A.shape\n slackVariableNum = 0\n artificialVariableNum = 0\n slackVariable = [0] * n\n artificialVariable = [0] * n\n for i in range(n):\n # bの値を全て正の値にしておく\n if b[i] < 0:\n A[i] = -A[i]\n comp[i] = -comp[i]\n b[i] = -b[i]\n # < ( -> スラック変数を導入 )\n if comp[i] == -1:\n slackVariableNum += 1\n slackVariable[i] = 1\n # = ( -> 人為変数を導入 )\n elif comp[i] == 0:\n artificialVariableNum += 1\n artificialVariable[i] = 1\n # > ( -> スラック変数,人為変数を導入 )\n else:\n slackVariableNum += 1\n artificialVariableNum += 1\n slackVariable[i] = -1\n artificialVariable[i] = 1\n\n variableNum = c.shape[0] + slackVariableNum + artificialVariableNum\n addVariableNum = slackVariableNum + artificialVariableNum\n\n # Valueを求める.\n baseIndex = np.empty(n)\n baseValue = np.empty(n)\n A_ = np.append(A , np.zeros((n,addVariableNum)),axis=1)\n slackIter = c.shape[0] \n artificialIter = c.shape[0] + 
slackVariableNum\n\n # (スラック変数 < 人為変数) の優先順位で基底変数に選ぶ.\n # すると , i 本目の制約条件式のみに登場する変数を選ぶことができる.\n # baseIndex[i] := i 本目の制約条件式のみに登場する変数の番号\n # baseValue[i] := i本目の制約条件式のみに登場する変数の値 ( = Value = b[i] ) となる.\n for i in range(n):\n if slackVariable[i] != 0:\n A_[i,slackIter] = slackVariable[i]\n # 1の場合\n if slackVariable[i] > 0:\n baseIndex[i],baseValue[i] = slackIter, b[i]\n slackIter += 1\n \n if artificialVariable[i] != 0:\n A_[i,artificialIter] = artificialVariable[i]\n baseIndex[i],baseValue[i] = artificialIter, b[i]\n artificialIter += 1 \n\n # フェーズ1 (Valueを見つける)\n # 目的関数の値をzとおく\n # Valueの列を追加\n exA = np.append(baseValue.reshape(n,1),A_,axis=1)\n # zの行を追加\n c_ = np.array([0]*(c.shape[0] + slackVariableNum) + [-1]*(artificialVariableNum))\n c_ = c_[np.vectorize(int)(baseIndex)]\n w = (c_ @ exA).reshape(1,variableNum+1)\n z = np.append(np.append(np.zeros(1),-c),np.array([0]*addVariableNum)).reshape(1,variableNum+1)\n table = np.append(np.append(exA,w,axis=0),z,axis=0)\n # データフレームにする\n df = pd.DataFrame(table,\n columns=['Value']+[ 'x' + str(i) for i in range(variableNum)],\n index= list(np.vectorize(lambda i: 'x' + str(int(i)))(baseIndex)) + ['w','z']\n )\n table = optimize(df,'w')\n if artificialVariableNum != 0:\n table = table.iloc[:,:-artificialVariableNum]\n variableNum -= artificialVariableNum\n table = table.drop('w')\n result = optimize(table,'z')\n if not maximize:\n result['Value']['z'] = -result['Value']['z']\n return result\n\n## Example\nif __name__ == '__main__':\n # maximize 2 * x_0 + 3 * x_1\n # constraints : \n # 1 * x_0 + 2 * x_1 <= 10\n # 2 * x_0 + 1 * x_0 <= 8\n # ( x_0 >= 0 , x_1 >= 0)\n\n c = np.array([ 2,3])\n A = np.array([ [1,2],\n [2,1] ])\n comp = np.array([-1,-1])\n b = np.array([10,8])\n\n # solve\n df = linprog(c,A,comp,b,True)\n # result\n print(df)\n "
] | [
[
"numpy.append",
"numpy.vectorize",
"numpy.array",
"numpy.zeros",
"numpy.where",
"numpy.empty"
]
] |
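The `__main__` block of `LP.py` above maximizes `2*x0 + 3*x1` subject to `x0 + 2*x1 <= 10` and `2*x0 + x1 <= 8`. As an independent cross-check (not part of the repository), the same problem can be handed to SciPy's `linprog`, which minimizes, so the objective is negated; both should report the optimum 16 at `x = (2, 4)`:

```python
# Sketch: verify the LP.py example with scipy.optimize.linprog.
import numpy as np
from scipy.optimize import linprog

c = np.array([2, 3])                 # maximize 2*x0 + 3*x1
A_ub = np.array([[1, 2], [2, 1]])
b_ub = np.array([10, 8])

res = linprog(-c, A_ub=A_ub, b_ub=b_ub, bounds=[(0, None), (0, None)])
print(res.x, -res.fun)               # approx. [2. 4.] 16.0
```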
qftphys/Software-for-visualising-magnetic-layers | [
"7e4c5680b8e87aa677bdf4c912cbccdcb11b09a3",
"7e4c5680b8e87aa677bdf4c912cbccdcb11b09a3",
"7e4c5680b8e87aa677bdf4c912cbccdcb11b09a3"
] | [
"Widgets/openGL_widgets/VectorGLContext.py",
"multiprocessing_parse.py",
"Widgets/plot_widgets/CanvasLayer.py"
] | [
"from PyQt5.QtWidgets import QWidget\n\nfrom Widgets.openGL_widgets.AbstractGLContext import AbstractGLContext\n\nfrom ColorPolicy import ColorPolicy\n\nfrom ctypes import c_void_p\nfrom PyQt5.Qt import Qt\nfrom PyQt5.QtCore import QPoint, QThread\n\nfrom cython_modules.color_policy import multi_iteration_normalize\nfrom pattern_types.Patterns import AbstractGLContextDecorators\n\nimport numpy as np\nimport OpenGL.GLU as glu\nimport OpenGL.GL as gl\nimport math as mt\nfrom multiprocessing import Pool\nfrom ColorPolicy import ColorPolicy\n\nclass VectorGLContext(AbstractGLContext, QWidget):\n def __init__(self, data_dict):\n super().__init__()\n super().shareData(**data_dict)\n self.prerendering_calculation()\n # self.drawing_function = self.slow_arrow_draw\n self.drawing_function = self.vbo_arrow_draw\n\n def prerendering_calculation(self):\n super().prerendering_calculation()\n if self.normalize:\n VectorGLContext.normalize_specification(self.color_vectors, vbo=True)\n self.interleaved = ColorPolicy.apply_vbo_interleave_format(self.vectors_list,\n self.color_vectors)\n self.buffers = None\n ## pad the color\n self.color_vectors = ColorPolicy.apply_vbo_format(self.color_vectors, k=2)\n self.color_vertices = len(self.vectors_list)\n self.vertices = self.color_vertices*2\n self.color_buffer_len = len(self.color_vectors[0])*4\n self.inter_buffer_len = len(self.interleaved[0])*4\n\n self.__FLOAT_BYTE_SIZE__ = 8\n\n @AbstractGLContextDecorators.recording_decorator\n def slow_arrow_draw(self):\n gl.glLineWidth(2*self.scale)\n gl.glPointSize(3*self.scale)\n for vector, color in zip(self.vectors_list,\n self.color_vectors[self.i]):\n if not np.any(color):\n continue\n self.base_arrow(vector, color)\n\n def base_arrow(self, vector, color):\n gl.glColor3f(*color)\n gl.glBegin(gl.GL_LINES)\n gl.glVertex3f(*vector)\n gl.glVertex3f(vector[0]+color[0], vector[1]+color[1],\n vector[2]+color[2])\n gl.glEnd()\n gl.glBegin(gl.GL_POINTS)\n gl.glVertex3f(vector[0]+color[0], vector[1]+color[1],\n vector[2]+color[2])\n gl.glEnd()\n\n def standard_vbo_draw(self):\n gl.glEnableClientState(gl.GL_COLOR_ARRAY)\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers[1])\n gl.glColorPointer(3, gl.GL_FLOAT, 0, None)\n\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers[0])\n gl.glVertexPointer(3, gl.GL_FLOAT, 0, None)\n gl.glDrawArrays(gl.GL_LINES, 0, int(self.vertices))\n\n # now the points\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers[1])\n gl.glColorPointer(3, gl.GL_FLOAT, 3*self.__FLOAT_BYTE_SIZE__, None)\n\n # stride is 3 bytes (3 floats) VVVCCCVVVCCC etc...\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers[0])\n # offset is at 3 indices, so points at 4th vector 3(vertices)*4\n gl.glVertexPointer(3, gl.GL_FLOAT, 3*self.__FLOAT_BYTE_SIZE__,\n c_void_p(4*3))\n gl.glDrawArrays(gl.GL_POINTS, 0, int(self.color_vertices))\n\n gl.glDisableClientState(gl.GL_COLOR_ARRAY)\n gl.glDisableClientState(gl.GL_VERTEX_ARRAY)\n\n\n def vbo_arrow_draw(self):\n if self.buffers is None:\n self.buffers = self.create_vbo()\n else:\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers[0])\n gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.inter_buffer_len,\n np.array(self.interleaved[self.i],\n dtype='float32').flatten())\n\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers[1])\n gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.color_buffer_len,\n np.array(self.color_vectors[self.i],\n dtype='float32').flatten())\n\n self.standard_vbo_draw()\n\n def create_vbo(self):\n buffers = gl.glGenBuffers(2)\n 
gl.glLineWidth(2*self.scale)\n gl.glPointSize(3*self.scale)\n # vertices buffer\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, buffers[0])\n gl.glBufferData(gl.GL_ARRAY_BUFFER,\n np.array(self.interleaved[self.i],\n dtype='float32').flatten(),\n gl.GL_DYNAMIC_DRAW)\n # color buffer\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, buffers[1])\n gl.glBufferData(gl.GL_ARRAY_BUFFER,\n np.array(self.color_vectors[self.i],\n dtype='float32').flatten(),\n gl.GL_DYNAMIC_DRAW)\n return buffers\n",
"import numpy as np\nimport os\nimport glob\nfrom multiprocessing import Pool\nfrom cython_modules.cython_parse import *\nfrom binaryornot.check import is_binary\nimport re\n\ndef asynchronous_pool_order(func, args, object_list, timeout=20):\n pool = Pool()\n output_list = []\n multiple_results = [pool.apply_async(func, (object_list[i], *args))\n for i in range(len(object_list))]\n for result in multiple_results:\n output_list.append(result.get(timeout=timeout))\n return output_list\n\nclass MultiprocessingParse:\n @staticmethod\n def compose_trigger_list(files, plot_data):\n \"\"\"\n \"\"\"\n # TODO: FIND A DRIVER NAMES AND IMPLEMENT THEM IF THERE ARE OTHERS\n driver_class = 'MinDriver'\n match_string = '(^.*)(Oxs_' + driver_class + \\\n '-Magnetization-)([0-9]{2})(-)(.*)(.omf)'\n regex = re.compile(match_string)\n st = []\n # probe file\n filename = files[0]\n column_name = None\n try:\n m = regex.search(os.path.basename(filename))\n if m is None:\n raise AttributeError\n column_name = driver_class +'::Iteration'\n except AttributeError:\n driver_class = 'TimeDriver'\n match_string = '(^.*)(Oxs_' + driver_class + \\\n '-Magnetization-)([0-9]{2})(-)(.*)(.omf)'\n column_name = driver_class +'::Iteration'\n regex = re.compile(match_string)\n for filename in files:\n m = regex.search(os.path.basename(filename))\n if m is not None:\n st.append(int(m.groups()[4]))\n else:\n print(filename)\n trigger_list = plot_data.index[plot_data[column_name].isin(st)]\n try:\n assert len(files) == len(trigger_list)\n except AssertionError:\n # duplicates appeared, take first and drop rest\n unique_stages = plot_data[column_name][~plot_data[column_name].duplicated(keep='first')]\n trigger_list = unique_stages.index[unique_stages.isin(st)]\n return trigger_list\n\n @staticmethod\n def guess_file_type(directory):\n supported_extensions = [('.omf', '*.odt'), ('.ovf', '.notsuppyet')]\n voted_extension = None\n files_in_directory = os.listdir(directory)\n # NOTE: decide what extension is found in directory\n # could be both .omf or .ovf but not mixed\n # omit .odt files if not really specified\n for filename in files_in_directory:\n for file_ext in supported_extensions:\n if filename.endswith(file_ext[0]):\n voted_extension = file_ext\n break\n if voted_extension is not None:\n break\n\n # loop may end and the value may still be None,\n # this means invalid directory\n # tbh I am not sure but it helps fix issue\n if voted_extension is None:\n raise ValueError(\"Invalid Directory\")\n\n print(\"SUPPORTED EXTENSION DETECTED {}\".format(voted_extension))\n files_in_directory = [os.path.join(directory, filename)\n for filename in files_in_directory\n if filename.endswith(voted_extension)]\n files_in_directory = sorted(files_in_directory)\n return files_in_directory, voted_extension\n\n @staticmethod\n def readFile(path):\n \"\"\"\n Function loads one selected file.\n :param path: path to file which user wants to load (String)\n :return: depends on filetype:\n if .odt - odt_data, stages\n if .omf || .ovf - rawVectorData, header\n \"\"\"\n if \".odt\" in path:\n odt_data, stages = getPlotData(path)\n return odt_data, stages\n\n elif \".omf\" in path or \".ovf\" in path:\n rawVectorData = None\n if is_binary(path):\n headers, rawVectorData = MultiprocessingParse.readBinary([path])\n header = headers[0]\n elif not is_binary(path):\n rawVectorData = MultiprocessingParse.readText([path])\n header = getFileHeader(path)\n else:\n raise RuntimeError(\"multiprocessing_parse.py readFile:\" +\\\n \" Can't detect encoding!\")\n 
return rawVectorData, header\n else:\n raise ValueError(\"Invalid file! Must have .odt, .omf \" + \\\n \"or .ovf extension!\")\n\n\n @staticmethod\n def readFolder(directory, multipleFileHeaders=False):\n \"\"\"\n dumps process-ready format from directory\n Returns raw numpy array of vectors, file_header_files and odt data for\n 2d plotting\n :param directory\n :return rawVectorData, file_headers, getPlotData\n \"\"\"\n\n files_in_directory, ext = MultiprocessingParse.guess_file_type(\n directory)\n ext_files = glob.glob(os.path.join(directory, '*' + ext[0]))\n test_file = os.path.join(directory, ext_files[0])\n\n stages = len(ext_files)\n plot_file = glob.glob(os.path.join(directory, ext[1]))\n # look for .odt or .txt in current directory\n if len(plot_file) > 1:\n raise ValueError(\"plot file extension conflict (too many)\")\n #TODO error window\n elif not plot_file or plot_file is None:\n plot_data = None\n plot_file = None\n\n # NOTE: this should recognize both .omf and .ovf files\n trigger_list = None\n if plot_file is not None:\n plot_data, stages0 = getPlotData(plot_file[0])\n print(stages0, stages)\n if stages0 != stages:\n if stages0 > stages:\n trigger_list = MultiprocessingParse.\\\n compose_trigger_list(ext_files,\n plot_data)\n stages = len(trigger_list)\n print(trigger_list)\n print(\"TRIGGER LIST : {}, {}\".format(stages,\n len(trigger_list)))\n elif stages0 < stages:\n raise ValueError(\"Odt cannot have fewer stages that files\")\n else:\n plot_data = None\n\n if not is_binary(test_file):\n rawVectorData = MultiprocessingParse.readText(files_in_directory)\n file_for_header = glob.glob(os.path.join(directory, '*' + ext[0]))\n # virtually any will do\n if not file_for_header:\n raise ValueError(\"no .omf or .ovf file has been found\")\n header = getFileHeader(file_for_header[0])\n else:\n headers, rawVectorData = MultiprocessingParse.readBinary(\n files_in_directory)\n header = headers[0]\n if not header:\n raise ValueError(\"no .omf or .ovf file has been found\")\n return rawVectorData, header, plot_data, stages, trigger_list\n\n @staticmethod\n def readBinary(files_in_directory):\n \"\"\"\n :param files_in_directory: is a list of binary filenames\n in a directory\n :return numpy array of vectors form .omf files\n \"\"\"\n text_pool = Pool()\n\n output = asynchronous_pool_order(binary_format_reader, (),\n files_in_directory)\n output = np.array(output)\n headers = output[:, 0]\n rawVectorData = output[:, 1]\n # test this solution, turn dtype object to float64\n rawVectorData = np.array([x for x in rawVectorData], dtype=np.float32)\n\n if rawVectorData is None or headers is None:\n raise TypeError(\"\\nNo vectors created\")\n\n assert rawVectorData.dtype == np.float32\n return headers, rawVectorData\n\n @staticmethod\n def readText(files_in_directory):\n \"\"\"\n :param files_in_directory: is a list of text filenames in a directory\n :return numpy array of vectors form .omf files\n \"\"\"\n # use multiprocessing\n text_pool = Pool()\n rawVectorData = []\n rawVectorData = asynchronous_pool_order(getRawVectors, (),\n files_in_directory,\n timeout=20)\n if not rawVectorData:\n raise TypeError(\"\\nNo vectors created\")\n rawVectorData = np.array(rawVectorData, dtype=np.float32)\n assert rawVectorData.dtype == np.float32\n return rawVectorData\n",
"from matplotlib import cm\nimport numpy as np\n\nfrom ColorPolicy import ColorPolicy\nfrom Widgets.plot_widgets.AbstractCanvas import AbstractCanvas\nfrom multiprocessing_parse import asynchronous_pool_order\nfrom cython_modules.color_policy import multi_iteration_normalize, \\\n multi_iteration_dot_product\n\n\nclass CanvasLayer(AbstractCanvas):\n def __init__(self, data_dict):\n super().__init__(self)\n super().shareData(**data_dict)\n super().receivedOptions()\n self.handleOptionalData()\n self.createPlotCanvas()\n\n def handleOptionalData(self):\n # must handle iterations since these are optional\n try:\n getattr(self, 'iterations')\n except NameError:\n self.iterations = 1\n finally:\n if self.iterations is None:\n self.iterations = 1\n\n def createPlotCanvas(self):\n self.xc = int(self.file_header['xnodes'])\n self.yc = int(self.file_header['ynodes'])\n self.zc = int(self.file_header['znodes'])\n\n self.title = 'Displayed layer {}'.format(self.layer)\n self.i = self.current_state\n dx, dy = self.reshape_data()\n self.fig.suptitle(self.title)\n self.plot_axis = self.fig.add_subplot(111)\n color_array = self.color_vectors[self.i].astype(float)\n self.color_vectors = self.color_vectors.reshape(self.iterations,\n self.xc*self.yc)\n\n scat = self.plot_axis.scatter(dx, dy, c=color_array, cmap=cm.jet)\n self.plot_axis.hpl = scat\n self.fig.colorbar(self.plot_axis.hpl)\n self.plot_axis.axis('scaled')\n self.plot_axis.axis([0, len(dx), 0, len(dy)])\n self.plot_axis.set_autoscale_on(False)\n self.plot_axis.set_title('{}/{}'.format(self.i, self.iterations))\n\n def replot(self):\n self.plot_axis.hpl.set_array(self.color_vectors[self.i])\n self.plot_axis.set_title('{}/{}'.format(self.i, self.iterations))\n\n def reshape_data(self):\n \"\"\"\n reshaping the data so that plotting might happen faster\n \"\"\"\n if self.normalize:\n multi_iteration_normalize(self.color_vectors)\n # dot product\n self.color_vectors = np.array(self.color_vectors)\n self.color_vectors = self.color_vectors.reshape(self.iterations,\n self.zc, self.yc,\n self.xc, 3)\n self.color_vectors = self.color_vectors[:, self.layer, :, :, :]\n self.color_vectors = self.color_vectors.reshape(self.iterations,\n self.xc*self.yc, 3)\n self.color_vectors = asynchronous_pool_order(CanvasLayer.calculate_layer_colors,\n (self.vector_set,),\n self.color_vectors)\n self.color_vectors = np.array(self.color_vectors, dtype=np.float)\n try:\n assert self.color_vectors.shape == (self.iterations, self.xc, self.yc)\n except AssertionError:\n self.color_vectors = self.color_vectors.reshape(self.iterations,\n self.xc, self.yc)\n x = np.linspace(0, self.xc, self.xc)\n y = np.linspace(0, self.yc, self.yc)\n dx, dy = np.meshgrid(x, y)\n return dx, dy\n\n @staticmethod\n def calculate_layer_colors(x, relative_vector=[0, 1, 0], scale=1):\n dot = np.array([np.inner(i, relative_vector) for i in x])\n angle = np.arccos(dot) ** scale\n angle[np.isnan(angle)] = 0 # get rid of NaN expressions\n return angle\n\n def set_i(self, value, trigger=False):\n if trigger:\n self.i += 1\n else:\n self.i = value\n self.i %= self.iterations\n self.replot()\n self.plot_axis.get_figure().canvas.draw()\n"
] | [
[
"numpy.array",
"numpy.any"
],
[
"numpy.array"
],
[
"numpy.inner",
"numpy.linspace",
"numpy.meshgrid",
"numpy.isnan",
"numpy.arccos",
"numpy.array"
]
] |
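`multiprocessing_parse.py` in the row above relies on an order-preserving pool helper: `apply_async` returns `AsyncResult` objects immediately, and collecting `.get()` in submission order keeps the outputs aligned with the input file list even though workers finish out of order. A runnable standalone sketch of that pattern (the `square` worker is a toy stand-in for the OMF/OVF parsers):

```python
# Sketch: order-preserving parallel map, as in asynchronous_pool_order.
from multiprocessing import Pool

def square(x):
    return x * x

def asynchronous_pool_order(func, args, object_list, timeout=20):
    with Pool() as pool:
        # submit everything first, then collect in submission order
        results = [pool.apply_async(func, (obj, *args)) for obj in object_list]
        return [r.get(timeout=timeout) for r in results]

if __name__ == "__main__":          # guard required for spawn-based platforms
    print(asynchronous_pool_order(square, (), [1, 2, 3, 4]))  # [1, 4, 9, 16]
```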
BitJetKit/universe | [
"cc9ce6ec241821bfb0f3b85dd455bd36e4ee7a8c"
] | [
"universe/rewarder/rewarder_session.py"
] | [
"from autobahn.twisted import websocket\nimport logging\nimport numpy as np\nimport threading\nimport time\n\nfrom twisted.python import failure\nfrom twisted.internet import defer, endpoints\nimport twisted.internet.error\n\nfrom universe import utils\nfrom universe.twisty import reactor\nfrom universe.rewarder import connection_timer, env_status, reward_buffer, rewarder_client\nfrom universe.utils import display\n\nlogger = logging.getLogger(__name__)\nextra_logger = logging.getLogger('universe.extra.'+__name__)\n\ndef _ping(client):\n return client.send('v0.control.ping', {}, expect_reply=True)\n\nclass RewarderSession(object):\n def __init__(self):\n self.lock = threading.RLock()\n\n self.i = 0\n\n # Mutated by main thread exclusively\n self.names_by_id = {}\n self.reward_buffers = {}\n self.env_statuses = {}\n self.errors = {}\n self.networks = {}\n\n self.clients = {}\n\n def close(self, name=None, reason=u'closed by RewarderSession.close'):\n if name is None:\n names = list(self.names_by_id.values())\n else:\n logger.info('[%s] Closing rewarder connection', name)\n names = [name]\n self.ids_by_name = {name: id for id, name in self.names_by_id.items()}\n\n for name in names:\n with self.lock:\n id = self.ids_by_name.pop(name, None)\n if id is None:\n # already closed\n continue\n\n del self.names_by_id[id]\n del self.reward_buffers[id]\n del self.env_statuses[id]\n self.errors.pop(id, None)\n\n network = self.networks.pop(id)\n network.close()\n\n client = self.clients.pop(id, None)\n if client is not None:\n reactor.callFromThread(client.close, reason=reason)\n\n def connect(self, name, address, label, password, env_id=None, seed=None, fps=60,\n start_timeout=None, observer=False, skip_network_calibration=False):\n if name in self.reward_buffers:\n self.close(name, reason='closing previous connection to reconnect with the same name')\n\n network = Network()\n self.names_by_id[self.i] = name\n self.reward_buffers[self.i] = reward_buffer.RewardBuffer(label)\n self.env_statuses[self.i] = env_status.EnvStatus(label=label, primary=False)\n self.networks[self.i] = network\n\n reactor.callFromThread(self._connect,\n name=name,\n address=address,\n env_id=env_id,\n seed=seed,\n fps=fps,\n i=self.i,\n network=network,\n env_status=self.env_statuses[self.i],\n reward_buffer=self.reward_buffers[self.i],\n label=label,\n start_timeout=start_timeout,\n password=password,\n observer=observer,\n skip_network_calibration=skip_network_calibration,\n )\n self.i += 1\n return network\n\n def _already_closed(self, i):\n # Lock must be held\n return i not in self.names_by_id\n\n # Call only from Twisted thread\n\n # TODO: probably time to convert to kwargs\n @defer.inlineCallbacks\n def _connect(self, name, address, env_id, seed, fps, i, network, env_status, reward_buffer,\n label, password, start_timeout,\n observer, skip_network_calibration,\n attempt=0, elapsed_sleep_time=0,\n ):\n endpoint = endpoints.clientFromString(reactor, 'tcp:'+address)\n factory = websocket.WebSocketClientFactory('ws://'+address)\n factory.protocol = rewarder_client.RewarderClient\n\n assert password, \"Missing password: {} for rewarder session\".format(password)\n factory.headers = {'authorization': utils.basic_auth_encode(password), 'openai-observer': 'true' if observer else 'false'}\n factory.i = i\n\n # Various important objects\n factory.endpoint = endpoint\n factory.env_status = env_status\n factory.reward_buffer = reward_buffer\n\n # Helpful strings\n factory.label = label\n factory.address = address\n\n # Arguments to 
always send to the remote reset call\n factory.arg_env_id = env_id\n factory.arg_fps = fps\n\n def record_error(e):\n if isinstance(e, failure.Failure):\n e = e.value\n\n # logger.error('[%s] Recording rewarder error: %s', factory.label, e)\n with self.lock:\n # drop error on the floor if we're already closed\n if self._already_closed(factory.i):\n extra_logger.info('[%s] Ignoring error for already closed connection: %s', label, e)\n elif factory.i not in self.clients:\n extra_logger.info('[%s] Received error for connection which has not been fully initialized: %s', label, e)\n # We could handle this better, but right now we\n # just mark this as a fatal error for the\n # backend. Often it actually is.\n self.errors[factory.i] = e\n else:\n extra_logger.info('[%s] Recording fatal error for connection: %s', label, e)\n self.errors[factory.i] = e\n\n def retriable_error(e, error_message):\n if isinstance(e, failure.Failure):\n e = e.value\n\n if self._already_closed(factory.i):\n logger.error('[%s] Got error, but giving up on reconnecting, since %d already disconnected', factory.label, factory.i)\n return\n\n # Also need to handle DNS errors, so let's just handle everything for now.\n #\n # reason.trap(twisted.internet.error.ConnectError, error.ConnectionError)\n if elapsed_sleep_time < start_timeout:\n sleep = min((2 * attempt+1), 10)\n logger.error('[%s] Waiting on rewarder: %s. Retry in %ds (slept %ds/%ds): %s', factory.label, error_message, sleep, elapsed_sleep_time, start_timeout, e)\n reactor.callLater(\n sleep, self._connect, name=name, address=address,\n env_id=env_id, seed=seed, fps=fps, i=i, network=network,\n env_status=env_status, reward_buffer=reward_buffer, label=label,\n attempt=attempt+1, elapsed_sleep_time=elapsed_sleep_time+sleep,\n start_timeout=start_timeout, password=password,\n observer=observer, skip_network_calibration=skip_network_calibration,\n )\n else:\n logger.error('[%s] %s. Retries exceeded (slept %ds/%ds): %s', factory.label, error_message, elapsed_sleep_time, start_timeout, e)\n record_error(e)\n\n factory.record_error = record_error\n\n try:\n retry_msg = 'establish rewarder TCP connection'\n client = yield endpoint.connect(factory)\n extra_logger.info('[%s] Rewarder TCP connection established', factory.label)\n\n retry_msg = 'complete WebSocket handshake'\n yield client.waitForWebsocketConnection()\n extra_logger.info('[%s] Websocket client successfully connected', factory.label)\n\n if not skip_network_calibration:\n retry_msg = 'run network calibration'\n yield network.calibrate(client)\n extra_logger.info('[%s] Network calibration complete', factory.label)\n\n retry_msg = ''\n\n if factory.arg_env_id is not None:\n # We aren't picky about episode ID: we may have\n # already received an env.describe message\n # telling us about a resetting environment, which\n # we don't need to bump past.\n #\n # tl;dr hardcoding '0' here avoids a double reset.\n reply = yield self._send_env_reset(client, seed=seed, episode_id='0')\n else:\n # No env_id requested, so we just proceed without a reset\n reply = None\n # We're connected and have measured the\n # network. 
Mark everything as ready to go.\n with self.lock:\n if factory.i not in self.names_by_id:\n # ID has been popped!\n logger.info('[%s] Rewarder %d started, but has already been closed', factory.label, factory.i)\n client.close(reason='RewarderSession: double-closing, client was closed while RewarderSession was starting')\n elif reply is None:\n logger.info('[%s] Attached to running environment without reset', factory.label)\n else:\n context, req, rep = reply\n logger.info('[%s] Initial reset complete: episode_id=%s', factory.label, rep['headers']['episode_id'])\n self.clients[factory.i] = client\n except Exception as e:\n if retry_msg:\n retriable_error(e, 'failed to ' + retry_msg)\n else:\n record_error(e)\n\n def pop_errors(self):\n errors = {}\n with self.lock:\n if self.errors:\n for i, error in self.errors.items():\n name = self.names_by_id[i]\n errors[name] = error\n self.errors.clear()\n return errors\n\n def reset(self, seed=None, env_id=None):\n with self.lock:\n for i, reward_buffer in self.reward_buffers.items():\n reward_buffer.mask()\n reactor.callFromThread(self._reset, seed=seed, env_id=env_id)\n\n def _reset(self, seed=None, env_id=None):\n with self.lock:\n for client in self.clients.values():\n d = self._send_env_reset(client, seed=seed, env_id=env_id)\n # Total hack to capture the variable in the closure\n def callbacks(client):\n def success(reply): pass\n def fail(reason): client.factory.record_error(reason)\n return success, fail\n success, fail = callbacks(client)\n d.addCallback(success)\n d.addErrback(fail)\n\n def _send_env_reset(self, client, seed=None, episode_id=None, env_id=None):\n if episode_id is None:\n episode_id = client.factory.env_status.episode_id\n logger.info('[%s] Sending reset for env_id=%s fps=%s episode_id=%s', client.factory.label, client.factory.arg_env_id, client.factory.arg_fps, episode_id)\n return client.send_reset(\n env_id=client.factory.arg_env_id if env_id is None else env_id,\n seed=seed,\n fps=client.factory.arg_fps,\n episode_id=episode_id)\n\n def pop(self, warn=True, peek_d=None):\n reward_d = {}\n done_d = {}\n info_d = {}\n err_d = self.pop_errors()\n\n for i, reward_buffer in self.reward_buffers.items():\n name = self.names_by_id[i]\n\n reward, done, info = reward_buffer.pop(peek_d.get(name) if peek_d else None)\n reward_d[name] = reward\n done_d[name] = done\n info_d[name] = info\n\n # TODO: use FPS here rather than 60\n if warn and any(info.get('stats.reward.count', 0) > 60 for info in info_d.values()):\n logger.warn('WARNING: returning more than 60 aggregated rewards: %s. Either your agent is not keeping up with the framerate, or you should have called \".reset()\" to clear pending rewards and reset the environments to a known state.',\n {name: '{} (episode_id={})'.format(info['stats.reward.count'], info.get('env_status.episode_id')) for name, info in info_d.items()})\n\n return reward_d, done_d, info_d, err_d\n\n def wait(self, timeout=None):\n if timeout is not None:\n deadline = time.time() + timeout\n for client in self.clients.values():\n if timeout is not None:\n remaining_timeout = deadline - time.time()\n else:\n remaining_timeout = None\n client.reward_buffer.wait_for_step(timeout=remaining_timeout)\n\n # Hack to test actions over websockets\n # TODO: Carve websockets out of rewarder pkg (into vnc_env? 
- and move this there)\n def send_action(self, action_n, env_id):\n reactor.callFromThread(self._send_action, env_id, action_n)\n return self.pop_errors()\n\n def _send_action(self, env_id, action_n):\n with self.lock:\n for n, client in zip(action_n, self.clients.values()):\n self._send_env_action(client, env_id, action_n[n])\n\n def _send_env_action(self, client, env_id, action_n):\n if len(action_n) == 0:\n # Hack to skip empty actions. TODO: Find source (throttle?) and fix\n return\n message = {\n 'env_id': env_id,\n 'action': action_n,\n }\n client.send('v0.agent.action', message, expect_reply=False)\n\n def rewards_count(self):\n # TODO: any reason to lock these?\n return [client.reward_buffer.count for client in self.clients.values()]\n\n def pop_observation(self):\n return [client.reward_buffer.pop_observation() for client in self.clients.values()]\n\n # def _connection_time(self):\n # deferreds = []\n # for client in self.clients:\n # endpoint = client.factory.endpoint\n # d = connection_timer.start(endpoint)\n # deferreds.append(d)\n\n # d = defer.DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)\n # return d\n\n# Run this in Twisty thread\nclass Network(object):\n def __init__(self):\n self.connection_samples = 10\n self.application_ping_samples = 10\n\n self.connection_time_m = None\n self.lock = threading.Lock()\n\n self.recalibrate = None\n self.client = None\n\n self._ntpdate_clock_skew = None\n self._ntpdate_reversed_clock_skew = None\n self._reversed_clock_skew = None\n\n def active(self):\n with self.lock:\n return self._reversed_clock_skew is not None\n\n # Used by external consumers\n def reversed_clock_skew(self):\n with self.lock:\n if self._ntpdate_clock_skew is not None:\n return self._ntpdate_reversed_clock_skew\n else:\n return self._reversed_clock_skew\n\n def _report(self):\n connection_time = display.display_timestamps(self.connection_time_m)\n if self._ntpdate_clock_skew is not None:\n ntpdate_clock_skew = display.display_timestamp(self._ntpdate_clock_skew[0])\n else:\n ntpdate_clock_skew = None\n clock_skew = display.display_timestamps_pair(self.clock_skew_m)\n application_rtt = display.display_timestamps(self.application_rtt_m)\n request_overhead = display.display_timestamps(self.request_overhead_m)\n response_overhead = display.display_timestamps(self.response_overhead_m)\n\n extra_logger.info('[%s] Network calibration: ntpdate_clock_skew=%s clock_skew=%s connection_time=%s application_rtt=%s request_overhead=%s response_overhead=%s',\n self.client.factory.label, ntpdate_clock_skew, clock_skew, connection_time, application_rtt,\n request_overhead, response_overhead)\n\n def _start(self):\n def calibrate():\n d = defer.Deferred()\n def fail(reason):\n logger.error('[%s] Could not recalibrate network: %s', self.client.factory.label, reason)\n d.addErrback(fail)\n self._start_measure_connection_time(d)\n self._start()\n self.recalibrate = reactor.callLater(5 * 60, calibrate)\n\n def close(self):\n if self.recalibrate:\n try:\n self.recalibrate.cancel()\n except twisted.internet.error.AlreadyCalled:\n pass\n\n # Called externally\n def calibrate(self, client):\n d = defer.Deferred()\n def success(res):\n # If we succeed, kick off the periodic 5 minute\n # recalibrations.\n self._start()\n return res\n d.addCallback(success)\n\n self.client = client\n\n # Kinda a hack. 
Idea is to try using the ntpdate -q offset if\n # we can.\n skew = self._start_measure_clock_skew()\n def succeed(offset):\n with self.lock:\n self._ntpdate_clock_skew = np.array([offset, offset])\n self._ntpdate_reversed_clock_skew = np.array([-offset, -offset])\n self._start_measure_connection_time(d)\n skew.addCallback(succeed)\n\n def fail(reason):\n with self.lock:\n self._ntpdate_clock_skew = None\n self._ntpdate_reversed_clock_skew = None\n\n extra_logger.info('[%s] Could not determine clock skew with ntpdate; falling back to application-level ping: %s', self.client.factory.label, reason.value)\n self._start_measure_connection_time(d)\n skew.addErrback(fail)\n\n return d\n\n def _start_measure_connection_time(self, d):\n connection_time_m = np.zeros(self.connection_samples)\n self._measure_connection_time(d, connection_time_m, 0)\n\n def _measure_connection_time(self, d, connection_time_m, i):\n extra_logger.debug('[%s] Measuring connection time (%d/%d)', self.client.factory.label, i+1, len(connection_time_m))\n endpoint = self.client.factory.endpoint\n timer = connection_timer.start(endpoint)\n\n def success(delta):\n connection_time_m[i] = delta\n if i+1 < len(connection_time_m):\n self._measure_connection_time(d, connection_time_m, i+1)\n else:\n self.connection_time_m = connection_time_m\n self._start_measure_application_ping(d)\n def fail(reason):\n d.errback(reason)\n timer.addCallback(success)\n timer.addErrback(fail)\n\n def _start_measure_application_ping(self, d=None):\n clock_skew_m = np.zeros((self.application_ping_samples, 2))\n request_overhead_m = np.zeros((self.application_ping_samples))\n response_overhead_m = np.zeros((self.application_ping_samples))\n application_rtt_m = np.zeros((self.application_ping_samples))\n\n self._measure_application_ping(d, clock_skew_m, request_overhead_m, response_overhead_m, application_rtt_m, 0)\n\n def _measure_application_ping(self, d, clock_skew_m, request_overhead_m, response_overhead_m, application_rtt_m, i):\n extra_logger.debug('[%s] Issuing an application-level ping (%d/%d)', self.client.factory.label, i+1, len(clock_skew_m))\n start = time.time()\n ping = _ping(self.client)\n\n def success(res):\n context, request, response = res\n end = time.time()\n\n request_sent_at = request['headers']['sent_at'] # local\n response_sent_at = response['headers']['sent_at'] # remote\n response_received_at = context['start'] # local\n\n # We try to put bounds on clock skew by subtracting\n # local and remote times, for local and remote events\n # that are causally related.\n #\n # For example, suppose that the following local/remote\n # logical timestamps apply to a request (for a system\n # with clock skew of 100):\n #\n # request_sent local: 0 remote: 100\n # request_recieved local: 1 remote: 101\n # response_sent local: 2 remote: 102\n # response_received local: 3 remote: 103\n #\n # Then:\n #\n # # Remote event *after* local is upper bound\n # request_recieved.remote - request_sent.local = 101\n # # Remote event *before* local is lower bound\n # response_sent.remote - response_received.local = 102 - 3 = 99\n #\n # There's danger of further clock drift over time, but\n # we don't need these to be fully accurate, and this\n # should be fine for now.\n clock_skew_m[i, :] = (response_sent_at-response_received_at, response_sent_at-request_sent_at)\n request_overhead_m[i] = request_sent_at - start\n response_overhead_m[i] = end - response_received_at\n application_rtt_m[i] = response_received_at - request_sent_at\n\n if i+1 < 
len(clock_skew_m):\n self._measure_application_ping(d, clock_skew_m, request_overhead_m, response_overhead_m, application_rtt_m, i+1)\n else:\n self.clock_skew_m = clock_skew_m\n self.request_overhead_m = request_overhead_m\n self.response_overhead_m = response_overhead_m\n self.application_rtt_m = application_rtt_m\n\n self._report()\n self._update_exposed_metrics()\n\n # Ok, all done!\n if d is not None:\n d.callback(self)\n ping.addCallback(success)\n ping.addErrback(d.errback)\n\n def _update_exposed_metrics(self):\n with self.lock:\n self._clock_skew = self.clock_skew_m.mean(axis=0) # add to local time to get remote time, as (min, max) values\n self._reversed_clock_skew = -self._clock_skew[[1, 0]] # add to remote time to get local time, in format (min, max)\n\n\n def _start_measure_clock_skew(self):\n host = self.client.factory.address.split(':')[0]\n return connection_timer.measure_clock_skew(self.client.factory.label, host)\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] |
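The Network calibration code in the row above bounds clock skew by differencing causally ordered local and remote timestamps, exactly as the long comment in _measure_application_ping explains. Below is a minimal sketch of that bounding arithmetic with invented timestamps (the true skew in this toy example is 100 time units); only the variable names mirror the code above, the values are made up.

import numpy as np

# Toy timestamps; the 'remote' clock runs 100 units ahead of 'local'.
request_sent_local = 0.0        # request['headers']['sent_at'] (local clock)
response_sent_remote = 102.0    # response['headers']['sent_at'] (remote clock)
response_received_local = 3.0   # context['start'] (local clock)

# A remote event that precedes a local event lower-bounds the skew;
# a remote event that follows a local event upper-bounds it.
skew = np.array([response_sent_remote - response_received_local,   # 99
                 response_sent_remote - request_sent_local])       # 102

# As in _update_exposed_metrics: add the reversed (min, max) pair to a
# remote timestamp to map it back into local time.
reversed_skew = -skew[[1, 0]]
print(skew, reversed_skew)  # [ 99. 102.] [-102.  -99.]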
chrhenning/uncertainty_based_ood | [
"13c0b9910966544527497497f6ff0441d5334591"
] | [
"nngp/nngp.py"
] | [
"#!/usr/bin/env python3\n# Copyright 2021 Christian Henning\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @title :nngp/nngp.py\n# @author :ch\n# @contact :[email protected]\n# @created :04/19/2021\n# @version :1.0\n# @python_version :3.8.5\nr\"\"\"\nDeep Neural Network as Gaussian Process\n---------------------------------------\n\nThe module :mod:`nngp.nngp` implements helper functions for Bayesian inference\nwith Gaussian Processes with a focus on kernels derived from neural network\narchitectures when taken to the infinite-width limit\n(cf. :mod:`nngp.mlp_kernel`).\n\nSpecifically, we consider a Gaussian Process\n:math:`\\mathcal{GP}\\big(\\mu(x), k(x, x')\\big)` with mean function\n:math:`\\mu(\\cdot)` and kernel :math:`k(\\cdot, \\cdot)`. Unless specified\notherwise, we assume the mean function to be :math:`\\mu(x) = 0`. Note, that any\nmultivariate Gaussian prescribed by the :math:`\\mathcal{GP}` at a given set of\ninput locations is consistent (marginalization from any superset of locations\nwill always lead to the same distribution) and adheres exchangibility (order of\ninput locations doesn't affect the distribution except for repositioning the\ncorresponding function values).\n\nFor any given set of inputs :math:`X = x_1, \\dots, x_n`, the\n:math:`\\mathcal{GP}` allows us to specify a prior distribution over function\nvalues :math:`p(f_1, \\dots, f_n; x_1, \\dots, x_n) \\equiv p(F; X)`.\n\nIn addition to inputs :math:`x` and function values :math:`f`, we consider\nobservations :math:`y`, which are obtained via a likelihood function\n:math:`p(y \\mid f)`.\n\nUsing the prior distribution over functions (the :math:`\\mathcal{GP}`) and a\ndataset :math:`\\mathcal{D} = \\{(x_n, y_n)\\}_{n=1}^N` with inputs :math:`X` and\ntargets :math:`Y`, one can form a posterior distribution over function values\n:math:`f` at an unknown location :math:`x^*` via\n\n.. math::\n\n p(f \\mid \\mathcal{D}; x^*) = p(f \\mid Y; x^* X) = \\frac{1}{p(Y; X)} \\\n \\int p(Y \\mid F) p(F, f; X, x^*) \\, dF\n\nPlease see\n`Rasmussen and Williams <http://www.gaussianprocess.org/gpml/chapters/RW.pdf>`__\nfor a broader introduction into Gaussian Processes.\n\"\"\"\nimport torch\nfrom warnings import warn\n\ndef inference_with_isotropic_gaussian_ll(Y, K_train, K_test, K_all, var=1e-10,\n L_mat=None, return_cov=False):\n r\"\"\"Bayesian inference with Gaussian likelihood and :math:`\\mathcal{GP}`\n prior.\n\n Here, we consider the case\n :math:`p(Y \\mid F) = \\mathcal{N}(Y; F, \\sigma_\\epsilon^2 I)`, where the\n posterior predictive :math:`p(f \\mid \\mathcal{D}; x^*)` can be analytically\n computed\n\n .. 
math::\n\n p(f \\mid \\mathcal{D}; x^*) &= \\mathcal{N}(f; \\mu^*, \\Sigma^*) \\\\ \\\n \\mu^* &= K(x^*, X) \\big( K(X, X) + \\sigma_\\epsilon^2 I \\big)^{-1} Y \\\\ \\\n \\Sigma^* &= k(x^*, x^*) - K(x^*, X) \\big( K(X, X) + \\\n \\sigma_\\epsilon^2 I \\big)^{-1} K(X, x^*)\n\n Args:\n Y (torch.Tensor): The labels :math:`Y` from the training set encoded as\n vector of shape ``[m]`` or ``[m, 1]``.\n K_train (torch.Tensor): The training data kernel matrix :math:`K(X, X)`.\n K_test (torch.Tensor): The test data kernel values :math:`k(x^*, x^*)`.\n This is a vector either of shape ``[n]``, where ``n`` is the number\n test points, or of shape ``[n, 1]``.\n K_all (torch.Tensor): The kernel values between train and test points\n :math:`K(x^*, X)`. This is expected to be matrix of shape ``[n,m]``,\n where ``m`` is the number of training and ``n`` the number of test\n points, or simply a vector of shape ``[m]``, if there is only one\n test point.\n var (float): The variance :math:`\\sigma_\\epsilon^2` of the likelihood.\n L_mat (torch.Tensor, optional): The matrix :math:`L` resulting from a\n Cholesky decomposition of :math:`K(X, X) + \\sigma_\\epsilon^2 I`.\n If provided, the arguments ``K_train`` and ``var`` are ignored.\n\n The function :func:`cholesky_adaptive_noise` may be helpful to\n compute ``L_mat``.\n return_cov (bool): If ``True``, the return value ``cov`` will be the\n full covariance matrix. However, this option requires ``K_test``\n to be the full ``[n, n]`` kernel matrix.\n\n Returns:\n (tuple): Tuple containing:\n\n - **mean** (torch.Tensor): A tensor of shape ``[n]``, where ``n`` is the\n number of test points. The tensor encodes the mean for each test point\n of the posterior predictive :math:`\\mu^*`.\n - **cov** (torch.Tensor): Same as ``mean`` but encoding the variance\n :math:`\\Sigma^*` of each test point, i.e., the diagonal of the full\n covariance matrix.\n \"\"\"\n m = K_train.shape[0] if L_mat is None else L_mat.shape[0]\n n = K_test.shape[0]\n assert Y.numel() == m\n assert K_all.numel() == m*n\n\n if Y.ndim == 1:\n Y = Y.view(-1, 1)\n if return_cov:\n assert K_test.numel() == n*n and K_test.ndim == 2\n elif K_test.ndim == 2:\n K_test = K_test.view(-1)\n if K_all.ndim == 1:\n assert n == 1\n K_all = K_all.view(n, m)\n\n #inv_K = torch.linalg.inv(K_train + var * torch.eye(m).to(K_train.device))\n #mu = torch.matmul(K_all, torch.matmul(inv_K, Y))\n #if return_cov:\n # sigma = K_test - torch.matmul(K_all, torch.matmul(inv_K, K_all.T))\n #else:\n # #sigma = K_test - torch.bmm(K_all.view(n, 1, m), torch.matmul(inv_K,\n # # K_all.view(n, m, 1))).squeeze()\n # sigma = K_test - (K_all * torch.matmul(inv_K,\n # K_all.view(n, m, 1)).squeeze(dim=2)).sum(dim=1)\n\n # Note, direct matrix inversion is considered extremely numerically\n # unstable. Therefore, Rasmussen et al. 
propose the use of Cholesky\n # decomposition, see Appendix A.4 in\n # http://www.gaussianprocess.org/gpml/chapters/RW.pdf\n if L_mat is None:\n L = torch.linalg.cholesky(K_train + \\\n var * torch.eye(m).to(K_train.device))\n else:\n L = L_mat\n alpha = torch.triangular_solve(torch.triangular_solve(Y, L, upper=False)[0],\n L, upper=False, transpose=True)[0]\n mu = torch.matmul(K_all, alpha)\n\n v = torch.triangular_solve(K_all.T, L, upper=False)[0]\n if return_cov:\n sigma = K_test - torch.matmul(v.T, v)\n else:\n sigma = K_test - (v * v).sum(dim=0)\n\n if torch.any(sigma < 0):\n sigma[sigma < 0] = 1e-5\n warn('Some entries of the covariance matrix are negative and set to ' +\n '1e-5!')\n\n return mu.squeeze(), sigma\n\ndef gen_inference_kernels(X_train, X_test, kernel_func, compute_K_train=True,\n full_K_test=False):\n r\"\"\"Generate the kernel matrices required for inference.\n\n This function generates the kernel matrices / vectors :math:`K(X, X)`,\n :math:`K(x^*, X)` and :math:`K(x^*, x^*)`, where :math:`X` are training\n inputs and :math:`x^*` are unseen points.\n\n Thus, the function can be seen as helper function for functions like\n :func:`inference_with_isotropic_gaussian_ll`.\n\n Args:\n X_train (torch.Tensor): A batch of ``m`` training inputs. The tensor\n should have shape ``[m, d_in]``, where ``d_in`` is the input\n dimensionality. For scalar inputs, one may also pass a tensor of\n shape ``[m]``.\n X_test (torch.Tensor):A batch of ``n`` unseen test inputs.\n kernel_func (func): The kernel function :math:`k(x, x')`. It is expected\n to have an interface for a single input ``X`` as described in\n the docstring of function:`nngp.mlp_kernel.init_kernel`.\n\n .. code-block:: python\n\n def kernel_func(X):\n # Compute kernel values.\n return K\n\n compute_K_train (bool): Whether the kernel matrix :math:`K(X, X)`\n should be computed. If ``False``, the return value ``K_train`` is\n ``None``.\n full_K_test (bool): Whether the full kernel matrix :math:`K(x^*, x^*)`\n of shape ``[n, n]`` should be computed.\n\n Returns:\n (tuple): Tuple containing:\n\n - **K_train** (torch.Tensor or None): :math:`K(X, X)`, a tensor of\n shape ``[m, m]``.\n - **K_test** (torch.Tensor): :math:`K(x^*, x^*)`, a tensor of shape\n ``[n]``\n - **K_all** (torch.Tensor): :math:`K(x^*, X)`, a tensor of shape\n ``[n,m]``\n \"\"\"\n if compute_K_train:\n K_train = kernel_func(X_train)\n else:\n K_train = None\n\n if full_K_test:\n K_test = kernel_func(X_test)\n else:\n K_test = kernel_func((X_test, X_test))\n\n # Contruct tuples between all train samples and all test samples.\n if X_train.ndim == 1: # `d_in == 1`\n X_train = X_train.view(-1, 1)\n if X_test.ndim == 1:\n X_test = X_test.view(-1, 1)\n\n m = X_train.shape[0]\n n = X_test.shape[0]\n\n X_all = (X_train.repeat(n, 1),\n X_test.view(n, 1, -1).repeat(1, m, 1).view(n*m, -1))\n K_all = kernel_func(X_all)\n\n K_all = K_all.view(n, m)\n\n return K_train, K_test, K_all\n\ndef cholesky_adaptive_noise(K_train, var=1e-10, var_step=2.):\n r\"\"\"Cholesky decomposition of a kernel matrix with noise perturbation.\n\n This function computes the Cholesky decomposition of:\n\n .. math::\n\n L L^T = K(X, X) + \\sigma_\\epsilon^2 I\n\n As kernel matrices :math:`K(X, X)` may easily be (numerically) singular,\n tuning the noise :math:`\\sigma_\\epsilon^2` is crucial. 
Therefore, this\n method will iteratively increase the noise level until the matrix becomes\n non-singular.\n\n Args:\n (....): See docstring of method :meth:`kernel_efficient`.\n var (float or list): The initial variance :math:`\\sigma_\\epsilon^2`.\n If a list of values is provided, then each value in this list is\n consecutively tested until a non-singular matrix is constructed.\n Note, we assume that the list is sorted from small to large. If none\n of the elements in this list leads to a non-singular matrix, an\n exception is raised.\n var_step (float): If ``var`` is a single value, then the value specified\n here will be iteratively multiplied to increase the variance\n :math:`\\sigma_\\epsilon^2` (therefore ``var_step > 1`` is required).\n\n Returns:\n (tuple): Tuple containing:\n - **L** (torch.Tensor): The matrix :math:`L` resulting from the\n successful Cholesky decomposition.\n - **var_chosen** (float): The variance :math:`\\sigma_\\epsilon^2` that\n was chosen to obtain ``L``.\n \"\"\"\n m = K_train.shape[0]\n\n if not isinstance(var, (list, tuple)):\n assert var_step > 1.\n\n i = 0\n while True:\n if isinstance(var, (list, tuple)):\n if i >= len(var):\n raise RuntimeError('List of variances didn\\'t contain high ' +\n 'enough values.')\n curr_var = var[i]\n else:\n if i == 0:\n curr_var = var\n else:\n curr_var *= var_step\n\n try:\n L = torch.linalg.cholesky(K_train + curr_var * torch.eye(m).to( \\\n K_train.device))\n except RuntimeError:\n # Decomposition failed; retry with a larger noise variance.\n i += 1\n continue\n\n return L, curr_var\n\nif __name__ == '__main__':\n pass\n\n\n"
] | [
[
"torch.any",
"torch.matmul",
"torch.eye",
"torch.triangular_solve"
]
] |
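The inference_with_isotropic_gaussian_ll row above follows the Cholesky recipe from Rasmussen & Williams (Appendix A.4). Below is a minimal usage sketch, assuming a stand-in RBF kernel in place of the NNGP kernels (which live in nngp.mlp_kernel and are not shown in this row) and using torch.cholesky_solve / torch.linalg.solve_triangular instead of the deprecated torch.triangular_solve seen in the file.

import torch

def rbf(a, b, ls=1.0):
    # Stand-in kernel k(x, x') = exp(-0.5 (x - x')^2 / ls^2) for scalar inputs.
    return torch.exp(-0.5 * (a.view(-1, 1) - b.view(1, -1)) ** 2 / ls ** 2)

X = torch.linspace(-2., 2., 8)     # m = 8 training inputs
Y = torch.sin(X)                   # training targets
Xs = torch.linspace(-1.5, 1.5, 5)  # n = 5 test inputs

var = 1e-4                         # likelihood variance sigma_eps^2
L = torch.linalg.cholesky(rbf(X, X) + var * torch.eye(8))

alpha = torch.cholesky_solve(Y.view(-1, 1), L)   # (K + var I)^{-1} Y
K_all = rbf(Xs, X)                               # K(x*, X), shape [n, m]
mu = (K_all @ alpha).squeeze()                   # posterior mean mu*

v = torch.linalg.solve_triangular(L, K_all.T, upper=False)
sigma = rbf(Xs, Xs).diag() - (v * v).sum(dim=0)  # posterior variances Sigma*
print(mu, sigma)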
cheneyveron/PaddleX | [
"86f73fc6a66b12c638f642524bfd1cf730e26c4b",
"86f73fc6a66b12c638f642524bfd1cf730e26c4b",
"86f73fc6a66b12c638f642524bfd1cf730e26c4b",
"86f73fc6a66b12c638f642524bfd1cf730e26c4b",
"86f73fc6a66b12c638f642524bfd1cf730e26c4b"
] | [
"paddlex/ppdet/modeling/assigners/atss_assigner.py",
"static/paddlex/cv/models/utils/seg_eval.py",
"paddlex/ppcls/data/preprocess/__init__.py",
"paddlex/ppdet/modeling/post_process.py",
"static/paddlex/tools/x2seg.py"
] | [
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\nfrom paddlex.ppdet.core.workspace import register\nfrom ..ops import iou_similarity\nfrom ..bbox_utils import bbox_center\nfrom .utils import (pad_gt, check_points_inside_bboxes, compute_max_iou_anchor,\n compute_max_iou_gt)\n\n\n@register\nclass ATSSAssigner(nn.Layer):\n \"\"\"Bridging the Gap Between Anchor-based and Anchor-free Detection\n via Adaptive Training Sample Selection\n \"\"\"\n __shared__ = ['num_classes']\n\n def __init__(self,\n topk=9,\n num_classes=80,\n force_gt_matching=False,\n eps=1e-9):\n super(ATSSAssigner, self).__init__()\n self.topk = topk\n self.num_classes = num_classes\n self.force_gt_matching = force_gt_matching\n self.eps = eps\n\n def _gather_topk_pyramid(self, gt2anchor_distances, num_anchors_list,\n pad_gt_mask):\n pad_gt_mask = pad_gt_mask.tile([1, 1, self.topk]).astype(paddle.bool)\n gt2anchor_distances_list = paddle.split(\n gt2anchor_distances, num_anchors_list, axis=-1)\n num_anchors_index = np.cumsum(num_anchors_list).tolist()\n num_anchors_index = [0, ] + num_anchors_index[:-1]\n is_in_topk_list = []\n topk_idxs_list = []\n for distances, anchors_index in zip(gt2anchor_distances_list,\n num_anchors_index):\n num_anchors = distances.shape[-1]\n topk_metrics, topk_idxs = paddle.topk(\n distances, self.topk, axis=-1, largest=False)\n topk_idxs_list.append(topk_idxs + anchors_index)\n topk_idxs = paddle.where(pad_gt_mask, topk_idxs,\n paddle.zeros_like(topk_idxs))\n is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(axis=-2)\n is_in_topk = paddle.where(is_in_topk > 1,\n paddle.zeros_like(is_in_topk),\n is_in_topk)\n is_in_topk_list.append(\n is_in_topk.astype(gt2anchor_distances.dtype))\n is_in_topk_list = paddle.concat(is_in_topk_list, axis=-1)\n topk_idxs_list = paddle.concat(topk_idxs_list, axis=-1)\n return is_in_topk_list, topk_idxs_list\n\n @paddle.no_grad()\n def forward(self,\n anchor_bboxes,\n num_anchors_list,\n gt_labels,\n gt_bboxes,\n bg_index,\n gt_scores=None):\n r\"\"\"This code is based on\n https://github.com/fcjian/TOOD/blob/master/mmdet/core/bbox/assigners/atss_assigner.py\n\n The assignment is done in following steps\n 1. compute iou between all bbox (bbox of all pyramid levels) and gt\n 2. compute center distance between all bbox and gt\n 3. on each pyramid level, for each gt, select k bbox whose center\n are closest to the gt center, so we total select k*l bbox as\n candidates for each gt\n 4. get corresponding iou for the these candidates, and compute the\n mean and std, set mean + std as the iou threshold\n 5. select these candidates whose iou are greater than or equal to\n the threshold as positive\n 6. limit the positive sample's center in gt\n 7. 
if an anchor box is assigned to multiple gts, the one with the\n highest iou will be selected.\n Args:\n anchor_bboxes (Tensor, float32): pre-defined anchors, shape(L, 4),\n \"xmin, xmax, ymin, ymax\" format\n num_anchors_list (List): num of anchors in each level\n gt_labels (Tensor|List[Tensor], int64): Label of gt_bboxes, shape(B, n, 1)\n gt_bboxes (Tensor|List[Tensor], float32): Ground truth bboxes, shape(B, n, 4)\n bg_index (int): background index\n gt_scores (Tensor|List[Tensor]|None, float32) Score of gt_bboxes,\n shape(B, n, 1), if None, then it will initialize with one_hot label\n Returns:\n assigned_labels (Tensor): (B, L)\n assigned_bboxes (Tensor): (B, L, 4)\n assigned_scores (Tensor): (B, L, C)\n \"\"\"\n gt_labels, gt_bboxes, pad_gt_scores, pad_gt_mask = pad_gt(\n gt_labels, gt_bboxes, gt_scores)\n assert gt_labels.ndim == gt_bboxes.ndim and \\\n gt_bboxes.ndim == 3\n\n num_anchors, _ = anchor_bboxes.shape\n batch_size, num_max_boxes, _ = gt_bboxes.shape\n\n # negative batch\n if num_max_boxes == 0:\n assigned_labels = paddle.full([batch_size, num_anchors], bg_index)\n assigned_bboxes = paddle.zeros([batch_size, num_anchors, 4])\n assigned_scores = paddle.zeros(\n [batch_size, num_anchors, self.num_classes])\n return assigned_labels, assigned_bboxes, assigned_scores\n\n # 1. compute iou between gt and anchor bbox, [B, n, L]\n ious = iou_similarity(gt_bboxes.reshape([-1, 4]), anchor_bboxes)\n ious = ious.reshape([batch_size, -1, num_anchors])\n\n # 2. compute center distance between all anchors and gt, [B, n, L]\n gt_centers = bbox_center(gt_bboxes.reshape([-1, 4])).unsqueeze(1)\n anchor_centers = bbox_center(anchor_bboxes)\n gt2anchor_distances = (gt_centers - anchor_centers.unsqueeze(0)) \\\n .norm(2, axis=-1).reshape([batch_size, -1, num_anchors])\n\n # 3. on each pyramid level, selecting topk closest candidates\n # based on the center distance, [B, n, L]\n is_in_topk, topk_idxs = self._gather_topk_pyramid(\n gt2anchor_distances, num_anchors_list, pad_gt_mask)\n\n # 4. get corresponding iou for the these candidates, and compute the\n # mean and std, 5. set mean + std as the iou threshold\n iou_candidates = ious * is_in_topk\n iou_threshold = paddle.index_sample(\n iou_candidates.flatten(stop_axis=-2),\n topk_idxs.flatten(stop_axis=-2))\n iou_threshold = iou_threshold.reshape([batch_size, num_max_boxes, -1])\n iou_threshold = iou_threshold.mean(axis=-1, keepdim=True) + \\\n iou_threshold.std(axis=-1, keepdim=True)\n is_in_topk = paddle.where(\n iou_candidates > iou_threshold.tile([1, 1, num_anchors]),\n is_in_topk, paddle.zeros_like(is_in_topk))\n\n # 6. check the positive sample's center in gt, [B, n, L]\n is_in_gts = check_points_inside_bboxes(anchor_centers, gt_bboxes)\n\n # select positive sample, [B, n, L]\n mask_positive = is_in_topk * is_in_gts * pad_gt_mask\n\n # 7. if an anchor box is assigned to multiple gts,\n # the one with the highest iou will be selected.\n mask_positive_sum = mask_positive.sum(axis=-2)\n if mask_positive_sum.max() > 1:\n mask_multiple_gts = (mask_positive_sum.unsqueeze(1) > 1).tile(\n [1, num_max_boxes, 1])\n is_max_iou = compute_max_iou_anchor(ious)\n mask_positive = paddle.where(mask_multiple_gts, is_max_iou,\n mask_positive)\n mask_positive_sum = mask_positive.sum(axis=-2)\n # 8. 
make sure every gt_bbox matches the anchor\n if self.force_gt_matching:\n is_max_iou = compute_max_iou_gt(ious) * pad_gt_mask\n mask_max_iou = (is_max_iou.sum(-2, keepdim=True) == 1).tile(\n [1, num_max_boxes, 1])\n mask_positive = paddle.where(mask_max_iou, is_max_iou,\n mask_positive)\n mask_positive_sum = mask_positive.sum(axis=-2)\n assigned_gt_index = mask_positive.argmax(axis=-2)\n assert mask_positive_sum.max() == 1, \\\n (\"one anchor just assign one gt, but received not equals 1. \"\n \"Received: %f\" % mask_positive_sum.max().item())\n\n # assigned target\n batch_ind = paddle.arange(\n end=batch_size, dtype=gt_labels.dtype).unsqueeze(-1)\n assigned_gt_index = assigned_gt_index + batch_ind * num_max_boxes\n assigned_labels = paddle.gather(\n gt_labels.flatten(), assigned_gt_index.flatten(), axis=0)\n assigned_labels = assigned_labels.reshape([batch_size, num_anchors])\n assigned_labels = paddle.where(\n mask_positive_sum > 0, assigned_labels,\n paddle.full_like(assigned_labels, bg_index))\n\n assigned_bboxes = paddle.gather(\n gt_bboxes.reshape([-1, 4]), assigned_gt_index.flatten(), axis=0)\n assigned_bboxes = assigned_bboxes.reshape([batch_size, num_anchors, 4])\n\n assigned_scores = F.one_hot(assigned_labels, self.num_classes)\n if gt_scores is not None:\n gather_scores = paddle.gather(\n pad_gt_scores.flatten(), assigned_gt_index.flatten(), axis=0)\n gather_scores = gather_scores.reshape([batch_size, num_anchors])\n gather_scores = paddle.where(mask_positive_sum > 0, gather_scores,\n paddle.zeros_like(gather_scores))\n assigned_scores *= gather_scores.unsqueeze(-1)\n\n return assigned_labels, assigned_bboxes, assigned_scores\n",
"# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport numpy as np\nfrom scipy.sparse import csr_matrix\n\n\nclass ConfusionMatrix(object):\n \"\"\"\n Confusion Matrix for segmentation evaluation\n \"\"\"\n\n def __init__(self, num_classes=2, streaming=False):\n self.confusion_matrix = np.zeros(\n [num_classes, num_classes], dtype='int64')\n self.num_classes = num_classes\n self.streaming = streaming\n\n def calculate(self, pred, label, ignore=None):\n # If not in streaming mode, clear matrix everytime when call `calculate`\n if not self.streaming:\n self.zero_matrix()\n\n label = np.transpose(label, (0, 2, 3, 1))\n ignore = np.transpose(ignore, (0, 2, 3, 1))\n mask = np.array(ignore) == 1\n\n label = np.asarray(label)[mask]\n pred = np.asarray(pred)[mask]\n one = np.ones_like(pred)\n # Accumuate ([row=label, col=pred], 1) into sparse matrix\n spm = csr_matrix(\n (one, (label, pred)), shape=(self.num_classes, self.num_classes))\n spm = spm.todense()\n self.confusion_matrix += spm\n\n def zero_matrix(self):\n \"\"\" Clear confusion matrix \"\"\"\n self.confusion_matrix = np.zeros(\n [self.num_classes, self.num_classes], dtype='int64')\n\n def mean_iou(self):\n iou_list = []\n avg_iou = 0\n # TODO: use numpy sum axis api to simpliy\n vji = np.zeros(self.num_classes, dtype=int)\n vij = np.zeros(self.num_classes, dtype=int)\n for j in range(self.num_classes):\n v_j = 0\n for i in range(self.num_classes):\n v_j += self.confusion_matrix[j][i]\n vji[j] = v_j\n\n for i in range(self.num_classes):\n v_i = 0\n for j in range(self.num_classes):\n v_i += self.confusion_matrix[j][i]\n vij[i] = v_i\n\n for c in range(self.num_classes):\n total = vji[c] + vij[c] - self.confusion_matrix[c][c]\n if total == 0:\n iou = 0\n else:\n iou = float(self.confusion_matrix[c][c]) / total\n avg_iou += iou\n iou_list.append(iou)\n avg_iou = float(avg_iou) / float(self.num_classes)\n return np.array(iou_list), avg_iou\n\n def accuracy(self):\n total = self.confusion_matrix.sum()\n total_right = 0\n for c in range(self.num_classes):\n total_right += self.confusion_matrix[c][c]\n if total == 0:\n avg_acc = 0\n else:\n avg_acc = float(total_right) / total\n\n vij = np.zeros(self.num_classes, dtype=int)\n for i in range(self.num_classes):\n v_i = 0\n for j in range(self.num_classes):\n v_i += self.confusion_matrix[j][i]\n vij[i] = v_i\n\n acc_list = []\n for c in range(self.num_classes):\n if vij[c] == 0:\n acc = 0\n else:\n acc = self.confusion_matrix[c][c] / float(vij[c])\n acc_list.append(acc)\n return np.array(acc_list), avg_acc\n\n def kappa(self):\n vji = np.zeros(self.num_classes)\n vij = np.zeros(self.num_classes)\n for j in range(self.num_classes):\n v_j = 0\n for i in range(self.num_classes):\n v_j += self.confusion_matrix[j][i]\n vji[j] = v_j\n\n for i in range(self.num_classes):\n v_i = 0\n for j in range(self.num_classes):\n v_i += self.confusion_matrix[j][i]\n vij[i] = v_i\n\n total = self.confusion_matrix.sum()\n\n # 
avoid spillovers\n # TODO: is it reasonable to hard code 10000.0?\n total = float(total) / 10000.0\n vji = vji / 10000.0\n vij = vij / 10000.0\n\n tp = 0\n tc = 0\n for c in range(self.num_classes):\n tp += vji[c] * vij[c]\n tc += self.confusion_matrix[c][c]\n\n tc = tc / 10000.0\n pe = tp / (total * total)\n po = tc / total\n\n kappa = (po - pe) / (1 - pe)\n return kappa\n\n def f1_score(self):\n f1score_list = []\n # TODO: use numpy sum axis api to simpliy\n vji = np.zeros(self.num_classes, dtype=int)\n vij = np.zeros(self.num_classes, dtype=int)\n for j in range(self.num_classes):\n v_j = 0\n for i in range(self.num_classes):\n v_j += self.confusion_matrix[j][i]\n vji[j] = v_j\n\n for i in range(self.num_classes):\n v_i = 0\n for j in range(self.num_classes):\n v_i += self.confusion_matrix[j][i]\n vij[i] = v_i\n\n for c in range(self.num_classes):\n if vji[c] == 0:\n precision = 0\n else:\n precision = self.confusion_matrix[c][c] / vji[c]\n if vij[c] == 0:\n recall = 0\n else:\n recall = self.confusion_matrix[c][c] / vij[c]\n if recall + precision <= 1e-06:\n f1score = 0\n else:\n f1score = 2 * precision * recall / (recall + precision)\n f1score_list.append(f1score)\n return np.array(f1score_list)\n",
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom paddlex.ppcls.data.preprocess.ops.autoaugment import ImageNetPolicy as RawImageNetPolicy\nfrom paddlex.ppcls.data.preprocess.ops.randaugment import RandAugment as RawRandAugment\nfrom paddlex.ppcls.data.preprocess.ops.timm_autoaugment import RawTimmAutoAugment\nfrom paddlex.ppcls.data.preprocess.ops.cutout import Cutout\n\nfrom paddlex.ppcls.data.preprocess.ops.hide_and_seek import HideAndSeek\nfrom paddlex.ppcls.data.preprocess.ops.random_erasing import RandomErasing\nfrom paddlex.ppcls.data.preprocess.ops.grid import GridMask\n\nfrom paddlex.ppcls.data.preprocess.ops.operators import DecodeImage\nfrom paddlex.ppcls.data.preprocess.ops.operators import ResizeImage\nfrom paddlex.ppcls.data.preprocess.ops.operators import CropImage\nfrom paddlex.ppcls.data.preprocess.ops.operators import RandCropImage\nfrom paddlex.ppcls.data.preprocess.ops.operators import RandFlipImage\nfrom paddlex.ppcls.data.preprocess.ops.operators import NormalizeImage\nfrom paddlex.ppcls.data.preprocess.ops.operators import ToCHWImage\nfrom paddlex.ppcls.data.preprocess.ops.operators import AugMix\n\nfrom paddlex.ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler, FmixOperator\n\nimport numpy as np\nfrom PIL import Image\n\n\ndef transform(data, ops=[]):\n \"\"\" transform \"\"\"\n for op in ops:\n data = op(data)\n return data\n\n\nclass AutoAugment(RawImageNetPolicy):\n \"\"\" ImageNetPolicy wrapper to auto fit different img types \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __call__(self, img):\n if not isinstance(img, Image.Image):\n img = np.ascontiguousarray(img)\n img = Image.fromarray(img)\n\n img = super().__call__(img)\n\n if isinstance(img, Image.Image):\n img = np.asarray(img)\n\n return img\n\n\nclass RandAugment(RawRandAugment):\n \"\"\" RandAugment wrapper to auto fit different img types \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __call__(self, img):\n if not isinstance(img, Image.Image):\n img = np.ascontiguousarray(img)\n img = Image.fromarray(img)\n\n img = super().__call__(img)\n\n if isinstance(img, Image.Image):\n img = np.asarray(img)\n\n return img\n\n\nclass TimmAutoAugment(RawTimmAutoAugment):\n \"\"\" TimmAutoAugment wrapper to auto fit different img tyeps. \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __call__(self, img):\n if not isinstance(img, Image.Image):\n img = np.ascontiguousarray(img)\n img = Image.fromarray(img)\n\n img = super().__call__(img)\n\n if isinstance(img, Image.Image):\n img = np.asarray(img)\n\n return img\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nfrom paddlex.ppdet.core.workspace import register\nfrom paddlex.ppdet.modeling.bbox_utils import nonempty_bbox, rbox2poly\nfrom paddlex.ppdet.modeling.layers import TTFBox\nfrom .transformers import bbox_cxcywh_to_xyxy\ntry:\n from collections.abc import Sequence\nexcept Exception:\n from collections import Sequence\n\n__all__ = [\n 'BBoxPostProcess', 'MaskPostProcess', 'FCOSPostProcess',\n 'S2ANetBBoxPostProcess', 'JDEBBoxPostProcess', 'CenterNetPostProcess',\n 'DETRBBoxPostProcess', 'SparsePostProcess'\n]\n\n\n@register\nclass BBoxPostProcess(nn.Layer):\n __shared__ = ['num_classes']\n __inject__ = ['decode', 'nms']\n\n def __init__(self, num_classes=80, decode=None, nms=None):\n super(BBoxPostProcess, self).__init__()\n self.num_classes = num_classes\n self.decode = decode\n self.nms = nms\n self.fake_bboxes = paddle.to_tensor(\n np.array(\n [[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))\n self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))\n\n def forward(self, head_out, rois, im_shape, scale_factor):\n \"\"\"\n Decode the bbox and do NMS if needed.\n\n Args:\n head_out (tuple): bbox_pred and cls_prob of bbox_head output.\n rois (tuple): roi and rois_num of rpn_head output.\n im_shape (Tensor): The shape of the input image.\n scale_factor (Tensor): The scale factor of the input image.\n Returns:\n bbox_pred (Tensor): The output prediction with shape [N, 6], including\n labels, scores and bboxes. 
The size of bboxes are corresponding\n to the input image, the bboxes may be used in other branch.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [1], and is N.\n \"\"\"\n if self.nms is not None:\n bboxes, score = self.decode(head_out, rois, im_shape, scale_factor)\n bbox_pred, bbox_num, _ = self.nms(bboxes, score, self.num_classes)\n else:\n bbox_pred, bbox_num = self.decode(head_out, rois, im_shape,\n scale_factor)\n return bbox_pred, bbox_num\n\n def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):\n \"\"\"\n Rescale, clip and filter the bbox from the output of NMS to\n get final prediction.\n\n Notes:\n Currently only support bs = 1.\n\n Args:\n bboxes (Tensor): The output bboxes with shape [N, 6] after decode\n and NMS, including labels, scores and bboxes.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [1], and is N.\n im_shape (Tensor): The shape of the input image.\n scale_factor (Tensor): The scale factor of the input image.\n Returns:\n pred_result (Tensor): The final prediction results with shape [N, 6]\n including labels, scores and bboxes.\n \"\"\"\n\n bboxes_list = []\n bbox_num_list = []\n id_start = 0\n # add fake bbox when output is empty for each batch\n for i in range(bbox_num.shape[0]):\n if bbox_num[i] == 0:\n bboxes_i = self.fake_bboxes\n bbox_num_i = self.fake_bbox_num\n id_start += 1\n else:\n bboxes_i = bboxes[id_start:id_start + bbox_num[i], :]\n bbox_num_i = bbox_num[i]\n id_start += bbox_num[i]\n bboxes_list.append(bboxes_i)\n bbox_num_list.append(bbox_num_i)\n bboxes = paddle.concat(bboxes_list)\n bbox_num = paddle.concat(bbox_num_list)\n\n origin_shape = paddle.floor(im_shape / scale_factor + 0.5)\n\n origin_shape_list = []\n scale_factor_list = []\n # scale_factor: scale_y, scale_x\n for i in range(bbox_num.shape[0]):\n expand_shape = paddle.expand(origin_shape[i:i + 1, :],\n [bbox_num[i], 2])\n scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]\n scale = paddle.concat([scale_x, scale_y, scale_x, scale_y])\n expand_scale = paddle.expand(scale, [bbox_num[i], 4])\n origin_shape_list.append(expand_shape)\n scale_factor_list.append(expand_scale)\n\n self.origin_shape_list = paddle.concat(origin_shape_list)\n scale_factor_list = paddle.concat(scale_factor_list)\n\n # bboxes: [N, 6], label, score, bbox\n pred_label = bboxes[:, 0:1]\n pred_score = bboxes[:, 1:2]\n pred_bbox = bboxes[:, 2:]\n # rescale bbox to original image\n scaled_bbox = pred_bbox / scale_factor_list\n origin_h = self.origin_shape_list[:, 0]\n origin_w = self.origin_shape_list[:, 1]\n zeros = paddle.zeros_like(origin_h)\n # clip bbox to [0, original_size]\n x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros)\n y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros)\n x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros)\n y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros)\n pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1)\n # filter empty bbox\n keep_mask = nonempty_bbox(pred_bbox, return_mask=True)\n keep_mask = paddle.unsqueeze(keep_mask, [1])\n pred_label = paddle.where(keep_mask, pred_label,\n paddle.ones_like(pred_label) * -1)\n pred_result = paddle.concat(\n [pred_label, pred_score, pred_bbox], axis=1)\n return pred_result\n\n def get_origin_shape(self, ):\n return self.origin_shape_list\n\n\n@register\nclass MaskPostProcess(object):\n \"\"\"\n refer to:\n https://github.com/facebookresearch/detectron2/layers/mask_ops.py\n\n Get Mask 
output according to the output from model\n \"\"\"\n\n def __init__(self, binary_thresh=0.5):\n super(MaskPostProcess, self).__init__()\n self.binary_thresh = binary_thresh\n\n def paste_mask(self, masks, boxes, im_h, im_w):\n \"\"\"\n Paste the mask prediction to the original image.\n \"\"\"\n\n x0, y0, x1, y1 = paddle.split(boxes, 4, axis=1)\n masks = paddle.unsqueeze(masks, [0, 1])\n img_y = paddle.arange(0, im_h, dtype='float32') + 0.5\n img_x = paddle.arange(0, im_w, dtype='float32') + 0.5\n img_y = (img_y - y0) / (y1 - y0) * 2 - 1\n img_x = (img_x - x0) / (x1 - x0) * 2 - 1\n img_x = paddle.unsqueeze(img_x, [1])\n img_y = paddle.unsqueeze(img_y, [2])\n N = boxes.shape[0]\n\n gx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]])\n gy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]])\n grid = paddle.stack([gx, gy], axis=3)\n img_masks = F.grid_sample(masks, grid, align_corners=False)\n return img_masks[:, 0]\n\n def __call__(self, mask_out, bboxes, bbox_num, origin_shape):\n \"\"\"\n Decode the mask_out and paste the mask to the origin image.\n\n Args:\n mask_out (Tensor): mask_head output with shape [N, 28, 28].\n bbox_pred (Tensor): The output bboxes with shape [N, 6] after decode\n and NMS, including labels, scores and bboxes.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [1], and is N.\n origin_shape (Tensor): The origin shape of the input image, the tensor\n shape is [N, 2], and each row is [h, w].\n Returns:\n pred_result (Tensor): The final prediction mask results with shape\n [N, h, w] in binary mask style.\n \"\"\"\n num_mask = mask_out.shape[0]\n origin_shape = paddle.cast(origin_shape, 'int32')\n # TODO: support bs > 1 and mask output dtype is bool\n pred_result = paddle.zeros(\n [num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')\n if bbox_num == 1 and bboxes[0][0] == -1:\n return pred_result\n\n # TODO: optimize chunk paste\n pred_result = []\n for i in range(bboxes.shape[0]):\n im_h, im_w = origin_shape[i][0], origin_shape[i][1]\n pred_mask = self.paste_mask(mask_out[i], bboxes[i:i + 1, 2:], im_h,\n im_w)\n pred_mask = pred_mask >= self.binary_thresh\n pred_mask = paddle.cast(pred_mask, 'int32')\n pred_result.append(pred_mask)\n pred_result = paddle.concat(pred_result)\n return pred_result\n\n\n@register\nclass FCOSPostProcess(object):\n __inject__ = ['decode', 'nms']\n\n def __init__(self, decode=None, nms=None):\n super(FCOSPostProcess, self).__init__()\n self.decode = decode\n self.nms = nms\n\n def __call__(self, fcos_head_outs, scale_factor):\n \"\"\"\n Decode the bbox and do NMS in FCOS.\n \"\"\"\n locations, cls_logits, bboxes_reg, centerness = fcos_head_outs\n bboxes, score = self.decode(locations, cls_logits, bboxes_reg,\n centerness, scale_factor)\n bbox_pred, bbox_num, _ = self.nms(bboxes, score)\n return bbox_pred, bbox_num\n\n\n@register\nclass S2ANetBBoxPostProcess(nn.Layer):\n __shared__ = ['num_classes']\n __inject__ = ['nms']\n\n def __init__(self, num_classes=15, nms_pre=2000, min_bbox_size=0,\n nms=None):\n super(S2ANetBBoxPostProcess, self).__init__()\n self.num_classes = num_classes\n self.nms_pre = paddle.to_tensor(nms_pre)\n self.min_bbox_size = min_bbox_size\n self.nms = nms\n self.origin_shape_list = []\n self.fake_pred_cls_score_bbox = paddle.to_tensor(\n np.array(\n [[-1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],\n dtype='float32'))\n self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))\n\n def forward(self, pred_scores, pred_bboxes):\n \"\"\"\n pred_scores : 
[N, M] score\n pred_bboxes : [N, 5] xc, yc, w, h, a\n im_shape : [N, 2] im_shape\n scale_factor : [N, 2] scale_factor\n \"\"\"\n pred_ploys0 = rbox2poly(pred_bboxes)\n pred_ploys = paddle.unsqueeze(pred_ploys0, axis=0)\n\n # pred_scores [NA, 16] --> [16, NA]\n pred_scores0 = paddle.transpose(pred_scores, [1, 0])\n pred_scores = paddle.unsqueeze(pred_scores0, axis=0)\n\n pred_cls_score_bbox, bbox_num, _ = self.nms(pred_ploys, pred_scores,\n self.num_classes)\n # Prevent empty bbox_pred from decode or NMS.\n # Bboxes and score before NMS may be empty due to the score threshold.\n if pred_cls_score_bbox.shape[0] <= 0 or pred_cls_score_bbox.shape[\n 1] <= 1:\n pred_cls_score_bbox = self.fake_pred_cls_score_bbox\n bbox_num = self.fake_bbox_num\n\n pred_cls_score_bbox = paddle.reshape(pred_cls_score_bbox, [-1, 10])\n return pred_cls_score_bbox, bbox_num\n\n def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):\n \"\"\"\n Rescale, clip and filter the bbox from the output of NMS to\n get final prediction.\n Args:\n bboxes(Tensor): bboxes [N, 10]\n bbox_num(Tensor): bbox_num\n im_shape(Tensor): [1 2]\n scale_factor(Tensor): [1 2]\n Returns:\n bbox_pred(Tensor): The output is the prediction with shape [N, 8]\n including labels, scores and bboxes. The size of\n bboxes are corresponding to the original image.\n \"\"\"\n origin_shape = paddle.floor(im_shape / scale_factor + 0.5)\n\n origin_shape_list = []\n scale_factor_list = []\n # scale_factor: scale_y, scale_x\n for i in range(bbox_num.shape[0]):\n expand_shape = paddle.expand(origin_shape[i:i + 1, :],\n [bbox_num[i], 2])\n scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]\n scale = paddle.concat([\n scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x,\n scale_y\n ])\n expand_scale = paddle.expand(scale, [bbox_num[i], 8])\n origin_shape_list.append(expand_shape)\n scale_factor_list.append(expand_scale)\n\n origin_shape_list = paddle.concat(origin_shape_list)\n scale_factor_list = paddle.concat(scale_factor_list)\n\n # bboxes: [N, 10], label, score, bbox\n pred_label_score = bboxes[:, 0:2]\n pred_bbox = bboxes[:, 2:]\n\n # rescale bbox to original image\n pred_bbox = pred_bbox.reshape([-1, 8])\n scaled_bbox = pred_bbox / scale_factor_list\n origin_h = origin_shape_list[:, 0]\n origin_w = origin_shape_list[:, 1]\n\n bboxes = scaled_bbox\n zeros = paddle.zeros_like(origin_h)\n x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros)\n y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros)\n x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros)\n y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros)\n x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros)\n y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros)\n x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros)\n y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros)\n pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1)\n pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1)\n return pred_result\n\n\n@register\nclass JDEBBoxPostProcess(nn.Layer):\n __shared__ = ['num_classes']\n __inject__ = ['decode', 'nms']\n\n def __init__(self, num_classes=1, decode=None, nms=None, return_idx=True):\n super(JDEBBoxPostProcess, self).__init__()\n self.num_classes = num_classes\n self.decode = decode\n self.nms = nms\n self.return_idx = return_idx\n\n self.fake_bbox_pred = paddle.to_tensor(\n np.array(\n [[-1, 0.0, 0.0, 0.0, 0.0, 
0.0]], dtype='float32'))\n self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))\n self.fake_nms_keep_idx = paddle.to_tensor(\n np.array(\n [[0]], dtype='int32'))\n\n self.fake_yolo_boxes_out = paddle.to_tensor(\n np.array(\n [[[0.0, 0.0, 0.0, 0.0]]], dtype='float32'))\n self.fake_yolo_scores_out = paddle.to_tensor(\n np.array(\n [[[0.0]]], dtype='float32'))\n self.fake_boxes_idx = paddle.to_tensor(np.array([[0]], dtype='int64'))\n\n def forward(self, head_out, anchors):\n \"\"\"\n Decode the bbox and do NMS for JDE model.\n\n Args:\n head_out (list): Bbox_pred and cls_prob of bbox_head output.\n anchors (list): Anchors of JDE model.\n\n Returns:\n boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'.\n bbox_pred (Tensor): The output is the prediction with shape [N, 6]\n including labels, scores and bboxes.\n bbox_num (Tensor): The number of prediction of each batch with shape [N].\n nms_keep_idx (Tensor): The index of kept bboxes after NMS.\n \"\"\"\n boxes_idx, yolo_boxes_scores = self.decode(head_out, anchors)\n\n if len(boxes_idx) == 0:\n boxes_idx = self.fake_boxes_idx\n yolo_boxes_out = self.fake_yolo_boxes_out\n yolo_scores_out = self.fake_yolo_scores_out\n else:\n yolo_boxes = paddle.gather_nd(yolo_boxes_scores, boxes_idx)\n # TODO: only support bs=1 now\n yolo_boxes_out = paddle.reshape(\n yolo_boxes[:, :4], shape=[1, len(boxes_idx), 4])\n yolo_scores_out = paddle.reshape(\n yolo_boxes[:, 4:5], shape=[1, 1, len(boxes_idx)])\n boxes_idx = boxes_idx[:, 1:]\n\n if self.return_idx:\n bbox_pred, bbox_num, nms_keep_idx = self.nms(\n yolo_boxes_out, yolo_scores_out, self.num_classes)\n if bbox_pred.shape[0] == 0:\n bbox_pred = self.fake_bbox_pred\n bbox_num = self.fake_bbox_num\n nms_keep_idx = self.fake_nms_keep_idx\n return boxes_idx, bbox_pred, bbox_num, nms_keep_idx\n else:\n bbox_pred, bbox_num, _ = self.nms(yolo_boxes_out, yolo_scores_out,\n self.num_classes)\n if bbox_pred.shape[0] == 0:\n bbox_pred = self.fake_bbox_pred\n bbox_num = self.fake_bbox_num\n return _, bbox_pred, bbox_num, _\n\n\n@register\nclass CenterNetPostProcess(TTFBox):\n \"\"\"\n Postprocess the model outputs to get final prediction:\n 1. Do NMS for heatmap to get top `max_per_img` bboxes.\n 2. Decode bboxes using center offset and box size.\n 3. 
Rescale decoded bboxes reference to the origin image shape.\n\n Args:\n max_per_img(int): the maximum number of predicted objects in a image,\n 500 by default.\n down_ratio(int): the down ratio from images to heatmap, 4 by default.\n regress_ltrb (bool): whether to regress left/top/right/bottom or\n width/height for a box, true by default.\n for_mot (bool): whether return other features used in tracking model.\n \"\"\"\n\n __shared__ = ['down_ratio', 'for_mot']\n\n def __init__(self,\n max_per_img=500,\n down_ratio=4,\n regress_ltrb=True,\n for_mot=False):\n super(TTFBox, self).__init__()\n self.max_per_img = max_per_img\n self.down_ratio = down_ratio\n self.regress_ltrb = regress_ltrb\n self.for_mot = for_mot\n\n def __call__(self, hm, wh, reg, im_shape, scale_factor):\n heat = self._simple_nms(hm)\n scores, inds, topk_clses, ys, xs = self._topk(heat)\n scores = scores.unsqueeze(1)\n clses = topk_clses.unsqueeze(1)\n\n reg_t = paddle.transpose(reg, [0, 2, 3, 1])\n # Like TTFBox, batch size is 1.\n # TODO: support batch size > 1\n reg = paddle.reshape(reg_t, [-1, reg_t.shape[-1]])\n reg = paddle.gather(reg, inds)\n xs = paddle.cast(xs, 'float32')\n ys = paddle.cast(ys, 'float32')\n xs = xs + reg[:, 0:1]\n ys = ys + reg[:, 1:2]\n\n wh_t = paddle.transpose(wh, [0, 2, 3, 1])\n wh = paddle.reshape(wh_t, [-1, wh_t.shape[-1]])\n wh = paddle.gather(wh, inds)\n\n if self.regress_ltrb:\n x1 = xs - wh[:, 0:1]\n y1 = ys - wh[:, 1:2]\n x2 = xs + wh[:, 2:3]\n y2 = ys + wh[:, 3:4]\n else:\n x1 = xs - wh[:, 0:1] / 2\n y1 = ys - wh[:, 1:2] / 2\n x2 = xs + wh[:, 0:1] / 2\n y2 = ys + wh[:, 1:2] / 2\n\n n, c, feat_h, feat_w = hm.shape[:]\n padw = (feat_w * self.down_ratio - im_shape[0, 1]) / 2\n padh = (feat_h * self.down_ratio - im_shape[0, 0]) / 2\n x1 = x1 * self.down_ratio\n y1 = y1 * self.down_ratio\n x2 = x2 * self.down_ratio\n y2 = y2 * self.down_ratio\n\n x1 = x1 - padw\n y1 = y1 - padh\n x2 = x2 - padw\n y2 = y2 - padh\n\n bboxes = paddle.concat([x1, y1, x2, y2], axis=1)\n scale_y = scale_factor[:, 0:1]\n scale_x = scale_factor[:, 1:2]\n scale_expand = paddle.concat(\n [scale_x, scale_y, scale_x, scale_y], axis=1)\n boxes_shape = bboxes.shape[:]\n scale_expand = paddle.expand(scale_expand, shape=boxes_shape)\n bboxes = paddle.divide(bboxes, scale_expand)\n if self.for_mot:\n results = paddle.concat([bboxes, scores, clses], axis=1)\n return results, inds, topk_clses\n else:\n results = paddle.concat([clses, scores, bboxes], axis=1)\n return results, paddle.shape(results)[0:1], topk_clses\n\n\n@register\nclass DETRBBoxPostProcess(object):\n __shared__ = ['num_classes', 'use_focal_loss']\n __inject__ = []\n\n def __init__(self,\n num_classes=80,\n num_top_queries=100,\n use_focal_loss=False):\n super(DETRBBoxPostProcess, self).__init__()\n self.num_classes = num_classes\n self.num_top_queries = num_top_queries\n self.use_focal_loss = use_focal_loss\n\n def __call__(self, head_out, im_shape, scale_factor):\n \"\"\"\n Decode the bbox.\n\n Args:\n head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.\n im_shape (Tensor): The shape of the input image.\n scale_factor (Tensor): The scale factor of the input image.\n Returns:\n bbox_pred (Tensor): The output prediction with shape [N, 6], including\n labels, scores and bboxes. 
The size of bboxes are corresponding\n to the input image, the bboxes may be used in other branch.\n bbox_num (Tensor): The number of prediction boxes of each batch with\n shape [bs], and is N.\n \"\"\"\n bboxes, logits, masks = head_out\n\n bbox_pred = bbox_cxcywh_to_xyxy(bboxes)\n origin_shape = paddle.floor(im_shape / scale_factor + 0.5)\n img_h, img_w = origin_shape.unbind(1)\n origin_shape = paddle.stack(\n [img_w, img_h, img_w, img_h], axis=-1).unsqueeze(0)\n bbox_pred *= origin_shape\n\n scores = F.sigmoid(logits) if self.use_focal_loss else F.softmax(\n logits)[:, :, :-1]\n\n if not self.use_focal_loss:\n scores, labels = scores.max(-1), scores.argmax(-1)\n if scores.shape[1] > self.num_top_queries:\n scores, index = paddle.topk(\n scores, self.num_top_queries, axis=-1)\n labels = paddle.stack(\n [paddle.gather(l, i) for l, i in zip(labels, index)])\n bbox_pred = paddle.stack(\n [paddle.gather(b, i) for b, i in zip(bbox_pred, index)])\n else:\n scores, index = paddle.topk(\n scores.reshape([logits.shape[0], -1]),\n self.num_top_queries,\n axis=-1)\n labels = index % logits.shape[2]\n index = index // logits.shape[2]\n bbox_pred = paddle.stack(\n [paddle.gather(b, i) for b, i in zip(bbox_pred, index)])\n\n bbox_pred = paddle.concat(\n [\n labels.unsqueeze(-1).astype('float32'), scores.unsqueeze(-1),\n bbox_pred\n ],\n axis=-1)\n bbox_num = paddle.to_tensor(\n bbox_pred.shape[1], dtype='int32').tile([bbox_pred.shape[0]])\n bbox_pred = bbox_pred.reshape([-1, 6])\n return bbox_pred, bbox_num\n\n\n@register\nclass SparsePostProcess(object):\n __shared__ = ['num_classes']\n\n def __init__(self, num_proposals, num_classes=80):\n super(SparsePostProcess, self).__init__()\n self.num_classes = num_classes\n self.num_proposals = num_proposals\n\n def __call__(self, box_cls, box_pred, scale_factor_wh, img_whwh):\n \"\"\"\n Arguments:\n box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).\n The tensor predicts the classification probability for each proposal.\n box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).\n The tensor predicts 4-vector (x,y,w,h) box\n regression values for every proposal\n scale_factor_wh (Tensor): tensors of shape [batch_size, 2] the scalor of per img\n img_whwh (Tensor): tensors of shape [batch_size, 4]\n Returns:\n bbox_pred (Tensor): tensors of shape [num_boxes, 6] Each row has 6 values:\n [label, confidence, xmin, ymin, xmax, ymax]\n bbox_num (Tensor): tensors of shape [batch_size] the number of RoIs in each image.\n \"\"\"\n assert len(box_cls) == len(scale_factor_wh) == len(img_whwh)\n\n img_wh = img_whwh[:, :2]\n\n scores = F.sigmoid(box_cls)\n labels = paddle.arange(0, self.num_classes). 
\\\n unsqueeze(0).tile([self.num_proposals, 1]).flatten(start_axis=0, stop_axis=1)\n\n classes_all = []\n scores_all = []\n boxes_all = []\n for i, (scores_per_image,\n box_pred_per_image) in enumerate(zip(scores, box_pred)):\n\n scores_per_image, topk_indices = scores_per_image.flatten(\n 0, 1).topk(\n self.num_proposals, sorted=False)\n labels_per_image = paddle.gather(labels, topk_indices, axis=0)\n\n box_pred_per_image = box_pred_per_image.reshape([-1, 1, 4]).tile(\n [1, self.num_classes, 1]).reshape([-1, 4])\n box_pred_per_image = paddle.gather(\n box_pred_per_image, topk_indices, axis=0)\n\n classes_all.append(labels_per_image)\n scores_all.append(scores_per_image)\n boxes_all.append(box_pred_per_image)\n\n bbox_num = paddle.zeros([len(scale_factor_wh)], dtype=\"int32\")\n boxes_final = []\n\n for i in range(len(scale_factor_wh)):\n classes = classes_all[i]\n boxes = boxes_all[i]\n scores = scores_all[i]\n\n boxes[:, 0::2] = paddle.clip(\n boxes[:, 0::2], min=0,\n max=img_wh[i][0]) / scale_factor_wh[i][0]\n boxes[:, 1::2] = paddle.clip(\n boxes[:, 1::2], min=0,\n max=img_wh[i][1]) / scale_factor_wh[i][1]\n boxes_w, boxes_h = (boxes[:, 2] - boxes[:, 0]).numpy(), (\n boxes[:, 3] - boxes[:, 1]).numpy()\n\n keep = (boxes_w > 1.) & (boxes_h > 1.)\n\n if (keep.sum() == 0):\n bboxes = paddle.zeros([1, 6]).astype(\"float32\")\n else:\n boxes = paddle.to_tensor(boxes.numpy()[keep]).astype(\"float32\")\n classes = paddle.to_tensor(classes.numpy()[keep]).astype(\n \"float32\").unsqueeze(-1)\n scores = paddle.to_tensor(scores.numpy()[keep]).astype(\n \"float32\").unsqueeze(-1)\n\n bboxes = paddle.concat([classes, scores, boxes], axis=-1)\n\n boxes_final.append(bboxes)\n bbox_num[i] = bboxes.shape[0]\n\n bbox_pred = paddle.concat(boxes_final)\n return bbox_pred, bbox_num\n\n\ndef nms(dets, thresh):\n \"\"\"Apply classic DPM-style greedy NMS.\"\"\"\n if dets.shape[0] == 0:\n return dets[[], :]\n scores = dets[:, 0]\n x1 = dets[:, 1]\n y1 = dets[:, 2]\n x2 = dets[:, 3]\n y2 = dets[:, 4]\n\n areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n order = scores.argsort()[::-1]\n\n ndets = dets.shape[0]\n suppressed = np.zeros((ndets), dtype=np.int)\n\n # nominal indices\n # _i, _j\n # sorted indices\n # i, j\n # temp variables for box i's (the box currently under consideration)\n # ix1, iy1, ix2, iy2, iarea\n\n # variables for computing overlap with box j (lower scoring box)\n # xx1, yy1, xx2, yy2\n # w, h\n # inter, ovr\n\n for _i in range(ndets):\n i = order[_i]\n if suppressed[i] == 1:\n continue\n ix1 = x1[i]\n iy1 = y1[i]\n ix2 = x2[i]\n iy2 = y2[i]\n iarea = areas[i]\n for _j in range(_i + 1, ndets):\n j = order[_j]\n if suppressed[j] == 1:\n continue\n xx1 = max(ix1, x1[j])\n yy1 = max(iy1, y1[j])\n xx2 = min(ix2, x2[j])\n yy2 = min(iy2, y2[j])\n w = max(0.0, xx2 - xx1 + 1)\n h = max(0.0, yy2 - yy1 + 1)\n inter = w * h\n ovr = inter / (iarea + areas[j] - inter)\n if ovr >= thresh:\n suppressed[j] = 1\n keep = np.where(suppressed == 0)[0]\n dets = dets[keep, :]\n return dets\n",
"#!/usr/bin/env python\n# coding: utf-8\n# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cv2\nimport uuid\nimport json\nimport os\nimport os.path as osp\nimport shutil\nimport numpy as np\nimport PIL.Image\nfrom .base import MyEncoder, is_pic, get_encoding\nimport math\n\n\nclass X2Seg(object):\n def __init__(self):\n self.labels2ids = {'_background_': 0}\n\n def shapes_to_label(self, img_shape, shapes, label_name_to_value):\n # 该函数基于https://github.com/wkentaro/labelme/blob/master/labelme/utils/shape.py实现。\n def shape_to_mask(img_shape,\n points,\n shape_type=None,\n line_width=10,\n point_size=5):\n mask = np.zeros(img_shape[:2], dtype=np.uint8)\n mask = PIL.Image.fromarray(mask)\n draw = PIL.ImageDraw.Draw(mask)\n xy = [tuple(point) for point in points]\n if shape_type == 'circle':\n assert len(\n xy) == 2, 'Shape of shape_type=circle must have 2 points'\n (cx, cy), (px, py) = xy\n d = math.sqrt((cx - px)**2 + (cy - py)**2)\n draw.ellipse(\n [cx - d, cy - d, cx + d, cy + d], outline=1, fill=1)\n elif shape_type == 'rectangle':\n assert len(\n xy) == 2, 'Shape of shape_type=rectangle must have 2 points'\n draw.rectangle(xy, outline=1, fill=1)\n elif shape_type == 'line':\n assert len(\n xy) == 2, 'Shape of shape_type=line must have 2 points'\n draw.line(xy=xy, fill=1, width=line_width)\n elif shape_type == 'linestrip':\n draw.line(xy=xy, fill=1, width=line_width)\n elif shape_type == 'point':\n assert len(\n xy) == 1, 'Shape of shape_type=point must have 1 points'\n cx, cy = xy[0]\n r = point_size\n draw.ellipse(\n [cx - r, cy - r, cx + r, cy + r], outline=1, fill=1)\n else:\n assert len(xy) > 2, 'Polygon must have points more than 2'\n draw.polygon(xy=xy, outline=1, fill=1)\n mask = np.array(mask, dtype=bool)\n return mask\n\n cls = np.zeros(img_shape[:2], dtype=np.int32)\n ins = np.zeros_like(cls)\n instances = []\n for shape in shapes:\n points = shape['points']\n label = shape['label']\n group_id = shape.get('group_id')\n if group_id is None:\n group_id = uuid.uuid1()\n shape_type = shape.get('shape_type', None)\n\n cls_name = label\n instance = (cls_name, group_id)\n\n if instance not in instances:\n instances.append(instance)\n ins_id = instances.index(instance) + 1\n cls_id = label_name_to_value[cls_name]\n mask = shape_to_mask(img_shape[:2], points, shape_type)\n cls[mask] = cls_id\n ins[mask] = ins_id\n return cls, ins\n\n def get_color_map_list(self, num_classes):\n color_map = num_classes * [0, 0, 0]\n for i in range(0, num_classes):\n j = 0\n lab = i\n while lab:\n color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))\n color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))\n color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))\n j += 1\n lab >>= 3\n return color_map\n\n def convert(self, image_dir, json_dir, dataset_save_dir):\n \"\"\"转换。\n Args:\n image_dir (str): 图像文件存放的路径。\n json_dir (str): 与每张图像对应的json文件的存放路径。\n dataset_save_dir (str): 转换后数据集存放路径。\n \"\"\"\n assert osp.exists(image_dir), \"The image 
folder does not exist!\"\n assert osp.exists(json_dir), \"The json folder does not exist!\"\n if not osp.exists(dataset_save_dir):\n os.makedirs(dataset_save_dir)\n # Convert the image files.\n new_image_dir = osp.join(dataset_save_dir, \"JPEGImages\")\n if osp.exists(new_image_dir):\n raise Exception(\n \"The directory {} is already exist, please remove the directory first\".\n format(new_image_dir))\n os.makedirs(new_image_dir)\n for img_name in os.listdir(image_dir):\n if is_pic(img_name):\n shutil.copyfile(\n osp.join(image_dir, img_name),\n osp.join(new_image_dir, img_name))\n # Convert the json files.\n png_dir = osp.join(dataset_save_dir, \"Annotations\")\n if osp.exists(png_dir):\n shutil.rmtree(png_dir)\n os.makedirs(png_dir)\n self.get_labels2ids(new_image_dir, json_dir)\n self.json2png(new_image_dir, json_dir, png_dir)\n # Generate the labels.txt\n ids2labels = {v: k for k, v in self.labels2ids.items()}\n with open(osp.join(dataset_save_dir, 'labels.txt'), 'w') as fw:\n for i in range(len(ids2labels)):\n fw.write(ids2labels[i] + '\\n')\n\n\nclass JingLing2Seg(X2Seg):\n \"\"\"将使用标注精灵标注的数据集转换为Seg数据集。\n \"\"\"\n\n def __init__(self):\n super(JingLing2Seg, self).__init__()\n\n def get_labels2ids(self, image_dir, json_dir):\n for img_name in os.listdir(image_dir):\n img_name_part = osp.splitext(img_name)[0]\n json_file = osp.join(json_dir, img_name_part + \".json\")\n if not osp.exists(json_file):\n os.remove(osp.join(image_dir, img_name))\n continue\n with open(json_file, mode=\"r\", \\\n encoding=get_encoding(json_file)) as j:\n json_info = json.load(j)\n if 'outputs' in json_info:\n for output in json_info['outputs']['object']:\n cls_name = output['name']\n if cls_name not in self.labels2ids:\n self.labels2ids[cls_name] = len(self.labels2ids)\n\n def json2png(self, image_dir, json_dir, png_dir):\n color_map = self.get_color_map_list(256)\n for img_name in os.listdir(image_dir):\n img_name_part = osp.splitext(img_name)[0]\n json_file = osp.join(json_dir, img_name_part + \".json\")\n if not osp.exists(json_file):\n os.remove(osp.join(image_dir, img_name))\n continue\n with open(json_file, mode=\"r\", \\\n encoding=get_encoding(json_file)) as j:\n json_info = json.load(j)\n data_shapes = []\n if 'outputs' in json_info:\n for output in json_info['outputs']['object']:\n if 'polygon' in output.keys():\n polygon = output['polygon']\n name = output['name']\n points = []\n for i in range(1, int(len(polygon) / 2) + 1):\n points.append([\n polygon['x' + str(i)], polygon['y' + str(\n i)]\n ])\n shape = {\n 'label': name,\n 'points': points,\n 'shape_type': 'polygon'\n }\n data_shapes.append(shape)\n if 'size' not in json_info:\n continue\n img_shape = (json_info['size']['height'],\n json_info['size']['width'],\n json_info['size']['depth'])\n lbl, _ = self.shapes_to_label(\n img_shape=img_shape,\n shapes=data_shapes,\n label_name_to_value=self.labels2ids, )\n out_png_file = osp.join(png_dir, img_name_part + '.png')\n if lbl.min() >= 0 and lbl.max() <= 255:\n lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode='P')\n lbl_pil.putpalette(color_map)\n lbl_pil.save(out_png_file)\n else:\n raise ValueError(\n '[%s] Cannot save the pixel-wise class label as PNG. '\n 'Please consider using the .npy format.' 
% out_png_file)\n\n\nclass LabelMe2Seg(X2Seg):\n \"\"\"将使用LabelMe标注的数据集转换为Seg数据集。\n \"\"\"\n\n def __init__(self):\n super(LabelMe2Seg, self).__init__()\n\n def get_labels2ids(self, image_dir, json_dir):\n for img_name in os.listdir(image_dir):\n img_name_part = osp.splitext(img_name)[0]\n json_file = osp.join(json_dir, img_name_part + \".json\")\n if not osp.exists(json_file):\n os.remove(osp.join(image_dir, img_name))\n continue\n with open(json_file, mode=\"r\", \\\n encoding=get_encoding(json_file)) as j:\n json_info = json.load(j)\n for shape in json_info['shapes']:\n cls_name = shape['label']\n if cls_name not in self.labels2ids:\n self.labels2ids[cls_name] = len(self.labels2ids)\n\n def json2png(self, image_dir, json_dir, png_dir):\n color_map = self.get_color_map_list(256)\n for img_name in os.listdir(image_dir):\n img_name_part = osp.splitext(img_name)[0]\n json_file = osp.join(json_dir, img_name_part + \".json\")\n if not osp.exists(json_file):\n os.remove(osp.join(image_dir, img_name))\n continue\n img_file = osp.join(image_dir, img_name)\n img = np.asarray(PIL.Image.open(img_file))\n with open(json_file, mode=\"r\", \\\n encoding=get_encoding(json_file)) as j:\n json_info = json.load(j)\n lbl, _ = self.shapes_to_label(\n img_shape=img.shape,\n shapes=json_info['shapes'],\n label_name_to_value=self.labels2ids, )\n out_png_file = osp.join(png_dir, img_name_part + '.png')\n if lbl.min() >= 0 and lbl.max() <= 255:\n lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode='P')\n lbl_pil.putpalette(color_map)\n lbl_pil.save(out_png_file)\n else:\n raise ValueError(\n '[%s] Cannot save the pixel-wise class label as PNG. '\n 'Please consider using the .npy format.' % out_png_file)\n\n\nclass EasyData2Seg(X2Seg):\n \"\"\"将使用EasyData标注的分割数据集转换为Seg数据集。\n \"\"\"\n\n def __init__(self):\n super(EasyData2Seg, self).__init__()\n\n def get_labels2ids(self, image_dir, json_dir):\n for img_name in os.listdir(image_dir):\n img_name_part = osp.splitext(img_name)[0]\n json_file = osp.join(json_dir, img_name_part + \".json\")\n if not osp.exists(json_file):\n os.remove(osp.join(image_dir, img_name))\n continue\n with open(json_file, mode=\"r\", \\\n encoding=get_encoding(json_file)) as j:\n json_info = json.load(j)\n for shape in json_info[\"labels\"]:\n cls_name = shape['name']\n if cls_name not in self.labels2ids:\n self.labels2ids[cls_name] = len(self.labels2ids)\n\n def mask2polygon(self, mask, label):\n contours, hierarchy = cv2.findContours(\n (mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n segmentation = []\n for contour in contours:\n contour_list = contour.flatten().tolist()\n if len(contour_list) > 4:\n points = []\n for i in range(0, len(contour_list), 2):\n points.append([contour_list[i], contour_list[i + 1]])\n shape = {\n 'label': label,\n 'points': points,\n 'shape_type': 'polygon'\n }\n segmentation.append(shape)\n return segmentation\n\n def json2png(self, image_dir, json_dir, png_dir):\n from pycocotools.mask import decode\n color_map = self.get_color_map_list(256)\n for img_name in os.listdir(image_dir):\n img_name_part = osp.splitext(img_name)[0]\n json_file = osp.join(json_dir, img_name_part + \".json\")\n if not osp.exists(json_file):\n os.remove(osp.join(image_dir, img_name))\n continue\n img_file = osp.join(image_dir, img_name)\n img = np.asarray(PIL.Image.open(img_file))\n img_h = img.shape[0]\n img_w = img.shape[1]\n with open(json_file, mode=\"r\", \\\n encoding=get_encoding(json_file)) as j:\n json_info = json.load(j)\n data_shapes = []\n for shape in 
json_info['labels']:\n mask_dict = {}\n mask_dict['size'] = [img_h, img_w]\n mask_dict['counts'] = shape['mask'].encode()\n mask = decode(mask_dict)\n polygon = self.mask2polygon(mask, shape[\"name\"])\n data_shapes.extend(polygon)\n lbl, _ = self.shapes_to_label(\n img_shape=img.shape,\n shapes=data_shapes,\n label_name_to_value=self.labels2ids, )\n out_png_file = osp.join(png_dir, img_name_part + '.png')\n if lbl.min() >= 0 and lbl.max() <= 255:\n lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode='P')\n lbl_pil.putpalette(color_map)\n lbl_pil.save(out_png_file)\n else:\n raise ValueError(\n '[%s] Cannot save the pixel-wise class label as PNG. '\n 'Please consider using the .npy format.' % out_png_file)\n"
] | [
[
"numpy.cumsum"
],
[
"numpy.ones_like",
"numpy.asarray",
"scipy.sparse.csr_matrix",
"numpy.transpose",
"numpy.array",
"numpy.zeros"
],
[
"numpy.ascontiguousarray",
"numpy.asarray"
],
[
"numpy.array",
"numpy.zeros",
"numpy.where"
],
[
"numpy.array",
"numpy.zeros",
"numpy.zeros_like"
]
] |
lucasmtz/ACAR-Net | [
"08a224625f04bbf595baaeb1c79ec491642e0059"
] | [
"models/heads/linear.py"
] | [
"import torch\nimport torch.nn as nn\nimport torchvision\n\n__all__ = [\"linear\"]\n\n\nclass LinearHead(nn.Module):\n def __init__(self, width, roi_spatial=7, num_classes=60, dropout=0.0, bias=False):\n super().__init__()\n\n self.roi_spatial = roi_spatial\n self.roi_maxpool = nn.MaxPool2d(roi_spatial)\n\n self.fc = nn.Linear(width, num_classes, bias=bias)\n\n if dropout > 0:\n self.dp = nn.Dropout(dropout)\n else:\n self.dp = None\n\n # data: features, rois\n # returns: outputs\n def forward(self, data):\n if not isinstance(data[\"features\"], list):\n features = [data[\"features\"]]\n else:\n features = data[\"features\"]\n\n roi_features = []\n for f in features:\n sp = f.shape\n h, w = sp[3:]\n feats = nn.AdaptiveAvgPool3d((1, h, w))(f).view(-1, sp[1], h, w)\n\n rois = data[\"rois\"].clone()\n rois[:, 1] = rois[:, 1] * w\n rois[:, 2] = rois[:, 2] * h\n rois[:, 3] = rois[:, 3] * w\n rois[:, 4] = rois[:, 4] * h\n rois = rois.detach()\n roi_feats = torchvision.ops.roi_align(feats, rois, (self.roi_spatial, self.roi_spatial))\n roi_feats = self.roi_maxpool(roi_feats).view(-1, sp[1])\n\n roi_features.append(roi_feats)\n\n roi_features = torch.cat(roi_features, dim=1)\n if self.dp is not None:\n roi_features = self.dp(roi_features)\n outputs = self.fc(roi_features)\n\n return {\"outputs\": outputs}\n\n\ndef linear(**kwargs):\n model = LinearHead(**kwargs)\n return model\n"
] | [
[
"torch.nn.Dropout",
"torch.cat",
"torch.nn.AdaptiveAvgPool3d",
"torch.nn.Linear",
"torch.nn.MaxPool2d"
]
] |
873040/Abhishek | [
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a",
"2ddd716e66bc5cc6e6f0787508dd07da0e02e75a"
] | [
"research/delf/delf/python/examples/detector.py",
"official/nlp/transformer/transformer_main.py",
"official/nlp/transformer/utils/metrics.py",
"research/struct2depth/util.py",
"research/slim/datasets/download_and_convert_flowers.py",
"research/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.py",
"official/vision/detection/executor/detection_executor.py",
"official/nlp/xlnet/run_classifier.py",
"research/skip_thoughts/skip_thoughts/skip_thoughts_encoder.py",
"official/benchmark/bert_benchmark.py",
"official/nlp/data/create_pretraining_data.py",
"research/maskgan/model_utils/variable_mapping.py",
"research/compression/entropy_coder/lib/blocks_lstm_test.py",
"research/object_detection/predictors/mask_rcnn_keras_box_predictor_test.py",
"research/brain_coder/single_task/ga_train_test.py",
"research/cognitive_mapping_and_planning/scripts/script_plot_trajectory.py",
"research/fivo/experimental/data.py",
"research/object_detection/models/ssd_mobilenet_v2_fpn_feature_extractor.py",
"research/fivo/fivo/bounds.py",
"research/slim/nets/inception_v2_test.py",
"research/object_detection/utils/label_map_util_test.py",
"research/pcl_rl/replay_buffer.py",
"research/ptn/nets/perspective_transform.py",
"research/object_detection/core/keypoint_ops_test.py",
"research/object_detection/predictors/heads/keras_class_head_test.py",
"research/rebar/download_data.py",
"research/rebar/rebar.py",
"research/deep_speech/deep_speech.py",
"research/fivo/run_fivo.py",
"research/vid2depth/ops/icp_grad.py",
"research/domain_adaptation/pixel_domain_adaptation/pixelda_task_towers.py",
"research/attention_ocr/python/common_flags.py",
"research/object_detection/builders/optimizer_builder.py",
"research/struct2depth/nets.py",
"research/object_detection/models/ssd_mobilenet_v2_keras_feature_extractor.py"
] | [
"# Copyright 2019 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Module to construct object detector function.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef MakeDetector(sess, model_dir, import_scope=None):\n \"\"\"Creates a function to detect objects in an image.\n\n Args:\n sess: TensorFlow session to use.\n model_dir: Directory where SavedModel is located.\n import_scope: Optional scope to use for model.\n\n Returns:\n Function that receives an image and returns detection results.\n \"\"\"\n tf.saved_model.loader.load(\n sess, [tf.saved_model.tag_constants.SERVING],\n model_dir,\n import_scope=import_scope)\n import_scope_prefix = import_scope + '/' if import_scope is not None else ''\n input_images = sess.graph.get_tensor_by_name('%sinput_images:0' %\n import_scope_prefix)\n boxes = sess.graph.get_tensor_by_name('%sdetection_boxes:0' %\n import_scope_prefix)\n scores = sess.graph.get_tensor_by_name('%sdetection_scores:0' %\n import_scope_prefix)\n class_indices = sess.graph.get_tensor_by_name('%sdetection_classes:0' %\n import_scope_prefix)\n\n def DetectorFn(images):\n \"\"\"Receives an image and returns detected boxes.\n\n Args:\n images: Uint8 array with shape (batch, height, width 3) containing a batch\n of RGB images.\n\n Returns:\n Tuple (boxes, scores, class_indices).\n \"\"\"\n return sess.run([boxes, scores, class_indices],\n feed_dict={input_images: images})\n\n return DetectorFn\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Train and evaluate the Transformer model.\n\nSee README for description of setting the training schedule and evaluating the\nBLEU score.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport tensorflow as tf\n\nfrom official.modeling import performance\nfrom official.nlp.transformer import compute_bleu\nfrom official.nlp.transformer import data_pipeline\nfrom official.nlp.transformer import metrics\nfrom official.nlp.transformer import misc\nfrom official.nlp.transformer import optimizer\nfrom official.nlp.transformer import transformer\nfrom official.nlp.transformer import translate\nfrom official.nlp.transformer.utils import tokenizer\nfrom official.utils.flags import core as flags_core\nfrom official.utils.logs import logger\nfrom official.utils.misc import distribution_utils\nfrom official.utils.misc import keras_utils\n\nINF = int(1e9)\nBLEU_DIR = \"bleu\"\n_SINGLE_SAMPLE = 1\n\n\ndef translate_and_compute_bleu(model,\n params,\n subtokenizer,\n bleu_source,\n bleu_ref,\n distribution_strategy=None):\n \"\"\"Translate file and report the cased and uncased bleu scores.\n\n Args:\n model: A Keras model, used to generate the translations.\n params: A dictionary, containing the translation related parameters.\n subtokenizer: A subtokenizer object, used for encoding and decoding source\n and translated lines.\n bleu_source: A file containing source sentences for translation.\n bleu_ref: A file containing the reference for the translated sentences.\n distribution_strategy: A platform distribution strategy, used for TPU based\n translation.\n\n Returns:\n uncased_score: A float, the case insensitive BLEU score.\n cased_score: A float, the case sensitive BLEU score.\n \"\"\"\n # Create temporary file to store translation.\n tmp = tempfile.NamedTemporaryFile(delete=False)\n tmp_filename = tmp.name\n\n translate.translate_file(\n model,\n params,\n subtokenizer,\n bleu_source,\n output_file=tmp_filename,\n print_all_translations=False,\n distribution_strategy=distribution_strategy)\n\n # Compute uncased and cased bleu scores.\n uncased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, False)\n cased_score = compute_bleu.bleu_wrapper(bleu_ref, tmp_filename, True)\n os.remove(tmp_filename)\n return uncased_score, cased_score\n\n\ndef evaluate_and_log_bleu(model,\n params,\n bleu_source,\n bleu_ref,\n vocab_file,\n distribution_strategy=None):\n \"\"\"Calculate and record the BLEU score.\n\n Args:\n model: A Keras model, used to generate the translations.\n params: A dictionary, containing the translation related parameters.\n bleu_source: A file containing source sentences for translation.\n bleu_ref: A file 
containing the reference for the translated sentences.\n vocab_file: A file containing the vocabulary for translation.\n distribution_strategy: A platform distribution strategy, used for TPU based\n translation.\n\n Returns:\n uncased_score: A float, the case insensitive BLEU score.\n cased_score: A float, the case sensitive BLEU score.\n \"\"\"\n subtokenizer = tokenizer.Subtokenizer(vocab_file)\n\n uncased_score, cased_score = translate_and_compute_bleu(\n model, params, subtokenizer, bleu_source, bleu_ref, distribution_strategy)\n\n logging.info(\"Bleu score (uncased): %s\", uncased_score)\n logging.info(\"Bleu score (cased): %s\", cased_score)\n return uncased_score, cased_score\n\n\nclass TransformerTask(object):\n \"\"\"Main entry of Transformer model.\"\"\"\n\n def __init__(self, flags_obj):\n \"\"\"Init function of TransformerMain.\n\n Args:\n flags_obj: Object containing parsed flag values, i.e., FLAGS.\n\n Raises:\n ValueError: if not using static batch for input data on TPU.\n \"\"\"\n self.flags_obj = flags_obj\n self.predict_model = None\n\n # Add flag-defined parameters to params object\n num_gpus = flags_core.get_num_gpus(flags_obj)\n self.params = params = misc.get_model_params(flags_obj.param_set, num_gpus)\n\n params[\"num_gpus\"] = num_gpus\n params[\"use_ctl\"] = flags_obj.use_ctl\n params[\"data_dir\"] = flags_obj.data_dir\n params[\"model_dir\"] = flags_obj.model_dir\n params[\"static_batch\"] = flags_obj.static_batch\n params[\"max_length\"] = flags_obj.max_length\n params[\"decode_batch_size\"] = flags_obj.decode_batch_size\n params[\"decode_max_length\"] = flags_obj.decode_max_length\n params[\"padded_decode\"] = flags_obj.padded_decode\n params[\"num_parallel_calls\"] = (\n flags_obj.num_parallel_calls or tf.data.experimental.AUTOTUNE)\n\n params[\"use_synthetic_data\"] = flags_obj.use_synthetic_data\n params[\"batch_size\"] = flags_obj.batch_size or params[\"default_batch_size\"]\n params[\"repeat_dataset\"] = None\n params[\"dtype\"] = flags_core.get_tf_dtype(flags_obj)\n params[\"enable_tensorboard\"] = flags_obj.enable_tensorboard\n params[\"enable_metrics_in_training\"] = flags_obj.enable_metrics_in_training\n params[\"steps_between_evals\"] = flags_obj.steps_between_evals\n params[\"enable_checkpointing\"] = flags_obj.enable_checkpointing\n\n self.distribution_strategy = distribution_utils.get_distribution_strategy(\n distribution_strategy=flags_obj.distribution_strategy,\n num_gpus=num_gpus,\n all_reduce_alg=flags_obj.all_reduce_alg,\n num_packs=flags_obj.num_packs,\n tpu_address=flags_obj.tpu or \"\")\n if self.use_tpu:\n params[\"num_replicas\"] = self.distribution_strategy.num_replicas_in_sync\n if not params[\"static_batch\"]:\n raise ValueError(\"TPU requires static batch for input data.\")\n else:\n logging.info(\"Running transformer with num_gpus = %d\", num_gpus)\n\n if self.distribution_strategy:\n logging.info(\"For training, using distribution strategy: %s\",\n self.distribution_strategy)\n else:\n logging.info(\"Not using any distribution strategy.\")\n\n performance.set_mixed_precision_policy(\n params[\"dtype\"],\n flags_core.get_loss_scale(flags_obj, default_for_fp16=\"dynamic\"))\n\n @property\n def use_tpu(self):\n if self.distribution_strategy:\n return isinstance(self.distribution_strategy,\n tf.distribute.experimental.TPUStrategy)\n return False\n\n def train(self):\n \"\"\"Trains the model.\"\"\"\n params = self.params\n flags_obj = self.flags_obj\n # Sets config options.\n 
keras_utils.set_session_config(enable_xla=flags_obj.enable_xla)\n\n _ensure_dir(flags_obj.model_dir)\n with distribution_utils.get_strategy_scope(self.distribution_strategy):\n model = transformer.create_model(params, is_train=True)\n opt = self._create_optimizer()\n\n current_step = 0\n checkpoint = tf.train.Checkpoint(model=model, optimizer=opt)\n latest_checkpoint = tf.train.latest_checkpoint(flags_obj.model_dir)\n if latest_checkpoint:\n checkpoint.restore(latest_checkpoint)\n logging.info(\"Loaded checkpoint %s\", latest_checkpoint)\n current_step = opt.iterations.numpy()\n\n if params[\"use_ctl\"]:\n train_loss_metric = tf.keras.metrics.Mean(\n \"training_loss\", dtype=tf.float32)\n if params[\"enable_tensorboard\"]:\n summary_writer = tf.compat.v2.summary.create_file_writer(\n flags_obj.model_dir)\n else:\n summary_writer = tf.compat.v2.summary.create_noop_writer()\n train_metrics = [train_loss_metric]\n if params[\"enable_metrics_in_training\"]:\n train_metrics = train_metrics + model.metrics\n else:\n model.compile(opt)\n\n model.summary()\n\n if self.use_tpu:\n # Different from experimental_distribute_dataset,\n # experimental_distribute_datasets_from_function requires\n # per-replica/local batch size.\n params[\"batch_size\"] /= self.distribution_strategy.num_replicas_in_sync\n train_ds = (\n self.distribution_strategy\n .experimental_distribute_datasets_from_function(\n lambda ctx: data_pipeline.train_input_fn(params, ctx)))\n else:\n train_ds = data_pipeline.train_input_fn(params)\n map_data_fn = data_pipeline.map_data_for_transformer_fn\n train_ds = train_ds.map(\n map_data_fn, num_parallel_calls=params[\"num_parallel_calls\"])\n if params[\"use_ctl\"]:\n train_ds_iterator = iter(train_ds)\n\n callbacks = self._create_callbacks(flags_obj.model_dir, 0, params)\n\n # Only TimeHistory callback is supported for CTL\n if params[\"use_ctl\"]:\n callbacks = [cb for cb in callbacks\n if isinstance(cb, keras_utils.TimeHistory)]\n\n # TODO(b/139418525): Refactor the custom training loop logic.\n @tf.function\n def train_steps(iterator, steps):\n \"\"\"Training steps function for TPU runs.\n\n Args:\n iterator: The input iterator of the training dataset.\n steps: An integer, the number of training steps.\n\n Returns:\n A float, the loss value.\n \"\"\"\n\n def _step_fn(inputs):\n \"\"\"Per-replica step function.\"\"\"\n inputs, targets = inputs\n with tf.GradientTape() as tape:\n logits = model([inputs, targets], training=True)\n loss = metrics.transformer_loss(logits, targets,\n params[\"label_smoothing\"],\n params[\"vocab_size\"])\n # Scales the loss, which results in using the average loss across all\n # of the replicas for backprop.\n scaled_loss = loss / self.distribution_strategy.num_replicas_in_sync\n\n # De-dupes variables due to keras tracking issues.\n tvars = list({id(v): v for v in model.trainable_variables}.values())\n grads = tape.gradient(scaled_loss, tvars)\n opt.apply_gradients(zip(grads, tvars))\n # For reporting, the metric takes the mean of losses.\n train_loss_metric.update_state(loss)\n\n for _ in tf.range(steps):\n train_loss_metric.reset_states()\n self.distribution_strategy.run(\n _step_fn, args=(next(iterator),))\n\n cased_score, uncased_score = None, None\n cased_score_history, uncased_score_history = [], []\n while current_step < flags_obj.train_steps:\n remaining_steps = flags_obj.train_steps - current_step\n train_steps_per_eval = (\n remaining_steps if remaining_steps < flags_obj.steps_between_evals\n else flags_obj.steps_between_evals)\n 
current_iteration = current_step // flags_obj.steps_between_evals\n\n logging.info(\n \"Start train iteration at global step:{}\".format(current_step))\n history = None\n if params[\"use_ctl\"]:\n if not self.use_tpu:\n raise NotImplementedError(\n \"Custom training loop on GPUs is not implemented.\")\n\n # Runs training steps.\n with summary_writer.as_default():\n for cb in callbacks:\n cb.on_epoch_begin(current_iteration)\n cb.on_batch_begin(0)\n\n train_steps(\n train_ds_iterator,\n tf.convert_to_tensor(train_steps_per_eval, dtype=tf.int32))\n current_step += train_steps_per_eval\n train_loss = train_loss_metric.result().numpy().astype(float)\n logging.info(\"Train Step: %d/%d / loss = %s\", current_step,\n flags_obj.train_steps, train_loss)\n\n for cb in callbacks:\n cb.on_batch_end(train_steps_per_eval - 1)\n cb.on_epoch_end(current_iteration)\n\n if params[\"enable_tensorboard\"]:\n for metric_obj in train_metrics:\n tf.compat.v2.summary.scalar(metric_obj.name, metric_obj.result(),\n current_step)\n summary_writer.flush()\n\n for cb in callbacks:\n cb.on_train_end()\n\n if flags_obj.enable_checkpointing:\n # avoid check-pointing when running for benchmarking.\n checkpoint_name = checkpoint.save(\n os.path.join(flags_obj.model_dir,\n \"ctl_step_{}.ckpt\".format(current_step)))\n logging.info(\"Saved checkpoint to %s\", checkpoint_name)\n else:\n if self.use_tpu:\n raise NotImplementedError(\n \"Keras model.fit on TPUs is not implemented.\")\n history = model.fit(\n train_ds,\n initial_epoch=current_iteration,\n epochs=current_iteration + 1,\n steps_per_epoch=train_steps_per_eval,\n callbacks=callbacks,\n # If TimeHistory is enabled, progress bar would be messy. Increase\n # the verbose level to get rid of it.\n verbose=(2 if flags_obj.enable_time_history else 1))\n current_step += train_steps_per_eval\n logging.info(\"Train history: {}\".format(history.history))\n\n logging.info(\"End train iteration at global step:{}\".format(current_step))\n\n if (flags_obj.bleu_source and flags_obj.bleu_ref):\n uncased_score, cased_score = self.eval()\n cased_score_history.append([current_iteration + 1, cased_score])\n uncased_score_history.append([current_iteration + 1, uncased_score])\n\n stats = ({\n \"loss\": train_loss\n } if history is None else misc.build_stats(history, callbacks))\n if uncased_score and cased_score:\n stats[\"bleu_uncased\"] = uncased_score\n stats[\"bleu_cased\"] = cased_score\n stats[\"bleu_uncased_history\"] = uncased_score_history\n stats[\"bleu_cased_history\"] = cased_score_history\n return stats\n\n def eval(self):\n \"\"\"Evaluates the model.\"\"\"\n distribution_strategy = self.distribution_strategy if self.use_tpu else None\n\n # We only want to create the model under DS scope for TPU case.\n # When 'distribution_strategy' is None, a no-op DummyContextManager will\n # be used.\n with distribution_utils.get_strategy_scope(distribution_strategy):\n if not self.predict_model:\n self.predict_model = transformer.create_model(self.params, False)\n self._load_weights_if_possible(\n self.predict_model,\n tf.train.latest_checkpoint(self.flags_obj.model_dir))\n self.predict_model.summary()\n return evaluate_and_log_bleu(\n self.predict_model, self.params, self.flags_obj.bleu_source,\n self.flags_obj.bleu_ref, self.flags_obj.vocab_file,\n distribution_strategy)\n\n def predict(self):\n \"\"\"Predicts result from the model.\"\"\"\n params = self.params\n flags_obj = self.flags_obj\n\n with tf.name_scope(\"model\"):\n model = transformer.create_model(params, is_train=False)\n 
self._load_weights_if_possible(\n model, tf.train.latest_checkpoint(self.flags_obj.model_dir))\n model.summary()\n subtokenizer = tokenizer.Subtokenizer(flags_obj.vocab_file)\n\n ds = data_pipeline.eval_input_fn(params)\n ds = ds.map(lambda x, y: x).take(_SINGLE_SAMPLE)\n ret = model.predict(ds)\n val_outputs, _ = ret\n length = len(val_outputs)\n for i in range(length):\n translate.translate_from_input(val_outputs[i], subtokenizer)\n\n def _create_callbacks(self, cur_log_dir, init_steps, params):\n \"\"\"Creates a list of callbacks.\"\"\"\n sfunc = optimizer.LearningRateFn(params[\"learning_rate\"],\n params[\"hidden_size\"],\n params[\"learning_rate_warmup_steps\"])\n scheduler_callback = optimizer.LearningRateScheduler(sfunc, init_steps)\n callbacks = misc.get_callbacks(params[\"steps_between_evals\"])\n callbacks.append(scheduler_callback)\n if params[\"enable_checkpointing\"]:\n ckpt_full_path = os.path.join(cur_log_dir, \"cp-{epoch:04d}.ckpt\")\n callbacks.append(\n tf.keras.callbacks.ModelCheckpoint(\n ckpt_full_path, save_weights_only=True))\n return callbacks\n\n def _load_weights_if_possible(self, model, init_weight_path=None):\n \"\"\"Loads model weights when it is provided.\"\"\"\n if init_weight_path:\n logging.info(\"Load weights: {}\".format(init_weight_path))\n # TODO(b/139414977): Having the same variable restoring method for both\n # TPU and GPU.\n if self.use_tpu:\n checkpoint = tf.train.Checkpoint(\n model=model, optimizer=self._create_optimizer())\n checkpoint.restore(init_weight_path)\n else:\n model.load_weights(init_weight_path)\n else:\n logging.info(\"Weights not loaded from path:{}\".format(init_weight_path))\n\n def _create_optimizer(self):\n \"\"\"Creates optimizer.\"\"\"\n params = self.params\n lr_schedule = optimizer.LearningRateSchedule(\n params[\"learning_rate\"], params[\"hidden_size\"],\n params[\"learning_rate_warmup_steps\"])\n opt = tf.keras.optimizers.Adam(\n lr_schedule if self.use_tpu else params[\"learning_rate\"],\n params[\"optimizer_adam_beta1\"],\n params[\"optimizer_adam_beta2\"],\n epsilon=params[\"optimizer_adam_epsilon\"])\n\n opt = performance.configure_optimizer(\n opt,\n use_float16=params[\"dtype\"] == tf.float16,\n use_graph_rewrite=self.flags_obj.fp16_implementation == \"graph_rewrite\",\n loss_scale=flags_core.get_loss_scale(\n self.flags_obj, default_for_fp16=\"dynamic\"))\n\n return opt\n\n\ndef _ensure_dir(log_dir):\n \"\"\"Makes log dir if not existed.\"\"\"\n if not tf.io.gfile.exists(log_dir):\n tf.io.gfile.makedirs(log_dir)\n\n\ndef main(_):\n flags_obj = flags.FLAGS\n with logger.benchmark_context(flags_obj):\n task = TransformerTask(flags_obj)\n\n # Execute flag override logic for better model performance\n if flags_obj.tf_gpu_thread_mode:\n keras_utils.set_gpu_thread_mode_and_count(\n per_gpu_thread_count=flags_obj.per_gpu_thread_count,\n gpu_thread_mode=flags_obj.tf_gpu_thread_mode,\n num_gpus=flags_obj.num_gpus,\n datasets_num_private_threads=flags_obj.datasets_num_private_threads)\n\n if flags_obj.mode == \"train\":\n task.train()\n elif flags_obj.mode == \"predict\":\n task.predict()\n elif flags_obj.mode == \"eval\":\n task.eval()\n else:\n raise ValueError(\"Invalid mode {}\".format(flags_obj.mode))\n\n\nif __name__ == \"__main__\":\n logging.set_verbosity(logging.INFO)\n misc.define_transformer_flags()\n app.run(main)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions for calculating loss, accuracy, and other model metrics.\n\nMetrics:\n - Padded loss, accuracy, and negative log perplexity. Source:\n https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py\n - BLEU approximation. Source:\n https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py\n - ROUGE score. Source:\n https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\n\nimport numpy as np\nimport six\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow.compat.v1 as tf\n\n\ndef _pad_tensors_to_same_length(x, y):\n \"\"\"Pad x and y so that the results have the same length (second dimension).\"\"\"\n with tf.name_scope(\"pad_to_same_length\"):\n x_length = tf.shape(x)[1]\n y_length = tf.shape(y)[1]\n\n max_length = tf.maximum(x_length, y_length)\n\n x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])\n y = tf.pad(y, [[0, 0], [0, max_length - y_length]])\n return x, y\n\n\ndef padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):\n \"\"\"Calculate cross entropy loss while ignoring padding.\n\n Args:\n logits: Tensor of size [batch_size, length_logits, vocab_size]\n labels: Tensor of size [batch_size, length_labels]\n smoothing: Label smoothing constant, used to determine the on and off values\n vocab_size: int size of the vocabulary\n Returns:\n Returns the cross entropy loss and weight tensors: float32 tensors with\n shape [batch_size, max(length_logits, length_labels)]\n \"\"\"\n with tf.name_scope(\"loss\", values=[logits, labels]):\n logits, labels = _pad_tensors_to_same_length(logits, labels)\n\n # Calculate smoothing cross entropy\n with tf.name_scope(\"smoothing_cross_entropy\", values=[logits, labels]):\n confidence = 1.0 - smoothing\n low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)\n soft_targets = tf.one_hot(\n tf.cast(labels, tf.int32),\n depth=vocab_size,\n on_value=confidence,\n off_value=low_confidence)\n xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(\n logits=logits, labels=soft_targets)\n\n # Calculate the best (lowest) possible value of cross entropy, and\n # subtract from the cross entropy loss.\n normalizing_constant = -(\n confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *\n low_confidence * tf.log(low_confidence + 1e-20))\n xentropy -= normalizing_constant\n\n weights = tf.to_float(tf.not_equal(labels, 0))\n return xentropy * weights, weights\n\n\ndef _convert_to_eval_metric(metric_fn):\n \"\"\"Wrap a metric fn that returns scores and weights as an eval metric fn.\n\n The input metric_fn returns values for the current batch. 
The wrapper\n aggregates the return values collected over all of the batches evaluated.\n\n Args:\n metric_fn: function that returns scores and weights for the current batch's\n logits and predicted labels.\n\n Returns:\n function that aggregates the scores and weights from metric_fn.\n \"\"\"\n def problem_metric_fn(*args):\n \"\"\"Returns an aggregation of the metric_fn's returned values.\"\"\"\n (scores, weights) = metric_fn(*args)\n\n # The tf.metrics.mean function assures correct aggregation.\n return tf.metrics.mean(scores, weights)\n return problem_metric_fn\n\n\ndef get_eval_metrics(logits, labels, params):\n \"\"\"Return dictionary of model evaluation metrics.\"\"\"\n metrics = {\n \"accuracy\": _convert_to_eval_metric(padded_accuracy)(logits, labels),\n \"accuracy_top5\": _convert_to_eval_metric(padded_accuracy_top5)(\n logits, labels),\n \"accuracy_per_sequence\": _convert_to_eval_metric(\n padded_sequence_accuracy)(logits, labels),\n \"neg_log_perplexity\": _convert_to_eval_metric(padded_neg_log_perplexity)(\n logits, labels, params[\"vocab_size\"]),\n }\n\n if not params[\"use_tpu\"]:\n # TPU does not support tf.py_func\n metrics.update({\n \"approx_bleu_score\": _convert_to_eval_metric(\n bleu_score)(logits, labels),\n \"rouge_2_fscore\": _convert_to_eval_metric(\n rouge_2_fscore)(logits, labels),\n \"rouge_L_fscore\": _convert_to_eval_metric(\n rouge_l_fscore)(logits, labels),\n })\n\n # Prefix each of the metric names with \"metrics/\". This allows the metric\n # graphs to display under the \"metrics\" category in TensorBoard.\n metrics = {\"metrics/%s\" % k: v for k, v in six.iteritems(metrics)}\n return metrics\n\n\ndef padded_accuracy(logits, labels):\n \"\"\"Percentage of times that predictions matches labels on non-0s.\"\"\"\n with tf.variable_scope(\"padded_accuracy\", values=[logits, labels]):\n logits, labels = _pad_tensors_to_same_length(logits, labels)\n weights = tf.to_float(tf.not_equal(labels, 0))\n outputs = tf.to_int32(tf.argmax(logits, axis=-1))\n padded_labels = tf.to_int32(labels)\n return tf.to_float(tf.equal(outputs, padded_labels)), weights\n\n\ndef padded_accuracy_topk(logits, labels, k):\n \"\"\"Percentage of times that top-k predictions matches labels on non-0s.\"\"\"\n with tf.variable_scope(\"padded_accuracy_topk\", values=[logits, labels]):\n logits, labels = _pad_tensors_to_same_length(logits, labels)\n weights = tf.to_float(tf.not_equal(labels, 0))\n effective_k = tf.minimum(k, tf.shape(logits)[-1])\n _, outputs = tf.nn.top_k(logits, k=effective_k)\n outputs = tf.to_int32(outputs)\n padded_labels = tf.to_int32(labels)\n padded_labels = tf.expand_dims(padded_labels, axis=-1)\n padded_labels += tf.zeros_like(outputs) # Pad to same shape.\n same = tf.to_float(tf.equal(outputs, padded_labels))\n same_topk = tf.reduce_sum(same, axis=-1)\n return same_topk, weights\n\n\ndef padded_accuracy_top5(logits, labels):\n return padded_accuracy_topk(logits, labels, 5)\n\n\ndef padded_sequence_accuracy(logits, labels):\n \"\"\"Percentage of times that predictions matches labels everywhere (non-0).\"\"\"\n with tf.variable_scope(\"padded_sequence_accuracy\", values=[logits, labels]):\n logits, labels = _pad_tensors_to_same_length(logits, labels)\n weights = tf.to_float(tf.not_equal(labels, 0))\n outputs = tf.to_int32(tf.argmax(logits, axis=-1))\n padded_labels = tf.to_int32(labels)\n not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights\n axis = list(range(1, len(outputs.get_shape())))\n correct_seq = 1.0 - tf.minimum(1.0, 
tf.reduce_sum(not_correct, axis=axis))\n return correct_seq, tf.constant(1.0)\n\n\ndef padded_neg_log_perplexity(logits, labels, vocab_size):\n \"\"\"Average log-perplexity excluding padding 0s. No smoothing.\"\"\"\n num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)\n return -num, den\n\n\ndef bleu_score(logits, labels):\n \"\"\"Approximate BLEU score computation between labels and predictions.\n\n An approximate BLEU scoring method since we do not glue word pieces or\n decode the ids and tokenize the output. By default, we use ngram order of 4\n and use brevity penalty. Also, this does not have beam search.\n\n Args:\n logits: Tensor of size [batch_size, length_logits, vocab_size]\n labels: Tensor of size [batch-size, length_labels]\n\n Returns:\n bleu: int, approx bleu score\n \"\"\"\n predictions = tf.to_int32(tf.argmax(logits, axis=-1))\n # TODO: Look into removing use of py_func\n bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)\n return bleu, tf.constant(1.0)\n\n\ndef _get_ngrams_with_counter(segment, max_order):\n \"\"\"Extracts all n-grams up to a given maximum order from an input segment.\n\n Args:\n segment: text segment from which n-grams will be extracted.\n max_order: maximum length in tokens of the n-grams returned by this\n methods.\n\n Returns:\n The Counter containing all n-grams upto max_order in segment\n with a count of how many times each n-gram occurred.\n \"\"\"\n ngram_counts = collections.Counter()\n for order in xrange(1, max_order + 1):\n for i in xrange(0, len(segment) - order + 1):\n ngram = tuple(segment[i:i + order])\n ngram_counts[ngram] += 1\n return ngram_counts\n\n\ndef compute_bleu(reference_corpus, translation_corpus, max_order=4,\n use_bp=True):\n \"\"\"Computes BLEU score of translated segments against one or more references.\n\n Args:\n reference_corpus: list of references for each translation. Each\n reference should be tokenized into a list of tokens.\n translation_corpus: list of translations to score. 
Each translation\n should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n use_bp: boolean, whether to apply brevity penalty.\n\n Returns:\n BLEU score.\n \"\"\"\n reference_length = 0\n translation_length = 0\n bp = 1.0\n geo_mean = 0\n\n matches_by_order = [0] * max_order\n possible_matches_by_order = [0] * max_order\n precisions = []\n\n for (references, translations) in zip(reference_corpus, translation_corpus):\n reference_length += len(references)\n translation_length += len(translations)\n ref_ngram_counts = _get_ngrams_with_counter(references, max_order)\n translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)\n\n overlap = dict((ngram,\n min(count, translation_ngram_counts[ngram]))\n for ngram, count in ref_ngram_counts.items())\n\n for ngram in overlap:\n matches_by_order[len(ngram) - 1] += overlap[ngram]\n for ngram in translation_ngram_counts:\n possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[\n ngram]\n\n precisions = [0] * max_order\n smooth = 1.0\n\n for i in xrange(0, max_order):\n if possible_matches_by_order[i] > 0:\n precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]\n if matches_by_order[i] > 0:\n precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[\n i]\n else:\n smooth *= 2\n precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])\n else:\n precisions[i] = 0.0\n\n if max(precisions) > 0:\n p_log_sum = sum(math.log(p) for p in precisions if p)\n geo_mean = math.exp(p_log_sum / max_order)\n\n if use_bp:\n ratio = translation_length / reference_length\n bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0\n bleu = geo_mean * bp\n return np.float32(bleu)\n\n\ndef rouge_2_fscore(logits, labels):\n \"\"\"ROUGE-2 F1 score computation between labels and predictions.\n\n This is an approximate ROUGE scoring method since we do not glue word pieces\n or decode the ids and tokenize the output.\n\n Args:\n logits: tensor, model predictions\n labels: tensor, gold output.\n\n Returns:\n rouge2_fscore: approx rouge-2 f1 score.\n \"\"\"\n predictions = tf.to_int32(tf.argmax(logits, axis=-1))\n # TODO: Look into removing use of py_func\n rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)\n return rouge_2_f_score, tf.constant(1.0)\n\n\ndef _get_ngrams(n, text):\n \"\"\"Calculates n-grams.\n\n Args:\n n: which n-grams to calculate\n text: An array of tokens\n\n Returns:\n A set of n-grams\n \"\"\"\n ngram_set = set()\n text_length = len(text)\n max_index_ngram_start = text_length - n\n for i in range(max_index_ngram_start + 1):\n ngram_set.add(tuple(text[i:i + n]))\n return ngram_set\n\n\ndef rouge_n(eval_sentences, ref_sentences, n=2):\n \"\"\"Computes ROUGE-N f1 score of two text collections of sentences.\n\n Source: https://www.microsoft.com/en-us/research/publication/\n rouge-a-package-for-automatic-evaluation-of-summaries/\n\n Args:\n eval_sentences: Predicted sentences.\n ref_sentences: Sentences from the reference set\n n: Size of ngram. 
Defaults to 2.\n\n Returns:\n f1 score for ROUGE-N\n \"\"\"\n f1_scores = []\n for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):\n eval_ngrams = _get_ngrams(n, eval_sentence)\n ref_ngrams = _get_ngrams(n, ref_sentence)\n ref_count = len(ref_ngrams)\n eval_count = len(eval_ngrams)\n\n # Count the overlapping ngrams between evaluated and reference\n overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)\n overlapping_count = len(overlapping_ngrams)\n\n # Handle edge case. This isn't mathematically correct, but it's good enough\n if eval_count == 0:\n precision = 0.0\n else:\n precision = float(overlapping_count) / eval_count\n if ref_count == 0:\n recall = 0.0\n else:\n recall = float(overlapping_count) / ref_count\n f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))\n\n # return overlapping_count / reference_count\n return np.mean(f1_scores, dtype=np.float32)\n\n\ndef rouge_l_fscore(predictions, labels):\n \"\"\"ROUGE scores computation between labels and predictions.\n\n This is an approximate ROUGE scoring method since we do not glue word pieces\n or decode the ids and tokenize the output.\n\n Args:\n predictions: tensor, model predictions\n labels: tensor, gold output.\n\n Returns:\n rouge_l_fscore: approx rouge-l f1 score.\n \"\"\"\n outputs = tf.to_int32(tf.argmax(predictions, axis=-1))\n rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),\n tf.float32)\n return rouge_l_f_score, tf.constant(1.0)\n\n\ndef rouge_l_sentence_level(eval_sentences, ref_sentences):\n \"\"\"Computes ROUGE-L (sentence level) of two collections of sentences.\n\n Source: https://www.microsoft.com/en-us/research/publication/\n rouge-a-package-for-automatic-evaluation-of-summaries/\n\n Calculated according to:\n R_lcs = LCS(X,Y)/m\n P_lcs = LCS(X,Y)/n\n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)\n\n where:\n X = reference summary\n Y = Candidate summary\n m = length of reference summary\n n = length of candidate summary\n\n Args:\n eval_sentences: The sentences that have been picked by the summarizer\n ref_sentences: The sentences from the reference set\n\n Returns:\n A float: F_lcs\n \"\"\"\n\n f1_scores = []\n for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):\n m = float(len(ref_sentence))\n n = float(len(eval_sentence))\n lcs = _len_lcs(eval_sentence, ref_sentence)\n f1_scores.append(_f_lcs(lcs, m, n))\n return np.mean(f1_scores, dtype=np.float32)\n\n\ndef _len_lcs(x, y):\n \"\"\"Returns the length of the Longest Common Subsequence between two seqs.\n\n Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n\n Args:\n x: sequence of words\n y: sequence of words\n\n Returns\n integer: Length of LCS between x and y\n \"\"\"\n table = _lcs(x, y)\n n, m = len(x), len(y)\n return table[n, m]\n\n\ndef _lcs(x, y):\n \"\"\"Computes the length of the LCS between two seqs.\n\n The implementation below uses a DP programming algorithm and runs\n in O(nm) time where n = len(x) and m = len(y).\n Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n\n Args:\n x: collection of words\n y: collection of words\n\n Returns:\n Table of dictionary of coord and len lcs\n \"\"\"\n n, m = len(x), len(y)\n table = dict()\n for i in range(n + 1):\n for j in range(m + 1):\n if i == 0 or j == 0:\n table[i, j] = 0\n elif x[i - 1] == y[j - 1]:\n table[i, j] = table[i - 1, j - 1] + 1\n else:\n table[i, j] = max(table[i - 1, j], table[i, j - 1])\n return table\n\n\ndef _f_lcs(llcs, m, n):\n 
\"\"\"Computes the LCS-based F-measure score.\n\n Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/\n rouge-working-note-v1.3.1.pdf\n\n Args:\n llcs: Length of LCS\n m: number of words in reference summary\n n: number of words in candidate summary\n\n Returns:\n Float. LCS-based F-measure score\n \"\"\"\n r_lcs = llcs / m\n p_lcs = llcs / n\n beta = p_lcs / (r_lcs + 1e-12)\n num = (1 + (beta ** 2)) * r_lcs * p_lcs\n denom = r_lcs + ((beta ** 2) * p_lcs)\n f_lcs = num / (denom + 1e-12)\n return f_lcs\n",
"\n# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Contains common utilities and functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport locale\nimport os\nimport re\nfrom absl import logging\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport cv2\ngfile = tf.gfile\n\n\nCMAP_DEFAULT = 'plasma'\n# Defines the cropping that is applied to the Cityscapes dataset with respect to\n# the original raw input resolution.\nCITYSCAPES_CROP = [256, 768, 192, 1856]\n\n\ndef crop_cityscapes(im, resize=None):\n ymin, ymax, xmin, xmax = CITYSCAPES_CROP\n im = im[ymin:ymax, xmin:xmax]\n if resize is not None:\n im = cv2.resize(im, resize)\n return im\n\n\ndef gray2rgb(im, cmap=CMAP_DEFAULT):\n cmap = plt.get_cmap(cmap)\n result_img = cmap(im.astype(np.float32))\n if result_img.shape[2] > 3:\n result_img = np.delete(result_img, 3, 2)\n return result_img\n\n\ndef load_image(img_file, resize=None, interpolation='linear'):\n \"\"\"Load image from disk. Output value range: [0,1].\"\"\"\n im_data = np.fromstring(gfile.Open(img_file).read(), np.uint8)\n im = cv2.imdecode(im_data, cv2.IMREAD_COLOR)\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n if resize and resize != im.shape[:2]:\n ip = cv2.INTER_LINEAR if interpolation == 'linear' else cv2.INTER_NEAREST\n im = cv2.resize(im, resize, interpolation=ip)\n return np.array(im, dtype=np.float32) / 255.0\n\n\ndef save_image(img_file, im, file_extension):\n \"\"\"Save image from disk. 
Expected input value range: [0,1].\"\"\"\n im = (im * 255.0).astype(np.uint8)\n with gfile.Open(img_file, 'w') as f:\n im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)\n _, im_data = cv2.imencode('.%s' % file_extension, im)\n f.write(im_data.tostring())\n\n\ndef normalize_depth_for_display(depth, pc=95, crop_percent=0, normalizer=None,\n cmap=CMAP_DEFAULT):\n \"\"\"Converts a depth map to an RGB image.\"\"\"\n # Convert to disparity.\n\n disp = 1.0 / (depth + 1e-6)\n if normalizer is not None:\n disp /= normalizer\n else:\n disp /= (np.percentile(disp, pc) + 1e-6)\n disp = np.clip(disp, 0, 1)\n disp = gray2rgb(disp, cmap=cmap)\n keep_h = int(disp.shape[0] * (1 - crop_percent))\n disp = disp[:keep_h]\n return disp\n\n\ndef get_seq_start_end(target_index, seq_length, sample_every=1):\n \"\"\"Returns absolute seq start and end indices for a given target frame.\"\"\"\n half_offset = int((seq_length - 1) / 2) * sample_every\n end_index = target_index + half_offset\n start_index = end_index - (seq_length - 1) * sample_every\n return start_index, end_index\n\n\ndef get_seq_middle(seq_length):\n \"\"\"Returns relative index for the middle frame in sequence.\"\"\"\n half_offset = int((seq_length - 1) / 2)\n return seq_length - 1 - half_offset\n\n\ndef info(obj):\n \"\"\"Return info on shape and dtype of a numpy array or TensorFlow tensor.\"\"\"\n if obj is None:\n return 'None.'\n elif isinstance(obj, list):\n if obj:\n return 'List of %d... %s' % (len(obj), info(obj[0]))\n else:\n return 'Empty list.'\n elif isinstance(obj, tuple):\n if obj:\n return 'Tuple of %d... %s' % (len(obj), info(obj[0]))\n else:\n return 'Empty tuple.'\n else:\n if is_a_numpy_array(obj):\n return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype)\n else:\n return str(obj)\n\n\ndef is_a_numpy_array(obj):\n \"\"\"Returns true if obj is a numpy array.\"\"\"\n return type(obj).__module__ == np.__name__\n\n\ndef count_parameters(also_print=True):\n \"\"\"Cound the number of parameters in the model.\n\n Args:\n also_print: Boolean. If True also print the numbers.\n\n Returns:\n The total number of parameters.\n \"\"\"\n total = 0\n if also_print:\n logging.info('Model Parameters:')\n for (_, v) in get_vars_to_save_and_restore().items():\n shape = v.get_shape()\n if also_print:\n logging.info('%s %s: %s', v.op.name, shape,\n format_number(shape.num_elements()))\n total += shape.num_elements()\n if also_print:\n logging.info('Total: %s', format_number(total))\n return total\n\n\ndef get_vars_to_save_and_restore(ckpt=None):\n \"\"\"Returns list of variables that should be saved/restored.\n\n Args:\n ckpt: Path to existing checkpoint. 
If present, returns only the subset of\n variables that exist in given checkpoint.\n\n Returns:\n List of all variables that need to be saved/restored.\n \"\"\"\n model_vars = tf.trainable_variables()\n # Add batchnorm variables.\n bn_vars = [v for v in tf.global_variables()\n if 'moving_mean' in v.op.name or 'moving_variance' in v.op.name or\n 'mu' in v.op.name or 'sigma' in v.op.name or\n 'global_scale_var' in v.op.name]\n model_vars.extend(bn_vars)\n model_vars = sorted(model_vars, key=lambda x: x.op.name)\n mapping = {}\n if ckpt is not None:\n ckpt_var = tf.contrib.framework.list_variables(ckpt)\n ckpt_var_names = [name for (name, unused_shape) in ckpt_var]\n ckpt_var_shapes = [shape for (unused_name, shape) in ckpt_var]\n not_loaded = list(ckpt_var_names)\n for v in model_vars:\n if v.op.name not in ckpt_var_names:\n # For backward compatibility, try additional matching.\n v_additional_name = v.op.name.replace('egomotion_prediction/', '')\n if v_additional_name in ckpt_var_names:\n # Check if shapes match.\n ind = ckpt_var_names.index(v_additional_name)\n if ckpt_var_shapes[ind] == v.get_shape():\n mapping[v_additional_name] = v\n not_loaded.remove(v_additional_name)\n continue\n else:\n logging.warn('Shape mismatch, will not restore %s.', v.op.name)\n logging.warn('Did not find var %s in checkpoint: %s', v.op.name,\n os.path.basename(ckpt))\n else:\n # Check if shapes match.\n ind = ckpt_var_names.index(v.op.name)\n if ckpt_var_shapes[ind] == v.get_shape():\n mapping[v.op.name] = v\n not_loaded.remove(v.op.name)\n else:\n logging.warn('Shape mismatch, will not restore %s.', v.op.name)\n if not_loaded:\n logging.warn('The following variables in the checkpoint were not loaded:')\n for varname_not_loaded in not_loaded:\n logging.info('%s', varname_not_loaded)\n else: # just get model vars.\n for v in model_vars:\n mapping[v.op.name] = v\n return mapping\n\n\ndef get_imagenet_vars_to_restore(imagenet_ckpt):\n \"\"\"Returns dict of variables to restore from ImageNet-checkpoint.\"\"\"\n vars_to_restore_imagenet = {}\n ckpt_var_names = tf.contrib.framework.list_variables(imagenet_ckpt)\n ckpt_var_names = [name for (name, unused_shape) in ckpt_var_names]\n model_vars = tf.global_variables()\n for v in model_vars:\n if 'global_step' in v.op.name: continue\n mvname_noprefix = v.op.name.replace('depth_prediction/', '')\n mvname_noprefix = mvname_noprefix.replace('moving_mean', 'mu')\n mvname_noprefix = mvname_noprefix.replace('moving_variance', 'sigma')\n if mvname_noprefix in ckpt_var_names:\n vars_to_restore_imagenet[mvname_noprefix] = v\n else:\n logging.info('The following variable will not be restored from '\n 'pretrained ImageNet-checkpoint: %s', mvname_noprefix)\n return vars_to_restore_imagenet\n\n\ndef format_number(n):\n \"\"\"Formats number with thousands commas.\"\"\"\n locale.setlocale(locale.LC_ALL, 'en_US')\n return locale.format('%d', n, grouping=True)\n\n\ndef atoi(text):\n return int(text) if text.isdigit() else text\n\n\ndef natural_keys(text):\n return [atoi(c) for c in re.split(r'(\\d+)', text)]\n\n\ndef read_text_lines(filepath):\n with tf.gfile.Open(filepath, 'r') as f:\n lines = f.readlines()\n lines = [l.rstrip() for l in lines]\n return lines\n",
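The `natural_keys` helper above gives checkpoint and frame files a human-style ordering, so `frame_10` sorts after `frame_2` rather than after `frame_1`. A standalone illustration of the same two helpers (filenames invented):

```python
import re

def atoi(text):
    return int(text) if text.isdigit() else text

def natural_keys(text):
    # Split on digit runs so numeric chunks compare as integers.
    return [atoi(c) for c in re.split(r'(\d+)', text)]

frames = ['frame_10.png', 'frame_2.png', 'frame_1.png']
print(sorted(frames))                    # ['frame_1.png', 'frame_10.png', 'frame_2.png']
print(sorted(frames, key=natural_keys))  # ['frame_1.png', 'frame_2.png', 'frame_10.png']
```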
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Downloads and converts Flowers data to TFRecords of TF-Example protos.\n\nThis module downloads the Flowers data, uncompresses it, reads the files\nthat make up the Flowers data and creates two TFRecord datasets: one for train\nand one for test. Each TFRecord dataset is comprised of a set of TF-Example\nprotocol buffers, each of which contain a single image and label.\n\nThe script should take about a minute to run.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport os\nimport random\nimport sys\n\nimport tensorflow as tf\n\nfrom datasets import dataset_utils\n\n# The URL where the Flowers data can be downloaded.\n_DATA_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'\n\n# The number of images in the validation set.\n_NUM_VALIDATION = 350\n\n# Seed for repeatability.\n_RANDOM_SEED = 0\n\n# The number of shards per dataset split.\n_NUM_SHARDS = 5\n\n\nclass ImageReader(object):\n \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\"\n\n def __init__(self):\n # Initializes function that decodes RGB JPEG data.\n self._decode_jpeg_data = tf.placeholder(dtype=tf.string)\n self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)\n\n def read_image_dims(self, sess, image_data):\n image = self.decode_jpeg(sess, image_data)\n return image.shape[0], image.shape[1]\n\n def decode_jpeg(self, sess, image_data):\n image = sess.run(self._decode_jpeg,\n feed_dict={self._decode_jpeg_data: image_data})\n assert len(image.shape) == 3\n assert image.shape[2] == 3\n return image\n\n\ndef _get_filenames_and_classes(dataset_dir):\n \"\"\"Returns a list of filenames and inferred class names.\n\n Args:\n dataset_dir: A directory containing a set of subdirectories representing\n class names. 
Each subdirectory should contain PNG or JPG encoded images.\n\n Returns:\n A list of image file paths, relative to `dataset_dir` and the list of\n subdirectories, representing class names.\n \"\"\"\n flower_root = os.path.join(dataset_dir, 'flower_photos')\n directories = []\n class_names = []\n for filename in os.listdir(flower_root):\n path = os.path.join(flower_root, filename)\n if os.path.isdir(path):\n directories.append(path)\n class_names.append(filename)\n\n photo_filenames = []\n for directory in directories:\n for filename in os.listdir(directory):\n path = os.path.join(directory, filename)\n photo_filenames.append(path)\n\n return photo_filenames, sorted(class_names)\n\n\ndef _get_dataset_filename(dataset_dir, split_name, shard_id):\n output_filename = 'flowers_%s_%05d-of-%05d.tfrecord' % (\n split_name, shard_id, _NUM_SHARDS)\n return os.path.join(dataset_dir, output_filename)\n\n\ndef _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):\n \"\"\"Converts the given filenames to a TFRecord dataset.\n\n Args:\n split_name: The name of the dataset, either 'train' or 'validation'.\n filenames: A list of absolute paths to png or jpg images.\n class_names_to_ids: A dictionary from class names (strings) to ids\n (integers).\n dataset_dir: The directory where the converted datasets are stored.\n \"\"\"\n assert split_name in ['train', 'validation']\n\n num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))\n\n with tf.Graph().as_default():\n image_reader = ImageReader()\n\n with tf.Session('') as sess:\n\n for shard_id in range(_NUM_SHARDS):\n output_filename = _get_dataset_filename(\n dataset_dir, split_name, shard_id)\n\n with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:\n start_ndx = shard_id * num_per_shard\n end_ndx = min((shard_id+1) * num_per_shard, len(filenames))\n for i in range(start_ndx, end_ndx):\n sys.stdout.write('\\r>> Converting image %d/%d shard %d' % (\n i+1, len(filenames), shard_id))\n sys.stdout.flush()\n\n # Read the filename:\n image_data = tf.gfile.GFile(filenames[i], 'rb').read()\n height, width = image_reader.read_image_dims(sess, image_data)\n\n class_name = os.path.basename(os.path.dirname(filenames[i]))\n class_id = class_names_to_ids[class_name]\n\n example = dataset_utils.image_to_tfexample(\n image_data, b'jpg', height, width, class_id)\n tfrecord_writer.write(example.SerializeToString())\n\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef _clean_up_temporary_files(dataset_dir):\n \"\"\"Removes temporary files used to create the dataset.\n\n Args:\n dataset_dir: The directory where the temporary files are stored.\n \"\"\"\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n tf.gfile.Remove(filepath)\n\n tmp_dir = os.path.join(dataset_dir, 'flower_photos')\n tf.gfile.DeleteRecursively(tmp_dir)\n\n\ndef _dataset_exists(dataset_dir):\n for split_name in ['train', 'validation']:\n for shard_id in range(_NUM_SHARDS):\n output_filename = _get_dataset_filename(\n dataset_dir, split_name, shard_id)\n if not tf.gfile.Exists(output_filename):\n return False\n return True\n\n\ndef run(dataset_dir):\n \"\"\"Runs the download and conversion operation.\n\n Args:\n dataset_dir: The dataset directory where the dataset is stored.\n \"\"\"\n if not tf.gfile.Exists(dataset_dir):\n tf.gfile.MakeDirs(dataset_dir)\n\n if _dataset_exists(dataset_dir):\n print('Dataset files already exist. 
Exiting without re-creating them.')\n return\n\n dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)\n photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)\n class_names_to_ids = dict(zip(class_names, range(len(class_names))))\n\n # Divide into train and test:\n random.seed(_RANDOM_SEED)\n random.shuffle(photo_filenames)\n training_filenames = photo_filenames[_NUM_VALIDATION:]\n validation_filenames = photo_filenames[:_NUM_VALIDATION]\n\n # First, convert the training and validation sets.\n _convert_dataset('train', training_filenames, class_names_to_ids,\n dataset_dir)\n _convert_dataset('validation', validation_filenames, class_names_to_ids,\n dataset_dir)\n\n # Finally, write the labels file:\n labels_to_class_names = dict(zip(range(len(class_names)), class_names))\n dataset_utils.write_label_file(labels_to_class_names, dataset_dir)\n\n _clean_up_temporary_files(dataset_dir)\n print('\\nFinished converting the Flowers dataset!')\n",
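A converted shard can be spot-checked by iterating over it with the TF1-era record reader. This sketch assumes the feature keys follow the usual slim `dataset_utils.image_to_tfexample` convention (`image/encoded`, `image/class/label`, `image/height`, `image/width`); the shard path is illustrative:

```python
import tensorflow as tf

shard_path = 'flowers/flowers_train_00000-of-00005.tfrecord'  # example path

for record in tf.python_io.tf_record_iterator(shard_path):
    example = tf.train.Example.FromString(record)
    feats = example.features.feature
    label = feats['image/class/label'].int64_list.value[0]
    height = feats['image/height'].int64_list.value[0]
    width = feats['image/width'].int64_list.value[0]
    jpeg_bytes = feats['image/encoded'].bytes_list.value[0]
    print('label=%d size=%dx%d encoded_bytes=%d'
          % (label, height, width, len(jpeg_bytes)))
    break  # inspect only the first record
```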
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Inception Resnet v2 Faster R-CNN implementation.\n\nSee \"Inception-v4, Inception-ResNet and the Impact of Residual Connections on\nLearning\" by Szegedy et al. (https://arxiv.org/abs/1602.07261)\nas well as\n\"Speed/accuracy trade-offs for modern convolutional object detectors\" by\nHuang et al. (https://arxiv.org/abs/1611.10012)\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.contrib import slim as contrib_slim\n\nfrom object_detection.meta_architectures import faster_rcnn_meta_arch\nfrom object_detection.utils import variables_helper\nfrom nets import inception_resnet_v2\n\nslim = contrib_slim\n\n\nclass FasterRCNNInceptionResnetV2FeatureExtractor(\n faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):\n \"\"\"Faster R-CNN with Inception Resnet v2 feature extractor implementation.\"\"\"\n\n def __init__(self,\n is_training,\n first_stage_features_stride,\n batch_norm_trainable=False,\n reuse_weights=None,\n weight_decay=0.0):\n \"\"\"Constructor.\n\n Args:\n is_training: See base class.\n first_stage_features_stride: See base class.\n batch_norm_trainable: See base class.\n reuse_weights: See base class.\n weight_decay: See base class.\n\n Raises:\n ValueError: If `first_stage_features_stride` is not 8 or 16.\n \"\"\"\n if first_stage_features_stride != 8 and first_stage_features_stride != 16:\n raise ValueError('`first_stage_features_stride` must be 8 or 16.')\n super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__(\n is_training, first_stage_features_stride, batch_norm_trainable,\n reuse_weights, weight_decay)\n\n def preprocess(self, resized_inputs):\n \"\"\"Faster R-CNN with Inception Resnet v2 preprocessing.\n\n Maps pixel values to the range [-1, 1].\n\n Args:\n resized_inputs: A [batch, height_in, width_in, channels] float32 tensor\n representing a batch of images with values between 0 and 255.0.\n\n Returns:\n preprocessed_inputs: A [batch, height_out, width_out, channels] float32\n tensor representing a batch of images.\n\n \"\"\"\n return (2.0 / 255.0) * resized_inputs - 1.0\n\n def _extract_proposal_features(self, preprocessed_inputs, scope):\n \"\"\"Extracts first stage RPN features.\n\n Extracts features using the first half of the Inception Resnet v2 network.\n We construct the network in `align_feature_maps=True` mode, which means\n that all VALID paddings in the network are changed to SAME padding so that\n the feature maps are aligned.\n\n Args:\n preprocessed_inputs: A [batch, height, width, channels] float32 tensor\n representing a batch of images.\n scope: A scope name.\n\n Returns:\n rpn_feature_map: A tensor with shape [batch, height, width, depth]\n Raises:\n InvalidArgumentError: If the spatial size of `preprocessed_inputs`\n (height or width) is less than 33.\n ValueError: If the created network is missing the required activation.\n \"\"\"\n if 
len(preprocessed_inputs.get_shape().as_list()) != 4:\n raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '\n 'tensor of shape %s' % preprocessed_inputs.get_shape())\n\n with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(\n weight_decay=self._weight_decay)):\n # Forces is_training to False to disable batch norm update.\n with slim.arg_scope([slim.batch_norm],\n is_training=self._train_batch_norm):\n with tf.variable_scope('InceptionResnetV2',\n reuse=self._reuse_weights) as scope:\n return inception_resnet_v2.inception_resnet_v2_base(\n preprocessed_inputs, final_endpoint='PreAuxLogits',\n scope=scope, output_stride=self._first_stage_features_stride,\n align_feature_maps=True)\n\n def _extract_box_classifier_features(self, proposal_feature_maps, scope):\n \"\"\"Extracts second stage box classifier features.\n\n This function reconstructs the \"second half\" of the Inception ResNet v2\n network after the part defined in `_extract_proposal_features`.\n\n Args:\n proposal_feature_maps: A 4-D float tensor with shape\n [batch_size * self.max_num_proposals, crop_height, crop_width, depth]\n representing the feature map cropped to each proposal.\n scope: A scope name.\n\n Returns:\n proposal_classifier_features: A 4-D float tensor with shape\n [batch_size * self.max_num_proposals, height, width, depth]\n representing box classifier features for each proposal.\n \"\"\"\n with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights):\n with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(\n weight_decay=self._weight_decay)):\n # Forces is_training to False to disable batch norm update.\n with slim.arg_scope([slim.batch_norm],\n is_training=self._train_batch_norm):\n with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1, padding='SAME'):\n with tf.variable_scope('Mixed_7a'):\n with tf.variable_scope('Branch_0'):\n tower_conv = slim.conv2d(proposal_feature_maps,\n 256, 1, scope='Conv2d_0a_1x1')\n tower_conv_1 = slim.conv2d(\n tower_conv, 384, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n tower_conv1 = slim.conv2d(\n proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv1_1 = slim.conv2d(\n tower_conv1, 288, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n tower_conv2 = slim.conv2d(\n proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1')\n tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,\n scope='Conv2d_0b_3x3')\n tower_conv2_2 = slim.conv2d(\n tower_conv2_1, 320, 3, stride=2,\n padding='VALID', scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_3'):\n tower_pool = slim.max_pool2d(\n proposal_feature_maps, 3, stride=2, padding='VALID',\n scope='MaxPool_1a_3x3')\n net = tf.concat(\n [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)\n net = slim.repeat(net, 9, inception_resnet_v2.block8, scale=0.20)\n net = inception_resnet_v2.block8(net, activation_fn=None)\n proposal_classifier_features = slim.conv2d(\n net, 1536, 1, scope='Conv2d_7b_1x1')\n return proposal_classifier_features\n\n def restore_from_classification_checkpoint_fn(\n self,\n first_stage_feature_extractor_scope,\n second_stage_feature_extractor_scope):\n \"\"\"Returns a map of variables to load from a foreign checkpoint.\n\n Note that this overrides the default implementation in\n faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for\n InceptionResnetV2 checkpoints.\n\n TODO(jonathanhuang,rathodv): revisit whether 
it's possible to force the\n `Repeat` namescope as created in `_extract_box_classifier_features` to\n start counting at 2 (e.g. `Repeat_2`) so that the default restore_fn can\n be used.\n\n Args:\n first_stage_feature_extractor_scope: A scope name for the first stage\n feature extractor.\n second_stage_feature_extractor_scope: A scope name for the second stage\n feature extractor.\n\n Returns:\n A dict mapping variable names (to load from a checkpoint) to variables in\n the model graph.\n \"\"\"\n\n variables_to_restore = {}\n for variable in variables_helper.get_global_variables_safely():\n if variable.op.name.startswith(\n first_stage_feature_extractor_scope):\n var_name = variable.op.name.replace(\n first_stage_feature_extractor_scope + '/', '')\n variables_to_restore[var_name] = variable\n if variable.op.name.startswith(\n second_stage_feature_extractor_scope):\n var_name = variable.op.name.replace(\n second_stage_feature_extractor_scope\n + '/InceptionResnetV2/Repeat', 'InceptionResnetV2/Repeat_2')\n var_name = var_name.replace(\n second_stage_feature_extractor_scope + '/', '')\n variables_to_restore[var_name] = variable\n return variables_to_restore\n",
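The `preprocess` method above is a plain affine rescaling from pixel range [0, 255] to [-1, 1], which is easy to verify in isolation:

```python
import numpy as np

def preprocess(resized_inputs):
    # Same affine map as the feature extractor: [0, 255] -> [-1, 1].
    return (2.0 / 255.0) * resized_inputs - 1.0

pixels = np.array([0.0, 127.5, 255.0], dtype=np.float32)
print(preprocess(pixels))  # -> [-1.  0.  1.]
```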
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"An executor class for running model on TensorFlow 2.0.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# from __future__ import google_type_annotations\nfrom __future__ import print_function\n\nfrom absl import logging\n\nimport tensorflow.compat.v2 as tf\nfrom official.modeling.training import distributed_executor as executor\nfrom official.vision.detection.utils.object_detection import visualization_utils\n\n\nclass DetectionDistributedExecutor(executor.DistributedExecutor):\n \"\"\"Detection specific customer training loop executor.\n\n Subclasses the DistributedExecutor and adds support for numpy based metrics.\n \"\"\"\n\n def __init__(self,\n predict_post_process_fn=None,\n trainable_variables_filter=None,\n **kwargs):\n super(DetectionDistributedExecutor, self).__init__(**kwargs)\n if predict_post_process_fn:\n assert callable(predict_post_process_fn)\n if trainable_variables_filter:\n assert callable(trainable_variables_filter)\n self._predict_post_process_fn = predict_post_process_fn\n self._trainable_variables_filter = trainable_variables_filter\n self.eval_steps = tf.Variable(\n 0,\n trainable=False,\n dtype=tf.int32,\n synchronization=tf.VariableSynchronization.ON_READ,\n aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,\n shape=[])\n\n def _create_replicated_step(self,\n strategy,\n model,\n loss_fn,\n optimizer,\n metric=None):\n trainable_variables = model.trainable_variables\n if self._trainable_variables_filter:\n trainable_variables = self._trainable_variables_filter(\n trainable_variables)\n logging.info('Filter trainable variables from %d to %d',\n len(model.trainable_variables), len(trainable_variables))\n _update_state = lambda labels, outputs: None\n if isinstance(metric, tf.keras.metrics.Metric):\n _update_state = lambda labels, outputs: metric.update_state(\n labels, outputs)\n else:\n logging.error('Detection: train metric is not an instance of '\n 'tf.keras.metrics.Metric.')\n\n def _replicated_step(inputs):\n \"\"\"Replicated training step.\"\"\"\n inputs, labels = inputs\n\n with tf.GradientTape() as tape:\n outputs = model(inputs, training=True)\n all_losses = loss_fn(labels, outputs)\n losses = {}\n for k, v in all_losses.items():\n losses[k] = tf.reduce_mean(v)\n per_replica_loss = losses['total_loss'] / strategy.num_replicas_in_sync\n _update_state(labels, outputs)\n\n grads = tape.gradient(per_replica_loss, trainable_variables)\n optimizer.apply_gradients(zip(grads, trainable_variables))\n return losses\n\n return _replicated_step\n\n def _create_test_step(self, strategy, model, metric):\n \"\"\"Creates a distributed test step.\"\"\"\n\n @tf.function\n def test_step(iterator, eval_steps):\n \"\"\"Calculates evaluation metrics on distributed devices.\"\"\"\n\n def _test_step_fn(inputs, eval_steps):\n \"\"\"Replicated 
accuracy calculation.\"\"\"\n inputs, labels = inputs\n model_outputs = model(inputs, training=False)\n if self._predict_post_process_fn:\n labels, prediction_outputs = self._predict_post_process_fn(\n labels, model_outputs)\n num_remaining_visualizations = (\n self._params.eval.num_images_to_visualize - eval_steps)\n # If there are remaining number of visualizations that needs to be\n # done, add next batch outputs for visualization.\n #\n # TODO(hongjunchoi): Once dynamic slicing is supported on TPU, only\n # write correct slice of outputs to summary file.\n if num_remaining_visualizations > 0:\n visualization_utils.visualize_images_with_bounding_boxes(\n inputs, prediction_outputs['detection_boxes'],\n self.global_train_step, self.eval_summary_writer)\n\n return labels, prediction_outputs\n\n labels, outputs = strategy.run(\n _test_step_fn, args=(\n next(iterator),\n eval_steps,\n ))\n outputs = tf.nest.map_structure(strategy.experimental_local_results,\n outputs)\n labels = tf.nest.map_structure(strategy.experimental_local_results,\n labels)\n\n eval_steps.assign_add(self._params.eval.batch_size)\n return labels, outputs\n\n return test_step\n\n def _run_evaluation(self, test_step, current_training_step, metric,\n test_iterator):\n \"\"\"Runs validation steps and aggregate metrics.\"\"\"\n self.eval_steps.assign(0)\n if not test_iterator or not metric:\n logging.warning(\n 'Both test_iterator (%s) and metrics (%s) must not be None.',\n test_iterator, metric)\n return None\n logging.info('Running evaluation after step: %s.', current_training_step)\n while True:\n try:\n labels, outputs = test_step(test_iterator, self.eval_steps)\n if metric:\n metric.update_state(labels, outputs)\n except (StopIteration, tf.errors.OutOfRangeError):\n break\n\n metric_result = metric.result()\n if isinstance(metric, tf.keras.metrics.Metric):\n metric_result = tf.nest.map_structure(lambda x: x.numpy().astype(float),\n metric_result)\n logging.info('Step: [%d] Validation metric = %s', current_training_step,\n metric_result)\n return metric_result\n",
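The `tf.GradientTape` + `strategy.run` pattern in `_create_replicated_step` above is independent of the detection specifics. A pared-down sketch of the same loop with a toy Keras regressor under `MirroredStrategy` (the model, loss, and data are placeholders, not the detection pipeline):

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    optimizer = tf.keras.optimizers.SGD(0.1)

def replicated_step(inputs):
    features, labels = inputs
    with tf.GradientTape() as tape:
        outputs = model(features, training=True)
        loss = tf.reduce_mean(tf.square(outputs - labels))
        # Divide so gradients summed across replicas match the global loss.
        per_replica_loss = loss / strategy.num_replicas_in_sync
    grads = tape.gradient(per_replica_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss

@tf.function
def train_step(dist_inputs):
    per_replica_losses = strategy.run(replicated_step, args=(dist_inputs,))
    return strategy.reduce(
        tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None)

dataset = tf.data.Dataset.from_tensor_slices(
    (tf.random.normal([32, 4]), tf.random.normal([32, 1]))).batch(8)
for batch in strategy.experimental_distribute_dataset(dataset):
    print(float(train_step(batch)))
```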
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"XLNet classification finetuning runner in tf2.0.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# from __future__ import google_type_annotations\nfrom __future__ import print_function\n\nimport functools\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport numpy as np\nimport tensorflow as tf\n# pylint: disable=unused-import\nfrom official.nlp.xlnet import common_flags\nfrom official.nlp.xlnet import data_utils\nfrom official.nlp.xlnet import optimization\nfrom official.nlp.xlnet import training_utils\nfrom official.nlp.xlnet import xlnet_config\nfrom official.nlp.xlnet import xlnet_modeling as modeling\nfrom official.utils.misc import tpu_lib\n\nflags.DEFINE_integer(\"n_class\", default=2, help=\"Number of classes.\")\nflags.DEFINE_string(\n \"summary_type\",\n default=\"last\",\n help=\"Method used to summarize a sequence into a vector.\")\n\nFLAGS = flags.FLAGS\n\n\ndef get_classificationxlnet_model(model_config,\n run_config,\n n_class,\n summary_type=\"last\"):\n model = modeling.ClassificationXLNetModel(\n model_config, run_config, n_class, summary_type, name=\"model\")\n return model\n\n\ndef run_evaluation(strategy,\n test_input_fn,\n eval_steps,\n model,\n step,\n eval_summary_writer=None):\n \"\"\"Run evaluation for classification task.\n\n Args:\n strategy: distribution strategy.\n test_input_fn: input function for evaluation data.\n eval_steps: total number of evaluation steps.\n model: keras model object.\n step: current train step.\n eval_summary_writer: summary writer used to record evaluation metrics. As\n there are fake data samples in validation set, we use mask to get rid of\n them when calculating the accuracy. 
For the reason that there will be\n dynamic-shape tensor, we first collect logits, labels and masks from TPU\n and calculate the accuracy via numpy locally.\n\n Returns:\n A float metric, accuracy.\n \"\"\"\n\n def _test_step_fn(inputs):\n \"\"\"Replicated validation step.\"\"\"\n\n inputs[\"mems\"] = None\n _, logits = model(inputs, training=False)\n return logits, inputs[\"label_ids\"], inputs[\"is_real_example\"]\n\n @tf.function\n def _run_evaluation(test_iterator):\n \"\"\"Runs validation steps.\"\"\"\n logits, labels, masks = strategy.run(\n _test_step_fn, args=(next(test_iterator),))\n return logits, labels, masks\n\n test_iterator = data_utils.get_input_iterator(test_input_fn, strategy)\n correct = 0\n total = 0\n for _ in range(eval_steps):\n logits, labels, masks = _run_evaluation(test_iterator)\n logits = strategy.experimental_local_results(logits)\n labels = strategy.experimental_local_results(labels)\n masks = strategy.experimental_local_results(masks)\n merged_logits = []\n merged_labels = []\n merged_masks = []\n\n for i in range(strategy.num_replicas_in_sync):\n merged_logits.append(logits[i].numpy())\n merged_labels.append(labels[i].numpy())\n merged_masks.append(masks[i].numpy())\n merged_logits = np.vstack(np.array(merged_logits))\n merged_labels = np.hstack(np.array(merged_labels))\n merged_masks = np.hstack(np.array(merged_masks))\n real_index = np.where(np.equal(merged_masks, 1))\n correct += np.sum(\n np.equal(\n np.argmax(merged_logits[real_index], axis=-1),\n merged_labels[real_index]))\n total += np.shape(real_index)[-1]\n accuracy = float(correct) / float(total)\n logging.info(\"Train step: %d / acc = %d/%d = %f\", step, correct, total,\n accuracy)\n if eval_summary_writer:\n with eval_summary_writer.as_default():\n tf.summary.scalar(\"eval_acc\", float(correct) / float(total), step=step)\n eval_summary_writer.flush()\n return accuracy\n\n\ndef get_metric_fn():\n train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy(\n \"acc\", dtype=tf.float32)\n return train_acc_metric\n\n\ndef main(unused_argv):\n del unused_argv\n if FLAGS.strategy_type == \"mirror\":\n strategy = tf.distribute.MirroredStrategy()\n elif FLAGS.strategy_type == \"tpu\":\n cluster_resolver = tpu_lib.tpu_initialize(FLAGS.tpu)\n strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)\n else:\n raise ValueError(\"The distribution strategy type is not supported: %s\" %\n FLAGS.strategy_type)\n if strategy:\n logging.info(\"***** Number of cores used : %d\",\n strategy.num_replicas_in_sync)\n train_input_fn = functools.partial(data_utils.get_classification_input_data,\n FLAGS.train_batch_size, FLAGS.seq_len,\n strategy, True, FLAGS.train_tfrecord_path)\n test_input_fn = functools.partial(data_utils.get_classification_input_data,\n FLAGS.test_batch_size, FLAGS.seq_len,\n strategy, False, FLAGS.test_tfrecord_path)\n\n total_training_steps = FLAGS.train_steps\n steps_per_loop = FLAGS.iterations\n eval_steps = int(FLAGS.test_data_size / FLAGS.test_batch_size)\n eval_fn = functools.partial(run_evaluation, strategy, test_input_fn,\n eval_steps)\n optimizer, learning_rate_fn = optimization.create_optimizer(\n FLAGS.learning_rate,\n total_training_steps,\n FLAGS.warmup_steps,\n adam_epsilon=FLAGS.adam_epsilon)\n model_config = xlnet_config.XLNetConfig(FLAGS)\n run_config = xlnet_config.create_run_config(True, False, FLAGS)\n model_fn = functools.partial(get_classificationxlnet_model, model_config,\n run_config, FLAGS.n_class, FLAGS.summary_type)\n input_meta_data = {}\n 
input_meta_data[\"d_model\"] = FLAGS.d_model\n input_meta_data[\"mem_len\"] = FLAGS.mem_len\n input_meta_data[\"batch_size_per_core\"] = int(FLAGS.train_batch_size /\n strategy.num_replicas_in_sync)\n input_meta_data[\"n_layer\"] = FLAGS.n_layer\n input_meta_data[\"lr_layer_decay_rate\"] = FLAGS.lr_layer_decay_rate\n input_meta_data[\"n_class\"] = FLAGS.n_class\n\n training_utils.train(\n strategy=strategy,\n model_fn=model_fn,\n input_meta_data=input_meta_data,\n eval_fn=eval_fn,\n metric_fn=get_metric_fn,\n train_input_fn=train_input_fn,\n init_checkpoint=FLAGS.init_checkpoint,\n init_from_transformerxl=FLAGS.init_from_transformerxl,\n total_training_steps=total_training_steps,\n steps_per_loop=steps_per_loop,\n optimizer=optimizer,\n learning_rate_fn=learning_rate_fn,\n model_dir=FLAGS.model_dir,\n save_steps=FLAGS.save_steps)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Class for encoding text using a trained SkipThoughtsModel.\n\nExample usage:\n g = tf.Graph()\n with g.as_default():\n encoder = SkipThoughtsEncoder(embeddings)\n restore_fn = encoder.build_graph_from_config(model_config, checkpoint_path)\n\n with tf.Session(graph=g) as sess:\n restore_fn(sess)\n skip_thought_vectors = encoder.encode(sess, data)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path\n\n\nimport nltk\nimport nltk.tokenize\nimport numpy as np\nimport tensorflow as tf\n\nfrom skip_thoughts import skip_thoughts_model\nfrom skip_thoughts.data import special_words\n\n\ndef _pad(seq, target_len):\n \"\"\"Pads a sequence of word embeddings up to the target length.\n\n Args:\n seq: Sequence of word embeddings.\n target_len: Desired padded sequence length.\n\n Returns:\n embeddings: Input sequence padded with zero embeddings up to the target\n length.\n mask: A 0/1 vector with zeros corresponding to padded embeddings.\n\n Raises:\n ValueError: If len(seq) is not in the interval (0, target_len].\n \"\"\"\n seq_len = len(seq)\n if seq_len <= 0 or seq_len > target_len:\n raise ValueError(\"Expected 0 < len(seq) <= %d, got %d\" % (target_len,\n seq_len))\n\n emb_dim = seq[0].shape[0]\n padded_seq = np.zeros(shape=(target_len, emb_dim), dtype=seq[0].dtype)\n mask = np.zeros(shape=(target_len,), dtype=np.int8)\n for i in range(seq_len):\n padded_seq[i] = seq[i]\n mask[i] = 1\n return padded_seq, mask\n\n\ndef _batch_and_pad(sequences):\n \"\"\"Batches and pads sequences of word embeddings into a 2D array.\n\n Args:\n sequences: A list of batch_size sequences of word embeddings.\n\n Returns:\n embeddings: A numpy array with shape [batch_size, padded_length, emb_dim].\n mask: A numpy 0/1 array with shape [batch_size, padded_length] with zeros\n corresponding to padded elements.\n \"\"\"\n batch_embeddings = []\n batch_mask = []\n batch_len = max([len(seq) for seq in sequences])\n for seq in sequences:\n embeddings, mask = _pad(seq, batch_len)\n batch_embeddings.append(embeddings)\n batch_mask.append(mask)\n return np.array(batch_embeddings), np.array(batch_mask)\n\n\nclass SkipThoughtsEncoder(object):\n \"\"\"Skip-thoughts sentence encoder.\"\"\"\n\n def __init__(self, embeddings):\n \"\"\"Initializes the encoder.\n\n Args:\n embeddings: Dictionary of word to embedding vector (1D numpy array).\n \"\"\"\n self._sentence_detector = nltk.data.load(\"tokenizers/punkt/english.pickle\")\n self._embeddings = embeddings\n\n def _create_restore_fn(self, checkpoint_path, saver):\n \"\"\"Creates a function that restores a model from checkpoint.\n\n Args:\n checkpoint_path: Checkpoint file or a directory containing a checkpoint\n file.\n saver: Saver for restoring variables from the checkpoint file.\n\n Returns:\n 
restore_fn: A function such that restore_fn(sess) loads model variables\n from the checkpoint file.\n\n Raises:\n ValueError: If checkpoint_path does not refer to a checkpoint file or a\n directory containing a checkpoint file.\n \"\"\"\n if tf.gfile.IsDirectory(checkpoint_path):\n latest_checkpoint = tf.train.latest_checkpoint(checkpoint_path)\n if not latest_checkpoint:\n raise ValueError(\"No checkpoint file found in: %s\" % checkpoint_path)\n checkpoint_path = latest_checkpoint\n\n def _restore_fn(sess):\n tf.logging.info(\"Loading model from checkpoint: %s\", checkpoint_path)\n saver.restore(sess, checkpoint_path)\n tf.logging.info(\"Successfully loaded checkpoint: %s\",\n os.path.basename(checkpoint_path))\n\n return _restore_fn\n\n def build_graph_from_config(self, model_config, checkpoint_path):\n \"\"\"Builds the inference graph from a configuration object.\n\n Args:\n model_config: Object containing configuration for building the model.\n checkpoint_path: Checkpoint file or a directory containing a checkpoint\n file.\n\n Returns:\n restore_fn: A function such that restore_fn(sess) loads model variables\n from the checkpoint file.\n \"\"\"\n tf.logging.info(\"Building model.\")\n model = skip_thoughts_model.SkipThoughtsModel(model_config, mode=\"encode\")\n model.build()\n saver = tf.train.Saver()\n\n return self._create_restore_fn(checkpoint_path, saver)\n\n def build_graph_from_proto(self, graph_def_file, saver_def_file,\n checkpoint_path):\n \"\"\"Builds the inference graph from serialized GraphDef and SaverDef protos.\n\n Args:\n graph_def_file: File containing a serialized GraphDef proto.\n saver_def_file: File containing a serialized SaverDef proto.\n checkpoint_path: Checkpoint file or a directory containing a checkpoint\n file.\n\n Returns:\n restore_fn: A function such that restore_fn(sess) loads model variables\n from the checkpoint file.\n \"\"\"\n # Load the Graph.\n tf.logging.info(\"Loading GraphDef from file: %s\", graph_def_file)\n graph_def = tf.GraphDef()\n with tf.gfile.FastGFile(graph_def_file, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name=\"\")\n\n # Load the Saver.\n tf.logging.info(\"Loading SaverDef from file: %s\", saver_def_file)\n saver_def = tf.train.SaverDef()\n with tf.gfile.FastGFile(saver_def_file, \"rb\") as f:\n saver_def.ParseFromString(f.read())\n saver = tf.train.Saver(saver_def=saver_def)\n\n return self._create_restore_fn(checkpoint_path, saver)\n\n def _tokenize(self, item):\n \"\"\"Tokenizes an input string into a list of words.\"\"\"\n tokenized = []\n for s in self._sentence_detector.tokenize(item):\n tokenized.extend(nltk.tokenize.word_tokenize(s))\n\n return tokenized\n\n def _word_to_embedding(self, w):\n \"\"\"Returns the embedding of a word.\"\"\"\n return self._embeddings.get(w, self._embeddings[special_words.UNK])\n\n def _preprocess(self, data, use_eos):\n \"\"\"Preprocesses text for the encoder.\n\n Args:\n data: A list of input strings.\n use_eos: Whether to append the end-of-sentence word to each sentence.\n\n Returns:\n embeddings: A list of word embedding sequences corresponding to the input\n strings.\n \"\"\"\n preprocessed_data = []\n for item in data:\n tokenized = self._tokenize(item)\n if use_eos:\n tokenized.append(special_words.EOS)\n preprocessed_data.append([self._word_to_embedding(w) for w in tokenized])\n return preprocessed_data\n\n def encode(self,\n sess,\n data,\n use_norm=True,\n verbose=True,\n batch_size=128,\n use_eos=False):\n \"\"\"Encodes a sequence of 
sentences as skip-thought vectors.\n\n Args:\n sess: TensorFlow Session.\n data: A list of input strings.\n use_norm: Whether to normalize skip-thought vectors to unit L2 norm.\n verbose: Whether to log every batch.\n batch_size: Batch size for the encoder.\n use_eos: Whether to append the end-of-sentence word to each input\n sentence.\n\n Returns:\n thought_vectors: A list of numpy arrays corresponding to the skip-thought\n encodings of sentences in 'data'.\n \"\"\"\n data = self._preprocess(data, use_eos)\n thought_vectors = []\n\n batch_indices = np.arange(0, len(data), batch_size)\n for batch, start_index in enumerate(batch_indices):\n if verbose:\n tf.logging.info(\"Batch %d / %d.\", batch, len(batch_indices))\n\n embeddings, mask = _batch_and_pad(\n data[start_index:start_index + batch_size])\n feed_dict = {\n \"encode_emb:0\": embeddings,\n \"encode_mask:0\": mask,\n }\n thought_vectors.extend(\n sess.run(\"encoder/thought_vectors:0\", feed_dict=feed_dict))\n\n if use_norm:\n thought_vectors = [v / np.linalg.norm(v) for v in thought_vectors]\n\n return thought_vectors\n",
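`_pad` and `_batch_and_pad` above densify ragged lists of word vectors into a fixed-size batch plus a 0/1 mask; here is the same idea reproduced on 2-dimensional toy embeddings:

```python
import numpy as np

def pad(seq, target_len):
    # Zero-pad a sequence of vectors and record which slots are real.
    padded = np.zeros((target_len, seq[0].shape[0]), dtype=seq[0].dtype)
    mask = np.zeros((target_len,), dtype=np.int8)
    for i, vec in enumerate(seq):
        padded[i] = vec
        mask[i] = 1
    return padded, mask

sequences = [[np.ones(2), 2 * np.ones(2)],  # length 2
             [3 * np.ones(2)]]              # length 1
batch_len = max(len(s) for s in sequences)
embeddings, masks = zip(*(pad(s, batch_len) for s in sequences))
print(np.array(masks))  # [[1 1]
                        #  [1 0]]
```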
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Executes BERT benchmarks and accuracy tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport json\nimport math\nimport os\nimport time\n\n# pylint: disable=g-bad-import-order\nfrom absl import flags\nfrom absl.testing import flagsaver\nimport tensorflow as tf\n# pylint: enable=g-bad-import-order\n\nfrom official.benchmark import bert_benchmark_utils as benchmark_utils\nfrom official.nlp.bert import configs\nfrom official.nlp.bert import run_classifier\nfrom official.utils.misc import distribution_utils\nfrom official.benchmark import benchmark_wrappers\n\n# pylint: disable=line-too-long\nPRETRAINED_CHECKPOINT_PATH = 'gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16/bert_model.ckpt'\nCLASSIFIER_TRAIN_DATA_PATH = 'gs://tf-perfzero-data/bert/classification/mrpc_train.tf_record'\nCLASSIFIER_EVAL_DATA_PATH = 'gs://tf-perfzero-data/bert/classification/mrpc_eval.tf_record'\nCLASSIFIER_INPUT_META_DATA_PATH = 'gs://tf-perfzero-data/bert/classification/mrpc_meta_data'\nMODEL_CONFIG_FILE_PATH = 'gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16/bert_config.json'\n# pylint: enable=line-too-long\n\nTMP_DIR = os.getenv('TMPDIR')\nFLAGS = flags.FLAGS\n\n\nclass BertClassifyBenchmarkBase(benchmark_utils.BertBenchmarkBase):\n \"\"\"Base class to hold methods common to test classes in the module.\"\"\"\n\n def __init__(self, output_dir=None, tpu=None):\n super(BertClassifyBenchmarkBase, self).__init__(output_dir)\n self.num_epochs = None\n self.num_steps_per_epoch = None\n self.tpu = tpu\n FLAGS.steps_per_loop = 50\n\n @flagsaver.flagsaver\n def _run_bert_classifier(self, callbacks=None, use_ds=True):\n \"\"\"Starts BERT classification task.\"\"\"\n with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:\n input_meta_data = json.loads(reader.read().decode('utf-8'))\n\n bert_config = configs.BertConfig.from_json_file(FLAGS.bert_config_file)\n epochs = self.num_epochs if self.num_epochs else FLAGS.num_train_epochs\n if self.num_steps_per_epoch:\n steps_per_epoch = self.num_steps_per_epoch\n else:\n train_data_size = input_meta_data['train_data_size']\n steps_per_epoch = int(train_data_size / FLAGS.train_batch_size)\n warmup_steps = int(epochs * steps_per_epoch * 0.1)\n eval_steps = int(\n math.ceil(input_meta_data['eval_data_size'] / FLAGS.eval_batch_size))\n if self.tpu:\n strategy = distribution_utils.get_distribution_strategy(\n distribution_strategy='tpu', tpu_address=self.tpu)\n else:\n strategy = distribution_utils.get_distribution_strategy(\n distribution_strategy='mirrored' if use_ds else 'off',\n num_gpus=self.num_gpus)\n\n max_seq_length = input_meta_data['max_seq_length']\n train_input_fn = run_classifier.get_dataset_fn(\n FLAGS.train_data_path,\n 
max_seq_length,\n FLAGS.train_batch_size,\n is_training=True)\n eval_input_fn = run_classifier.get_dataset_fn(\n FLAGS.eval_data_path,\n max_seq_length,\n FLAGS.eval_batch_size,\n is_training=False)\n run_classifier.run_bert_classifier(\n strategy,\n bert_config,\n input_meta_data,\n FLAGS.model_dir,\n epochs,\n steps_per_epoch,\n FLAGS.steps_per_loop,\n eval_steps,\n warmup_steps,\n FLAGS.learning_rate,\n FLAGS.init_checkpoint,\n train_input_fn,\n eval_input_fn,\n custom_callbacks=callbacks)\n\n\nclass BertClassifyBenchmarkReal(BertClassifyBenchmarkBase):\n \"\"\"Short benchmark performance tests for BERT model.\n\n Tests BERT classification performance in different GPU, TPU configurations.\n The naming convention of below test cases follow\n `benchmark_(number of gpus)_gpu_(dataset type)` for GPUs and\n `benchmark_(topology)_tpu_(dataset type)` for TPUs.\n \"\"\"\n\n def __init__(self, output_dir=TMP_DIR, tpu=None, **kwargs):\n super(BertClassifyBenchmarkReal, self).__init__(\n output_dir=output_dir, tpu=tpu)\n\n self.train_data_path = CLASSIFIER_TRAIN_DATA_PATH\n self.eval_data_path = CLASSIFIER_EVAL_DATA_PATH\n self.bert_config_file = MODEL_CONFIG_FILE_PATH\n self.input_meta_data_path = CLASSIFIER_INPUT_META_DATA_PATH\n\n # Since we only care about performance metrics, we limit\n # the number of training steps and epochs to prevent unnecessarily\n # long tests.\n self.num_steps_per_epoch = 100\n self.num_epochs = 1\n\n @benchmark_wrappers.enable_runtime_flags\n def _run_and_report_benchmark(self,\n training_summary_path,\n min_accuracy=0,\n max_accuracy=1,\n use_ds=True):\n \"\"\"Starts BERT performance benchmark test.\"\"\"\n start_time_sec = time.time()\n self._run_bert_classifier(callbacks=[self.timer_callback], use_ds=use_ds)\n wall_time_sec = time.time() - start_time_sec\n\n with tf.io.gfile.GFile(training_summary_path, 'rb') as reader:\n summary = json.loads(reader.read().decode('utf-8'))\n\n # Since we do not load from any pretrained checkpoints, we ignore all\n # accuracy metrics.\n summary.pop('eval_metrics', None)\n summary['start_time_sec'] = start_time_sec\n\n super(BertClassifyBenchmarkReal, self)._report_benchmark(\n stats=summary,\n wall_time_sec=wall_time_sec,\n min_accuracy=min_accuracy,\n max_accuracy=max_accuracy)\n\n def benchmark_1_gpu_mrpc(self):\n \"\"\"Test BERT model performance with 1 GPU.\"\"\"\n\n self._setup()\n self.num_gpus = 1\n FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_mrpc')\n FLAGS.train_data_path = self.train_data_path\n FLAGS.eval_data_path = self.eval_data_path\n FLAGS.input_meta_data_path = self.input_meta_data_path\n FLAGS.bert_config_file = self.bert_config_file\n FLAGS.train_batch_size = 4\n FLAGS.eval_batch_size = 4\n\n summary_path = os.path.join(FLAGS.model_dir,\n 'summaries/training_summary.txt')\n self._run_and_report_benchmark(summary_path)\n\n def benchmark_1_gpu_mrpc_xla(self):\n \"\"\"Test BERT model performance with 1 GPU.\"\"\"\n\n self._setup()\n self.num_gpus = 1\n FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_mrpc_xla')\n FLAGS.train_data_path = self.train_data_path\n FLAGS.eval_data_path = self.eval_data_path\n FLAGS.input_meta_data_path = self.input_meta_data_path\n FLAGS.bert_config_file = self.bert_config_file\n FLAGS.train_batch_size = 4\n FLAGS.eval_batch_size = 4\n FLAGS.enable_xla = True\n\n summary_path = os.path.join(FLAGS.model_dir,\n 'summaries/training_summary.txt')\n self._run_and_report_benchmark(summary_path)\n\n def benchmark_1_gpu_mrpc_no_dist_strat(self):\n \"\"\"Test BERT model performance 
with 1 GPU, no distribution strategy.\"\"\"\n\n self._setup()\n self.num_gpus = 1\n FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_mrpc_no_dist_strat')\n FLAGS.train_data_path = self.train_data_path\n FLAGS.eval_data_path = self.eval_data_path\n FLAGS.input_meta_data_path = self.input_meta_data_path\n FLAGS.bert_config_file = self.bert_config_file\n FLAGS.train_batch_size = 4\n FLAGS.eval_batch_size = 4\n\n summary_path = os.path.join(FLAGS.model_dir,\n 'summaries/training_summary.txt')\n self._run_and_report_benchmark(summary_path, use_ds=False)\n\n def benchmark_8_gpu_mrpc(self):\n \"\"\"Test BERT model performance with 8 GPUs.\"\"\"\n\n self._setup()\n FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mrpc')\n FLAGS.train_data_path = self.train_data_path\n FLAGS.eval_data_path = self.eval_data_path\n FLAGS.input_meta_data_path = self.input_meta_data_path\n FLAGS.bert_config_file = self.bert_config_file\n\n summary_path = os.path.join(FLAGS.model_dir,\n 'summaries/training_summary.txt')\n self._run_and_report_benchmark(summary_path)\n\n def benchmark_1_gpu_amp_mrpc_no_dist_strat(self):\n \"\"\"Performance for 1 GPU no DS with automatic mixed precision.\"\"\"\n self._setup()\n self.num_gpus = 1\n FLAGS.model_dir = self._get_model_dir(\n 'benchmark_1_gpu_amp_mrpc_no_dist_strat')\n FLAGS.train_data_path = self.train_data_path\n FLAGS.eval_data_path = self.eval_data_path\n FLAGS.input_meta_data_path = self.input_meta_data_path\n FLAGS.bert_config_file = self.bert_config_file\n FLAGS.train_batch_size = 4\n FLAGS.eval_batch_size = 4\n FLAGS.dtype = 'fp16'\n FLAGS.fp16_implementation = 'graph_rewrite'\n\n summary_path = os.path.join(FLAGS.model_dir,\n 'summaries/training_summary.txt')\n self._run_and_report_benchmark(summary_path, use_ds=False)\n\n def benchmark_8_gpu_amp_mrpc(self):\n \"\"\"Test BERT model performance with 8 GPUs with automatic mixed precision.\n \"\"\"\n\n self._setup()\n self.num_gpus = 8\n FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp_mrpc')\n FLAGS.train_data_path = self.train_data_path\n FLAGS.eval_data_path = self.eval_data_path\n FLAGS.input_meta_data_path = self.input_meta_data_path\n FLAGS.bert_config_file = self.bert_config_file\n FLAGS.train_batch_size = 32\n FLAGS.eval_batch_size = 32\n FLAGS.dtype = 'fp16'\n FLAGS.fp16_implementation = 'graph_rewrite'\n\n summary_path = os.path.join(FLAGS.model_dir,\n 'summaries/training_summary.txt')\n self._run_and_report_benchmark(summary_path, use_ds=False)\n\n def benchmark_2x2_tpu_mrpc(self):\n \"\"\"Test BERT model performance with 2x2 TPU.\"\"\"\n\n self._setup()\n FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu_mrpc')\n FLAGS.train_data_path = self.train_data_path\n FLAGS.eval_data_path = self.eval_data_path\n FLAGS.input_meta_data_path = self.input_meta_data_path\n FLAGS.bert_config_file = self.bert_config_file\n FLAGS.train_batch_size = 32\n FLAGS.eval_batch_size = 32\n\n summary_path = os.path.join(FLAGS.model_dir,\n 'summaries/training_summary.txt')\n self._run_and_report_benchmark(summary_path, use_ds=False)\n\n\nclass BertClassifyAccuracy(BertClassifyBenchmarkBase):\n \"\"\"Short accuracy test for BERT model.\n\n Tests BERT classification task model accuracy. 
The naming\n convention of below test cases follow\n `benchmark_(number of gpus)_gpu_(dataset type)` format.\n \"\"\"\n\n def __init__(self, output_dir=TMP_DIR, **kwargs):\n self.train_data_path = CLASSIFIER_TRAIN_DATA_PATH\n self.eval_data_path = CLASSIFIER_EVAL_DATA_PATH\n self.bert_config_file = MODEL_CONFIG_FILE_PATH\n self.input_meta_data_path = CLASSIFIER_INPUT_META_DATA_PATH\n self.pretrained_checkpoint_path = PRETRAINED_CHECKPOINT_PATH\n\n super(BertClassifyAccuracy, self).__init__(output_dir=output_dir)\n\n @benchmark_wrappers.enable_runtime_flags\n def _run_and_report_benchmark(self,\n training_summary_path,\n min_accuracy=0.84,\n max_accuracy=0.88):\n \"\"\"Starts BERT accuracy benchmark test.\"\"\"\n\n start_time_sec = time.time()\n self._run_bert_classifier(callbacks=[self.timer_callback])\n wall_time_sec = time.time() - start_time_sec\n\n with tf.io.gfile.GFile(training_summary_path, 'rb') as reader:\n summary = json.loads(reader.read().decode('utf-8'))\n\n super(BertClassifyAccuracy, self)._report_benchmark(\n stats=summary,\n wall_time_sec=wall_time_sec,\n min_accuracy=min_accuracy,\n max_accuracy=max_accuracy)\n\n def _setup(self):\n super(BertClassifyAccuracy, self)._setup()\n FLAGS.train_data_path = self.train_data_path\n FLAGS.eval_data_path = self.eval_data_path\n FLAGS.input_meta_data_path = self.input_meta_data_path\n FLAGS.bert_config_file = self.bert_config_file\n FLAGS.init_checkpoint = self.pretrained_checkpoint_path\n\n def benchmark_8_gpu_mrpc(self):\n \"\"\"Run BERT model accuracy test with 8 GPUs.\n\n Due to comparatively small cardinality of MRPC dataset, training\n accuracy metric has high variance between trainings. As so, we\n set the wide range of allowed accuracy (84% to 88%).\n \"\"\"\n self._setup()\n FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mrpc')\n\n summary_path = os.path.join(FLAGS.model_dir,\n 'summaries/training_summary.txt')\n self._run_and_report_benchmark(summary_path)\n\n def benchmark_8_gpu_mrpc_xla(self):\n \"\"\"Run BERT model accuracy test with 8 GPUs with XLA.\"\"\"\n self._setup()\n FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mrpc_xla')\n FLAGS.enable_xla = True\n summary_path = os.path.join(FLAGS.model_dir,\n 'summaries/training_summary.txt')\n self._run_and_report_benchmark(summary_path)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
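The benchmarks above wrap the whole `run_bert_classifier` call in a wall-clock timer and read throughput out of the training-summary JSON; the `timer_callback` they pass in comes from shared benchmark utilities not shown here. A minimal sketch of the idea — assuming a Keras-style training loop and a fixed global batch size (`ThroughputCallback` and its names are illustrative, not the actual utility) — could look like:

```python
import time

import tensorflow as tf


class ThroughputCallback(tf.keras.callbacks.Callback):
  """Records per-batch wall time so examples/sec can be derived."""

  def __init__(self, global_batch_size):
    super(ThroughputCallback, self).__init__()
    self.global_batch_size = global_batch_size
    self.batch_times = []

  def on_train_batch_begin(self, batch, logs=None):
    self._batch_start = time.time()

  def on_train_batch_end(self, batch, logs=None):
    self.batch_times.append(time.time() - self._batch_start)

  def examples_per_second(self, num_warmup_batches=10):
    # Drop warm-up batches, which include graph tracing/compilation cost.
    times = self.batch_times[num_warmup_batches:]
    return self.global_batch_size * len(times) / sum(times)
```

Discarding warm-up batches matters here: with `enable_xla` or the `fp16` graph rewrite, the first steps are dominated by compilation and would skew the reported examples/sec.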
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Create masked LM/next sentence masked_lm TF examples for BERT.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport random\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport tensorflow as tf\n\nfrom official.nlp.bert import tokenization\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"input_file\", None,\n \"Input raw text file (or comma-separated list of files).\")\n\nflags.DEFINE_string(\n \"output_file\", None,\n \"Output TF example file (or comma-separated list of files).\")\n\nflags.DEFINE_string(\"vocab_file\", None,\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_bool(\n \"do_whole_word_mask\", False,\n \"Whether to use whole word masking rather than per-WordPiece masking.\")\n\nflags.DEFINE_bool(\n \"gzip_compress\", False,\n \"Whether to use `GZIP` compress option to get compressed TFRecord files.\")\n\nflags.DEFINE_integer(\"max_seq_length\", 128, \"Maximum sequence length.\")\n\nflags.DEFINE_integer(\"max_predictions_per_seq\", 20,\n \"Maximum number of masked LM predictions per sequence.\")\n\nflags.DEFINE_integer(\"random_seed\", 12345, \"Random seed for data generation.\")\n\nflags.DEFINE_integer(\n \"dupe_factor\", 10,\n \"Number of times to duplicate the input data (with different masks).\")\n\nflags.DEFINE_float(\"masked_lm_prob\", 0.15, \"Masked LM probability.\")\n\nflags.DEFINE_float(\n \"short_seq_prob\", 0.1,\n \"Probability of creating sequences which are shorter than the \"\n \"maximum length.\")\n\n\nclass TrainingInstance(object):\n \"\"\"A single training instance (sentence pair).\"\"\"\n\n def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels,\n is_random_next):\n self.tokens = tokens\n self.segment_ids = segment_ids\n self.is_random_next = is_random_next\n self.masked_lm_positions = masked_lm_positions\n self.masked_lm_labels = masked_lm_labels\n\n def __str__(self):\n s = \"\"\n s += \"tokens: %s\\n\" % (\" \".join(\n [tokenization.printable_text(x) for x in self.tokens]))\n s += \"segment_ids: %s\\n\" % (\" \".join([str(x) for x in self.segment_ids]))\n s += \"is_random_next: %s\\n\" % self.is_random_next\n s += \"masked_lm_positions: %s\\n\" % (\" \".join(\n [str(x) for x in self.masked_lm_positions]))\n s += \"masked_lm_labels: %s\\n\" % (\" \".join(\n [tokenization.printable_text(x) for x in self.masked_lm_labels]))\n s += \"\\n\"\n return s\n\n def __repr__(self):\n return self.__str__()\n\n\ndef write_instance_to_example_files(instances, tokenizer, max_seq_length,\n max_predictions_per_seq, output_files,\n 
gzip_compress):\n \"\"\"Create TF example files from `TrainingInstance`s.\"\"\"\n writers = []\n for output_file in output_files:\n writers.append(\n tf.io.TFRecordWriter(\n output_file, options=\"GZIP\" if gzip_compress else \"\"))\n\n writer_index = 0\n\n total_written = 0\n for (inst_index, instance) in enumerate(instances):\n input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)\n input_mask = [1] * len(input_ids)\n segment_ids = list(instance.segment_ids)\n assert len(input_ids) <= max_seq_length\n\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n masked_lm_positions = list(instance.masked_lm_positions)\n masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels)\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n while len(masked_lm_positions) < max_predictions_per_seq:\n masked_lm_positions.append(0)\n masked_lm_ids.append(0)\n masked_lm_weights.append(0.0)\n\n next_sentence_label = 1 if instance.is_random_next else 0\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(input_ids)\n features[\"input_mask\"] = create_int_feature(input_mask)\n features[\"segment_ids\"] = create_int_feature(segment_ids)\n features[\"masked_lm_positions\"] = create_int_feature(masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(masked_lm_weights)\n features[\"next_sentence_labels\"] = create_int_feature([next_sentence_label])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n\n writers[writer_index].write(tf_example.SerializeToString())\n writer_index = (writer_index + 1) % len(writers)\n\n total_written += 1\n\n if inst_index < 20:\n logging.info(\"*** Example ***\")\n logging.info(\"tokens: %s\", \" \".join(\n [tokenization.printable_text(x) for x in instance.tokens]))\n\n for feature_name in features.keys():\n feature = features[feature_name]\n values = []\n if feature.int64_list.value:\n values = feature.int64_list.value\n elif feature.float_list.value:\n values = feature.float_list.value\n logging.info(\"%s: %s\", feature_name, \" \".join([str(x) for x in values]))\n\n for writer in writers:\n writer.close()\n\n logging.info(\"Wrote %d total instances\", total_written)\n\n\ndef create_int_feature(values):\n feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n\ndef create_float_feature(values):\n feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n return feature\n\n\ndef create_training_instances(input_files,\n tokenizer,\n max_seq_length,\n dupe_factor,\n short_seq_prob,\n masked_lm_prob,\n max_predictions_per_seq,\n rng,\n do_whole_word_mask=False):\n \"\"\"Create `TrainingInstance`s from raw text.\"\"\"\n all_documents = [[]]\n\n # Input file format:\n # (1) One sentence per line. These should ideally be actual sentences, not\n # entire paragraphs or arbitrary spans of text. (Because we use the\n # sentence boundaries for the \"next sentence prediction\" task).\n # (2) Blank lines between documents. 
Document boundaries are needed so\n # that the \"next sentence prediction\" task doesn't span between documents.\n for input_file in input_files:\n with tf.io.gfile.GFile(input_file, \"rb\") as reader:\n while True:\n line = tokenization.convert_to_unicode(reader.readline())\n if not line:\n break\n line = line.strip()\n\n # Empty lines are used as document delimiters\n if not line:\n all_documents.append([])\n tokens = tokenizer.tokenize(line)\n if tokens:\n all_documents[-1].append(tokens)\n\n # Remove empty documents\n all_documents = [x for x in all_documents if x]\n rng.shuffle(all_documents)\n\n vocab_words = list(tokenizer.vocab.keys())\n instances = []\n for _ in range(dupe_factor):\n for document_index in range(len(all_documents)):\n instances.extend(\n create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng,\n do_whole_word_mask))\n\n rng.shuffle(instances)\n return instances\n\n\ndef create_instances_from_document(\n all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, vocab_words, rng,\n do_whole_word_mask=False):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n document = all_documents[document_index]\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. However, we *sometimes*\n # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if rng.random() < short_seq_prob:\n target_seq_length = rng.randint(2, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n instances = []\n current_chunk = []\n current_length = 0\n i = 0\n while i < len(document):\n segment = document[i]\n current_chunk.append(segment)\n current_length += len(segment)\n if i == len(document) - 1 or current_length >= target_seq_length:\n if current_chunk:\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2:\n a_end = rng.randint(1, len(current_chunk) - 1)\n\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n tokens_b = []\n # Random next\n is_random_next = False\n if len(current_chunk) == 1 or rng.random() < 0.5:\n is_random_next = True\n target_b_length = target_seq_length - len(tokens_a)\n\n # This should rarely go for more than one iteration for large\n # corpora. 
However, just to be careful, we try to make sure that\n # the random document is not the same as the document\n # we're processing.\n for _ in range(10):\n random_document_index = rng.randint(0, len(all_documents) - 1)\n if random_document_index != document_index:\n break\n\n random_document = all_documents[random_document_index]\n random_start = rng.randint(0, len(random_document) - 1)\n for j in range(random_start, len(random_document)):\n tokens_b.extend(random_document[j])\n if len(tokens_b) >= target_b_length:\n break\n # We didn't actually use these segments so we \"put them back\" so\n # they don't go to waste.\n num_unused_segments = len(current_chunk) - a_end\n i -= num_unused_segments\n # Actual next\n else:\n is_random_next = False\n for j in range(a_end, len(current_chunk)):\n tokens_b.extend(current_chunk[j])\n truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)\n\n assert len(tokens_a) >= 1\n assert len(tokens_b) >= 1\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng,\n do_whole_word_mask)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels)\n instances.append(instance)\n current_chunk = []\n current_length = 0\n i += 1\n\n return instances\n\n\nMaskedLmInstance = collections.namedtuple(\"MaskedLmInstance\",\n [\"index\", \"label\"])\n\n\ndef create_masked_lm_predictions(tokens, masked_lm_prob,\n max_predictions_per_seq, vocab_words, rng,\n do_whole_word_mask):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token == \"[CLS]\" or token == \"[SEP]\":\n continue\n # Whole Word Masking means that if we mask all of the wordpieces\n # corresponding to an original word. When a word has been split into\n # WordPieces, the first token does not have any marker and any subsequence\n # tokens are prefixed with ##. 
So whenever we see the ## token, we\n # append it to the previous set of word indexes.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each WordPiece independently, softmaxed\n # over the entire vocabulary.\n if (do_whole_word_mask and len(cand_indexes) >= 1 and\n token.startswith(\"##\")):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n rng.shuffle(cand_indexes)\n\n output_tokens = list(tokens)\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n\n masked_token = None\n # 80% of the time, replace with [MASK]\n if rng.random() < 0.8:\n masked_token = \"[MASK]\"\n else:\n # 10% of the time, keep original\n if rng.random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]\n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n assert len(masked_lms) <= num_to_predict\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)\n\n\ndef truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):\n \"\"\"Truncates a pair of sequences to a maximum sequence length.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if rng.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()\n\n\ndef main(_):\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n input_files = []\n for input_pattern in FLAGS.input_file.split(\",\"):\n input_files.extend(tf.io.gfile.glob(input_pattern))\n\n logging.info(\"*** Reading from input files ***\")\n for input_file in input_files:\n logging.info(\" %s\", input_file)\n\n rng = random.Random(FLAGS.random_seed)\n instances = create_training_instances(\n input_files, tokenizer, FLAGS.max_seq_length, FLAGS.dupe_factor,\n FLAGS.short_seq_prob, FLAGS.masked_lm_prob, FLAGS.max_predictions_per_seq,\n rng, FLAGS.do_whole_word_mask)\n\n output_files = FLAGS.output_file.split(\",\")\n logging.info(\"*** Writing to output files ***\")\n for output_file in output_files:\n logging.info(\" %s\", output_file)\n\n write_instance_to_example_files(instances, tokenizer, FLAGS.max_seq_length,\n FLAGS.max_predictions_per_seq, output_files,\n FLAGS.gzip_compress)\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"input_file\")\n flags.mark_flag_as_required(\"output_file\")\n 
flags.mark_flag_as_required(\"vocab_file\")\n app.run(main)\n",
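Two numbers drive the masking loop in `create_masked_lm_predictions` above: `num_to_predict = min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob))))`, and the 80/10/10 replacement rule applied to each selected position. With the defaults (`masked_lm_prob=0.15`, `max_seq_length=128`, `max_predictions_per_seq=20`), a full-length sequence masks `min(20, round(128 * 0.15)) = 19` positions. A condensed, self-contained restatement of the per-token branch:

```python
import random


def mask_token(original_token, vocab_words, rng):
  """The 80/10/10 rule from create_masked_lm_predictions, in isolation."""
  if rng.random() < 0.8:
    return "[MASK]"        # 80% of the time: replace with [MASK]
  if rng.random() < 0.5:
    return original_token  # 10% of the time: keep the original token
  # Remaining 10%: substitute a random vocabulary word.
  return vocab_words[rng.randint(0, len(vocab_words) - 1)]


rng = random.Random(12345)
print([mask_token("apple", ["pear", "plum", "fig"], rng) for _ in range(5)])
```

Note that the label stored in `MaskedLmInstance` is always the *original* token, regardless of which branch replaced it in `output_tokens`.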
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\n\nimport tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef rnn_nas(hparams, model):\n assert model == 'gen' or model == 'dis'\n\n # This logic is only valid for rnn_zaremba\n if model == 'gen':\n assert FLAGS.generator_model == 'rnn_nas'\n assert hparams.gen_num_layers == 2\n\n if model == 'dis':\n assert FLAGS.discriminator_model == 'rnn_nas'\n assert hparams.dis_num_layers == 2\n\n # Output variables only for the Generator. Discriminator output biases\n # will begin randomly initialized.\n if model == 'gen':\n softmax_b = [\n v for v in tf.trainable_variables() if v.op.name == 'gen/rnn/softmax_b'\n ][0]\n\n # Common elements to Generator and Discriminator.\n embedding = [\n v for v in tf.trainable_variables()\n if v.op.name == str(model) + '/rnn/embedding'\n ][0]\n lstm_w_0 = [\n v for v in tf.trainable_variables()\n if v.op.name ==\n str(model) + '/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat'\n ][0]\n lstm_b_0 = [\n v for v in tf.trainable_variables()\n if v.op.name == str(model) +\n '/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat'\n ][0]\n lstm_w_1 = [\n v for v in tf.trainable_variables()\n if v.op.name ==\n str(model) + '/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat'\n ][0]\n lstm_b_1 = [\n v for v in tf.trainable_variables()\n if v.op.name == str(model) +\n '/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat'\n ][0]\n\n # Dictionary mapping.\n if model == 'gen':\n variable_mapping = {\n 'Model/embeddings/input_embedding':\n embedding,\n 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':\n lstm_w_0,\n 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':\n lstm_b_0,\n 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':\n lstm_w_1,\n 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':\n lstm_b_1,\n 'Model/softmax_b':\n softmax_b\n }\n else:\n variable_mapping = {\n 'Model/embeddings/input_embedding':\n embedding,\n 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':\n lstm_w_0,\n 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':\n lstm_b_0,\n 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':\n lstm_w_1,\n 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':\n lstm_b_1\n }\n\n return variable_mapping\n\n\ndef cnn():\n \"\"\"Variable mapping for the CNN embedding.\n\n Returns:\n variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.\n \"\"\"\n # This logic is only valid for cnn\n assert FLAGS.discriminator_model == 'cnn'\n\n # Retrieve CNN embedding.\n embedding = [\n v for v in tf.trainable_variables() if v.op.name == 'dis/embedding'\n ][0]\n\n # 
Variable mapping.\n variable_mapping = {'Model/embedding': embedding}\n\n return variable_mapping\n\n\ndef rnn_zaremba(hparams, model):\n \"\"\"Returns the PTB Variable name to MaskGAN Variable dictionary mapping. This\n is a highly restrictive function just for testing. This will need to be\n generalized.\n\n Args:\n hparams: Hyperparameters for the MaskGAN.\n model: Model type, one of ['gen', 'dis'].\n\n Returns:\n variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.\n \"\"\"\n assert model == 'gen' or model == 'dis'\n\n # This logic is only valid for rnn_zaremba\n if model == 'gen':\n assert FLAGS.generator_model == 'rnn_zaremba'\n assert hparams.gen_num_layers == 2\n\n if model == 'dis':\n assert (FLAGS.discriminator_model == 'rnn_zaremba' or\n FLAGS.discriminator_model == 'rnn_vd')\n assert hparams.dis_num_layers == 2\n\n # Output variables only for the Generator. Discriminator output weights\n # and biases will begin randomly initialized.\n if model == 'gen':\n softmax_w = [\n v for v in tf.trainable_variables() if v.op.name == 'gen/rnn/softmax_w'\n ][0]\n softmax_b = [\n v for v in tf.trainable_variables() if v.op.name == 'gen/rnn/softmax_b'\n ][0]\n\n # Common elements to Generator and Discriminator.\n if not FLAGS.dis_share_embedding or model != 'dis':\n embedding = [\n v for v in tf.trainable_variables()\n if v.op.name == str(model) + '/rnn/embedding'\n ][0]\n lstm_w_0 = [\n v for v in tf.trainable_variables() if v.op.name == str(model) +\n '/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\n ][0]\n lstm_b_0 = [\n v for v in tf.trainable_variables() if v.op.name == str(model) +\n '/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\n ][0]\n lstm_w_1 = [\n v for v in tf.trainable_variables() if v.op.name == str(model) +\n '/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\n ][0]\n lstm_b_1 = [\n v for v in tf.trainable_variables() if v.op.name == str(model) +\n '/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\n ][0]\n\n # Dictionary mapping.\n if model == 'gen':\n variable_mapping = {\n 'Model/embedding': embedding,\n 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0,\n 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0,\n 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1,\n 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1,\n 'Model/softmax_w': softmax_w,\n 'Model/softmax_b': softmax_b\n }\n else:\n if FLAGS.dis_share_embedding:\n variable_mapping = {\n 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0,\n 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0,\n 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1,\n 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1\n }\n else:\n variable_mapping = {\n 'Model/embedding': embedding,\n 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': lstm_w_0,\n 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': lstm_b_0,\n 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': lstm_w_1,\n 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': lstm_b_1\n }\n\n return variable_mapping\n\n\ndef gen_encoder_seq2seq_nas(hparams):\n \"\"\"Returns the NAS Variable name to MaskGAN Variable\n dictionary mapping. 
This is a highly restrictive function just for testing.\n This is for the *unidirectional* seq2seq_nas encoder.\n\n Args:\n hparams: Hyperparameters for the MaskGAN.\n\n Returns:\n variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.\n \"\"\"\n assert FLAGS.generator_model == 'seq2seq_nas'\n assert hparams.gen_num_layers == 2\n ## Encoder forward variables.\n\n if not FLAGS.seq2seq_share_embedding:\n encoder_embedding = [\n v for v in tf.trainable_variables()\n if v.op.name == 'gen/encoder/rnn/embedding'\n ][0]\n encoder_lstm_w_0 = [\n v for v in tf.trainable_variables()\n if v.op.name ==\n 'gen/encoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat'\n ][0]\n encoder_lstm_b_0 = [\n v for v in tf.trainable_variables()\n if v.op.name ==\n 'gen/encoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat'\n ][0]\n encoder_lstm_w_1 = [\n v for v in tf.trainable_variables()\n if v.op.name ==\n 'gen/encoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat'\n ][0]\n encoder_lstm_b_1 = [\n v for v in tf.trainable_variables()\n if v.op.name ==\n 'gen/encoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat'\n ][0]\n\n if not FLAGS.seq2seq_share_embedding:\n variable_mapping = {\n 'Model/embeddings/input_embedding':\n encoder_embedding,\n 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':\n encoder_lstm_w_0,\n 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':\n encoder_lstm_b_0,\n 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':\n encoder_lstm_w_1,\n 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':\n encoder_lstm_b_1\n }\n else:\n variable_mapping = {\n 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':\n encoder_lstm_w_0,\n 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':\n encoder_lstm_b_0,\n 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':\n encoder_lstm_w_1,\n 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':\n encoder_lstm_b_1\n }\n return variable_mapping\n\n\ndef gen_decoder_seq2seq_nas(hparams):\n assert FLAGS.generator_model == 'seq2seq_nas'\n assert hparams.gen_num_layers == 2\n\n decoder_embedding = [\n v for v in tf.trainable_variables()\n if v.op.name == 'gen/decoder/rnn/embedding'\n ][0]\n decoder_lstm_w_0 = [\n v for v in tf.trainable_variables()\n if v.op.name ==\n 'gen/decoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat'\n ][0]\n decoder_lstm_b_0 = [\n v for v in tf.trainable_variables()\n if v.op.name ==\n 'gen/decoder/rnn/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat'\n ][0]\n decoder_lstm_w_1 = [\n v for v in tf.trainable_variables()\n if v.op.name ==\n 'gen/decoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat'\n ][0]\n decoder_lstm_b_1 = [\n v for v in tf.trainable_variables()\n if v.op.name ==\n 'gen/decoder/rnn/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat'\n ][0]\n\n decoder_softmax_b = [\n v for v in tf.trainable_variables()\n if v.op.name == 'gen/decoder/rnn/softmax_b'\n ][0]\n\n variable_mapping = {\n 'Model/embeddings/input_embedding':\n decoder_embedding,\n 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_h_mat':\n decoder_lstm_w_0,\n 'Model/RNN/GenericMultiRNNCell/Cell0/Alien/rnn_builder/big_inputs_mat':\n decoder_lstm_b_0,\n 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_h_mat':\n decoder_lstm_w_1,\n 'Model/RNN/GenericMultiRNNCell/Cell1/Alien/rnn_builder/big_inputs_mat':\n 
decoder_lstm_b_1,\n 'Model/softmax_b':\n decoder_softmax_b\n }\n\n return variable_mapping\n\n\ndef gen_encoder_seq2seq(hparams):\n \"\"\"Returns the PTB Variable name to MaskGAN Variable\n dictionary mapping. This is a highly restrictive function just for testing.\n This is for the *unidirectional* seq2seq_zaremba encoder.\n\n Args:\n hparams: Hyperparameters for the MaskGAN.\n\n Returns:\n variable_mapping: Dictionary with Key: ckpt_name, Value: model_var.\n \"\"\"\n assert (FLAGS.generator_model == 'seq2seq_zaremba' or\n FLAGS.generator_model == 'seq2seq_vd')\n assert hparams.gen_num_layers == 2\n\n ## Encoder forward variables.\n if not FLAGS.seq2seq_share_embedding:\n encoder_embedding = [\n v for v in tf.trainable_variables()\n if v.op.name == 'gen/encoder/rnn/embedding'\n ][0]\n encoder_lstm_w_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\n ][0]\n encoder_lstm_w_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\n ][0]\n\n if FLAGS.data_set == 'ptb':\n model_str = 'Model'\n else:\n model_str = 'model'\n\n if not FLAGS.seq2seq_share_embedding:\n variable_mapping = {\n str(model_str) + '/embedding':\n encoder_embedding,\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':\n encoder_lstm_w_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':\n encoder_lstm_b_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':\n encoder_lstm_w_1,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':\n encoder_lstm_b_1\n }\n else:\n variable_mapping = {\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':\n encoder_lstm_w_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':\n encoder_lstm_b_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':\n encoder_lstm_w_1,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':\n encoder_lstm_b_1\n }\n return variable_mapping\n\n\ndef gen_decoder_seq2seq(hparams):\n assert (FLAGS.generator_model == 'seq2seq_zaremba' or\n FLAGS.generator_model == 'seq2seq_vd')\n assert hparams.gen_num_layers == 2\n\n decoder_embedding = [\n v for v in tf.trainable_variables()\n if v.op.name == 'gen/decoder/rnn/embedding'\n ][0]\n decoder_lstm_w_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\n ][0]\n decoder_lstm_b_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\n ][0]\n decoder_lstm_w_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\n ][0]\n decoder_lstm_b_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\n ][0]\n decoder_softmax_b = [\n v for v in tf.trainable_variables()\n if v.op.name == 'gen/decoder/rnn/softmax_b'\n ][0]\n\n if FLAGS.data_set == 'ptb':\n model_str = 'Model'\n else:\n model_str = 'model'\n\n variable_mapping = {\n str(model_str) + '/embedding':\n decoder_embedding,\n 
str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':\n decoder_lstm_w_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':\n decoder_lstm_b_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':\n decoder_lstm_w_1,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':\n decoder_lstm_b_1,\n str(model_str) + '/softmax_b':\n decoder_softmax_b\n }\n return variable_mapping\n\n\ndef dis_fwd_bidirectional(hparams):\n \"\"\"Returns the *forward* PTB Variable name to MaskGAN Variable dictionary\n mapping. This is a highly restrictive function just for testing. This is for\n the bidirectional_zaremba discriminator.\n\n Args:\n FLAGS: Flags for the model.\n hparams: Hyperparameters for the MaskGAN.\n\n Returns:\n variable_mapping: Dictionary with Key: ckpt_name, Value: model_varself.\n \"\"\"\n assert (FLAGS.discriminator_model == 'bidirectional_zaremba' or\n FLAGS.discriminator_model == 'bidirectional_vd')\n assert hparams.dis_num_layers == 2\n\n # Forward Discriminator Elements.\n if not FLAGS.dis_share_embedding:\n embedding = [\n v for v in tf.trainable_variables() if v.op.name == 'dis/embedding'\n ][0]\n fw_lstm_w_0 = [\n v for v in tf.trainable_variables()\n if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\n ][0]\n fw_lstm_b_0 = [\n v for v in tf.trainable_variables()\n if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\n ][0]\n fw_lstm_w_1 = [\n v for v in tf.trainable_variables()\n if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\n ][0]\n fw_lstm_b_1 = [\n v for v in tf.trainable_variables()\n if v.op.name == 'dis/rnn/fw/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\n ][0]\n if FLAGS.dis_share_embedding:\n variable_mapping = {\n 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': fw_lstm_w_0,\n 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': fw_lstm_b_0,\n 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': fw_lstm_w_1,\n 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': fw_lstm_b_1\n }\n else:\n variable_mapping = {\n 'Model/embedding': embedding,\n 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': fw_lstm_w_0,\n 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': fw_lstm_b_0,\n 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': fw_lstm_w_1,\n 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': fw_lstm_b_1\n }\n return variable_mapping\n\n\ndef dis_bwd_bidirectional(hparams):\n \"\"\"Returns the *backward* PTB Variable name to MaskGAN Variable dictionary\n mapping. This is a highly restrictive function just for testing. 
This is for\n the bidirectional_zaremba discriminator.\n\n Args:\n hparams: Hyperparameters for the MaskGAN.\n\n Returns:\n variable_mapping: Dictionary with Key: ckpt_name, Value: model_varself.\n \"\"\"\n assert (FLAGS.discriminator_model == 'bidirectional_zaremba' or\n FLAGS.discriminator_model == 'bidirectional_vd')\n assert hparams.dis_num_layers == 2\n\n # Backward Discriminator Elements.\n bw_lstm_w_0 = [\n v for v in tf.trainable_variables()\n if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\n ][0]\n bw_lstm_b_0 = [\n v for v in tf.trainable_variables()\n if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\n ][0]\n bw_lstm_w_1 = [\n v for v in tf.trainable_variables()\n if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\n ][0]\n bw_lstm_b_1 = [\n v for v in tf.trainable_variables()\n if v.op.name == 'dis/rnn/bw/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\n ][0]\n\n variable_mapping = {\n 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel': bw_lstm_w_0,\n 'Model/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias': bw_lstm_b_0,\n 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel': bw_lstm_w_1,\n 'Model/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias': bw_lstm_b_1\n }\n return variable_mapping\n\n\ndef dis_encoder_seq2seq(hparams):\n \"\"\"Returns the PTB Variable name to MaskGAN Variable\n dictionary mapping.\n\n Args:\n hparams: Hyperparameters for the MaskGAN.\n\n Returns:\n variable_mapping: Dictionary with Key: ckpt_name, Value: model_varself.\n \"\"\"\n assert FLAGS.discriminator_model == 'seq2seq_vd'\n assert hparams.dis_num_layers == 2\n\n ## Encoder forward variables.\n encoder_lstm_w_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\n ][0]\n encoder_lstm_w_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\n ][0]\n\n if FLAGS.data_set == 'ptb':\n model_str = 'Model'\n else:\n model_str = 'model'\n\n variable_mapping = {\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':\n encoder_lstm_w_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':\n encoder_lstm_b_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':\n encoder_lstm_w_1,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':\n encoder_lstm_b_1\n }\n return variable_mapping\n\n\ndef dis_decoder_seq2seq(hparams):\n assert FLAGS.discriminator_model == 'seq2seq_vd'\n assert hparams.dis_num_layers == 2\n\n if not FLAGS.dis_share_embedding:\n decoder_embedding = [\n v for v in tf.trainable_variables()\n if v.op.name == 'dis/decoder/rnn/embedding'\n ][0]\n decoder_lstm_w_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\n ][0]\n decoder_lstm_b_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\n ][0]\n decoder_lstm_w_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\n ][0]\n decoder_lstm_b_1 = [\n v for v in 
tf.trainable_variables() if v.op.name ==\n 'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\n ][0]\n\n if FLAGS.data_set == 'ptb':\n model_str = 'Model'\n else:\n model_str = 'model'\n\n if not FLAGS.dis_share_embedding:\n variable_mapping = {\n str(model_str) + '/embedding':\n decoder_embedding,\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':\n decoder_lstm_w_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':\n decoder_lstm_b_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':\n decoder_lstm_w_1,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':\n decoder_lstm_b_1\n }\n else:\n variable_mapping = {\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':\n decoder_lstm_w_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_0/basic_lstm_cell/bias':\n decoder_lstm_b_0,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':\n decoder_lstm_w_1,\n str(model_str) + '/RNN/multi_rnn_cell/cell_1/basic_lstm_cell/bias':\n decoder_lstm_b_1,\n }\n return variable_mapping\n\n\ndef dis_seq2seq_vd(hparams):\n assert FLAGS.discriminator_model == 'seq2seq_vd'\n assert hparams.dis_num_layers == 2\n\n if not FLAGS.dis_share_embedding:\n decoder_embedding = [\n v for v in tf.trainable_variables()\n if v.op.name == 'dis/decoder/rnn/embedding'\n ][0]\n\n ## Encoder variables.\n encoder_lstm_w_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\n ][0]\n encoder_lstm_w_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\n ][0]\n encoder_lstm_b_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\n ][0]\n\n ## Attention.\n if FLAGS.attention_option is not None:\n decoder_attention_keys = [\n v for v in tf.trainable_variables()\n if v.op.name == 'dis/decoder/attention_keys/weights'\n ][0]\n decoder_attention_construct_weights = [\n v for v in tf.trainable_variables()\n if v.op.name == 'dis/decoder/rnn/attention_construct/weights'\n ][0]\n\n ## Decoder.\n decoder_lstm_w_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel'\n ][0]\n decoder_lstm_b_0 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias'\n ][0]\n decoder_lstm_w_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel'\n ][0]\n decoder_lstm_b_1 = [\n v for v in tf.trainable_variables() if v.op.name ==\n 'dis/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias'\n ][0]\n\n # Standard variable mappings.\n variable_mapping = {\n 'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':\n encoder_lstm_w_0,\n 'gen/encoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias':\n encoder_lstm_b_0,\n 'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':\n encoder_lstm_w_1,\n 'gen/encoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias':\n encoder_lstm_b_1,\n 'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel':\n decoder_lstm_w_0,\n 'gen/decoder/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/bias':\n decoder_lstm_b_0,\n 
'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/kernel':\n decoder_lstm_w_1,\n 'gen/decoder/rnn/multi_rnn_cell/cell_1/basic_lstm_cell/bias':\n decoder_lstm_b_1\n }\n\n # Optional variable mappings.\n if not FLAGS.dis_share_embedding:\n variable_mapping['gen/decoder/rnn/embedding'] = decoder_embedding\n if FLAGS.attention_option is not None:\n variable_mapping[\n 'gen/decoder/attention_keys/weights'] = decoder_attention_keys\n variable_mapping[\n 'gen/decoder/rnn/attention_construct/weights'] = decoder_attention_construct_weights\n\n return variable_mapping\n",
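Every function in this file returns a `{checkpoint_variable_name: model_variable}` dictionary. In TF1 a mapping like that is typically handed to `tf.train.Saver`, which restores each named checkpoint tensor into the mapped (differently named) graph variable — here, pretrained language-model weights into MaskGAN's `gen/...` or `dis/...` scopes. A usage sketch (the `hparams` object and checkpoint path are placeholders, and the generator graph must already be built so `tf.trainable_variables()` is populated):

```python
import tensorflow as tf

# Look up the pretrained-LM -> MaskGAN name mapping for the encoder.
variable_mapping = gen_encoder_seq2seq(hparams)  # hparams: placeholder

# Saver keyed by checkpoint names, valued by in-graph variables.
saver = tf.train.Saver(var_list=variable_mapping)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # Restores 'Model/...' tensors into 'gen/encoder/...' variables.
  saver.restore(sess, '/path/to/pretrained_lm/model.ckpt')
```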
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for LSTM tensorflow blocks.\"\"\"\nfrom __future__ import division\n\nimport numpy as np\nimport tensorflow as tf\n\nimport block_base\nimport blocks_std\nimport blocks_lstm\n\n\nclass BlocksLSTMTest(tf.test.TestCase):\n\n def CheckUnary(self, y, op_type):\n self.assertEqual(op_type, y.op.type)\n self.assertEqual(1, len(y.op.inputs))\n return y.op.inputs[0]\n\n def CheckBinary(self, y, op_type):\n self.assertEqual(op_type, y.op.type)\n self.assertEqual(2, len(y.op.inputs))\n return y.op.inputs\n\n def testLSTM(self):\n lstm = blocks_lstm.LSTM(10)\n lstm.hidden = tf.zeros(shape=[10, 10], dtype=tf.float32)\n lstm.cell = tf.zeros(shape=[10, 10], dtype=tf.float32)\n x = tf.placeholder(dtype=tf.float32, shape=[10, 11])\n y = lstm(x)\n\n o, tanhc = self.CheckBinary(y, 'Mul')\n self.assertEqual(self.CheckUnary(o, 'Sigmoid').name, 'LSTM/split:3')\n\n self.assertIs(lstm.cell, self.CheckUnary(tanhc, 'Tanh'))\n fc, ij = self.CheckBinary(lstm.cell, 'Add')\n\n f, _ = self.CheckBinary(fc, 'Mul')\n self.assertEqual(self.CheckUnary(f, 'Sigmoid').name, 'LSTM/split:0')\n\n i, j = self.CheckBinary(ij, 'Mul')\n self.assertEqual(self.CheckUnary(i, 'Sigmoid').name, 'LSTM/split:1')\n j = self.CheckUnary(j, 'Tanh')\n self.assertEqual(j.name, 'LSTM/split:2')\n\n def testLSTMBiasInit(self):\n lstm = blocks_lstm.LSTM(9)\n x = tf.placeholder(dtype=tf.float32, shape=[15, 7])\n lstm(x)\n b = lstm._nn._bias\n\n with self.test_session():\n tf.global_variables_initializer().run()\n bias_var = b._bias.eval()\n\n comp = ([1.0] * 9) + ([0.0] * 27)\n self.assertAllEqual(bias_var, comp)\n\n def testConv2DLSTM(self):\n lstm = blocks_lstm.Conv2DLSTM(depth=10,\n filter_size=[1, 1],\n hidden_filter_size=[1, 1],\n strides=[1, 1],\n padding='SAME')\n lstm.hidden = tf.zeros(shape=[10, 11, 11, 10], dtype=tf.float32)\n lstm.cell = tf.zeros(shape=[10, 11, 11, 10], dtype=tf.float32)\n x = tf.placeholder(dtype=tf.float32, shape=[10, 11, 11, 1])\n y = lstm(x)\n\n o, tanhc = self.CheckBinary(y, 'Mul')\n self.assertEqual(self.CheckUnary(o, 'Sigmoid').name, 'Conv2DLSTM/split:3')\n\n self.assertIs(lstm.cell, self.CheckUnary(tanhc, 'Tanh'))\n fc, ij = self.CheckBinary(lstm.cell, 'Add')\n\n f, _ = self.CheckBinary(fc, 'Mul')\n self.assertEqual(self.CheckUnary(f, 'Sigmoid').name, 'Conv2DLSTM/split:0')\n\n i, j = self.CheckBinary(ij, 'Mul')\n self.assertEqual(self.CheckUnary(i, 'Sigmoid').name, 'Conv2DLSTM/split:1')\n j = self.CheckUnary(j, 'Tanh')\n self.assertEqual(j.name, 'Conv2DLSTM/split:2')\n\n def testConv2DLSTMBiasInit(self):\n lstm = blocks_lstm.Conv2DLSTM(9, 1, 1, [1, 1], 'SAME')\n x = tf.placeholder(dtype=tf.float32, shape=[1, 7, 7, 7])\n lstm(x)\n b = lstm._bias\n\n with self.test_session():\n tf.global_variables_initializer().run()\n bias_var = b._bias.eval()\n\n comp = ([1.0] * 9) + ([0.0] * 27)\n self.assertAllEqual(bias_var, 
comp)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
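The op-type walk in `testLSTM` is asserting the standard LSTM update, with the four pre-activation gates produced by a single `split` op in the order f, i, j (candidate), o, and with the forget-gate bias initialized to 1.0 — hence the expected `[1.0] * 9 + [0.0] * 27` bias vector for depth 9. A NumPy restatement of that arithmetic (a reference sketch, not the `blocks_lstm` implementation):

```python
import numpy as np


def sigmoid(x):
  return 1.0 / (1.0 + np.exp(-x))


def lstm_step(gate_preactivations, c_prev):
  """Mirrors the structure the test asserts: split -> cell -> output."""
  f, i, j, o = np.split(gate_preactivations, 4, axis=-1)  # 'LSTM/split:0..3'
  c = sigmoid(f) * c_prev + sigmoid(i) * np.tanh(j)       # the 'Add' of fc, ij
  y = sigmoid(o) * np.tanh(c)                             # the outer 'Mul'
  return y, c


y, c = lstm_step(np.zeros((10, 40)), np.zeros((10, 10)))
print(y.shape, c.shape)  # (10, 10) (10, 10)
```

Initializing the forget bias to 1.0 keeps `sigmoid(f)` near 0.73 at the start of training, so the cell state is mostly preserved and gradients flow through time more easily.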
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.predictors.mask_rcnn_box_predictor.\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\nfrom object_detection.builders import box_predictor_builder\nfrom object_detection.builders import hyperparams_builder\nfrom object_detection.predictors import mask_rcnn_keras_box_predictor as box_predictor\nfrom object_detection.protos import hyperparams_pb2\nfrom object_detection.utils import test_case\n\n\nclass MaskRCNNKerasBoxPredictorTest(test_case.TestCase):\n\n def _build_hyperparams(self,\n op_type=hyperparams_pb2.Hyperparams.FC):\n hyperparams = hyperparams_pb2.Hyperparams()\n hyperparams_text_proto = \"\"\"\n activation: NONE\n regularizer {\n l2_regularizer {\n }\n }\n initializer {\n truncated_normal_initializer {\n }\n }\n \"\"\"\n text_format.Merge(hyperparams_text_proto, hyperparams)\n hyperparams.op = op_type\n return hyperparams_builder.KerasLayerHyperparams(hyperparams)\n\n def test_get_boxes_with_five_classes(self):\n def graph_fn(image_features):\n mask_box_predictor = (\n box_predictor_builder.build_mask_rcnn_keras_box_predictor(\n is_training=False,\n num_classes=5,\n fc_hyperparams=self._build_hyperparams(),\n freeze_batchnorm=False,\n use_dropout=False,\n dropout_keep_prob=0.5,\n box_code_size=4,\n ))\n box_predictions = mask_box_predictor(\n [image_features],\n prediction_stage=2)\n return (box_predictions[box_predictor.BOX_ENCODINGS],\n box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND])\n image_features = np.random.rand(2, 7, 7, 3).astype(np.float32)\n (box_encodings,\n class_predictions_with_background) = self.execute(graph_fn,\n [image_features])\n self.assertAllEqual(box_encodings.shape, [2, 1, 5, 4])\n self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6])\n\n def test_get_boxes_with_five_classes_share_box_across_classes(self):\n def graph_fn(image_features):\n mask_box_predictor = (\n box_predictor_builder.build_mask_rcnn_keras_box_predictor(\n is_training=False,\n num_classes=5,\n fc_hyperparams=self._build_hyperparams(),\n freeze_batchnorm=False,\n use_dropout=False,\n dropout_keep_prob=0.5,\n box_code_size=4,\n share_box_across_classes=True\n ))\n box_predictions = mask_box_predictor(\n [image_features],\n prediction_stage=2)\n return (box_predictions[box_predictor.BOX_ENCODINGS],\n box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND])\n image_features = np.random.rand(2, 7, 7, 3).astype(np.float32)\n (box_encodings,\n class_predictions_with_background) = self.execute(graph_fn,\n [image_features])\n self.assertAllEqual(box_encodings.shape, [2, 1, 1, 4])\n self.assertAllEqual(class_predictions_with_background.shape, [2, 1, 6])\n\n def test_get_instance_masks(self):\n def graph_fn(image_features):\n mask_box_predictor = (\n 
box_predictor_builder.build_mask_rcnn_keras_box_predictor(\n is_training=False,\n num_classes=5,\n fc_hyperparams=self._build_hyperparams(),\n freeze_batchnorm=False,\n use_dropout=False,\n dropout_keep_prob=0.5,\n box_code_size=4,\n conv_hyperparams=self._build_hyperparams(\n op_type=hyperparams_pb2.Hyperparams.CONV),\n predict_instance_masks=True))\n box_predictions = mask_box_predictor(\n [image_features],\n prediction_stage=3)\n return (box_predictions[box_predictor.MASK_PREDICTIONS],)\n image_features = np.random.rand(2, 7, 7, 3).astype(np.float32)\n mask_predictions = self.execute(graph_fn, [image_features])\n self.assertAllEqual(mask_predictions.shape, [2, 1, 5, 14, 14])\n\n def test_do_not_return_instance_masks_without_request(self):\n image_features = tf.random_uniform([2, 7, 7, 3], dtype=tf.float32)\n mask_box_predictor = (\n box_predictor_builder.build_mask_rcnn_keras_box_predictor(\n is_training=False,\n num_classes=5,\n fc_hyperparams=self._build_hyperparams(),\n freeze_batchnorm=False,\n use_dropout=False,\n dropout_keep_prob=0.5,\n box_code_size=4))\n box_predictions = mask_box_predictor(\n [image_features],\n prediction_stage=2)\n self.assertEqual(len(box_predictions), 2)\n self.assertTrue(box_predictor.BOX_ENCODINGS in box_predictions)\n self.assertTrue(box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND\n in box_predictions)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
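The expected shapes in these tests follow directly from the predictor head's layout: one `box_code_size`-dimensional encoding per class (collapsing to a single shared box when `share_box_across_classes=True`) and `num_classes + 1` class scores, the extra column being the background class. A toy Keras head that reproduces just the shapes — illustrative only, not the real `MaskRCNNKerasBoxPredictor` — assuming TF2-style eager execution:

```python
import numpy as np
import tensorflow as tf

num_classes, box_code_size = 5, 4
features = tf.keras.Input(shape=(7, 7, 3))
pooled = tf.keras.layers.GlobalAveragePooling2D()(features)

# One box encoding per class (5 * 4 values), reshaped to [batch, 1, 5, 4].
boxes = tf.keras.layers.Dense(num_classes * box_code_size)(pooled)
boxes = tf.keras.layers.Reshape((1, num_classes, box_code_size))(boxes)

# Class scores carry an extra background column: [batch, 1, 6].
scores = tf.keras.layers.Dense(num_classes + 1)(pooled)
scores = tf.keras.layers.Reshape((1, num_classes + 1))(scores)

model = tf.keras.Model(features, [boxes, scores])
b, s = model(np.random.rand(2, 7, 7, 3).astype(np.float32))
print(b.shape, s.shape)  # (2, 1, 5, 4) (2, 1, 6)
```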
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\"\"\"Tests for ga_train.\n\nTests that ga runs for a few generations without crashing.\n\"\"\"\n\nfrom absl import flags\nimport tensorflow as tf\n\nfrom single_task import defaults # brain coder\nfrom single_task import run # brain coder\n\nFLAGS = flags.FLAGS\n\n\nclass GaTest(tf.test.TestCase):\n\n def RunTrainingSteps(self, config_string, num_steps=10):\n \"\"\"Run a few training steps with the given config.\n\n Just check that nothing crashes.\n\n Args:\n config_string: Config encoded in a string. See\n $REPO_PATH/common/config_lib.py\n num_steps: Number of training steps to run. Defaults to 10.\n \"\"\"\n config = defaults.default_config_with_updates(config_string)\n FLAGS.max_npe = num_steps * config.batch_size\n FLAGS.logdir = tf.test.get_temp_dir()\n FLAGS.config = config_string\n run.main(None)\n\n def testGeneticAlgorithm(self):\n self.RunTrainingSteps(\n 'env=c(task=\"reverse\"),'\n 'agent=c(algorithm=\"ga\"),'\n 'timestep_limit=40,batch_size=64')\n\n def testUniformRandomSearch(self):\n self.RunTrainingSteps(\n 'env=c(task=\"reverse\"),'\n 'agent=c(algorithm=\"rand\"),'\n 'timestep_limit=40,batch_size=64')\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2016 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"\nCode for plotting trajectories in the top view, and also plot first person views\nfrom saved trajectories. Does not run the network but only loads the mesh data\nto plot the view points.\n CUDA_VISIBLE_DEVICES=0 LD_LIBRARY_PATH=/opt/cuda-8.0/lib64:/opt/cudnnv51/lib64\n PYTHONPATH='.' PYOPENGL_PLATFORM=egl python scripts/script_plot_trajectory.py \\\n --first_person --num_steps 40 \\\n --config_name cmp.lmap_Msc.clip5.sbpd_d_r2r \\\n --imset test --alsologtostderr --base_dir output --out_dir vis\n\n\"\"\"\nimport os, sys, numpy as np, copy\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib.gridspec import GridSpec\n\nimport tensorflow as tf\nfrom tensorflow.contrib import slim\nimport cv2\nimport logging\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import flags\n\nfrom datasets import nav_env\nimport scripts.script_nav_agent_release as sna\nimport src.file_utils as fu\nfrom src import graph_utils\nfrom src import utils\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('out_dir', 'vis', 'Directory where to store the output')\nflags.DEFINE_string('type', '', 'Optional type.')\nflags.DEFINE_bool('first_person', False, 'Visualize the first person view.')\nflags.DEFINE_bool('top_view', False, 'Visualize the trajectory in the top view.')\nflags.DEFINE_integer('num_steps', 40, 'Number of steps to run the model for.')\nflags.DEFINE_string('imset', 'test', '')\nflags.DEFINE_string('base_dir', 'output', 'Cache directory.')\n\ndef _get_suffix_str():\n return ''\n\n\ndef _load_trajectory():\n base_dir = FLAGS.base_dir\n config_name = FLAGS.config_name+_get_suffix_str()\n\n dir_name = os.path.join(base_dir, FLAGS.type, config_name)\n logging.info('Waiting for snapshot in directory %s.', dir_name)\n last_checkpoint = slim.evaluation.wait_for_new_checkpoint(dir_name, None)\n checkpoint_iter = int(os.path.basename(last_checkpoint).split('-')[1])\n\n # Load the distances.\n a = utils.load_variables(os.path.join(dir_name, 'bench_on_'+FLAGS.imset,\n 'all_locs_at_t_{:d}.pkl'.format(checkpoint_iter)))\n return a\n\ndef _compute_hardness():\n # Load the stanford data to compute the hardness.\n if FLAGS.type == '':\n args = sna.get_args_for_config(FLAGS.config_name+'+bench_'+FLAGS.imset)\n else:\n args = sna.get_args_for_config(FLAGS.type+'.'+FLAGS.config_name+'+bench_'+FLAGS.imset)\n\n args.navtask.logdir = None\n R = lambda: nav_env.get_multiplexer_class(args.navtask, 0)\n R = R()\n\n rng_data = [np.random.RandomState(0), np.random.RandomState(0)]\n\n # Sample a room.\n h_dists = []\n gt_dists = []\n for i in range(250):\n e = R.sample_env(rng_data)\n nodes = e.task.nodes\n\n # Initialize the agent.\n init_env_state = e.reset(rng_data)\n\n gt_dist_to_goal = 
[e.episode.dist_to_goal[0][j][s]\n for j, s in enumerate(e.episode.start_node_ids)]\n\n for j in range(args.navtask.task_params.batch_size):\n start_node_id = e.episode.start_node_ids[j]\n end_node_id =e.episode.goal_node_ids[0][j]\n h_dist = graph_utils.heuristic_fn_vec(\n nodes[[start_node_id],:], nodes[[end_node_id], :],\n n_ori=args.navtask.task_params.n_ori,\n step_size=args.navtask.task_params.step_size)[0][0]\n gt_dist = e.episode.dist_to_goal[0][j][start_node_id]\n h_dists.append(h_dist)\n gt_dists.append(gt_dist)\n\n h_dists = np.array(h_dists)\n gt_dists = np.array(gt_dists)\n e = R.sample_env([np.random.RandomState(0), np.random.RandomState(0)])\n input = e.get_common_data()\n orig_maps = input['orig_maps'][0,0,:,:,0]\n return h_dists, gt_dists, orig_maps\n\ndef plot_trajectory_first_person(dt, orig_maps, out_dir):\n out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(),\n FLAGS.imset)\n fu.makedirs(out_dir)\n\n # Load the model so that we can render.\n plt.set_cmap('gray')\n samples_per_action = 8; wait_at_action = 0;\n\n Writer = animation.writers['mencoder']\n writer = Writer(fps=3*(samples_per_action+wait_at_action),\n metadata=dict(artist='anonymous'), bitrate=1800)\n\n args = sna.get_args_for_config(FLAGS.config_name + '+bench_'+FLAGS.imset)\n args.navtask.logdir = None\n navtask_ = copy.deepcopy(args.navtask)\n navtask_.camera_param.modalities = ['rgb']\n navtask_.task_params.modalities = ['rgb']\n sz = 512\n navtask_.camera_param.height = sz\n navtask_.camera_param.width = sz\n navtask_.task_params.img_height = sz\n navtask_.task_params.img_width = sz\n R = lambda: nav_env.get_multiplexer_class(navtask_, 0)\n R = R()\n b = R.buildings[0]\n\n f = [0 for _ in range(wait_at_action)] + \\\n [float(_)/samples_per_action for _ in range(samples_per_action)];\n\n # Generate things for it to render.\n inds_to_do = []\n inds_to_do += [1, 4, 10] #1291, 1268, 1273, 1289, 1302, 1426, 1413, 1449, 1399, 1390]\n\n for i in inds_to_do:\n fig = plt.figure(figsize=(10,8))\n gs = GridSpec(3,4)\n gs.update(wspace=0.05, hspace=0.05, left=0.0, top=0.97, right=1.0, bottom=0.)\n ax = fig.add_subplot(gs[:,:-1])\n ax1 = fig.add_subplot(gs[0,-1])\n ax2 = fig.add_subplot(gs[1,-1])\n ax3 = fig.add_subplot(gs[2,-1])\n axes = [ax, ax1, ax2, ax3]\n # ax = fig.add_subplot(gs[:,:])\n # axes = [ax]\n for ax in axes:\n ax.set_axis_off()\n\n node_ids = dt['all_node_ids'][i, :, 0]*1\n # Prune so that last node is not repeated more than 3 times?\n if np.all(node_ids[-4:] == node_ids[-1]):\n while node_ids[-4] == node_ids[-1]:\n node_ids = node_ids[:-1]\n num_steps = np.minimum(FLAGS.num_steps, len(node_ids))\n\n xyt = b.to_actual_xyt_vec(b.task.nodes[node_ids])\n xyt_diff = xyt[1:,:] - xyt[:-1:,:]\n xyt_diff[:,2] = np.mod(xyt_diff[:,2], 4)\n ind = np.where(xyt_diff[:,2] == 3)[0]\n xyt_diff[ind, 2] = -1\n xyt_diff = np.expand_dims(xyt_diff, axis=1)\n to_cat = [xyt_diff*_ for _ in f]\n perturbs_all = np.concatenate(to_cat, axis=1)\n perturbs_all = np.concatenate([perturbs_all, np.zeros_like(perturbs_all[:,:,:1])], axis=2)\n node_ids_all = np.expand_dims(node_ids, axis=1)*1\n node_ids_all = np.concatenate([node_ids_all for _ in f], axis=1)\n node_ids_all = np.reshape(node_ids_all[:-1,:], -1)\n perturbs_all = np.reshape(perturbs_all, [-1, 4])\n imgs = b.render_nodes(b.task.nodes[node_ids_all,:], perturb=perturbs_all)\n\n # Get action at each node.\n actions = []\n _, action_to_nodes = b.get_feasible_actions(node_ids)\n for j in range(num_steps-1):\n action_to_node = action_to_nodes[j]\n 
node_to_action = dict(zip(action_to_node.values(), action_to_node.keys()))\n      actions.append(node_to_action[node_ids[j+1]])\n\n    def init_fn():\n      return fig,\n\n    # Render trajectories.\n    def worker(j):\n      # Plot the image.\n      step_number = j // (samples_per_action + wait_at_action)  # integer step index\n      img = imgs[j]; ax = axes[0]; ax.clear(); ax.set_axis_off();\n      img = img.astype(np.uint8); ax.imshow(img);\n      tt = ax.set_title(\n          \"First Person View\\n\" +\n          \"Top corners show diagnostics (distance, agents' action) not input to agent.\",\n          fontsize=12)\n      plt.setp(tt, color='white')\n\n      # Distance to goal.\n      t = 'Dist to Goal:\\n{:2d} steps'.format(int(dt['all_d_at_t'][i, step_number]))\n      t = ax.text(0.01, 0.99, t,\n                  horizontalalignment='left',\n                  verticalalignment='top',\n                  fontsize=20, color='red',\n                  transform=ax.transAxes, alpha=1.0)\n      t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1))\n\n      # Action to take.\n      action_latex = ['$\\odot$ ', '$\\curvearrowright$ ', '$\\curvearrowleft$ ', r'$\\Uparrow$ ']\n      t = ax.text(0.99, 0.99, action_latex[actions[step_number]],\n                  horizontalalignment='right',\n                  verticalalignment='top',\n                  fontsize=40, color='green',\n                  transform=ax.transAxes, alpha=1.0)\n      t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1))\n\n\n      # Plot the map top view.\n      ax = axes[-1]\n      if j == 0:\n        # Plot the map\n        locs = dt['all_locs'][i,:num_steps,:]\n        goal_loc = dt['all_goal_locs'][i,:,:]\n        xymin = np.minimum(np.min(goal_loc, axis=0), np.min(locs, axis=0))\n        xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0))\n        xy1 = (xymax+xymin)/2. - 0.7*np.maximum(np.max(xymax-xymin), 24)\n        xy2 = (xymax+xymin)/2. + 0.7*np.maximum(np.max(xymax-xymin), 24)\n\n        ax.set_axis_on()\n        ax.patch.set_facecolor((0.333, 0.333, 0.333))\n        ax.set_xticks([]); ax.set_yticks([]);\n        ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0)\n        ax.plot(goal_loc[:,0], goal_loc[:,1], 'g*', markersize=12)\n\n        locs = dt['all_locs'][i,:1,:]\n        ax.plot(locs[:,0], locs[:,1], 'b.', markersize=12)\n\n        ax.set_xlim([xy1[0], xy2[0]])\n        ax.set_ylim([xy1[1], xy2[1]])\n\n      locs = dt['all_locs'][i,step_number,:]\n      locs = np.expand_dims(locs, axis=0)\n      ax.plot(locs[:,0], locs[:,1], 'r.', alpha=1.0, linewidth=0, markersize=4)\n      tt = ax.set_title('Trajectory in topview', fontsize=14)\n      plt.setp(tt, color='white')\n      return fig,\n\n    line_ani = animation.FuncAnimation(fig, worker,\n                                       (num_steps-1)*(wait_at_action+samples_per_action),\n                                       interval=500, blit=True, init_func=init_fn)\n    tmp_file_name = 'tmp.mp4'\n    line_ani.save(tmp_file_name, writer=writer, savefig_kwargs={'facecolor':'black'})\n    out_file_name = os.path.join(out_dir, 'vis_{:04d}.mp4'.format(i))\n    print(out_file_name)\n\n    if fu.exists(out_file_name):\n      gfile.Remove(out_file_name)\n    gfile.Copy(tmp_file_name, out_file_name)\n    gfile.Remove(tmp_file_name)\n    plt.close(fig)\n\ndef plot_trajectory(dt, hardness, orig_maps, out_dir):\n  out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(),\n                         FLAGS.imset)\n  fu.makedirs(out_dir)\n  out_file = os.path.join(out_dir, 'all_locs_at_t.pkl')\n  dt['hardness'] = hardness\n  utils.save_variables(out_file, dt.values(), dt.keys(), overwrite=True)\n\n  # Plot trajectories onto the maps.\n  plt.set_cmap('gray')\n  for i in range(4000):\n    goal_loc = dt['all_goal_locs'][i, :, :]\n    locs = np.concatenate((dt['all_locs'][i,:,:],\n                           dt['all_locs'][i,:,:]), axis=0)\n    xymin = np.minimum(np.min(goal_loc, axis=0), np.min(locs, axis=0))\n    xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0))\n    xy1 = (xymax+xymin)/2. 
- 1.*np.maximum(np.max(xymax-xymin), 24)\n xy2 = (xymax+xymin)/2. + 1.*np.maximum(np.max(xymax-xymin), 24)\n\n fig, ax = utils.tight_imshow_figure(plt, figsize=(6,6))\n ax.set_axis_on()\n ax.patch.set_facecolor((0.333, 0.333, 0.333))\n ax.set_xticks([])\n ax.set_yticks([])\n\n all_locs = dt['all_locs'][i,:,:]*1\n uniq = np.where(np.any(all_locs[1:,:] != all_locs[:-1,:], axis=1))[0]+1\n uniq = np.sort(uniq).tolist()\n uniq.insert(0,0)\n uniq = np.array(uniq)\n all_locs = all_locs[uniq, :]\n\n ax.plot(dt['all_locs'][i, 0, 0],\n dt['all_locs'][i, 0, 1], 'b.', markersize=24)\n ax.plot(dt['all_goal_locs'][i, 0, 0],\n dt['all_goal_locs'][i, 0, 1], 'g*', markersize=19)\n ax.plot(all_locs[:,0], all_locs[:,1], 'r', alpha=0.4, linewidth=2)\n ax.scatter(all_locs[:,0], all_locs[:,1],\n c=5+np.arange(all_locs.shape[0])*1./all_locs.shape[0],\n cmap='Reds', s=30, linewidth=0)\n ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0, aspect='equal')\n ax.set_xlim([xy1[0], xy2[0]])\n ax.set_ylim([xy1[1], xy2[1]])\n\n file_name = os.path.join(out_dir, 'trajectory_{:04d}.png'.format(i))\n print(file_name)\n with fu.fopen(file_name, 'w') as f:\n plt.savefig(f)\n plt.close(fig)\n\n\ndef main(_):\n a = _load_trajectory()\n h_dists, gt_dists, orig_maps = _compute_hardness()\n hardness = 1.-h_dists*1./ gt_dists\n\n if FLAGS.top_view:\n plot_trajectory(a, hardness, orig_maps, out_dir=FLAGS.out_dir)\n\n if FLAGS.first_person:\n plot_trajectory_first_person(a, orig_maps, out_dir=FLAGS.out_dir)\n\nif __name__ == '__main__':\n app.run()\n",
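The hardness measure computed in main above is one minus the ratio of heuristic (straight-line) distance to true geodesic distance, so detour-heavy episodes score near 1. A minimal NumPy sketch of that measure (the function name is illustrative, not part of the script):

import numpy as np

def episode_hardness(h_dists, gt_dists):
  # 0.0 when the heuristic matches the shortest path exactly;
  # approaches 1.0 when the true path requires large detours.
  h = np.asarray(h_dists, dtype=np.float64)
  gt = np.asarray(gt_dists, dtype=np.float64)
  return 1.0 - h / gt

# A heuristic of 10 steps against a geodesic distance of 40 steps gives 0.75.
print(episode_hardness([10.0], [40.0]))  # [0.75]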
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Datasets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\nimport models\n\n\ndef make_long_chain_dataset(\n state_size=1,\n num_obs=5,\n steps_per_obs=3,\n variance=1.,\n observation_variance=1.,\n batch_size=4,\n num_samples=1,\n observation_type=models.STANDARD_OBSERVATION,\n transition_type=models.STANDARD_TRANSITION,\n fixed_observation=None,\n dtype=\"float32\"):\n \"\"\"Creates a long chain data generating process.\n\n Creates a tf.data.Dataset that provides batches of data from a long\n chain.\n\n Args:\n state_size: The dimension of the state space of the process.\n num_obs: The number of observations in the chain.\n steps_per_obs: The number of steps between each observation.\n variance: The variance of the normal distributions used at each timestep.\n batch_size: The number of trajectories to include in each batch.\n num_samples: The number of replicas of each trajectory to include in each\n batch.\n dtype: The datatype of the states and observations.\n Returns:\n dataset: A tf.data.Dataset that can be iterated over.\n \"\"\"\n num_timesteps = num_obs * steps_per_obs\n def data_generator():\n \"\"\"An infinite generator of latents and observations from the model.\"\"\"\n while True:\n states = []\n observations = []\n # z0 ~ Normal(0, sqrt(variance)).\n states.append(\n np.random.normal(size=[state_size],\n scale=np.sqrt(variance)).astype(dtype))\n # start at 1 because we've already generated z0\n # go to num_timesteps+1 because we want to include the num_timesteps-th step\n for t in xrange(1, num_timesteps+1):\n if transition_type == models.ROUND_TRANSITION:\n loc = np.round(states[-1])\n elif transition_type == models.STANDARD_TRANSITION:\n loc = states[-1]\n new_state = np.random.normal(size=[state_size],\n loc=loc,\n scale=np.sqrt(variance))\n states.append(new_state.astype(dtype))\n if t % steps_per_obs == 0:\n if fixed_observation is None:\n if observation_type == models.SQUARED_OBSERVATION:\n loc = np.square(states[-1])\n elif observation_type == models.ABS_OBSERVATION:\n loc = np.abs(states[-1])\n elif observation_type == models.STANDARD_OBSERVATION:\n loc = states[-1]\n new_obs = np.random.normal(size=[state_size],\n loc=loc,\n scale=np.sqrt(observation_variance)).astype(dtype)\n else:\n new_obs = np.ones([state_size])* fixed_observation\n\n observations.append(new_obs)\n yield states, observations\n\n dataset = tf.data.Dataset.from_generator(\n data_generator,\n output_types=(tf.as_dtype(dtype), tf.as_dtype(dtype)),\n output_shapes=([num_timesteps+1, state_size], [num_obs, state_size]))\n dataset = dataset.repeat().batch(batch_size)\n\n def tile_batch(state, observation):\n state = tf.tile(state, [num_samples, 1, 1])\n observation = tf.tile(observation, 
[num_samples, 1, 1])\n    return state, observation\n\n  dataset = dataset.map(tile_batch, num_parallel_calls=12).prefetch(1024)\n  return dataset\n\n\ndef make_dataset(bs=None,\n                 state_size=1,\n                 num_timesteps=10,\n                 variance=1.,\n                 prior_type=\"unimodal\",\n                 bimodal_prior_weight=0.5,\n                 bimodal_prior_mean=1,\n                 transition_type=models.STANDARD_TRANSITION,\n                 fixed_observation=None,\n                 batch_size=4,\n                 num_samples=1,\n                 dtype='float32'):\n  \"\"\"Creates a data generating process.\n\n  Creates a tf.data.Dataset that provides batches of data.\n\n  Args:\n    bs: The parameters of the data generating process. If None, new bs are\n      randomly generated.\n    state_size: The dimension of the state space of the process.\n    num_timesteps: The length of the state sequences in the process.\n    variance: The variance of the normal distributions used at each timestep.\n    prior_type: The type of prior, one of \"unimodal\", \"nonlinear\", or\n      \"bimodal\".\n    bimodal_prior_weight: The mixture weight of the positive mode when\n      prior_type is \"bimodal\".\n    bimodal_prior_mean: The (symmetric) mode location when prior_type is\n      \"bimodal\".\n    transition_type: The type of transition used, one of the transition\n      constants defined in models.\n    fixed_observation: If not None, fixes all observations to this value.\n    batch_size: The number of trajectories to include in each batch.\n    num_samples: The number of replicas of each trajectory to include in each\n      batch.\n    dtype: The datatype of the states and observations.\n  Returns:\n    bs: The true bs used to generate the data.\n    dataset: A tf.data.Dataset that can be iterated over.\n  \"\"\"\n\n  if bs is None:\n    bs = [np.random.uniform(size=[state_size]).astype(dtype) for _ in xrange(num_timesteps)]\n    tf.logging.info(\"data generating process bs: %s\",\n                    np.array(bs).reshape(num_timesteps))\n\n\n  def data_generator():\n    \"\"\"An infinite generator of latents and observations from the model.\"\"\"\n    while True:\n      states = []\n      if prior_type == \"unimodal\" or prior_type == \"nonlinear\":\n        # Prior is Normal(0, sqrt(variance)).\n        states.append(np.random.normal(size=[state_size], scale=np.sqrt(variance)).astype(dtype))\n      elif prior_type == \"bimodal\":\n        if np.random.uniform() > bimodal_prior_weight:\n          loc = bimodal_prior_mean\n        else:\n          loc = -bimodal_prior_mean\n        states.append(np.random.normal(size=[state_size],\n                                       loc=loc,\n                                       scale=np.sqrt(variance)\n                                      ).astype(dtype))\n\n      for t in xrange(num_timesteps):\n        if transition_type == models.ROUND_TRANSITION:\n          loc = np.round(states[-1])\n        elif transition_type == models.STANDARD_TRANSITION:\n          loc = states[-1]\n        loc += bs[t]\n        new_state = np.random.normal(size=[state_size],\n                                     loc=loc,\n                                     scale=np.sqrt(variance)).astype(dtype)\n        states.append(new_state)\n\n      if fixed_observation is None:\n        observation = states[-1]\n      else:\n        observation = np.ones_like(states[-1]) * fixed_observation\n      yield np.array(states[:-1]), observation\n\n  dataset = tf.data.Dataset.from_generator(\n      data_generator,\n      output_types=(tf.as_dtype(dtype), tf.as_dtype(dtype)),\n      output_shapes=([num_timesteps, state_size], [state_size]))\n  dataset = dataset.repeat().batch(batch_size)\n\n  def tile_batch(state, observation):\n    state = tf.tile(state, [num_samples, 1, 1])\n    observation = tf.tile(observation, [num_samples, 1])\n    return state, observation\n\n  dataset = dataset.map(tile_batch, num_parallel_calls=12).prefetch(1024)\n  return np.array(bs), dataset\n",
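For reference, a minimal sketch of consuming one of these generators with a TF1-style one-shot iterator; the module name `datasets` and the sizes below are illustrative:

import tensorflow as tf
import datasets  # assumed importable name for the module above

bs, dataset = datasets.make_dataset(state_size=1, num_timesteps=10,
                                    batch_size=4, num_samples=2)
states, observations = dataset.make_one_shot_iterator().get_next()
# After tile_batch, the leading dimension is batch_size * num_samples.
with tf.Session() as sess:
  s, o = sess.run([states, observations])
  print(s.shape, o.shape)  # (8, 10, 1) (8, 1)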
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"SSD MobilenetV2 FPN Feature Extractor.\"\"\"\n\nimport copy\nimport functools\nimport tensorflow as tf\nfrom tensorflow.contrib import slim as contrib_slim\n\nfrom object_detection.meta_architectures import ssd_meta_arch\nfrom object_detection.models import feature_map_generators\nfrom object_detection.utils import context_manager\nfrom object_detection.utils import ops\nfrom object_detection.utils import shape_utils\nfrom nets.mobilenet import mobilenet\nfrom nets.mobilenet import mobilenet_v2\n\nslim = contrib_slim\n\n\n# A modified config of mobilenet v2 that makes it more detection friendly.\ndef _create_modified_mobilenet_config():\n conv_defs = copy.deepcopy(mobilenet_v2.V2_DEF)\n conv_defs['spec'][-1] = mobilenet.op(\n slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=256)\n return conv_defs\n\n\nclass SSDMobileNetV2FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):\n \"\"\"SSD Feature Extractor using MobilenetV2 FPN features.\"\"\"\n\n def __init__(self,\n is_training,\n depth_multiplier,\n min_depth,\n pad_to_multiple,\n conv_hyperparams_fn,\n fpn_min_level=3,\n fpn_max_level=7,\n additional_layer_depth=256,\n reuse_weights=None,\n use_explicit_padding=False,\n use_depthwise=False,\n use_native_resize_op=False,\n override_base_feature_extractor_hyperparams=False):\n \"\"\"SSD FPN feature extractor based on Mobilenet v2 architecture.\n\n Args:\n is_training: whether the network is in training mode.\n depth_multiplier: float depth multiplier for feature extractor.\n min_depth: minimum feature extractor depth.\n pad_to_multiple: the nearest multiple to zero pad the input height and\n width dimensions to.\n conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d\n and separable_conv2d ops in the layers that are added on top of the base\n feature extractor.\n fpn_min_level: the highest resolution feature map to use in FPN. The valid\n values are {2, 3, 4, 5} which map to MobileNet v2 layers\n {layer_4, layer_7, layer_14, layer_19}, respectively.\n fpn_max_level: the smallest resolution feature map to construct or use in\n FPN. FPN constructions uses features maps starting from fpn_min_level\n upto the fpn_max_level. In the case that there are not enough feature\n maps in the backbone network, additional feature maps are created by\n applying stride 2 convolutions until we get the desired number of fpn\n levels.\n additional_layer_depth: additional feature map layer channel depth.\n reuse_weights: whether to reuse variables. Default is None.\n use_explicit_padding: Whether to use explicit padding when extracting\n features. Default is False.\n use_depthwise: Whether to use depthwise convolutions. Default is False.\n use_native_resize_op: Whether to use tf.image.nearest_neighbor_resize\n to do upsampling in FPN. 
Default is false.\n override_base_feature_extractor_hyperparams: Whether to override\n hyperparameters of the base feature extractor with the one from\n `conv_hyperparams_fn`.\n \"\"\"\n super(SSDMobileNetV2FpnFeatureExtractor, self).__init__(\n is_training=is_training,\n depth_multiplier=depth_multiplier,\n min_depth=min_depth,\n pad_to_multiple=pad_to_multiple,\n conv_hyperparams_fn=conv_hyperparams_fn,\n reuse_weights=reuse_weights,\n use_explicit_padding=use_explicit_padding,\n use_depthwise=use_depthwise,\n override_base_feature_extractor_hyperparams=\n override_base_feature_extractor_hyperparams)\n self._fpn_min_level = fpn_min_level\n self._fpn_max_level = fpn_max_level\n self._additional_layer_depth = additional_layer_depth\n self._conv_defs = None\n if self._use_depthwise:\n self._conv_defs = _create_modified_mobilenet_config()\n self._use_native_resize_op = use_native_resize_op\n\n def preprocess(self, resized_inputs):\n \"\"\"SSD preprocessing.\n\n Maps pixel values to the range [-1, 1].\n\n Args:\n resized_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n\n Returns:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n \"\"\"\n return (2.0 / 255.0) * resized_inputs - 1.0\n\n def extract_features(self, preprocessed_inputs):\n \"\"\"Extract features from preprocessed inputs.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n\n Returns:\n feature_maps: a list of tensors where the ith tensor has shape\n [batch, height_i, width_i, depth_i]\n \"\"\"\n preprocessed_inputs = shape_utils.check_min_image_dim(\n 33, preprocessed_inputs)\n\n with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope:\n with slim.arg_scope(\n mobilenet_v2.training_scope(is_training=None, bn_decay=0.9997)), \\\n slim.arg_scope(\n [mobilenet.depth_multiplier], min_depth=self._min_depth):\n with (slim.arg_scope(self._conv_hyperparams_fn())\n if self._override_base_feature_extractor_hyperparams else\n context_manager.IdentityContextManager()):\n _, image_features = mobilenet_v2.mobilenet_base(\n ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),\n final_endpoint='layer_19',\n depth_multiplier=self._depth_multiplier,\n conv_defs=self._conv_defs,\n use_explicit_padding=self._use_explicit_padding,\n scope=scope)\n depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)\n with slim.arg_scope(self._conv_hyperparams_fn()):\n with tf.variable_scope('fpn', reuse=self._reuse_weights):\n feature_blocks = [\n 'layer_4', 'layer_7', 'layer_14', 'layer_19'\n ]\n base_fpn_max_level = min(self._fpn_max_level, 5)\n feature_block_list = []\n for level in range(self._fpn_min_level, base_fpn_max_level + 1):\n feature_block_list.append(feature_blocks[level - 2])\n fpn_features = feature_map_generators.fpn_top_down_feature_maps(\n [(key, image_features[key]) for key in feature_block_list],\n depth=depth_fn(self._additional_layer_depth),\n use_depthwise=self._use_depthwise,\n use_explicit_padding=self._use_explicit_padding,\n use_native_resize_op=self._use_native_resize_op)\n feature_maps = []\n for level in range(self._fpn_min_level, base_fpn_max_level + 1):\n feature_maps.append(fpn_features['top_down_{}'.format(\n feature_blocks[level - 2])])\n last_feature_map = fpn_features['top_down_{}'.format(\n feature_blocks[base_fpn_max_level - 2])]\n # Construct coarse features\n padding = 'VALID' if self._use_explicit_padding else 
'SAME'\n kernel_size = 3\n for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1):\n if self._use_depthwise:\n conv_op = functools.partial(\n slim.separable_conv2d, depth_multiplier=1)\n else:\n conv_op = slim.conv2d\n if self._use_explicit_padding:\n last_feature_map = ops.fixed_padding(\n last_feature_map, kernel_size)\n last_feature_map = conv_op(\n last_feature_map,\n num_outputs=depth_fn(self._additional_layer_depth),\n kernel_size=[kernel_size, kernel_size],\n stride=2,\n padding=padding,\n scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 19))\n feature_maps.append(last_feature_map)\n return feature_maps\n",
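The level arithmetic in extract_features above is compact; a small standalone sketch of the same indexing, with the constants mirroring the code (illustrative only):

feature_blocks = ['layer_4', 'layer_7', 'layer_14', 'layer_19']
fpn_min_level, fpn_max_level = 3, 7
base_fpn_max_level = min(fpn_max_level, 5)

# Levels 3..5 come straight from the backbone (level - 2 indexes the list);
# levels 6..7 are synthesized by stride-2 convolutions on top of level 5.
backbone_levels = [feature_blocks[level - 2]
                   for level in range(fpn_min_level, base_fpn_max_level + 1)]
extra_levels = list(range(base_fpn_max_level + 1, fpn_max_level + 1))
print(backbone_levels)  # ['layer_7', 'layer_14', 'layer_19']
print(extra_levels)     # [6, 7]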
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Implementation of objectives for training stochastic latent variable models.\n\nContains implementations of the Importance Weighted Autoencoder objective (IWAE)\nand the Filtering Variational objective (FIVO).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport tensorflow as tf\n\nfrom fivo import nested_utils as nested\nfrom fivo import smc\n\n\ndef iwae(model,\n observations,\n seq_lengths,\n num_samples=1,\n parallel_iterations=30,\n swap_memory=True):\n \"\"\"Computes the IWAE lower bound on the log marginal probability.\n\n This method accepts a stochastic latent variable model and some observations\n and computes a stochastic lower bound on the log marginal probability of the\n observations. The IWAE estimator is defined by averaging multiple importance\n weights. For more details see \"Importance Weighted Autoencoders\" by Burda\n et al. https://arxiv.org/abs/1509.00519.\n\n When num_samples = 1, this bound becomes the evidence lower bound (ELBO).\n\n Args:\n model: A subclass of ELBOTrainableSequenceModel that implements one\n timestep of the model. See models/vrnn.py for an example.\n observations: The inputs to the model. A potentially nested list or tuple of\n Tensors each of shape [max_seq_len, batch_size, ...]. The Tensors must\n have a rank at least two and have matching shapes in the first two\n dimensions, which represent time and the batch respectively. The model\n will be provided with the observations before computing the bound.\n seq_lengths: A [batch_size] Tensor of ints encoding the length of each\n sequence in the batch (sequences can be padded to a common length).\n num_samples: The number of samples to use.\n parallel_iterations: The number of parallel iterations to use for the\n internal while loop.\n swap_memory: Whether GPU-CPU memory swapping should be enabled for the\n internal while loop.\n\n Returns:\n log_p_hat: A Tensor of shape [batch_size] containing IWAE's estimate of the\n log marginal probability of the observations.\n log_weights: A Tensor of shape [max_seq_len, batch_size, num_samples]\n containing the log weights at each timestep. 
Will not be valid for\n timesteps past the end of a sequence.\n \"\"\"\n log_p_hat, log_weights, _, final_state = fivo(\n model,\n observations,\n seq_lengths,\n num_samples=num_samples,\n resampling_criterion=smc.never_resample_criterion,\n parallel_iterations=parallel_iterations,\n swap_memory=swap_memory)\n return log_p_hat, log_weights, final_state\n\n\ndef fivo(model,\n observations,\n seq_lengths,\n num_samples=1,\n resampling_criterion=smc.ess_criterion,\n resampling_type='multinomial',\n relaxed_resampling_temperature=0.5,\n parallel_iterations=30,\n swap_memory=True,\n random_seed=None):\n \"\"\"Computes the FIVO lower bound on the log marginal probability.\n\n This method accepts a stochastic latent variable model and some observations\n and computes a stochastic lower bound on the log marginal probability of the\n observations. The lower bound is defined by a particle filter's unbiased\n estimate of the marginal probability of the observations. For more details see\n \"Filtering Variational Objectives\" by Maddison et al.\n https://arxiv.org/abs/1705.09279.\n\n When the resampling criterion is \"never resample\", this bound becomes IWAE.\n\n Args:\n model: A subclass of ELBOTrainableSequenceModel that implements one\n timestep of the model. See models/vrnn.py for an example.\n observations: The inputs to the model. A potentially nested list or tuple of\n Tensors each of shape [max_seq_len, batch_size, ...]. The Tensors must\n have a rank at least two and have matching shapes in the first two\n dimensions, which represent time and the batch respectively. The model\n will be provided with the observations before computing the bound.\n seq_lengths: A [batch_size] Tensor of ints encoding the length of each\n sequence in the batch (sequences can be padded to a common length).\n num_samples: The number of particles to use in each particle filter.\n resampling_criterion: The resampling criterion to use for this particle\n filter. Must accept the number of samples, the current log weights,\n and the current timestep and return a boolean Tensor of shape [batch_size]\n indicating whether each particle filter should resample. See\n ess_criterion and related functions for examples. When\n resampling_criterion is never_resample_criterion, resampling_fn is ignored\n and never called.\n resampling_type: The type of resampling, one of \"multinomial\" or \"relaxed\".\n relaxed_resampling_temperature: A positive temperature only used for relaxed\n resampling.\n parallel_iterations: The number of parallel iterations to use for the\n internal while loop. Note that values greater than 1 can introduce\n non-determinism even when random_seed is provided.\n swap_memory: Whether GPU-CPU memory swapping should be enabled for the\n internal while loop.\n random_seed: The random seed to pass to the resampling operations in\n the particle filter. Mainly useful for testing.\n\n Returns:\n log_p_hat: A Tensor of shape [batch_size] containing FIVO's estimate of the\n log marginal probability of the observations.\n log_weights: A Tensor of shape [max_seq_len, batch_size, num_samples]\n containing the log weights at each timestep of the particle filter. Note\n that on timesteps when a resampling operation is performed the log weights\n are reset to 0. Will not be valid for timesteps past the end of a\n sequence.\n resampled: A Tensor of shape [max_seq_len, batch_size] indicating when the\n particle filters resampled. 
Will be 1.0 on timesteps when resampling\n occurred and 0.0 on timesteps when it did not.\n \"\"\"\n # batch_size is the number of particle filters running in parallel.\n batch_size = tf.shape(seq_lengths)[0]\n\n # Each sequence in the batch will be the input data for a different\n # particle filter. The batch will be laid out as:\n # particle 1 of particle filter 1\n # particle 1 of particle filter 2\n # ...\n # particle 1 of particle filter batch_size\n # particle 2 of particle filter 1\n # ...\n # particle num_samples of particle filter batch_size\n observations = nested.tile_tensors(observations, [1, num_samples])\n tiled_seq_lengths = tf.tile(seq_lengths, [num_samples])\n model.set_observations(observations, tiled_seq_lengths)\n\n if resampling_type == 'multinomial':\n resampling_fn = smc.multinomial_resampling\n elif resampling_type == 'relaxed':\n resampling_fn = functools.partial(\n smc.relaxed_resampling, temperature=relaxed_resampling_temperature)\n resampling_fn = functools.partial(resampling_fn, random_seed=random_seed)\n\n def transition_fn(prev_state, t):\n if prev_state is None:\n return model.zero_state(batch_size * num_samples, tf.float32)\n return model.propose_and_weight(prev_state, t)\n\n log_p_hat, log_weights, resampled, final_state, _ = smc.smc(\n transition_fn,\n seq_lengths,\n num_particles=num_samples,\n resampling_criterion=resampling_criterion,\n resampling_fn=resampling_fn,\n parallel_iterations=parallel_iterations,\n swap_memory=swap_memory)\n\n return log_p_hat, log_weights, resampled, final_state\n\ndef fivo_aux_td(\n model,\n observations,\n seq_lengths,\n num_samples=1,\n resampling_criterion=smc.ess_criterion,\n resampling_type='multinomial',\n relaxed_resampling_temperature=0.5,\n parallel_iterations=30,\n swap_memory=True,\n random_seed=None):\n \"\"\"Experimental.\"\"\"\n # batch_size is the number of particle filters running in parallel.\n batch_size = tf.shape(seq_lengths)[0]\n max_seq_len = tf.reduce_max(seq_lengths)\n\n # Each sequence in the batch will be the input data for a different\n # particle filter. 
The batch will be laid out as:\n # particle 1 of particle filter 1\n # particle 1 of particle filter 2\n # ...\n # particle 1 of particle filter batch_size\n # particle 2 of particle filter 1\n # ...\n # particle num_samples of particle filter batch_size\n observations = nested.tile_tensors(observations, [1, num_samples])\n tiled_seq_lengths = tf.tile(seq_lengths, [num_samples])\n model.set_observations(observations, tiled_seq_lengths)\n\n if resampling_type == 'multinomial':\n resampling_fn = smc.multinomial_resampling\n elif resampling_type == 'relaxed':\n resampling_fn = functools.partial(\n smc.relaxed_resampling, temperature=relaxed_resampling_temperature)\n resampling_fn = functools.partial(resampling_fn, random_seed=random_seed)\n\n def transition_fn(prev_state, t):\n if prev_state is None:\n model_init_state = model.zero_state(batch_size * num_samples, tf.float32)\n return (tf.zeros([num_samples*batch_size], dtype=tf.float32),\n (tf.zeros([num_samples*batch_size, model.latent_size], dtype=tf.float32),\n tf.zeros([num_samples*batch_size, model.latent_size], dtype=tf.float32)),\n model_init_state)\n\n prev_log_r, prev_log_r_tilde, prev_model_state = prev_state\n (new_model_state, zt, log_q_zt, log_p_zt,\n log_p_x_given_z, log_r_tilde, p_ztplus1) = model(prev_model_state, t)\n r_tilde_mu, r_tilde_sigma_sq = log_r_tilde\n # Compute the weight without r.\n log_weight = log_p_zt + log_p_x_given_z - log_q_zt\n # Compute log_r and log_r_tilde.\n p_mu = tf.stop_gradient(p_ztplus1.mean())\n p_sigma_sq = tf.stop_gradient(p_ztplus1.variance())\n log_r = (tf.log(r_tilde_sigma_sq) -\n tf.log(r_tilde_sigma_sq + p_sigma_sq) -\n tf.square(r_tilde_mu - p_mu)/(r_tilde_sigma_sq + p_sigma_sq))\n # log_r is [num_samples*batch_size, latent_size]. We sum it along the last\n # dimension to compute log r.\n log_r = 0.5*tf.reduce_sum(log_r, axis=-1)\n # Compute prev log r tilde\n prev_r_tilde_mu, prev_r_tilde_sigma_sq = prev_log_r_tilde\n prev_log_r_tilde = -0.5*tf.reduce_sum(\n tf.square(tf.stop_gradient(zt) - prev_r_tilde_mu)/prev_r_tilde_sigma_sq, axis=-1)\n # If the sequence is on the last timestep, log_r and log_r_tilde are just zeros.\n last_timestep = t >= (tiled_seq_lengths - 1)\n log_r = tf.where(last_timestep,\n tf.zeros_like(log_r),\n log_r)\n prev_log_r_tilde = tf.where(last_timestep,\n tf.zeros_like(prev_log_r_tilde),\n prev_log_r_tilde)\n log_weight += tf.stop_gradient(log_r - prev_log_r)\n new_state = (log_r, log_r_tilde, new_model_state)\n loop_fn_args = (log_r, prev_log_r_tilde, log_p_x_given_z, log_r - prev_log_r)\n return log_weight, new_state, loop_fn_args\n\n def loop_fn(loop_state, loop_args, unused_model_state, log_weights, resampled, mask, t):\n if loop_state is None:\n return (tf.zeros([batch_size], dtype=tf.float32),\n tf.zeros([batch_size], dtype=tf.float32),\n tf.zeros([num_samples, batch_size], dtype=tf.float32))\n log_p_hat_acc, bellman_loss_acc, log_r_diff_acc = loop_state\n log_r, prev_log_r_tilde, log_p_x_given_z, log_r_diff = loop_args\n # Compute the log_p_hat update\n log_p_hat_update = tf.reduce_logsumexp(\n log_weights, axis=0) - tf.log(tf.to_float(num_samples))\n # If it is the last timestep, we always add the update.\n log_p_hat_acc += tf.cond(t >= max_seq_len-1,\n lambda: log_p_hat_update,\n lambda: log_p_hat_update * resampled)\n # Compute the Bellman update.\n log_r = tf.reshape(log_r, [num_samples, batch_size])\n prev_log_r_tilde = tf.reshape(prev_log_r_tilde, [num_samples, batch_size])\n log_p_x_given_z = tf.reshape(log_p_x_given_z, [num_samples, batch_size])\n 
mask = tf.reshape(mask, [num_samples, batch_size])\n # On the first timestep there is no bellman error because there is no\n # prev_log_r_tilde.\n mask = tf.cond(tf.equal(t, 0),\n lambda: tf.zeros_like(mask),\n lambda: mask)\n # On the first timestep also fix up prev_log_r_tilde, which will be -inf.\n prev_log_r_tilde = tf.where(\n tf.is_inf(prev_log_r_tilde),\n tf.zeros_like(prev_log_r_tilde),\n prev_log_r_tilde)\n # log_lambda is [num_samples, batch_size]\n log_lambda = tf.reduce_mean(prev_log_r_tilde - log_p_x_given_z - log_r,\n axis=0, keepdims=True)\n bellman_error = mask * tf.square(\n prev_log_r_tilde -\n tf.stop_gradient(log_lambda + log_p_x_given_z + log_r)\n )\n bellman_loss_acc += tf.reduce_mean(bellman_error, axis=0)\n # Compute the log_r_diff update\n log_r_diff_acc += mask * tf.reshape(log_r_diff, [num_samples, batch_size])\n return (log_p_hat_acc, bellman_loss_acc, log_r_diff_acc)\n\n log_weights, resampled, accs = smc.smc(\n transition_fn,\n seq_lengths,\n num_particles=num_samples,\n resampling_criterion=resampling_criterion,\n resampling_fn=resampling_fn,\n loop_fn=loop_fn,\n parallel_iterations=parallel_iterations,\n swap_memory=swap_memory)\n\n log_p_hat, bellman_loss, log_r_diff = accs\n loss_per_seq = [- log_p_hat, bellman_loss]\n tf.summary.scalar(\"bellman_loss\",\n tf.reduce_mean(bellman_loss / tf.to_float(seq_lengths)))\n tf.summary.scalar(\"log_r_diff\",\n tf.reduce_mean(tf.reduce_mean(log_r_diff, axis=0) / tf.to_float(seq_lengths)))\n return loss_per_seq, log_p_hat, log_weights, resampled\n",
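When resampling never fires, FIVO reduces to IWAE, which is just a log-mean-exp over per-particle importance weights. A NumPy sketch of that reduction under the shapes documented above (random weights stand in for real model output):

import numpy as np

def log_mean_exp(log_w, axis):
  # Numerically stable log(mean(exp(log_w))) along `axis`.
  m = np.max(log_w, axis=axis, keepdims=True)
  return np.squeeze(m, axis) + np.log(np.mean(np.exp(log_w - m), axis=axis))

# log_weights: [max_seq_len, batch_size, num_samples]; sum over time, then
# average the num_samples importance weights in log space.
log_weights = np.random.randn(10, 4, 8)
log_p_hat = log_mean_exp(log_weights.sum(axis=0), axis=-1)  # [batch_size]
print(log_p_hat.shape)  # (4,)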
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for nets.inception_v2.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib import slim as contrib_slim\n\nfrom nets import inception\n\nslim = contrib_slim\n\n\nclass InceptionV2Test(tf.test.TestCase):\n\n def testBuildClassificationNetwork(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n logits, end_points = inception.inception_v2(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith(\n 'InceptionV2/Logits/SpatialSqueeze'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n self.assertTrue('Predictions' in end_points)\n self.assertListEqual(end_points['Predictions'].get_shape().as_list(),\n [batch_size, num_classes])\n\n def testBuildPreLogitsNetwork(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = None\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n net, end_points = inception.inception_v2(inputs, num_classes)\n self.assertTrue(net.op.name.startswith('InceptionV2/Logits/AvgPool'))\n self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])\n self.assertFalse('Logits' in end_points)\n self.assertFalse('Predictions' in end_points)\n\n def testBuildBaseNetwork(self):\n batch_size = 5\n height, width = 224, 224\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n mixed_5c, end_points = inception.inception_v2_base(inputs)\n self.assertTrue(mixed_5c.op.name.startswith('InceptionV2/Mixed_5c'))\n self.assertListEqual(mixed_5c.get_shape().as_list(),\n [batch_size, 7, 7, 1024])\n expected_endpoints = ['Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b',\n 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a',\n 'Mixed_5b', 'Mixed_5c', 'Conv2d_1a_7x7',\n 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',\n 'MaxPool_3a_3x3']\n self.assertItemsEqual(list(end_points.keys()), expected_endpoints)\n\n def testBuildOnlyUptoFinalEndpoint(self):\n batch_size = 5\n height, width = 224, 224\n endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',\n 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',\n 'Mixed_4a', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',\n 'Mixed_5a', 'Mixed_5b', 'Mixed_5c']\n for index, endpoint in enumerate(endpoints):\n with tf.Graph().as_default():\n inputs = tf.random.uniform((batch_size, height, width, 3))\n out_tensor, end_points = inception.inception_v2_base(\n inputs, final_endpoint=endpoint)\n self.assertTrue(out_tensor.op.name.startswith(\n 'InceptionV2/' + endpoint))\n self.assertItemsEqual(endpoints[:index + 1], list(end_points.keys()))\n\n def testBuildAndCheckAllEndPointsUptoMixed5c(self):\n batch_size = 5\n height, width = 224, 
224\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n _, end_points = inception.inception_v2_base(inputs,\n final_endpoint='Mixed_5c')\n endpoints_shapes = {'Mixed_3b': [batch_size, 28, 28, 256],\n 'Mixed_3c': [batch_size, 28, 28, 320],\n 'Mixed_4a': [batch_size, 14, 14, 576],\n 'Mixed_4b': [batch_size, 14, 14, 576],\n 'Mixed_4c': [batch_size, 14, 14, 576],\n 'Mixed_4d': [batch_size, 14, 14, 576],\n 'Mixed_4e': [batch_size, 14, 14, 576],\n 'Mixed_5a': [batch_size, 7, 7, 1024],\n 'Mixed_5b': [batch_size, 7, 7, 1024],\n 'Mixed_5c': [batch_size, 7, 7, 1024],\n 'Conv2d_1a_7x7': [batch_size, 112, 112, 64],\n 'MaxPool_2a_3x3': [batch_size, 56, 56, 64],\n 'Conv2d_2b_1x1': [batch_size, 56, 56, 64],\n 'Conv2d_2c_3x3': [batch_size, 56, 56, 192],\n 'MaxPool_3a_3x3': [batch_size, 28, 28, 192]}\n self.assertItemsEqual(\n list(endpoints_shapes.keys()), list(end_points.keys()))\n for endpoint_name in endpoints_shapes:\n expected_shape = endpoints_shapes[endpoint_name]\n self.assertTrue(endpoint_name in end_points)\n self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n expected_shape)\n\n def testModelHasExpectedNumberOfParameters(self):\n batch_size = 5\n height, width = 224, 224\n inputs = tf.random.uniform((batch_size, height, width, 3))\n with slim.arg_scope(inception.inception_v2_arg_scope()):\n inception.inception_v2_base(inputs)\n total_params, _ = slim.model_analyzer.analyze_vars(\n slim.get_model_variables())\n self.assertAlmostEqual(10173112, total_params)\n\n def testBuildEndPointsWithDepthMultiplierLessThanOne(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n _, end_points = inception.inception_v2(inputs, num_classes)\n\n endpoint_keys = [key for key in end_points.keys()\n if key.startswith('Mixed') or key.startswith('Conv')]\n\n _, end_points_with_multiplier = inception.inception_v2(\n inputs, num_classes, scope='depth_multiplied_net',\n depth_multiplier=0.5)\n\n for key in endpoint_keys:\n original_depth = end_points[key].get_shape().as_list()[3]\n new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]\n self.assertEqual(0.5 * original_depth, new_depth)\n\n def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n _, end_points = inception.inception_v2(inputs, num_classes)\n\n endpoint_keys = [key for key in end_points.keys()\n if key.startswith('Mixed') or key.startswith('Conv')]\n\n _, end_points_with_multiplier = inception.inception_v2(\n inputs, num_classes, scope='depth_multiplied_net',\n depth_multiplier=2.0)\n\n for key in endpoint_keys:\n original_depth = end_points[key].get_shape().as_list()[3]\n new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]\n self.assertEqual(2.0 * original_depth, new_depth)\n\n def testRaiseValueErrorWithInvalidDepthMultiplier(self):\n batch_size = 5\n height, width = 224, 224\n num_classes = 1000\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n with self.assertRaises(ValueError):\n _ = inception.inception_v2(inputs, num_classes, depth_multiplier=-0.1)\n with self.assertRaises(ValueError):\n _ = inception.inception_v2(inputs, num_classes, depth_multiplier=0.0)\n\n def testBuildEndPointsWithUseSeparableConvolutionFalse(self):\n batch_size = 5\n height, width = 224, 224\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n _, end_points = 
inception.inception_v2_base(inputs)\n\n endpoint_keys = [\n key for key in end_points.keys()\n if key.startswith('Mixed') or key.startswith('Conv')\n ]\n\n _, end_points_with_replacement = inception.inception_v2_base(\n inputs, use_separable_conv=False)\n\n # The endpoint shapes must be equal to the original shape even when the\n # separable convolution is replaced with a normal convolution.\n for key in endpoint_keys:\n original_shape = end_points[key].get_shape().as_list()\n self.assertTrue(key in end_points_with_replacement)\n new_shape = end_points_with_replacement[key].get_shape().as_list()\n self.assertListEqual(original_shape, new_shape)\n\n def testBuildEndPointsNCHWDataFormat(self):\n batch_size = 5\n height, width = 224, 224\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n _, end_points = inception.inception_v2_base(inputs)\n\n endpoint_keys = [\n key for key in end_points.keys()\n if key.startswith('Mixed') or key.startswith('Conv')\n ]\n\n inputs_in_nchw = tf.random.uniform((batch_size, 3, height, width))\n _, end_points_with_replacement = inception.inception_v2_base(\n inputs_in_nchw, use_separable_conv=False, data_format='NCHW')\n\n # With the 'NCHW' data format, all endpoint activations have a transposed\n # shape from the original shape with the 'NHWC' layout.\n for key in endpoint_keys:\n transposed_original_shape = tf.transpose(\n a=end_points[key], perm=[0, 3, 1, 2]).get_shape().as_list()\n self.assertTrue(key in end_points_with_replacement)\n new_shape = end_points_with_replacement[key].get_shape().as_list()\n self.assertListEqual(transposed_original_shape, new_shape)\n\n def testBuildErrorsForDataFormats(self):\n batch_size = 5\n height, width = 224, 224\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n\n # 'NCWH' data format is not supported.\n with self.assertRaises(ValueError):\n _ = inception.inception_v2_base(inputs, data_format='NCWH')\n\n # 'NCHW' data format is not supported for separable convolution.\n with self.assertRaises(ValueError):\n _ = inception.inception_v2_base(inputs, data_format='NCHW')\n\n def testHalfSizeImages(self):\n batch_size = 5\n height, width = 112, 112\n num_classes = 1000\n\n inputs = tf.random.uniform((batch_size, height, width, 3))\n logits, end_points = inception.inception_v2(inputs, num_classes)\n self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))\n self.assertListEqual(logits.get_shape().as_list(),\n [batch_size, num_classes])\n pre_pool = end_points['Mixed_5c']\n self.assertListEqual(pre_pool.get_shape().as_list(),\n [batch_size, 4, 4, 1024])\n\n def testBuildBaseNetworkWithoutRootBlock(self):\n batch_size = 5\n height, width = 28, 28\n channels = 192\n\n inputs = tf.random.uniform((batch_size, height, width, channels))\n _, end_points = inception.inception_v2_base(\n inputs, include_root_block=False)\n endpoints_shapes = {\n 'Mixed_3b': [batch_size, 28, 28, 256],\n 'Mixed_3c': [batch_size, 28, 28, 320],\n 'Mixed_4a': [batch_size, 14, 14, 576],\n 'Mixed_4b': [batch_size, 14, 14, 576],\n 'Mixed_4c': [batch_size, 14, 14, 576],\n 'Mixed_4d': [batch_size, 14, 14, 576],\n 'Mixed_4e': [batch_size, 14, 14, 576],\n 'Mixed_5a': [batch_size, 7, 7, 1024],\n 'Mixed_5b': [batch_size, 7, 7, 1024],\n 'Mixed_5c': [batch_size, 7, 7, 1024]\n }\n self.assertItemsEqual(\n list(endpoints_shapes.keys()), list(end_points.keys()))\n for endpoint_name in endpoints_shapes:\n expected_shape = endpoints_shapes[endpoint_name]\n self.assertTrue(endpoint_name in end_points)\n 
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),\n                           expected_shape)\n\n  def testUnknownImageShape(self):\n    tf.compat.v1.reset_default_graph()\n    batch_size = 2\n    height, width = 224, 224\n    num_classes = 1000\n    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))\n    with self.test_session() as sess:\n      inputs = tf.compat.v1.placeholder(\n          tf.float32, shape=(batch_size, None, None, 3))\n      logits, end_points = inception.inception_v2(inputs, num_classes)\n      self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))\n      self.assertListEqual(logits.get_shape().as_list(),\n                           [batch_size, num_classes])\n      pre_pool = end_points['Mixed_5c']\n      feed_dict = {inputs: input_np}\n      tf.compat.v1.global_variables_initializer().run()\n      pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)\n      self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])\n\n  def testGlobalPoolUnknownImageShape(self):\n    tf.compat.v1.reset_default_graph()\n    batch_size = 1\n    height, width = 250, 300\n    num_classes = 1000\n    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))\n    with self.test_session() as sess:\n      inputs = tf.compat.v1.placeholder(\n          tf.float32, shape=(batch_size, None, None, 3))\n      logits, end_points = inception.inception_v2(inputs, num_classes,\n                                                  global_pool=True)\n      self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))\n      self.assertListEqual(logits.get_shape().as_list(),\n                           [batch_size, num_classes])\n      pre_pool = end_points['Mixed_5c']\n      feed_dict = {inputs: input_np}\n      tf.compat.v1.global_variables_initializer().run()\n      pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)\n      self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])\n\n  def testUnknownBatchSize(self):\n    batch_size = 1\n    height, width = 224, 224\n    num_classes = 1000\n\n    inputs = tf.compat.v1.placeholder(tf.float32, (None, height, width, 3))\n    logits, _ = inception.inception_v2(inputs, num_classes)\n    self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))\n    self.assertListEqual(logits.get_shape().as_list(),\n                         [None, num_classes])\n    images = tf.random.uniform((batch_size, height, width, 3))\n\n    with self.test_session() as sess:\n      sess.run(tf.compat.v1.global_variables_initializer())\n      output = sess.run(logits, {inputs: images.eval()})\n      self.assertEqual(output.shape, (batch_size, num_classes))\n\n  def testEvaluation(self):\n    batch_size = 2\n    height, width = 224, 224\n    num_classes = 1000\n\n    eval_inputs = tf.random.uniform((batch_size, height, width, 3))\n    logits, _ = inception.inception_v2(eval_inputs, num_classes,\n                                       is_training=False)\n    predictions = tf.argmax(input=logits, axis=1)\n\n    with self.test_session() as sess:\n      sess.run(tf.compat.v1.global_variables_initializer())\n      output = sess.run(predictions)\n      self.assertEqual(output.shape, (batch_size,))\n\n  def testTrainEvalWithReuse(self):\n    train_batch_size = 5\n    eval_batch_size = 2\n    height, width = 150, 150\n    num_classes = 1000\n\n    train_inputs = tf.random.uniform((train_batch_size, height, width, 3))\n    inception.inception_v2(train_inputs, num_classes)\n    eval_inputs = tf.random.uniform((eval_batch_size, height, width, 3))\n    logits, _ = inception.inception_v2(eval_inputs, num_classes, reuse=True)\n    predictions = tf.argmax(input=logits, axis=1)\n\n    with self.test_session() as sess:\n      sess.run(tf.compat.v1.global_variables_initializer())\n      output = sess.run(predictions)\n      self.assertEqual(output.shape, (eval_batch_size,))\n\n  def testLogitsNotSqueezed(self):\n    num_classes = 25\n    images = 
tf.random.uniform([1, 224, 224, 3])\n logits, _ = inception.inception_v2(images,\n num_classes=num_classes,\n spatial_squeeze=False)\n\n with self.test_session() as sess:\n tf.compat.v1.global_variables_initializer().run()\n logits_out = sess.run(logits)\n self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])\n\n def testNoBatchNormScaleByDefault(self):\n height, width = 224, 224\n num_classes = 1000\n inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))\n with slim.arg_scope(inception.inception_v2_arg_scope()):\n inception.inception_v2(inputs, num_classes, is_training=False)\n\n self.assertEqual(tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'), [])\n\n def testBatchNormScale(self):\n height, width = 224, 224\n num_classes = 1000\n inputs = tf.compat.v1.placeholder(tf.float32, (1, height, width, 3))\n with slim.arg_scope(\n inception.inception_v2_arg_scope(batch_norm_scale=True)):\n inception.inception_v2(inputs, num_classes, is_training=False)\n\n gamma_names = set(\n v.op.name\n for v in tf.compat.v1.global_variables('.*/BatchNorm/gamma:0$'))\n self.assertGreater(len(gamma_names), 0)\n for v in tf.compat.v1.global_variables('.*/BatchNorm/moving_mean:0$'):\n self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
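The depth-multiplier tests above rely on channel counts scaling linearly with the multiplier, subject to a floor; a tiny sketch of that clamped scaling rule (the min_depth value here is illustrative):

def scaled_depth(d, depth_multiplier, min_depth=8):
  # Channel count after applying a depth multiplier, clamped from below.
  return max(int(d * depth_multiplier), min_depth)

print(scaled_depth(64, 0.5))  # 32
print(scaled_depth(64, 2.0))  # 128
print(scaled_depth(8, 0.5))   # 8 (clamped to min_depth)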
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for object_detection.utils.label_map_util.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom six.moves import range\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\nfrom object_detection.protos import string_int_label_map_pb2\nfrom object_detection.utils import label_map_util\n\n\nclass LabelMapUtilTest(tf.test.TestCase):\n\n def _generate_label_map(self, num_classes):\n label_map_proto = string_int_label_map_pb2.StringIntLabelMap()\n for i in range(1, num_classes + 1):\n item = label_map_proto.item.add()\n item.id = i\n item.name = 'label_' + str(i)\n item.display_name = str(i)\n return label_map_proto\n\n def test_get_label_map_dict(self):\n label_map_string = \"\"\"\n item {\n id:2\n name:'cat'\n }\n item {\n id:1\n name:'dog'\n }\n \"\"\"\n label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')\n with tf.gfile.Open(label_map_path, 'wb') as f:\n f.write(label_map_string)\n\n label_map_dict = label_map_util.get_label_map_dict(label_map_path)\n self.assertEqual(label_map_dict['dog'], 1)\n self.assertEqual(label_map_dict['cat'], 2)\n\n def test_get_label_map_dict_from_proto(self):\n label_map_string = \"\"\"\n item {\n id:2\n name:'cat'\n }\n item {\n id:1\n name:'dog'\n }\n \"\"\"\n label_map_proto = text_format.Parse(\n label_map_string, string_int_label_map_pb2.StringIntLabelMap())\n label_map_dict = label_map_util.get_label_map_dict(label_map_proto)\n self.assertEqual(label_map_dict['dog'], 1)\n self.assertEqual(label_map_dict['cat'], 2)\n\n def test_get_label_map_dict_display(self):\n label_map_string = \"\"\"\n item {\n id:2\n display_name:'cat'\n }\n item {\n id:1\n display_name:'dog'\n }\n \"\"\"\n label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')\n with tf.gfile.Open(label_map_path, 'wb') as f:\n f.write(label_map_string)\n\n label_map_dict = label_map_util.get_label_map_dict(\n label_map_path, use_display_name=True)\n self.assertEqual(label_map_dict['dog'], 1)\n self.assertEqual(label_map_dict['cat'], 2)\n\n def test_load_bad_label_map(self):\n label_map_string = \"\"\"\n item {\n id:0\n name:'class that should not be indexed at zero'\n }\n item {\n id:2\n name:'cat'\n }\n item {\n id:1\n name:'dog'\n }\n \"\"\"\n label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')\n with tf.gfile.Open(label_map_path, 'wb') as f:\n f.write(label_map_string)\n\n with self.assertRaises(ValueError):\n label_map_util.load_labelmap(label_map_path)\n\n def test_load_label_map_with_background(self):\n label_map_string = \"\"\"\n item {\n id:0\n name:'background'\n }\n item {\n id:2\n name:'cat'\n }\n item {\n id:1\n name:'dog'\n }\n \"\"\"\n label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')\n with 
tf.gfile.Open(label_map_path, 'wb') as f:\n f.write(label_map_string)\n\n label_map_dict = label_map_util.get_label_map_dict(label_map_path)\n self.assertEqual(label_map_dict['background'], 0)\n self.assertEqual(label_map_dict['dog'], 1)\n self.assertEqual(label_map_dict['cat'], 2)\n\n def test_get_label_map_dict_with_fill_in_gaps_and_background(self):\n label_map_string = \"\"\"\n item {\n id:3\n name:'cat'\n }\n item {\n id:1\n name:'dog'\n }\n \"\"\"\n label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')\n with tf.gfile.Open(label_map_path, 'wb') as f:\n f.write(label_map_string)\n\n label_map_dict = label_map_util.get_label_map_dict(\n label_map_path, fill_in_gaps_and_background=True)\n\n self.assertEqual(label_map_dict['background'], 0)\n self.assertEqual(label_map_dict['dog'], 1)\n self.assertEqual(label_map_dict['2'], 2)\n self.assertEqual(label_map_dict['cat'], 3)\n self.assertEqual(len(label_map_dict), max(label_map_dict.values()) + 1)\n\n def test_keep_categories_with_unique_id(self):\n label_map_proto = string_int_label_map_pb2.StringIntLabelMap()\n label_map_string = \"\"\"\n item {\n id:2\n name:'cat'\n }\n item {\n id:1\n name:'child'\n }\n item {\n id:1\n name:'person'\n }\n item {\n id:1\n name:'n00007846'\n }\n \"\"\"\n text_format.Merge(label_map_string, label_map_proto)\n categories = label_map_util.convert_label_map_to_categories(\n label_map_proto, max_num_classes=3)\n self.assertListEqual([{\n 'id': 2,\n 'name': u'cat'\n }, {\n 'id': 1,\n 'name': u'child'\n }], categories)\n\n def test_convert_label_map_to_categories_no_label_map(self):\n categories = label_map_util.convert_label_map_to_categories(\n None, max_num_classes=3)\n expected_categories_list = [{\n 'name': u'category_1',\n 'id': 1\n }, {\n 'name': u'category_2',\n 'id': 2\n }, {\n 'name': u'category_3',\n 'id': 3\n }]\n self.assertListEqual(expected_categories_list, categories)\n\n def test_convert_label_map_to_categories(self):\n label_map_proto = self._generate_label_map(num_classes=4)\n categories = label_map_util.convert_label_map_to_categories(\n label_map_proto, max_num_classes=3)\n expected_categories_list = [{\n 'name': u'1',\n 'id': 1\n }, {\n 'name': u'2',\n 'id': 2\n }, {\n 'name': u'3',\n 'id': 3\n }]\n self.assertListEqual(expected_categories_list, categories)\n\n def test_convert_label_map_to_categories_with_few_classes(self):\n label_map_proto = self._generate_label_map(num_classes=4)\n cat_no_offset = label_map_util.convert_label_map_to_categories(\n label_map_proto, max_num_classes=2)\n expected_categories_list = [{\n 'name': u'1',\n 'id': 1\n }, {\n 'name': u'2',\n 'id': 2\n }]\n self.assertListEqual(expected_categories_list, cat_no_offset)\n\n def test_get_max_label_map_index(self):\n num_classes = 4\n label_map_proto = self._generate_label_map(num_classes=num_classes)\n max_index = label_map_util.get_max_label_map_index(label_map_proto)\n self.assertEqual(num_classes, max_index)\n\n def test_create_category_index(self):\n categories = [{'name': u'1', 'id': 1}, {'name': u'2', 'id': 2}]\n category_index = label_map_util.create_category_index(categories)\n self.assertDictEqual({\n 1: {\n 'name': u'1',\n 'id': 1\n },\n 2: {\n 'name': u'2',\n 'id': 2\n }\n }, category_index)\n\n def test_create_categories_from_labelmap(self):\n label_map_string = \"\"\"\n item {\n id:1\n name:'dog'\n }\n item {\n id:2\n name:'cat'\n }\n \"\"\"\n label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')\n with tf.gfile.Open(label_map_path, 'wb') as f:\n f.write(label_map_string)\n\n 
categories = label_map_util.create_categories_from_labelmap(label_map_path)\n self.assertListEqual([{\n 'name': u'dog',\n 'id': 1\n }, {\n 'name': u'cat',\n 'id': 2\n }], categories)\n\n def test_create_category_index_from_labelmap(self):\n label_map_string = \"\"\"\n item {\n id:2\n name:'cat'\n }\n item {\n id:1\n name:'dog'\n }\n \"\"\"\n label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')\n with tf.gfile.Open(label_map_path, 'wb') as f:\n f.write(label_map_string)\n\n category_index = label_map_util.create_category_index_from_labelmap(\n label_map_path)\n self.assertDictEqual({\n 1: {\n 'name': u'dog',\n 'id': 1\n },\n 2: {\n 'name': u'cat',\n 'id': 2\n }\n }, category_index)\n\n def test_create_category_index_from_labelmap_display(self):\n label_map_string = \"\"\"\n item {\n id:2\n name:'cat'\n display_name:'meow'\n }\n item {\n id:1\n name:'dog'\n display_name:'woof'\n }\n \"\"\"\n label_map_path = os.path.join(self.get_temp_dir(), 'label_map.pbtxt')\n with tf.gfile.Open(label_map_path, 'wb') as f:\n f.write(label_map_string)\n\n self.assertDictEqual({\n 1: {\n 'name': u'dog',\n 'id': 1\n },\n 2: {\n 'name': u'cat',\n 'id': 2\n }\n }, label_map_util.create_category_index_from_labelmap(\n label_map_path, False))\n\n self.assertDictEqual({\n 1: {\n 'name': u'woof',\n 'id': 1\n },\n 2: {\n 'name': u'meow',\n 'id': 2\n }\n }, label_map_util.create_category_index_from_labelmap(label_map_path))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
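An end-to-end sketch of the utilities exercised by these tests, writing a two-class label map to disk and building a category index from it (the path is illustrative):

import os
import tensorflow as tf
from object_detection.utils import label_map_util

label_map_string = """
item { id: 1 name: 'dog' }
item { id: 2 name: 'cat' }
"""
label_map_path = os.path.join('/tmp', 'label_map.pbtxt')
with tf.gfile.Open(label_map_path, 'wb') as f:
  f.write(label_map_string)

category_index = label_map_util.create_category_index_from_labelmap(label_map_path)
print(category_index[1]['name'])  # dog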
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Replay buffer.\n\nImplements replay buffer in Python.\n\"\"\"\n\nimport random\nimport numpy as np\nfrom six.moves import xrange\n\n\nclass ReplayBuffer(object):\n\n def __init__(self, max_size):\n self.max_size = max_size\n self.cur_size = 0\n self.buffer = {}\n self.init_length = 0\n\n def __len__(self):\n return self.cur_size\n\n def seed_buffer(self, episodes):\n self.init_length = len(episodes)\n self.add(episodes, np.ones(self.init_length))\n\n def add(self, episodes, *args):\n \"\"\"Add episodes to buffer.\"\"\"\n idx = 0\n while self.cur_size < self.max_size and idx < len(episodes):\n self.buffer[self.cur_size] = episodes[idx]\n self.cur_size += 1\n idx += 1\n\n if idx < len(episodes):\n remove_idxs = self.remove_n(len(episodes) - idx)\n for remove_idx in remove_idxs:\n self.buffer[remove_idx] = episodes[idx]\n idx += 1\n\n assert len(self.buffer) == self.cur_size\n\n def remove_n(self, n):\n \"\"\"Get n items for removal.\"\"\"\n # random removal\n idxs = random.sample(xrange(self.init_length, self.cur_size), n)\n return idxs\n\n def get_batch(self, n):\n \"\"\"Get batch of episodes to train on.\"\"\"\n # random batch\n idxs = random.sample(xrange(self.cur_size), n)\n return [self.buffer[idx] for idx in idxs], None\n\n def update_last_batch(self, delta):\n pass\n\n\nclass PrioritizedReplayBuffer(ReplayBuffer):\n\n def __init__(self, max_size, alpha=0.2,\n eviction_strategy='rand'):\n self.max_size = max_size\n self.alpha = alpha\n self.eviction_strategy = eviction_strategy\n assert self.eviction_strategy in ['rand', 'fifo', 'rank']\n self.remove_idx = 0\n\n self.cur_size = 0\n self.buffer = {}\n self.priorities = np.zeros(self.max_size)\n self.init_length = 0\n\n def __len__(self):\n return self.cur_size\n\n def add(self, episodes, priorities, new_idxs=None):\n \"\"\"Add episodes to buffer.\"\"\"\n if new_idxs is None:\n idx = 0\n new_idxs = []\n while self.cur_size < self.max_size and idx < len(episodes):\n self.buffer[self.cur_size] = episodes[idx]\n new_idxs.append(self.cur_size)\n self.cur_size += 1\n idx += 1\n\n if idx < len(episodes):\n remove_idxs = self.remove_n(len(episodes) - idx)\n for remove_idx in remove_idxs:\n self.buffer[remove_idx] = episodes[idx]\n new_idxs.append(remove_idx)\n idx += 1\n else:\n assert len(new_idxs) == len(episodes)\n for new_idx, ep in zip(new_idxs, episodes):\n self.buffer[new_idx] = ep\n\n self.priorities[new_idxs] = priorities\n self.priorities[0:self.init_length] = np.max(\n self.priorities[self.init_length:])\n\n assert len(self.buffer) == self.cur_size\n return new_idxs\n\n def remove_n(self, n):\n \"\"\"Get n items for removal.\"\"\"\n assert self.init_length + n <= self.cur_size\n\n if self.eviction_strategy == 'rand':\n # random removal\n idxs = random.sample(xrange(self.init_length, self.cur_size), n)\n elif self.eviction_strategy == 
'fifo':\n # overwrite elements in cyclical fashion\n idxs = [\n self.init_length +\n (self.remove_idx + i) % (self.max_size - self.init_length)\n for i in xrange(n)]\n self.remove_idx = idxs[-1] + 1 - self.init_length\n elif self.eviction_strategy == 'rank':\n # remove lowest-priority indices\n idxs = np.argpartition(self.priorities, n)[:n]\n\n return idxs\n\n def sampling_distribution(self):\n p = self.priorities[:self.cur_size]\n p = np.exp(self.alpha * (p - np.max(p)))\n norm = np.sum(p)\n if norm > 0:\n uniform = 0.0\n p = p / norm * (1 - uniform) + 1.0 / self.cur_size * uniform\n else:\n p = np.ones(self.cur_size) / self.cur_size\n return p\n\n def get_batch(self, n):\n \"\"\"Get batch of episodes to train on.\"\"\"\n p = self.sampling_distribution()\n idxs = np.random.choice(self.cur_size, size=int(n), replace=False, p=p)\n self.last_batch = idxs\n return [self.buffer[idx] for idx in idxs], p[idxs]\n\n def update_last_batch(self, delta):\n \"\"\"Update last batch idxs with new priority.\"\"\"\n self.priorities[self.last_batch] = np.abs(delta)\n self.priorities[0:self.init_length] = np.max(\n self.priorities[self.init_length:])\n",
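The prioritized buffer above samples episodes with a temperature-scaled softmax over priorities. A small NumPy sketch of that distribution (mirroring sampling_distribution with its uniform-mixing weight at the default 0.0; the priorities and alpha below are illustrative values) makes the numerically stable exponentiation explicit:

import numpy as np

# Softmax-style sampling weights over episode priorities, computed stably
# by subtracting the max before exponentiating (as in the buffer above).
def sampling_distribution(priorities, alpha=0.2):
    p = np.exp(alpha * (priorities - np.max(priorities)))
    norm = np.sum(p)
    if norm > 0:
        return p / norm
    # Degenerate case: fall back to uniform sampling.
    return np.ones_like(priorities) / len(priorities)

priorities = np.array([0.5, 2.0, 1.0, 4.0])
p = sampling_distribution(priorities)
print(p, p.sum())  # higher-priority episodes get more mass; weights sum to 1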
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Perspective Transformer Layer Implementation.\n\nTransform the volume based on 4 x 4 perspective projection matrix.\n\nReference:\n(1) \"Perspective Transformer Nets: Perspective Transformer Nets:\nLearning Single-View 3D Object Reconstruction without 3D Supervision.\"\nXinchen Yan, Jimei Yang, Ersin Yumer, Yijie Guo, Honglak Lee. In NIPS 2016\nhttps://papers.nips.cc/paper/6206-perspective-transformer-nets-learning-single-view-3d-object-reconstruction-without-3d-supervision.pdf\n\n(2) Official implementation in Torch: https://github.com/xcyan/ptnbhwd\n\n(3) 2D Transformer implementation in TF:\ngithub.com/tensorflow/models/tree/master/research/transformer\n\n\"\"\"\n\nimport tensorflow as tf\n\n\ndef transformer(voxels,\n theta,\n out_size,\n z_near,\n z_far,\n name='PerspectiveTransformer'):\n \"\"\"Perspective Transformer Layer.\n\n Args:\n voxels: A tensor of size [num_batch, depth, height, width, num_channels].\n It is the output of a deconv/upsampling conv network (tf.float32).\n theta: A tensor of size [num_batch, 16].\n It is the inverse camera transformation matrix (tf.float32).\n out_size: A tuple representing the size of output of\n transformer layer (float).\n z_near: A number representing the near clipping plane (float).\n z_far: A number representing the far clipping plane (float).\n\n Returns:\n A transformed tensor (tf.float32).\n\n \"\"\"\n def _repeat(x, n_repeats):\n with tf.variable_scope('_repeat'):\n rep = tf.transpose(\n tf.expand_dims(tf.ones(shape=tf.stack([\n n_repeats,\n ])), 1), [1, 0])\n rep = tf.to_int32(rep)\n x = tf.matmul(tf.reshape(x, (-1, 1)), rep)\n return tf.reshape(x, [-1])\n\n def _interpolate(im, x, y, z, out_size):\n \"\"\"Bilinear interploation layer.\n\n Args:\n im: A 5D tensor of size [num_batch, depth, height, width, num_channels].\n It is the input volume for the transformation layer (tf.float32).\n x: A tensor of size [num_batch, out_depth, out_height, out_width]\n representing the inverse coordinate mapping for x (tf.float32).\n y: A tensor of size [num_batch, out_depth, out_height, out_width]\n representing the inverse coordinate mapping for y (tf.float32).\n z: A tensor of size [num_batch, out_depth, out_height, out_width]\n representing the inverse coordinate mapping for z (tf.float32).\n out_size: A tuple representing the output size of transformation layer\n (float).\n\n Returns:\n A transformed tensor (tf.float32).\n\n \"\"\"\n with tf.variable_scope('_interpolate'):\n num_batch = im.get_shape().as_list()[0]\n depth = im.get_shape().as_list()[1]\n height = im.get_shape().as_list()[2]\n width = im.get_shape().as_list()[3]\n channels = im.get_shape().as_list()[4]\n\n x = tf.to_float(x)\n y = tf.to_float(y)\n z = tf.to_float(z)\n depth_f = tf.to_float(depth)\n height_f = tf.to_float(height)\n width_f = tf.to_float(width)\n # Number of 
disparity interpolated.\n out_depth = out_size[0]\n out_height = out_size[1]\n out_width = out_size[2]\n zero = tf.zeros([], dtype='int32')\n # 0 <= z < depth, 0 <= y < height & 0 <= x < width.\n max_z = tf.to_int32(tf.shape(im)[1] - 1)\n max_y = tf.to_int32(tf.shape(im)[2] - 1)\n max_x = tf.to_int32(tf.shape(im)[3] - 1)\n\n # Converts scale indices from [-1, 1] to [0, width/height/depth].\n x = (x + 1.0) * (width_f) / 2.0\n y = (y + 1.0) * (height_f) / 2.0\n z = (z + 1.0) * (depth_f) / 2.0\n\n x0 = tf.to_int32(tf.floor(x))\n x1 = x0 + 1\n y0 = tf.to_int32(tf.floor(y))\n y1 = y0 + 1\n z0 = tf.to_int32(tf.floor(z))\n z1 = z0 + 1\n\n x0_clip = tf.clip_by_value(x0, zero, max_x)\n x1_clip = tf.clip_by_value(x1, zero, max_x)\n y0_clip = tf.clip_by_value(y0, zero, max_y)\n y1_clip = tf.clip_by_value(y1, zero, max_y)\n z0_clip = tf.clip_by_value(z0, zero, max_z)\n z1_clip = tf.clip_by_value(z1, zero, max_z)\n dim3 = width\n dim2 = width * height\n dim1 = width * height * depth\n base = _repeat(\n tf.range(num_batch) * dim1, out_depth * out_height * out_width)\n base_z0_y0 = base + z0_clip * dim2 + y0_clip * dim3\n base_z0_y1 = base + z0_clip * dim2 + y1_clip * dim3\n base_z1_y0 = base + z1_clip * dim2 + y0_clip * dim3\n base_z1_y1 = base + z1_clip * dim2 + y1_clip * dim3\n\n idx_z0_y0_x0 = base_z0_y0 + x0_clip\n idx_z0_y0_x1 = base_z0_y0 + x1_clip\n idx_z0_y1_x0 = base_z0_y1 + x0_clip\n idx_z0_y1_x1 = base_z0_y1 + x1_clip\n idx_z1_y0_x0 = base_z1_y0 + x0_clip\n idx_z1_y0_x1 = base_z1_y0 + x1_clip\n idx_z1_y1_x0 = base_z1_y1 + x0_clip\n idx_z1_y1_x1 = base_z1_y1 + x1_clip\n\n # Use indices to lookup pixels in the flat image and restore\n # channels dim\n im_flat = tf.reshape(im, tf.stack([-1, channels]))\n im_flat = tf.to_float(im_flat)\n i_z0_y0_x0 = tf.gather(im_flat, idx_z0_y0_x0)\n i_z0_y0_x1 = tf.gather(im_flat, idx_z0_y0_x1)\n i_z0_y1_x0 = tf.gather(im_flat, idx_z0_y1_x0)\n i_z0_y1_x1 = tf.gather(im_flat, idx_z0_y1_x1)\n i_z1_y0_x0 = tf.gather(im_flat, idx_z1_y0_x0)\n i_z1_y0_x1 = tf.gather(im_flat, idx_z1_y0_x1)\n i_z1_y1_x0 = tf.gather(im_flat, idx_z1_y1_x0)\n i_z1_y1_x1 = tf.gather(im_flat, idx_z1_y1_x1)\n\n # Finally calculate interpolated values.\n x0_f = tf.to_float(x0)\n x1_f = tf.to_float(x1)\n y0_f = tf.to_float(y0)\n y1_f = tf.to_float(y1)\n z0_f = tf.to_float(z0)\n z1_f = tf.to_float(z1)\n # Check the out-of-boundary case.\n x0_valid = tf.to_float(\n tf.less_equal(x0, max_x) & tf.greater_equal(x0, 0))\n x1_valid = tf.to_float(\n tf.less_equal(x1, max_x) & tf.greater_equal(x1, 0))\n y0_valid = tf.to_float(\n tf.less_equal(y0, max_y) & tf.greater_equal(y0, 0))\n y1_valid = tf.to_float(\n tf.less_equal(y1, max_y) & tf.greater_equal(y1, 0))\n z0_valid = tf.to_float(\n tf.less_equal(z0, max_z) & tf.greater_equal(z0, 0))\n z1_valid = tf.to_float(\n tf.less_equal(z1, max_z) & tf.greater_equal(z1, 0))\n\n w_z0_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) *\n (z1_f - z) * x1_valid * y1_valid * z1_valid),\n 1)\n w_z0_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) *\n (z1_f - z) * x0_valid * y1_valid * z1_valid),\n 1)\n w_z0_y1_x0 = tf.expand_dims(((x1_f - x) * (y - y0_f) *\n (z1_f - z) * x1_valid * y0_valid * z1_valid),\n 1)\n w_z0_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) *\n (z1_f - z) * x0_valid * y0_valid * z1_valid),\n 1)\n w_z1_y0_x0 = tf.expand_dims(((x1_f - x) * (y1_f - y) *\n (z - z0_f) * x1_valid * y1_valid * z0_valid),\n 1)\n w_z1_y0_x1 = tf.expand_dims(((x - x0_f) * (y1_f - y) *\n (z - z0_f) * x0_valid * y1_valid * z0_valid),\n 1)\n w_z1_y1_x0 = tf.expand_dims(((x1_f - 
x) * (y - y0_f) *\n (z - z0_f) * x1_valid * y0_valid * z0_valid),\n 1)\n w_z1_y1_x1 = tf.expand_dims(((x - x0_f) * (y - y0_f) *\n (z - z0_f) * x0_valid * y0_valid * z0_valid),\n 1)\n\n output = tf.add_n([\n w_z0_y0_x0 * i_z0_y0_x0, w_z0_y0_x1 * i_z0_y0_x1,\n w_z0_y1_x0 * i_z0_y1_x0, w_z0_y1_x1 * i_z0_y1_x1,\n w_z1_y0_x0 * i_z1_y0_x0, w_z1_y0_x1 * i_z1_y0_x1,\n w_z1_y1_x0 * i_z1_y1_x0, w_z1_y1_x1 * i_z1_y1_x1\n ])\n return output\n\n def _meshgrid(depth, height, width, z_near, z_far):\n with tf.variable_scope('_meshgrid'):\n x_t = tf.reshape(\n tf.tile(tf.linspace(-1.0, 1.0, width), [height * depth]),\n [depth, height, width])\n y_t = tf.reshape(\n tf.tile(tf.linspace(-1.0, 1.0, height), [width * depth]),\n [depth, width, height])\n y_t = tf.transpose(y_t, [0, 2, 1])\n sample_grid = tf.tile(\n tf.linspace(float(z_near), float(z_far), depth), [width * height])\n z_t = tf.reshape(sample_grid, [height, width, depth])\n z_t = tf.transpose(z_t, [2, 0, 1])\n\n z_t = 1 / z_t\n d_t = 1 / z_t\n x_t /= z_t\n y_t /= z_t\n\n x_t_flat = tf.reshape(x_t, (1, -1))\n y_t_flat = tf.reshape(y_t, (1, -1))\n d_t_flat = tf.reshape(d_t, (1, -1))\n\n ones = tf.ones_like(x_t_flat)\n grid = tf.concat([d_t_flat, y_t_flat, x_t_flat, ones], 0)\n return grid\n\n def _transform(theta, input_dim, out_size, z_near, z_far):\n with tf.variable_scope('_transform'):\n num_batch = input_dim.get_shape().as_list()[0]\n num_channels = input_dim.get_shape().as_list()[4]\n theta = tf.reshape(theta, (-1, 4, 4))\n theta = tf.cast(theta, 'float32')\n\n out_depth = out_size[0]\n out_height = out_size[1]\n out_width = out_size[2]\n grid = _meshgrid(out_depth, out_height, out_width, z_near, z_far)\n grid = tf.expand_dims(grid, 0)\n grid = tf.reshape(grid, [-1])\n grid = tf.tile(grid, tf.stack([num_batch]))\n grid = tf.reshape(grid, tf.stack([num_batch, 4, -1]))\n\n # Transform A x (x_t', y_t', 1, d_t)^T -> (x_s, y_s, z_s, 1).\n t_g = tf.matmul(theta, grid)\n z_s = tf.slice(t_g, [0, 0, 0], [-1, 1, -1])\n y_s = tf.slice(t_g, [0, 1, 0], [-1, 1, -1])\n x_s = tf.slice(t_g, [0, 2, 0], [-1, 1, -1])\n\n z_s_flat = tf.reshape(z_s, [-1])\n y_s_flat = tf.reshape(y_s, [-1])\n x_s_flat = tf.reshape(x_s, [-1])\n\n input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, z_s_flat,\n out_size)\n\n output = tf.reshape(\n input_transformed,\n tf.stack([num_batch, out_depth, out_height, out_width, num_channels]))\n\n return output\n\n with tf.variable_scope(name):\n output = _transform(theta, voxels, out_size, z_near, z_far)\n return output\n",
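A hedged usage sketch for the layer above, assuming a TensorFlow 1.x runtime (matching the tf.to_float / variable_scope API the file uses) and that `transformer` is in scope: it feeds a random voxel grid through the layer with a per-example identity matrix as the 4 x 4 inverse camera transform and checks the output shape.

import numpy as np
import tensorflow as tf

batch, d, h, w, c = 2, 32, 32, 32, 1
voxels = tf.placeholder(tf.float32, [batch, d, h, w, c])
# theta is flattened row-major to [num_batch, 16]; identity is a placeholder
# choice for illustration, not a meaningful camera pose.
theta = tf.constant(np.tile(np.eye(4).reshape(1, 16), [batch, 1]),
                    dtype=tf.float32)
out = transformer(voxels, theta, out_size=(d, h, w), z_near=1.0, z_far=3.0)

with tf.Session() as sess:
    result = sess.run(out, {voxels: np.random.rand(batch, d, h, w, c)})
    print(result.shape)  # (2, 32, 32, 32, 1)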
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.core.keypoint_ops.\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nfrom object_detection.core import keypoint_ops\n\n\nclass KeypointOpsTest(tf.test.TestCase):\n \"\"\"Tests for common keypoint operations.\"\"\"\n\n def test_scale(self):\n keypoints = tf.constant([\n [[0.0, 0.0], [100.0, 200.0]],\n [[50.0, 120.0], [100.0, 140.0]]\n ])\n y_scale = tf.constant(1.0 / 100)\n x_scale = tf.constant(1.0 / 200)\n\n expected_keypoints = tf.constant([\n [[0., 0.], [1.0, 1.0]],\n [[0.5, 0.6], [1.0, 0.7]]\n ])\n output = keypoint_ops.scale(keypoints, y_scale, x_scale)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_clip_to_window(self):\n keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n window = tf.constant([0.25, 0.25, 0.75, 0.75])\n\n expected_keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.25], [0.75, 0.75]]\n ])\n output = keypoint_ops.clip_to_window(keypoints, window)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_prune_outside_window(self):\n keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n window = tf.constant([0.25, 0.25, 0.75, 0.75])\n\n expected_keypoints = tf.constant([[[0.25, 0.5], [0.75, 0.75]],\n [[np.nan, np.nan], [np.nan, np.nan]]])\n output = keypoint_ops.prune_outside_window(keypoints, window)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_change_coordinate_frame(self):\n keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n window = tf.constant([0.25, 0.25, 0.75, 0.75])\n\n expected_keypoints = tf.constant([\n [[0, 0.5], [1.0, 1.0]],\n [[0.5, -0.5], [1.5, 1.5]]\n ])\n output = keypoint_ops.change_coordinate_frame(keypoints, window)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_to_normalized_coordinates(self):\n keypoints = tf.constant([\n [[10., 30.], [30., 45.]],\n [[20., 0.], [40., 60.]]\n ])\n output = keypoint_ops.to_normalized_coordinates(\n keypoints, 40, 60)\n expected_keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_to_normalized_coordinates_already_normalized(self):\n keypoints = 
tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n output = keypoint_ops.to_normalized_coordinates(\n keypoints, 40, 60)\n\n with self.test_session() as sess:\n with self.assertRaisesOpError('assertion failed'):\n sess.run(output)\n\n def test_to_absolute_coordinates(self):\n keypoints = tf.constant([\n [[0.25, 0.5], [0.75, 0.75]],\n [[0.5, 0.0], [1.0, 1.0]]\n ])\n output = keypoint_ops.to_absolute_coordinates(\n keypoints, 40, 60)\n expected_keypoints = tf.constant([\n [[10., 30.], [30., 45.]],\n [[20., 0.], [40., 60.]]\n ])\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_to_absolute_coordinates_already_absolute(self):\n keypoints = tf.constant([\n [[10., 30.], [30., 45.]],\n [[20., 0.], [40., 60.]]\n ])\n output = keypoint_ops.to_absolute_coordinates(\n keypoints, 40, 60)\n\n with self.test_session() as sess:\n with self.assertRaisesOpError('assertion failed'):\n sess.run(output)\n\n def test_flip_horizontal(self):\n keypoints = tf.constant([\n [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],\n [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]\n ])\n flip_permutation = [0, 2, 1]\n\n expected_keypoints = tf.constant([\n [[0.1, 0.9], [0.3, 0.7], [0.2, 0.8]],\n [[0.4, 0.6], [0.6, 0.4], [0.5, 0.5]],\n ])\n output = keypoint_ops.flip_horizontal(keypoints, 0.5, flip_permutation)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_flip_vertical(self):\n keypoints = tf.constant([\n [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],\n [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]]\n ])\n flip_permutation = [0, 2, 1]\n\n expected_keypoints = tf.constant([\n [[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]],\n [[0.6, 0.4], [0.4, 0.6], [0.5, 0.5]],\n ])\n output = keypoint_ops.flip_vertical(keypoints, 0.5, flip_permutation)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n def test_rot90(self):\n keypoints = tf.constant([\n [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]],\n [[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]]\n ])\n expected_keypoints = tf.constant([\n [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]],\n [[0.4, 0.4], [0.4, 0.5], [0.3, 0.6]],\n ])\n output = keypoint_ops.rot90(keypoints)\n\n with self.test_session() as sess:\n output_, expected_keypoints_ = sess.run([output, expected_keypoints])\n self.assertAllClose(output_, expected_keypoints_)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
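The keypoint tests above all rely on the same coordinate convention. A tiny NumPy sketch of it (not the keypoint_ops code) reproduces the to_normalized_coordinates expectation by hand: keypoints are [y, x] pairs, and normalization divides y by image height and x by image width.

import numpy as np

def to_normalized(keypoints, height, width):
    # keypoints: [..., 2] array of [y, x] in absolute pixel coordinates.
    return keypoints / np.array([height, width], dtype=np.float32)

keypoints = np.array([[10., 30.], [30., 45.]])
print(to_normalized(keypoints, 40, 60))  # [[0.25 0.5 ] [0.75 0.75]], as asserted above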
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.predictors.heads.class_head.\"\"\"\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\nfrom object_detection.builders import hyperparams_builder\nfrom object_detection.predictors.heads import keras_class_head\nfrom object_detection.protos import hyperparams_pb2\nfrom object_detection.utils import test_case\n\n\nclass ConvolutionalKerasClassPredictorTest(test_case.TestCase):\n\n def _build_conv_hyperparams(self):\n conv_hyperparams = hyperparams_pb2.Hyperparams()\n conv_hyperparams_text_proto = \"\"\"\n activation: NONE\n regularizer {\n l2_regularizer {\n }\n }\n initializer {\n truncated_normal_initializer {\n }\n }\n \"\"\"\n text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)\n return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)\n\n def test_prediction_size_depthwise_false(self):\n conv_hyperparams = self._build_conv_hyperparams()\n class_prediction_head = keras_class_head.ConvolutionalClassHead(\n is_training=True,\n num_class_slots=20,\n use_dropout=True,\n dropout_keep_prob=0.5,\n kernel_size=3,\n conv_hyperparams=conv_hyperparams,\n freeze_batchnorm=False,\n num_predictions_per_location=1,\n use_depthwise=False)\n image_feature = tf.random_uniform(\n [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\n class_predictions = class_prediction_head(image_feature,)\n self.assertAllEqual([64, 323, 20],\n class_predictions.get_shape().as_list())\n\n def test_prediction_size_depthwise_true(self):\n conv_hyperparams = self._build_conv_hyperparams()\n class_prediction_head = keras_class_head.ConvolutionalClassHead(\n is_training=True,\n num_class_slots=20,\n use_dropout=True,\n dropout_keep_prob=0.5,\n kernel_size=3,\n conv_hyperparams=conv_hyperparams,\n freeze_batchnorm=False,\n num_predictions_per_location=1,\n use_depthwise=True)\n image_feature = tf.random_uniform(\n [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\n class_predictions = class_prediction_head(image_feature,)\n self.assertAllEqual([64, 323, 20],\n class_predictions.get_shape().as_list())\n\n\nclass MaskRCNNClassHeadTest(test_case.TestCase):\n\n def _build_fc_hyperparams(self,\n op_type=hyperparams_pb2.Hyperparams.FC):\n hyperparams = hyperparams_pb2.Hyperparams()\n hyperparams_text_proto = \"\"\"\n activation: NONE\n regularizer {\n l2_regularizer {\n }\n }\n initializer {\n truncated_normal_initializer {\n }\n }\n \"\"\"\n text_format.Merge(hyperparams_text_proto, hyperparams)\n hyperparams.op = op_type\n return hyperparams_builder.KerasLayerHyperparams(hyperparams)\n\n def test_prediction_size(self):\n class_prediction_head = keras_class_head.MaskRCNNClassHead(\n is_training=False,\n num_class_slots=20,\n fc_hyperparams=self._build_fc_hyperparams(),\n freeze_batchnorm=False,\n use_dropout=True,\n dropout_keep_prob=0.5)\n 
roi_pooled_features = tf.random_uniform(\n [64, 7, 7, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\n prediction = class_prediction_head(roi_pooled_features)\n self.assertAllEqual([64, 1, 20], prediction.get_shape().as_list())\n\n\nclass WeightSharedConvolutionalKerasClassPredictorTest(test_case.TestCase):\n\n def _build_conv_hyperparams(self):\n conv_hyperparams = hyperparams_pb2.Hyperparams()\n conv_hyperparams_text_proto = \"\"\"\n activation: NONE\n regularizer {\n l2_regularizer {\n }\n }\n initializer {\n truncated_normal_initializer {\n }\n }\n \"\"\"\n text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)\n return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)\n\n def test_prediction_size_depthwise_false(self):\n conv_hyperparams = self._build_conv_hyperparams()\n class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(\n num_class_slots=20,\n conv_hyperparams=conv_hyperparams,\n num_predictions_per_location=1,\n use_depthwise=False)\n image_feature = tf.random_uniform(\n [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\n class_predictions = class_prediction_head(image_feature)\n self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list())\n\n def test_prediction_size_depthwise_true(self):\n conv_hyperparams = self._build_conv_hyperparams()\n class_prediction_head = keras_class_head.WeightSharedConvolutionalClassHead(\n num_class_slots=20,\n conv_hyperparams=conv_hyperparams,\n num_predictions_per_location=1,\n use_depthwise=True)\n image_feature = tf.random_uniform(\n [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\n class_predictions = class_prediction_head(image_feature)\n self.assertAllEqual([64, 323, 20], class_predictions.get_shape().as_list())\n\n def test_variable_count_depth_wise_true(self):\n g = tf.Graph()\n with g.as_default():\n conv_hyperparams = self._build_conv_hyperparams()\n class_prediction_head = (\n keras_class_head.WeightSharedConvolutionalClassHead(\n num_class_slots=20,\n conv_hyperparams=conv_hyperparams,\n num_predictions_per_location=1,\n use_depthwise=True))\n image_feature = tf.random_uniform(\n [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\n _ = class_prediction_head(image_feature)\n variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n self.assertEqual(len(variables), 3)\n\n def test_variable_count_depth_wise_False(self):\n g = tf.Graph()\n with g.as_default():\n conv_hyperparams = self._build_conv_hyperparams()\n class_prediction_head = (\n keras_class_head.WeightSharedConvolutionalClassHead(\n num_class_slots=20,\n conv_hyperparams=conv_hyperparams,\n num_predictions_per_location=1,\n use_depthwise=False))\n image_feature = tf.random_uniform(\n [64, 17, 19, 1024], minval=-10.0, maxval=10.0, dtype=tf.float32)\n _ = class_prediction_head(image_feature)\n variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n self.assertEqual(len(variables), 2)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
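The repeated shape assertion [64, 323, 20] in the tests above comes from flattening the spatial grid of the feature map into anchors; a one-liner makes the arithmetic explicit:

# With a 17x19 feature map and one prediction per location, the head emits
# 17 * 19 * 1 = 323 anchor rows, each with num_class_slots = 20 logits.
height, width, predictions_per_location, num_class_slots = 17, 19, 1, 20
num_anchors = height * width * predictions_per_location
print(num_anchors, num_class_slots)  # 323 20 -> output shape [batch, 323, 20]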
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Download MNIST, Omniglot datasets for Rebar.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport urllib\nimport gzip\nimport os\nimport config\nimport struct\nimport numpy as np\nimport cPickle as pickle\nimport datasets\n\nMNIST_URL = 'see README'\nMNIST_BINARIZED_URL = 'see README'\nOMNIGLOT_URL = 'see README'\n\nMNIST_FLOAT_TRAIN = 'train-images-idx3-ubyte'\n\n\ndef load_mnist_float(local_filename):\n with open(local_filename, 'rb') as f:\n f.seek(4)\n nimages, rows, cols = struct.unpack('>iii', f.read(12))\n dim = rows*cols\n\n images = np.fromfile(f, dtype=np.dtype(np.ubyte))\n images = (images/255.0).astype('float32').reshape((nimages, dim))\n\n return images\n\nif __name__ == '__main__':\n if not os.path.exists(config.DATA_DIR):\n os.makedirs(config.DATA_DIR)\n\n # Get MNIST and convert to npy file\n local_filename = os.path.join(config.DATA_DIR, MNIST_FLOAT_TRAIN)\n if not os.path.exists(local_filename):\n urllib.urlretrieve(\"%s/%s.gz\" % (MNIST_URL, MNIST_FLOAT_TRAIN), local_filename+'.gz')\n with gzip.open(local_filename+'.gz', 'rb') as f:\n file_content = f.read()\n with open(local_filename, 'wb') as f:\n f.write(file_content)\n os.remove(local_filename+'.gz')\n\n mnist_float_train = load_mnist_float(local_filename)[:-10000]\n # save in a nice format\n np.save(os.path.join(config.DATA_DIR, config.MNIST_FLOAT), mnist_float_train)\n\n # Get binarized MNIST\n splits = ['train', 'valid', 'test']\n mnist_binarized = []\n for split in splits:\n filename = 'binarized_mnist_%s.amat' % split\n url = '%s/binarized_mnist_%s.amat' % (MNIST_BINARIZED_URL, split)\n local_filename = os.path.join(config.DATA_DIR, filename)\n if not os.path.exists(local_filename):\n urllib.urlretrieve(url, local_filename)\n\n with open(local_filename, 'rb') as f:\n mnist_binarized.append((np.array([map(int, line.split()) for line in f.readlines()]).astype('float32'), None))\n\n # save in a nice format\n with open(os.path.join(config.DATA_DIR, config.MNIST_BINARIZED), 'w') as out:\n pickle.dump(mnist_binarized, out)\n\n # Get Omniglot\n local_filename = os.path.join(config.DATA_DIR, config.OMNIGLOT)\n if not os.path.exists(local_filename):\n urllib.urlretrieve(OMNIGLOT_URL,\n local_filename)\n\n",
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport tensorflow as tf\nimport numpy as np\nfrom scipy.misc import logsumexp\n\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.python.ops import init_ops\nimport utils as U\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\nFLAGS = tf.flags.FLAGS\n\nQ_COLLECTION = \"q_collection\"\nP_COLLECTION = \"p_collection\"\n\nclass SBN(object): # REINFORCE\n\n def __init__(self,\n hparams,\n activation_func=tf.nn.sigmoid,\n mean_xs = None,\n eval_mode=False):\n self.eval_mode = eval_mode\n self.hparams = hparams\n self.mean_xs = mean_xs\n self.train_bias= -np.log(1./np.clip(mean_xs, 0.001, 0.999)-1.).astype(np.float32)\n self.activation_func = activation_func\n\n self.n_samples = tf.placeholder('int32')\n self.x = tf.placeholder('float', [None, self.hparams.n_input])\n self._x = tf.tile(self.x, [self.n_samples, 1])\n\n self.batch_size = tf.shape(self._x)[0]\n\n self.uniform_samples = dict()\n self.uniform_samples_v = dict()\n self.prior = tf.Variable(tf.zeros([self.hparams.n_hidden],\n dtype=tf.float32),\n name='p_prior',\n collections=[tf.GraphKeys.GLOBAL_VARIABLES, P_COLLECTION])\n\n self.run_recognition_network = False\n self.run_generator_network = False\n\n # Initialize temperature\n self.pre_temperature_variable = tf.Variable(\n np.log(self.hparams.temperature),\n trainable=False,\n dtype=tf.float32)\n self.temperature_variable = tf.exp(self.pre_temperature_variable)\n\n self.global_step = tf.Variable(0, trainable=False)\n self.baseline_loss = []\n self.ema = tf.train.ExponentialMovingAverage(decay=0.999)\n self.maintain_ema_ops = []\n self.optimizer_class = tf.train.AdamOptimizer(\n learning_rate=1*self.hparams.learning_rate,\n beta2=self.hparams.beta2)\n\n self._generate_randomness()\n self._create_network()\n\n\n def initialize(self, sess):\n self.sess = sess\n\n def _create_eta(self, shape=[], collection='CV'):\n return 2 * tf.sigmoid(tf.Variable(tf.zeros(shape), trainable=False,\n collections=[collection, tf.GraphKeys.GLOBAL_VARIABLES, Q_COLLECTION]))\n\n def _create_baseline(self, n_output=1, n_hidden=100,\n is_zero_init=False,\n collection='BASELINE'):\n # center input\n h = self._x\n if self.mean_xs is not None:\n h -= self.mean_xs\n\n if is_zero_init:\n initializer = init_ops.zeros_initializer()\n else:\n initializer = slim.variance_scaling_initializer()\n\n with slim.arg_scope([slim.fully_connected],\n variables_collections=[collection, Q_COLLECTION],\n trainable=False,\n weights_initializer=initializer):\n h = slim.fully_connected(h, n_hidden, activation_fn=tf.nn.tanh)\n baseline = slim.fully_connected(h, n_output, activation_fn=None)\n\n if n_output == 1:\n baseline = tf.reshape(baseline, [-1]) # very important to reshape\n return 
baseline\n\n\n def _create_transformation(self, input, n_output, reuse, scope_prefix):\n \"\"\"Create the deterministic transformation between stochastic layers.\n\n If self.hparam.nonlinear:\n 2 x tanh layers\n Else:\n 1 x linear layer\n \"\"\"\n if self.hparams.nonlinear:\n h = slim.fully_connected(input,\n self.hparams.n_hidden,\n reuse=reuse,\n activation_fn=tf.nn.tanh,\n scope='%s_nonlinear_1' % scope_prefix)\n h = slim.fully_connected(h,\n self.hparams.n_hidden,\n reuse=reuse,\n activation_fn=tf.nn.tanh,\n scope='%s_nonlinear_2' % scope_prefix)\n h = slim.fully_connected(h,\n n_output,\n reuse=reuse,\n activation_fn=None,\n scope='%s' % scope_prefix)\n else:\n h = slim.fully_connected(input,\n n_output,\n reuse=reuse,\n activation_fn=None,\n scope='%s' % scope_prefix)\n return h\n\n def _recognition_network(self, sampler=None, log_likelihood_func=None):\n \"\"\"x values -> samples from Q and return log Q(h|x).\"\"\"\n samples = {}\n reuse = None if not self.run_recognition_network else True\n\n # Set defaults\n if sampler is None:\n sampler = self._random_sample\n\n if log_likelihood_func is None:\n log_likelihood_func = lambda sample, log_params: (\n U.binary_log_likelihood(sample['activation'], log_params))\n\n logQ = []\n\n\n if self.hparams.task in ['sbn', 'omni']:\n # Initialize the edge case\n samples[-1] = {'activation': self._x}\n if self.mean_xs is not None:\n samples[-1]['activation'] -= self.mean_xs # center the input\n samples[-1]['activation'] = (samples[-1]['activation'] + 1)/2.0\n\n with slim.arg_scope([slim.fully_connected],\n weights_initializer=slim.variance_scaling_initializer(),\n variables_collections=[Q_COLLECTION]):\n for i in xrange(self.hparams.n_layer):\n # Set up the input to the layer\n input = 2.0*samples[i-1]['activation'] - 1.0\n\n # Create the conditional distribution (output is the logits)\n h = self._create_transformation(input,\n n_output=self.hparams.n_hidden,\n reuse=reuse,\n scope_prefix='q_%d' % i)\n\n samples[i] = sampler(h, self.uniform_samples[i], i)\n logQ.append(log_likelihood_func(samples[i], h))\n\n self.run_recognition_network = True\n return logQ, samples\n elif self.hparams.task == 'sp':\n # Initialize the edge case\n samples[-1] = {'activation': tf.split(self._x,\n num_or_size_splits=2,\n axis=1)[0]} # top half of digit\n if self.mean_xs is not None:\n samples[-1]['activation'] -= np.split(self.mean_xs, 2, 0)[0] # center the input\n samples[-1]['activation'] = (samples[-1]['activation'] + 1)/2.0\n\n with slim.arg_scope([slim.fully_connected],\n weights_initializer=slim.variance_scaling_initializer(),\n variables_collections=[Q_COLLECTION]):\n for i in xrange(self.hparams.n_layer):\n # Set up the input to the layer\n input = 2.0*samples[i-1]['activation'] - 1.0\n\n # Create the conditional distribution (output is the logits)\n h = self._create_transformation(input,\n n_output=self.hparams.n_hidden,\n reuse=reuse,\n scope_prefix='q_%d' % i)\n\n samples[i] = sampler(h, self.uniform_samples[i], i)\n logQ.append(log_likelihood_func(samples[i], h))\n\n self.run_recognition_network = True\n return logQ, samples\n\n def _generator_network(self, samples, logQ, log_likelihood_func=None):\n '''Returns learning signal and function.\n\n This is the implementation for SBNs for the ELBO.\n\n Args:\n samples: dictionary of sampled latent variables\n logQ: list of log q(h_i) terms\n log_likelihood_func: function used to compute log probs for the latent\n variables\n\n Returns:\n learning_signal: the \"reward\" function\n function_term: part of the 
function that depends on the parameters\n and needs to have the gradient taken through\n '''\n reuse=None if not self.run_generator_network else True\n\n if self.hparams.task in ['sbn', 'omni']:\n if log_likelihood_func is None:\n log_likelihood_func = lambda sample, log_params: (\n U.binary_log_likelihood(sample['activation'], log_params))\n\n logPPrior = log_likelihood_func(\n samples[self.hparams.n_layer-1],\n tf.expand_dims(self.prior, 0))\n\n with slim.arg_scope([slim.fully_connected],\n weights_initializer=slim.variance_scaling_initializer(),\n variables_collections=[P_COLLECTION]):\n\n for i in reversed(xrange(self.hparams.n_layer)):\n if i == 0:\n n_output = self.hparams.n_input\n else:\n n_output = self.hparams.n_hidden\n input = 2.0*samples[i]['activation']-1.0\n\n h = self._create_transformation(input,\n n_output,\n reuse=reuse,\n scope_prefix='p_%d' % i)\n\n if i == 0:\n # Assume output is binary\n logP = U.binary_log_likelihood(self._x, h + self.train_bias)\n else:\n logPPrior += log_likelihood_func(samples[i-1], h)\n\n self.run_generator_network = True\n return logP + logPPrior - tf.add_n(logQ), logP + logPPrior\n elif self.hparams.task == 'sp':\n with slim.arg_scope([slim.fully_connected],\n weights_initializer=slim.variance_scaling_initializer(),\n variables_collections=[P_COLLECTION]):\n n_output = int(self.hparams.n_input/2)\n i = self.hparams.n_layer - 1 # use the last layer\n input = 2.0*samples[i]['activation']-1.0\n\n h = self._create_transformation(input,\n n_output,\n reuse=reuse,\n scope_prefix='p_%d' % i)\n\n # Predict on the lower half of the image\n logP = U.binary_log_likelihood(tf.split(self._x,\n num_or_size_splits=2,\n axis=1)[1],\n h + np.split(self.train_bias, 2, 0)[1])\n\n self.run_generator_network = True\n return logP, logP\n\n\n def _create_loss(self):\n # Hard loss\n logQHard, samples = self._recognition_network()\n reinforce_learning_signal, reinforce_model_grad = self._generator_network(samples, logQHard)\n logQHard = tf.add_n(logQHard)\n\n # REINFORCE\n learning_signal = tf.stop_gradient(U.center(reinforce_learning_signal))\n self.optimizerLoss = -(learning_signal*logQHard +\n reinforce_model_grad)\n self.lHat = map(tf.reduce_mean, [\n reinforce_learning_signal,\n U.rms(learning_signal),\n ])\n\n return reinforce_learning_signal\n\n def _reshape(self, t):\n return tf.transpose(tf.reshape(t,\n [self.n_samples, -1]))\n\n\n def compute_tensor_variance(self, t):\n \"\"\"Compute the mean per component variance.\n\n Use a moving average to estimate the required moments.\n \"\"\"\n t_sq = tf.reduce_mean(tf.square(t))\n self.maintain_ema_ops.append(self.ema.apply([t, t_sq]))\n\n # mean per component variance\n variance_estimator = (self.ema.average(t_sq) -\n tf.reduce_mean(\n tf.square(self.ema.average(t))))\n\n return variance_estimator\n\n def _create_train_op(self, grads_and_vars, extra_grads_and_vars=[]):\n '''\n Args:\n grads_and_vars: gradients to apply and compute running average variance\n extra_grads_and_vars: gradients to apply (not used to compute average variance)\n '''\n # Variance summaries\n first_moment = U.vectorize(grads_and_vars, skip_none=True)\n second_moment = tf.square(first_moment)\n self.maintain_ema_ops.append(self.ema.apply([first_moment, second_moment]))\n\n # Add baseline losses\n if len(self.baseline_loss) > 0:\n mean_baseline_loss = tf.reduce_mean(tf.add_n(self.baseline_loss))\n extra_grads_and_vars += self.optimizer_class.compute_gradients(\n mean_baseline_loss,\n var_list=tf.get_collection('BASELINE'))\n\n # Ensure that all 
required tensors are computed before updates are executed\n extra_optimizer = tf.train.AdamOptimizer(\n learning_rate=10*self.hparams.learning_rate,\n beta2=self.hparams.beta2)\n with tf.control_dependencies(\n [tf.group(*[g for g, _ in (grads_and_vars + extra_grads_and_vars) if g is not None])]):\n\n # Filter out the P_COLLECTION variables if we're in eval mode\n if self.eval_mode:\n grads_and_vars = [(g, v) for g, v in grads_and_vars\n if v not in tf.get_collection(P_COLLECTION)]\n\n train_op = self.optimizer_class.apply_gradients(grads_and_vars,\n global_step=self.global_step)\n\n if len(extra_grads_and_vars) > 0:\n extra_train_op = extra_optimizer.apply_gradients(extra_grads_and_vars)\n else:\n extra_train_op = tf.no_op()\n\n self.optimizer = tf.group(train_op, extra_train_op, *self.maintain_ema_ops)\n\n # per parameter variance\n variance_estimator = (self.ema.average(second_moment) -\n tf.square(self.ema.average(first_moment)))\n self.grad_variance = tf.reduce_mean(variance_estimator)\n\n def _create_network(self):\n logF = self._create_loss()\n self.optimizerLoss = tf.reduce_mean(self.optimizerLoss)\n\n # Setup optimizer\n grads_and_vars = self.optimizer_class.compute_gradients(self.optimizerLoss)\n self._create_train_op(grads_and_vars)\n\n # Create IWAE lower bound for evaluation\n self.logF = self._reshape(logF)\n self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -\n tf.log(tf.to_float(self.n_samples)))\n\n def partial_fit(self, X, n_samples=1):\n if hasattr(self, 'grad_variances'):\n grad_variance_field_to_return = self.grad_variances\n else:\n grad_variance_field_to_return = self.grad_variance\n _, res, grad_variance, step, temperature = self.sess.run(\n (self.optimizer, self.lHat, grad_variance_field_to_return, self.global_step, self.temperature_variable),\n feed_dict={self.x: X, self.n_samples: n_samples})\n return res, grad_variance, step, temperature\n\n def partial_grad(self, X, n_samples=1):\n control_variate_grads, step = self.sess.run(\n (self.control_variate_grads, self.global_step),\n feed_dict={self.x: X, self.n_samples: n_samples})\n return control_variate_grads, step\n\n def partial_eval(self, X, n_samples=5):\n if n_samples < 1000:\n res, iwae = self.sess.run(\n (self.lHat, self.iwae),\n feed_dict={self.x: X, self.n_samples: n_samples})\n res = [iwae] + res\n else: # special case to handle OOM\n assert n_samples % 100 == 0, \"When using large # of samples, it must be divisble by 100\"\n res = []\n for i in xrange(int(n_samples/100)):\n logF, = self.sess.run(\n (self.logF,),\n feed_dict={self.x: X, self.n_samples: 100})\n res.append(logsumexp(logF, axis=1))\n res = [np.mean(logsumexp(res, axis=0) - np.log(n_samples))]\n return res\n\n\n # Random samplers\n def _mean_sample(self, log_alpha, _, layer):\n \"\"\"Returns mean of random variables parameterized by log_alpha.\"\"\"\n mu = tf.nn.sigmoid(log_alpha)\n return {\n 'preactivation': mu,\n 'activation': mu,\n 'log_param': log_alpha,\n }\n\n def _generate_randomness(self):\n for i in xrange(self.hparams.n_layer):\n self.uniform_samples[i] = tf.stop_gradient(tf.random_uniform(\n [self.batch_size, self.hparams.n_hidden]))\n\n def _u_to_v(self, log_alpha, u, eps = 1e-8):\n \"\"\"Convert u to tied randomness in v.\"\"\"\n u_prime = tf.nn.sigmoid(-log_alpha) # g(u') = 0\n\n v_1 = (u - u_prime) / tf.clip_by_value(1 - u_prime, eps, 1)\n v_1 = tf.clip_by_value(v_1, 0, 1)\n v_1 = tf.stop_gradient(v_1)\n v_1 = v_1*(1 - u_prime) + u_prime\n v_0 = u / tf.clip_by_value(u_prime, eps, 1)\n v_0 = tf.clip_by_value(v_0, 0, 
1)\n v_0 = tf.stop_gradient(v_0)\n v_0 = v_0 * u_prime\n\n v = tf.where(u > u_prime, v_1, v_0)\n v = tf.check_numerics(v, 'v sampling is not numerically stable.')\n v = v + tf.stop_gradient(-v + u) # v and u are the same up to numerical errors\n\n return v\n\n def _random_sample(self, log_alpha, u, layer):\n \"\"\"Returns sampled random variables parameterized by log_alpha.\"\"\"\n # Generate tied randomness for later\n if layer not in self.uniform_samples_v:\n self.uniform_samples_v[layer] = self._u_to_v(log_alpha, u)\n\n # Sample random variable underlying softmax/argmax\n x = log_alpha + U.safe_log_prob(u) - U.safe_log_prob(1 - u)\n samples = tf.stop_gradient(tf.to_float(x > 0))\n\n return {\n 'preactivation': x,\n 'activation': samples,\n 'log_param': log_alpha,\n }\n\n def _random_sample_soft(self, log_alpha, u, layer, temperature=None):\n \"\"\"Returns sampled random variables parameterized by log_alpha.\"\"\"\n if temperature is None:\n temperature = self.hparams.temperature\n\n # Sample random variable underlying softmax/argmax\n x = log_alpha + U.safe_log_prob(u) - U.safe_log_prob(1 - u)\n x /= tf.expand_dims(temperature, -1)\n\n if self.hparams.muprop_relaxation:\n y = tf.nn.sigmoid(x + log_alpha * tf.expand_dims(temperature/(temperature + 1), -1))\n else:\n y = tf.nn.sigmoid(x)\n\n return {\n 'preactivation': x,\n 'activation': y,\n 'log_param': log_alpha\n }\n\n def _random_sample_soft_v(self, log_alpha, _, layer, temperature=None):\n \"\"\"Returns sampled random variables parameterized by log_alpha.\"\"\"\n v = self.uniform_samples_v[layer]\n\n return self._random_sample_soft(log_alpha, v, layer, temperature)\n\n def get_gumbel_gradient(self):\n logQ, softSamples = self._recognition_network(sampler=self._random_sample_soft)\n logQ = tf.add_n(logQ)\n logPPrior, logP = self._generator_network(softSamples)\n\n softELBO = logPPrior + logP - logQ\n gumbel_gradient = (self.optimizer_class.\n compute_gradients(softELBO))\n debug = {\n 'softELBO': softELBO,\n }\n\n return gumbel_gradient, debug\n\n # samplers used for quadratic version\n def _random_sample_switch(self, log_alpha, u, layer, switch_layer, temperature=None):\n \"\"\"Run partial discrete, then continuous path.\n\n Args:\n switch_layer: this layer and beyond will be continuous\n \"\"\"\n if layer < switch_layer:\n return self._random_sample(log_alpha, u, layer)\n else:\n return self._random_sample_soft(log_alpha, u, layer, temperature)\n\n def _random_sample_switch_v(self, log_alpha, u, layer, switch_layer, temperature=None):\n \"\"\"Run partial discrete, then continuous path.\n\n Args:\n switch_layer: this layer and beyond will be continuous\n \"\"\"\n if layer < switch_layer:\n return self._random_sample(log_alpha, u, layer)\n else:\n return self._random_sample_soft_v(log_alpha, u, layer, temperature)\n\n\n # #####\n # Gradient computation\n # #####\n def get_nvil_gradient(self):\n \"\"\"Compute the NVIL gradient.\"\"\"\n # Hard loss\n logQHard, samples = self._recognition_network()\n ELBO, reinforce_model_grad = self._generator_network(samples, logQHard)\n logQHard = tf.add_n(logQHard)\n\n # Add baselines (no variance normalization)\n learning_signal = tf.stop_gradient(ELBO) - self._create_baseline()\n\n # Set up losses\n self.baseline_loss.append(tf.square(learning_signal))\n optimizerLoss = -(tf.stop_gradient(learning_signal)*logQHard +\n reinforce_model_grad)\n optimizerLoss = tf.reduce_mean(optimizerLoss)\n\n nvil_gradient = self.optimizer_class.compute_gradients(optimizerLoss)\n debug = {\n 'ELBO': ELBO,\n 'RMS of 
centered learning signal': U.rms(learning_signal),\n }\n\n return nvil_gradient, debug\n\n\n def get_simple_muprop_gradient(self):\n \"\"\" Computes the simple muprop gradient.\n\n This muprop control variate does not include the linear term.\n \"\"\"\n # Hard loss\n logQHard, hardSamples = self._recognition_network()\n hardELBO, reinforce_model_grad = self._generator_network(hardSamples, logQHard)\n\n # Soft loss\n logQ, muSamples = self._recognition_network(sampler=self._mean_sample)\n muELBO, _ = self._generator_network(muSamples, logQ)\n\n scaling_baseline = self._create_eta(collection='BASELINE')\n learning_signal = (hardELBO\n - scaling_baseline * muELBO\n - self._create_baseline())\n self.baseline_loss.append(tf.square(learning_signal))\n\n optimizerLoss = -(tf.stop_gradient(learning_signal) * tf.add_n(logQHard)\n + reinforce_model_grad)\n optimizerLoss = tf.reduce_mean(optimizerLoss)\n\n simple_muprop_gradient = (self.optimizer_class.\n compute_gradients(optimizerLoss))\n debug = {\n 'ELBO': hardELBO,\n 'muELBO': muELBO,\n 'RMS': U.rms(learning_signal),\n }\n\n return simple_muprop_gradient, debug\n\n def get_muprop_gradient(self):\n \"\"\"\n random sample function that actually returns mean\n new forward pass that returns logQ as a list\n\n can get x_i from samples\n \"\"\"\n\n # Hard loss\n logQHard, hardSamples = self._recognition_network()\n hardELBO, reinforce_model_grad = self._generator_network(hardSamples, logQHard)\n\n # Soft loss\n logQ, muSamples = self._recognition_network(sampler=self._mean_sample)\n muELBO, _ = self._generator_network(muSamples, logQ)\n\n # Compute gradients\n muELBOGrads = tf.gradients(tf.reduce_sum(muELBO),\n [ muSamples[i]['activation'] for\n i in xrange(self.hparams.n_layer) ])\n\n # Compute MuProp gradient estimates\n learning_signal = hardELBO\n optimizerLoss = 0.0\n learning_signals = []\n for i in xrange(self.hparams.n_layer):\n dfDiff = tf.reduce_sum(\n muELBOGrads[i] * (hardSamples[i]['activation'] -\n muSamples[i]['activation']),\n axis=1)\n dfMu = tf.reduce_sum(\n tf.stop_gradient(muELBOGrads[i]) *\n tf.nn.sigmoid(hardSamples[i]['log_param']),\n axis=1)\n\n scaling_baseline_0 = self._create_eta(collection='BASELINE')\n scaling_baseline_1 = self._create_eta(collection='BASELINE')\n learning_signals.append(learning_signal - scaling_baseline_0 * muELBO - scaling_baseline_1 * dfDiff - self._create_baseline())\n self.baseline_loss.append(tf.square(learning_signals[i]))\n\n optimizerLoss += (\n logQHard[i] * tf.stop_gradient(learning_signals[i]) +\n tf.stop_gradient(scaling_baseline_1) * dfMu)\n optimizerLoss += reinforce_model_grad\n optimizerLoss *= -1\n\n optimizerLoss = tf.reduce_mean(optimizerLoss)\n\n muprop_gradient = self.optimizer_class.compute_gradients(optimizerLoss)\n debug = {\n 'ELBO': hardELBO,\n 'muELBO': muELBO,\n }\n\n debug.update(dict([\n ('RMS learning signal layer %d' % i, U.rms(learning_signal))\n for (i, learning_signal) in enumerate(learning_signals)]))\n\n return muprop_gradient, debug\n\n # REBAR gradient helper functions\n def _create_gumbel_control_variate(self, logQHard, temperature=None):\n '''Calculate gumbel control variate.\n '''\n if temperature is None:\n temperature = self.hparams.temperature\n\n logQ, softSamples = self._recognition_network(sampler=functools.partial(\n self._random_sample_soft, temperature=temperature))\n softELBO, _ = self._generator_network(softSamples, logQ)\n logQ = tf.add_n(logQ)\n\n # Generate the softELBO_v (should be the same value but different grads)\n logQ_v, softSamples_v = 
self._recognition_network(sampler=functools.partial(\n self._random_sample_soft_v, temperature=temperature))\n softELBO_v, _ = self._generator_network(softSamples_v, logQ_v)\n logQ_v = tf.add_n(logQ_v)\n\n # Compute losses\n learning_signal = tf.stop_gradient(softELBO_v)\n\n # Control variate\n h = (tf.stop_gradient(learning_signal) * tf.add_n(logQHard)\n - softELBO + softELBO_v)\n\n extra = (softELBO_v, -softELBO + softELBO_v)\n\n return h, extra\n\n def _create_gumbel_control_variate_quadratic(self, logQHard, temperature=None):\n '''Calculate gumbel control variate.\n '''\n if temperature is None:\n temperature = self.hparams.temperature\n\n h = 0\n extra = []\n for layer in xrange(self.hparams.n_layer):\n logQ, softSamples = self._recognition_network(sampler=functools.partial(\n self._random_sample_switch, switch_layer=layer, temperature=temperature))\n softELBO, _ = self._generator_network(softSamples, logQ)\n\n # Generate the softELBO_v (should be the same value but different grads)\n logQ_v, softSamples_v = self._recognition_network(sampler=functools.partial(\n self._random_sample_switch_v, switch_layer=layer, temperature=temperature))\n softELBO_v, _ = self._generator_network(softSamples_v, logQ_v)\n\n # Compute losses\n learning_signal = tf.stop_gradient(softELBO_v)\n\n # Control variate\n h += (tf.stop_gradient(learning_signal) * logQHard[layer]\n - softELBO + softELBO_v)\n\n extra.append((softELBO_v, -softELBO + softELBO_v))\n\n return h, extra\n\n def _create_hard_elbo(self):\n logQHard, hardSamples = self._recognition_network()\n hardELBO, reinforce_model_grad = self._generator_network(hardSamples, logQHard)\n reinforce_learning_signal = tf.stop_gradient(hardELBO)\n\n # Center learning signal\n baseline = self._create_baseline(collection='CV')\n reinforce_learning_signal = tf.stop_gradient(reinforce_learning_signal) - baseline\n\n nvil_gradient = (tf.stop_gradient(hardELBO) - baseline) * tf.add_n(logQHard) + reinforce_model_grad\n\n return hardELBO, nvil_gradient, logQHard\n\n def multiply_by_eta(self, h_grads, eta):\n # Modifies eta\n res = []\n eta_statistics = []\n for (g, v) in h_grads:\n if g is None:\n res.append((g, v))\n else:\n if 'network' not in eta:\n eta['network'] = self._create_eta()\n res.append((g*eta['network'], v))\n eta_statistics.append(eta['network'])\n\n return res, eta_statistics\n\n def multiply_by_eta_per_layer(self, h_grads, eta):\n # Modifies eta\n res = []\n eta_statistics = []\n for (g, v) in h_grads:\n if g is None:\n res.append((g, v))\n else:\n if v not in eta:\n eta[v] = self._create_eta()\n res.append((g*eta[v], v))\n eta_statistics.append(eta[v])\n\n return res, eta_statistics\n\n def multiply_by_eta_per_unit(self, h_grads, eta):\n # Modifies eta\n res = []\n eta_statistics = []\n for (g, v) in h_grads:\n if g is None:\n res.append((g, v))\n else:\n if v not in eta:\n g_shape = g.shape_as_list()\n assert len(g_shape) <= 2, 'Gradient has too many dimensions'\n if len(g_shape) == 1:\n eta[v] = self._create_eta(g_shape)\n else:\n eta[v] = self._create_eta([1, g_shape[1]])\n h_grads.append((g*eta[v], v))\n eta_statistics.extend(tf.nn.moments(tf.squeeze(eta[v]), axes=[0]))\n return res, eta_statistics\n\n def get_dynamic_rebar_gradient(self):\n \"\"\"Get the dynamic rebar gradient (t, eta optimized).\"\"\"\n tiled_pre_temperature = tf.tile([self.pre_temperature_variable],\n [self.batch_size])\n temperature = tf.exp(tiled_pre_temperature)\n\n hardELBO, nvil_gradient, logQHard = self._create_hard_elbo()\n if self.hparams.quadratic:\n gumbel_cv, 
extra = self._create_gumbel_control_variate_quadratic(logQHard, temperature=temperature)\n else:\n gumbel_cv, extra = self._create_gumbel_control_variate(logQHard, temperature=temperature)\n\n f_grads = self.optimizer_class.compute_gradients(tf.reduce_mean(-nvil_gradient))\n\n eta = {}\n h_grads, eta_statistics = self.multiply_by_eta_per_layer(\n self.optimizer_class.compute_gradients(tf.reduce_mean(gumbel_cv)),\n eta)\n\n model_grads = U.add_grads_and_vars(f_grads, h_grads)\n total_grads = model_grads\n\n # Construct the variance objective\n g = U.vectorize(model_grads, set_none_to_zero=True)\n self.maintain_ema_ops.append(self.ema.apply([g]))\n gbar = 0 #tf.stop_gradient(self.ema.average(g))\n variance_objective = tf.reduce_mean(tf.square(g - gbar))\n\n reinf_g_t = 0\n if self.hparams.quadratic:\n for layer in xrange(self.hparams.n_layer):\n gumbel_learning_signal, _ = extra[layer]\n df_dt = tf.gradients(gumbel_learning_signal, tiled_pre_temperature)[0]\n reinf_g_t_i, _ = self.multiply_by_eta_per_layer(\n self.optimizer_class.compute_gradients(tf.reduce_mean(tf.stop_gradient(df_dt) * logQHard[layer])),\n eta)\n reinf_g_t += U.vectorize(reinf_g_t_i, set_none_to_zero=True)\n\n reparam = tf.add_n([reparam_i for _, reparam_i in extra])\n else:\n gumbel_learning_signal, reparam = extra\n df_dt = tf.gradients(gumbel_learning_signal, tiled_pre_temperature)[0]\n reinf_g_t, _ = self.multiply_by_eta_per_layer(\n self.optimizer_class.compute_gradients(tf.reduce_mean(tf.stop_gradient(df_dt) * tf.add_n(logQHard))),\n eta)\n reinf_g_t = U.vectorize(reinf_g_t, set_none_to_zero=True)\n\n reparam_g, _ = self.multiply_by_eta_per_layer(\n self.optimizer_class.compute_gradients(tf.reduce_mean(reparam)),\n eta)\n reparam_g = U.vectorize(reparam_g, set_none_to_zero=True)\n reparam_g_t = tf.gradients(tf.reduce_mean(2*tf.stop_gradient(g - gbar)*reparam_g), self.pre_temperature_variable)[0]\n\n variance_objective_grad = tf.reduce_mean(2*(g - gbar)*reinf_g_t) + reparam_g_t\n\n debug = { 'ELBO': hardELBO,\n 'etas': eta_statistics,\n 'variance_objective': variance_objective,\n }\n return total_grads, debug, variance_objective, variance_objective_grad\n\n def get_rebar_gradient(self):\n \"\"\"Get the rebar gradient.\"\"\"\n hardELBO, nvil_gradient, logQHard = self._create_hard_elbo()\n if self.hparams.quadratic:\n gumbel_cv, _ = self._create_gumbel_control_variate_quadratic(logQHard)\n else:\n gumbel_cv, _ = self._create_gumbel_control_variate(logQHard)\n\n f_grads = self.optimizer_class.compute_gradients(tf.reduce_mean(-nvil_gradient))\n\n eta = {}\n h_grads, eta_statistics = self.multiply_by_eta_per_layer(\n self.optimizer_class.compute_gradients(tf.reduce_mean(gumbel_cv)),\n eta)\n\n model_grads = U.add_grads_and_vars(f_grads, h_grads)\n total_grads = model_grads\n\n # Construct the variance objective\n variance_objective = tf.reduce_mean(tf.square(U.vectorize(model_grads, set_none_to_zero=True)))\n\n debug = { 'ELBO': hardELBO,\n 'etas': eta_statistics,\n 'variance_objective': variance_objective,\n }\n return total_grads, debug, variance_objective\n\n###\n# Create varaints\n###\nclass SBNSimpleMuProp(SBN):\n def _create_loss(self):\n simple_muprop_gradient, debug = self.get_simple_muprop_gradient()\n\n self.lHat = map(tf.reduce_mean, [\n debug['ELBO'],\n debug['muELBO'],\n ])\n\n return debug['ELBO'], simple_muprop_gradient\n\n def _create_network(self):\n logF, loss_grads = self._create_loss()\n self._create_train_op(loss_grads)\n\n # Create IWAE lower bound for evaluation\n self.logF = self._reshape(logF)\n 
self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -\n tf.log(tf.to_float(self.n_samples)))\n\nclass SBNMuProp(SBN):\n def _create_loss(self):\n muprop_gradient, debug = self.get_muprop_gradient()\n\n self.lHat = map(tf.reduce_mean, [\n debug['ELBO'],\n debug['muELBO'],\n ])\n\n return debug['ELBO'], muprop_gradient\n\n def _create_network(self):\n logF, loss_grads = self._create_loss()\n self._create_train_op(loss_grads)\n\n # Create IWAE lower bound for evaluation\n self.logF = self._reshape(logF)\n self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -\n tf.log(tf.to_float(self.n_samples)))\n\n\nclass SBNNVIL(SBN):\n def _create_loss(self):\n nvil_gradient, debug = self.get_nvil_gradient()\n\n self.lHat = map(tf.reduce_mean, [\n debug['ELBO'],\n ])\n\n return debug['ELBO'], nvil_gradient\n\n def _create_network(self):\n logF, loss_grads = self._create_loss()\n self._create_train_op(loss_grads)\n\n # Create IWAE lower bound for evaluation\n self.logF = self._reshape(logF)\n self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -\n tf.log(tf.to_float(self.n_samples)))\n\n\nclass SBNRebar(SBN):\n def _create_loss(self):\n rebar_gradient, debug, variance_objective = self.get_rebar_gradient()\n\n self.lHat = map(tf.reduce_mean, [\n debug['ELBO'],\n ])\n self.lHat.extend(map(tf.reduce_mean, debug['etas']))\n\n return debug['ELBO'], rebar_gradient, variance_objective\n\n def _create_network(self):\n logF, loss_grads, variance_objective = self._create_loss()\n\n # Create additional updates for control variates and temperature\n eta_grads = (self.optimizer_class.compute_gradients(variance_objective,\n var_list=tf.get_collection('CV')))\n\n self._create_train_op(loss_grads, eta_grads)\n\n # Create IWAE lower bound for evaluation\n self.logF = self._reshape(logF)\n self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -\n tf.log(tf.to_float(self.n_samples)))\n\nclass SBNDynamicRebar(SBN):\n def _create_loss(self):\n rebar_gradient, debug, variance_objective, variance_objective_grad = self.get_dynamic_rebar_gradient()\n\n self.lHat = map(tf.reduce_mean, [\n debug['ELBO'],\n self.temperature_variable,\n ])\n self.lHat.extend(debug['etas'])\n\n return debug['ELBO'], rebar_gradient, variance_objective, variance_objective_grad\n\n def _create_network(self):\n logF, loss_grads, variance_objective, variance_objective_grad = self._create_loss()\n\n # Create additional updates for control variates and temperature\n eta_grads = (self.optimizer_class.compute_gradients(variance_objective,\n var_list=tf.get_collection('CV'))\n + [(variance_objective_grad, self.pre_temperature_variable)])\n\n self._create_train_op(loss_grads, eta_grads)\n\n # Create IWAE lower bound for evaluation\n self.logF = self._reshape(logF)\n self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -\n tf.log(tf.to_float(self.n_samples)))\n\n\nclass SBNTrackGradVariances(SBN):\n \"\"\"Follow NVIL, compute gradient variances for NVIL, MuProp and REBAR.\"\"\"\n def compute_gradient_moments(self, grads_and_vars):\n first_moment = U.vectorize(grads_and_vars, set_none_to_zero=True)\n second_moment = tf.square(first_moment)\n self.maintain_ema_ops.append(self.ema.apply([first_moment, second_moment]))\n\n return self.ema.average(first_moment), self.ema.average(second_moment)\n\n def _create_loss(self):\n self.losses = [\n ('NVIL', self.get_nvil_gradient),\n ('SimpleMuProp', self.get_simple_muprop_gradient),\n ('MuProp', self.get_muprop_gradient),\n ]\n\n moments = []\n for k, v in self.losses:\n print(k)\n gradient, debug = 
v()\n if k == 'SimpleMuProp':\n ELBO = debug['ELBO']\n gradient_to_follow = gradient\n\n moments.append(self.compute_gradient_moments(\n gradient))\n\n self.losses.append(('DynamicREBAR', self.get_dynamic_rebar_gradient))\n dynamic_rebar_gradient, _, variance_objective, variance_objective_grad = self.get_dynamic_rebar_gradient()\n moments.append(self.compute_gradient_moments(dynamic_rebar_gradient))\n\n self.losses.append(('REBAR', self.get_rebar_gradient))\n rebar_gradient, _, variance_objective2 = self.get_rebar_gradient()\n moments.append(self.compute_gradient_moments(rebar_gradient))\n\n mu = tf.reduce_mean(tf.stack([f for f, _ in moments]), axis=0)\n self.grad_variances = []\n deviations = []\n for f, s in moments:\n self.grad_variances.append(tf.reduce_mean(s - tf.square(mu)))\n deviations.append(tf.reduce_mean(tf.square(f - mu)))\n\n self.lHat = map(tf.reduce_mean, [\n ELBO,\n self.temperature_variable,\n variance_objective_grad,\n variance_objective_grad*variance_objective_grad,\n ])\n self.lHat.extend(deviations)\n self.lHat.append(tf.log(tf.reduce_mean(mu*mu)))\n # self.lHat.extend(map(tf.log, grad_variances))\n\n return ELBO, gradient_to_follow, variance_objective + variance_objective2, variance_objective_grad\n\n def _create_network(self):\n logF, loss_grads, variance_objective, variance_objective_grad = self._create_loss()\n eta_grads = (self.optimizer_class.compute_gradients(variance_objective,\n var_list=tf.get_collection('CV'))\n + [(variance_objective_grad, self.pre_temperature_variable)])\n self._create_train_op(loss_grads, eta_grads)\n\n # Create IWAE lower bound for evaluation\n self.logF = self._reshape(logF)\n self.iwae = tf.reduce_mean(U.logSumExp(self.logF, axis=1) -\n tf.log(tf.to_float(self.n_samples)))\n\n\nclass SBNGumbel(SBN):\n def _random_sample_soft(self, log_alpha, u, layer, temperature=None):\n \"\"\"Returns sampled random variables parameterized by log_alpha.\"\"\"\n if temperature is None:\n temperature = self.hparams.temperature\n\n # Sample random variable underlying softmax/argmax\n x = log_alpha + U.safe_log_prob(u) - U.safe_log_prob(1 - u)\n x /= temperature\n\n if self.hparams.muprop_relaxation:\n x += temperature/(temperature + 1)*log_alpha\n\n y = tf.nn.sigmoid(x)\n\n return {\n 'preactivation': x,\n 'activation': y,\n 'log_param': log_alpha\n }\n\n def _create_loss(self):\n # Hard loss\n logQHard, hardSamples = self._recognition_network()\n hardELBO, _ = self._generator_network(hardSamples, logQHard)\n\n logQ, softSamples = self._recognition_network(sampler=self._random_sample_soft)\n softELBO, _ = self._generator_network(softSamples, logQ)\n\n self.optimizerLoss = -softELBO\n self.lHat = map(tf.reduce_mean, [\n hardELBO,\n softELBO,\n ])\n\n return hardELBO\n\ndefault_hparams = tf.contrib.training.HParams(model='SBNGumbel',\n n_hidden=200,\n n_input=784,\n n_layer=1,\n nonlinear=False,\n learning_rate=0.001,\n temperature=0.5,\n n_samples=1,\n batch_size=24,\n trial=1,\n muprop_relaxation=True,\n dynamic_b=False, # dynamic binarization\n quadratic=True,\n beta2=0.99999,\n task='sbn',\n )\n",
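For reference, here is a minimal NumPy sketch of the binary Concrete (Gumbel-Softmax) sampler that `SBNGumbel._random_sample_soft` implements above; `safe_log_prob` stands in for the `U.safe_log_prob` helper, and the toy inputs are assumptions:

import numpy as np

def safe_log_prob(p, eps=1e-8):
    # Clip away from 0 before taking the log, as U.safe_log_prob does.
    return np.log(np.clip(p, eps, 1.0))

def relaxed_bernoulli(log_alpha, u, temperature=0.5, muprop_relaxation=True):
    # Logistic-noise reparameterization underlying _random_sample_soft:
    # x = (log_alpha + log u - log(1 - u)) / temperature, y = sigmoid(x).
    x = (log_alpha + safe_log_prob(u) - safe_log_prob(1.0 - u)) / temperature
    if muprop_relaxation:
        # Extra shift applied when hparams.muprop_relaxation is set.
        x = x + temperature / (temperature + 1.0) * log_alpha
    return 1.0 / (1.0 + np.exp(-x))

rng = np.random.RandomState(0)
samples = relaxed_bernoulli(rng.randn(4), rng.uniform(size=4))
print(samples)  # values in (0, 1), approaching {0, 1} as temperature -> 0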
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Main entry to train and evaluate DeepSpeech model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n# pylint: disable=g-bad-import-order\nfrom absl import app as absl_app\nfrom absl import flags\nimport tensorflow as tf\n# pylint: enable=g-bad-import-order\n\nimport data.dataset as dataset\nimport decoder\nimport deep_speech_model\nfrom official.utils.flags import core as flags_core\nfrom official.utils.logs import hooks_helper\nfrom official.utils.logs import logger\nfrom official.utils.misc import distribution_utils\nfrom official.utils.misc import model_helpers\n\n# Default vocabulary file\n_VOCABULARY_FILE = os.path.join(\n os.path.dirname(__file__), \"data/vocabulary.txt\")\n# Evaluation metrics\n_WER_KEY = \"WER\"\n_CER_KEY = \"CER\"\n\n\ndef compute_length_after_conv(max_time_steps, ctc_time_steps, input_length):\n \"\"\"Computes the time_steps/ctc_input_length after convolution.\n\n Suppose that the original feature contains two parts:\n 1) Real spectrogram signals, spanning input_length steps.\n 2) Padded part with all 0s.\n The total length of those two parts is denoted as max_time_steps, which is\n the padded length of the current batch. After convolution layers, the time\n steps of a spectrogram feature will be decreased. 
As we know the percentage\n of its original length within the entire length, we can compute the time steps\n for the signal after conv as follows (using ctc_input_length to denote):\n ctc_input_length = (input_length / max_time_steps) * output_length_of_conv.\n This length is then fed into ctc loss function to compute loss.\n\n Args:\n max_time_steps: max_time_steps for the batch, after padding.\n ctc_time_steps: number of timesteps after convolution.\n input_length: actual length of the original spectrogram, without padding.\n\n Returns:\n the ctc_input_length after convolution layer.\n \"\"\"\n ctc_input_length = tf.to_float(tf.multiply(\n input_length, ctc_time_steps))\n return tf.to_int32(tf.floordiv(\n ctc_input_length, tf.to_float(max_time_steps)))\n\n\ndef ctc_loss(label_length, ctc_input_length, labels, logits):\n \"\"\"Computes the ctc loss for the current batch of predictions.\"\"\"\n label_length = tf.to_int32(tf.squeeze(label_length))\n ctc_input_length = tf.to_int32(tf.squeeze(ctc_input_length))\n sparse_labels = tf.to_int32(\n tf.keras.backend.ctc_label_dense_to_sparse(labels, label_length))\n y_pred = tf.log(tf.transpose(\n logits, perm=[1, 0, 2]) + tf.keras.backend.epsilon())\n\n return tf.expand_dims(\n tf.nn.ctc_loss(labels=sparse_labels, inputs=y_pred,\n sequence_length=ctc_input_length),\n axis=1)\n\n\ndef evaluate_model(estimator, speech_labels, entries, input_fn_eval):\n \"\"\"Evaluate the model performance using WER anc CER as metrics.\n\n WER: Word Error Rate\n CER: Character Error Rate\n\n Args:\n estimator: estimator to evaluate.\n speech_labels: a string specifying all the character in the vocabulary.\n entries: a list of data entries (audio_file, file_size, transcript) for the\n given dataset.\n input_fn_eval: data input function for evaluation.\n\n Returns:\n Evaluation result containing 'wer' and 'cer' as two metrics.\n \"\"\"\n # Get predictions\n predictions = estimator.predict(input_fn=input_fn_eval)\n\n # Get probabilities of each predicted class\n probs = [pred[\"probabilities\"] for pred in predictions]\n\n num_of_examples = len(probs)\n targets = [entry[2] for entry in entries] # The ground truth transcript\n\n total_wer, total_cer = 0, 0\n greedy_decoder = decoder.DeepSpeechDecoder(speech_labels)\n for i in range(num_of_examples):\n # Decode string.\n decoded_str = greedy_decoder.decode(probs[i])\n # Compute CER.\n total_cer += greedy_decoder.cer(decoded_str, targets[i]) / float(\n len(targets[i]))\n # Compute WER.\n total_wer += greedy_decoder.wer(decoded_str, targets[i]) / float(\n len(targets[i].split()))\n\n # Get mean value\n total_cer /= num_of_examples\n total_wer /= num_of_examples\n\n global_step = estimator.get_variable_value(tf.GraphKeys.GLOBAL_STEP)\n eval_results = {\n _WER_KEY: total_wer,\n _CER_KEY: total_cer,\n tf.GraphKeys.GLOBAL_STEP: global_step,\n }\n\n return eval_results\n\n\ndef model_fn(features, labels, mode, params):\n \"\"\"Define model function for deep speech model.\n\n Args:\n features: a dictionary of input_data features. 
It includes the data\n input_length, label_length and the spectrogram features.\n labels: a list of labels for the input data.\n mode: current estimator mode; should be one of\n `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`.\n params: a dict of hyper parameters to be passed to model_fn.\n\n Returns:\n EstimatorSpec parameterized according to the input params and the\n current mode.\n \"\"\"\n num_classes = params[\"num_classes\"]\n input_length = features[\"input_length\"]\n label_length = features[\"label_length\"]\n features = features[\"features\"]\n\n # Create DeepSpeech2 model.\n model = deep_speech_model.DeepSpeech2(\n flags_obj.rnn_hidden_layers, flags_obj.rnn_type,\n flags_obj.is_bidirectional, flags_obj.rnn_hidden_size,\n num_classes, flags_obj.use_bias)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n logits = model(features, training=False)\n predictions = {\n \"classes\": tf.argmax(logits, axis=2),\n \"probabilities\": tf.nn.softmax(logits),\n \"logits\": logits\n }\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions)\n\n # In training mode.\n logits = model(features, training=True)\n probs = tf.nn.softmax(logits)\n ctc_input_length = compute_length_after_conv(\n tf.shape(features)[1], tf.shape(probs)[1], input_length)\n # Compute CTC loss\n loss = tf.reduce_mean(ctc_loss(\n label_length, ctc_input_length, labels, probs))\n\n optimizer = tf.train.AdamOptimizer(learning_rate=flags_obj.learning_rate)\n global_step = tf.train.get_or_create_global_step()\n minimize_op = optimizer.minimize(loss, global_step=global_step)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n # Create the train_op that groups both minimize_ops and update_ops\n train_op = tf.group(minimize_op, update_ops)\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op)\n\n\ndef generate_dataset(data_dir):\n \"\"\"Generate a speech dataset.\"\"\"\n audio_conf = dataset.AudioConfig(sample_rate=flags_obj.sample_rate,\n window_ms=flags_obj.window_ms,\n stride_ms=flags_obj.stride_ms,\n normalize=True)\n train_data_conf = dataset.DatasetConfig(\n audio_conf,\n data_dir,\n flags_obj.vocabulary_file,\n flags_obj.sortagrad\n )\n speech_dataset = dataset.DeepSpeechDataset(train_data_conf)\n return speech_dataset\n\n\ndef run_deep_speech(_):\n \"\"\"Run deep speech training and eval loop.\"\"\"\n tf.set_random_seed(flags_obj.seed)\n # Data preprocessing\n tf.logging.info(\"Data preprocessing...\")\n train_speech_dataset = generate_dataset(flags_obj.train_data_dir)\n eval_speech_dataset = generate_dataset(flags_obj.eval_data_dir)\n\n # Number of label classes. 
Label string is \"[a-z]' -\"\n num_classes = len(train_speech_dataset.speech_labels)\n\n # Use distribution strategy for multi-gpu training\n num_gpus = flags_core.get_num_gpus(flags_obj)\n distribution_strategy = distribution_utils.get_distribution_strategy(num_gpus=num_gpus)\n run_config = tf.estimator.RunConfig(\n train_distribute=distribution_strategy)\n\n estimator = tf.estimator.Estimator(\n model_fn=model_fn,\n model_dir=flags_obj.model_dir,\n config=run_config,\n params={\n \"num_classes\": num_classes,\n }\n )\n\n # Benchmark logging\n run_params = {\n \"batch_size\": flags_obj.batch_size,\n \"train_epochs\": flags_obj.train_epochs,\n \"rnn_hidden_size\": flags_obj.rnn_hidden_size,\n \"rnn_hidden_layers\": flags_obj.rnn_hidden_layers,\n \"rnn_type\": flags_obj.rnn_type,\n \"is_bidirectional\": flags_obj.is_bidirectional,\n \"use_bias\": flags_obj.use_bias\n }\n\n dataset_name = \"LibriSpeech\"\n benchmark_logger = logger.get_benchmark_logger()\n benchmark_logger.log_run_info(\"deep_speech\", dataset_name, run_params,\n test_id=flags_obj.benchmark_test_id)\n\n train_hooks = hooks_helper.get_train_hooks(\n flags_obj.hooks,\n model_dir=flags_obj.model_dir,\n batch_size=flags_obj.batch_size)\n\n per_replica_batch_size = distribution_utils.per_replica_batch_size(\n flags_obj.batch_size, num_gpus)\n\n def input_fn_train():\n return dataset.input_fn(\n per_replica_batch_size, train_speech_dataset)\n\n def input_fn_eval():\n return dataset.input_fn(\n per_replica_batch_size, eval_speech_dataset)\n\n total_training_cycle = (flags_obj.train_epochs //\n flags_obj.epochs_between_evals)\n for cycle_index in range(total_training_cycle):\n tf.logging.info(\"Starting a training cycle: %d/%d\",\n cycle_index + 1, total_training_cycle)\n\n # Perform batch_wise dataset shuffling\n train_speech_dataset.entries = dataset.batch_wise_dataset_shuffle(\n train_speech_dataset.entries, cycle_index, flags_obj.sortagrad,\n flags_obj.batch_size)\n\n estimator.train(input_fn=input_fn_train, hooks=train_hooks)\n\n # Evaluation\n tf.logging.info(\"Starting to evaluate...\")\n\n eval_results = evaluate_model(\n estimator, eval_speech_dataset.speech_labels,\n eval_speech_dataset.entries, input_fn_eval)\n\n # Log the WER and CER results.\n benchmark_logger.log_evaluation_result(eval_results)\n tf.logging.info(\n \"Iteration {}: WER = {:.2f}, CER = {:.2f}\".format(\n cycle_index + 1, eval_results[_WER_KEY], eval_results[_CER_KEY]))\n\n # If some evaluation threshold is met\n if model_helpers.past_stop_threshold(\n flags_obj.wer_threshold, eval_results[_WER_KEY]):\n break\n\n\ndef define_deep_speech_flags():\n \"\"\"Add flags for run_deep_speech.\"\"\"\n # Add common flags\n flags_core.define_base(\n data_dir=False, # we use train_data_dir and eval_data_dir instead\n export_dir=True,\n train_epochs=True,\n hooks=True,\n num_gpu=True,\n epochs_between_evals=True\n )\n flags_core.define_performance(\n num_parallel_calls=False,\n inter_op=False,\n intra_op=False,\n synthetic_data=False,\n max_train_steps=False,\n dtype=False\n )\n flags_core.define_benchmark()\n flags.adopt_module_key_flags(flags_core)\n\n flags_core.set_defaults(\n model_dir=\"/tmp/deep_speech_model/\",\n export_dir=\"/tmp/deep_speech_saved_model/\",\n train_epochs=10,\n batch_size=128,\n hooks=\"\")\n\n # Deep speech flags\n flags.DEFINE_integer(\n name=\"seed\", default=1,\n help=flags_core.help_wrap(\"The random seed.\"))\n\n flags.DEFINE_string(\n name=\"train_data_dir\",\n default=\"/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv\",\n 
help=flags_core.help_wrap(\"The csv file path of train dataset.\"))\n\n flags.DEFINE_string(\n name=\"eval_data_dir\",\n default=\"/tmp/librispeech_data/test-clean/LibriSpeech/test-clean.csv\",\n help=flags_core.help_wrap(\"The csv file path of evaluation dataset.\"))\n\n flags.DEFINE_bool(\n name=\"sortagrad\", default=True,\n help=flags_core.help_wrap(\n \"If true, sort examples by audio length and perform no \"\n \"batch_wise shuffling for the first epoch.\"))\n\n flags.DEFINE_integer(\n name=\"sample_rate\", default=16000,\n help=flags_core.help_wrap(\"The sample rate for audio.\"))\n\n flags.DEFINE_integer(\n name=\"window_ms\", default=20,\n help=flags_core.help_wrap(\"The frame length for spectrogram.\"))\n\n flags.DEFINE_integer(\n name=\"stride_ms\", default=10,\n help=flags_core.help_wrap(\"The frame step.\"))\n\n flags.DEFINE_string(\n name=\"vocabulary_file\", default=_VOCABULARY_FILE,\n help=flags_core.help_wrap(\"The file path of vocabulary file.\"))\n\n # RNN related flags\n flags.DEFINE_integer(\n name=\"rnn_hidden_size\", default=800,\n help=flags_core.help_wrap(\"The hidden size of RNNs.\"))\n\n flags.DEFINE_integer(\n name=\"rnn_hidden_layers\", default=5,\n help=flags_core.help_wrap(\"The number of RNN layers.\"))\n\n flags.DEFINE_bool(\n name=\"use_bias\", default=True,\n help=flags_core.help_wrap(\"Use bias in the last fully-connected layer\"))\n\n flags.DEFINE_bool(\n name=\"is_bidirectional\", default=True,\n help=flags_core.help_wrap(\"If rnn unit is bidirectional\"))\n\n flags.DEFINE_enum(\n name=\"rnn_type\", default=\"gru\",\n enum_values=deep_speech_model.SUPPORTED_RNNS.keys(),\n case_sensitive=False,\n help=flags_core.help_wrap(\"Type of RNN cell.\"))\n\n # Training related flags\n flags.DEFINE_float(\n name=\"learning_rate\", default=5e-4,\n help=flags_core.help_wrap(\"The initial learning rate.\"))\n\n # Evaluation metrics threshold\n flags.DEFINE_float(\n name=\"wer_threshold\", default=None,\n help=flags_core.help_wrap(\n \"If passed, training will stop when the evaluation metric WER is \"\n \"greater than or equal to wer_threshold. For libri speech dataset \"\n \"the desired wer_threshold is 0.23 which is the result achieved by \"\n \"MLPerf implementation.\"))\n\n\ndef main(_):\n with logger.benchmark_context(flags_obj):\n run_deep_speech(flags_obj)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n define_deep_speech_flags()\n flags_obj = flags.FLAGS\n absl_app.run(main)\n\n",
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A script to run training for sequential latent variable models.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom fivo import ghmm_runners\nfrom fivo import runners\n\n# Shared flags.\ntf.app.flags.DEFINE_enum(\"mode\", \"train\",\n [\"train\", \"eval\", \"sample\"],\n \"The mode of the binary.\")\ntf.app.flags.DEFINE_enum(\"model\", \"vrnn\",\n [\"vrnn\", \"ghmm\", \"srnn\"],\n \"Model choice.\")\ntf.app.flags.DEFINE_integer(\"latent_size\", 64,\n \"The size of the latent state of the model.\")\ntf.app.flags.DEFINE_enum(\"dataset_type\", \"pianoroll\",\n [\"pianoroll\", \"speech\", \"pose\"],\n \"The type of dataset.\")\ntf.app.flags.DEFINE_string(\"dataset_path\", \"\",\n \"Path to load the dataset from.\")\ntf.app.flags.DEFINE_integer(\"data_dimension\", None,\n \"The dimension of each vector in the data sequence. \"\n \"Defaults to 88 for pianoroll datasets and 200 for speech \"\n \"datasets. Should not need to be changed except for \"\n \"testing.\")\ntf.app.flags.DEFINE_integer(\"batch_size\", 4,\n \"Batch size.\")\ntf.app.flags.DEFINE_integer(\"num_samples\", 4,\n \"The number of samples (or particles) for multisample \"\n \"algorithms.\")\ntf.app.flags.DEFINE_string(\"logdir\", \"/tmp/smc_vi\",\n \"The directory to keep checkpoints and summaries in.\")\ntf.app.flags.DEFINE_integer(\"random_seed\", None,\n \"A random seed for seeding the TensorFlow graph.\")\ntf.app.flags.DEFINE_integer(\"parallel_iterations\", 30,\n \"The number of parallel iterations to use for the while \"\n \"loop that computes the bounds.\")\n\n# Training flags.\ntf.app.flags.DEFINE_enum(\"bound\", \"fivo\",\n [\"elbo\", \"iwae\", \"fivo\", \"fivo-aux\"],\n \"The bound to optimize.\")\ntf.app.flags.DEFINE_boolean(\"normalize_by_seq_len\", True,\n \"If true, normalize the loss by the number of timesteps \"\n \"per sequence.\")\ntf.app.flags.DEFINE_float(\"learning_rate\", 0.0002,\n \"The learning rate for ADAM.\")\ntf.app.flags.DEFINE_integer(\"max_steps\", int(1e9),\n \"The number of gradient update steps to train for.\")\ntf.app.flags.DEFINE_integer(\"summarize_every\", 50,\n \"The number of steps between summaries.\")\ntf.app.flags.DEFINE_enum(\"resampling_type\", \"multinomial\",\n [\"multinomial\", \"relaxed\"],\n \"The resampling strategy to use for training.\")\ntf.app.flags.DEFINE_float(\"relaxed_resampling_temperature\", 0.5,\n \"The relaxation temperature for relaxed resampling.\")\ntf.app.flags.DEFINE_enum(\"proposal_type\", \"filtering\",\n [\"prior\", \"filtering\", \"smoothing\",\n \"true-filtering\", \"true-smoothing\"],\n \"The type of proposal to use. true-filtering and true-smoothing \"\n \"are only available for the GHMM. 
The specific implementation \"\n \"of each proposal type is left to model-writers.\")\n\n# Distributed training flags.\ntf.app.flags.DEFINE_string(\"master\", \"\",\n \"The BNS name of the TensorFlow master to use.\")\ntf.app.flags.DEFINE_integer(\"task\", 0,\n \"Task id of the replica running the training.\")\ntf.app.flags.DEFINE_integer(\"ps_tasks\", 0,\n \"Number of tasks in the ps job. If 0 no ps job is used.\")\ntf.app.flags.DEFINE_boolean(\"stagger_workers\", True,\n \"If true, bring one worker online every 1000 steps.\")\n\n# Evaluation flags.\ntf.app.flags.DEFINE_enum(\"split\", \"train\",\n [\"train\", \"test\", \"valid\"],\n \"Split to evaluate the model on.\")\n\n# Sampling flags.\ntf.app.flags.DEFINE_integer(\"sample_length\", 50,\n \"The number of timesteps to sample for.\")\ntf.app.flags.DEFINE_integer(\"prefix_length\", 25,\n \"The number of timesteps to condition the model on \"\n \"before sampling.\")\ntf.app.flags.DEFINE_string(\"sample_out_dir\", None,\n \"The directory to write the samples to. \"\n \"Defaults to logdir.\")\n\n# GHMM flags.\ntf.app.flags.DEFINE_float(\"variance\", 0.1,\n \"The variance of the ghmm.\")\ntf.app.flags.DEFINE_integer(\"num_timesteps\", 5,\n \"The number of timesteps to run the gmp for.\")\nFLAGS = tf.app.flags.FLAGS\n\nPIANOROLL_DEFAULT_DATA_DIMENSION = 88\nSPEECH_DEFAULT_DATA_DIMENSION = 200\n\n\ndef main(unused_argv):\n tf.logging.set_verbosity(tf.logging.INFO)\n if FLAGS.model in [\"vrnn\", \"srnn\"]:\n if FLAGS.data_dimension is None:\n if FLAGS.dataset_type == \"pianoroll\":\n FLAGS.data_dimension = PIANOROLL_DEFAULT_DATA_DIMENSION\n elif FLAGS.dataset_type == \"speech\":\n FLAGS.data_dimension = SPEECH_DEFAULT_DATA_DIMENSION\n if FLAGS.mode == \"train\":\n runners.run_train(FLAGS)\n elif FLAGS.mode == \"eval\":\n runners.run_eval(FLAGS)\n elif FLAGS.mode == \"sample\":\n runners.run_sample(FLAGS)\n elif FLAGS.model == \"ghmm\":\n if FLAGS.mode == \"train\":\n ghmm_runners.run_train(FLAGS)\n elif FLAGS.mode == \"eval\":\n ghmm_runners.run_eval(FLAGS)\n\nif __name__ == \"__main__\":\n tf.app.run(main)\n",
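The only non-trivial logic in `main` is the data-dimension defaulting; a standalone sketch of that rule (the `ValueError` for the pose dataset is an assumption, the script itself simply leaves the flag unset):

PIANOROLL_DEFAULT_DATA_DIMENSION = 88
SPEECH_DEFAULT_DATA_DIMENSION = 200

def resolve_data_dimension(data_dimension, dataset_type):
    # Mirrors the defaulting in main(): pianoroll and speech have defaults,
    # while pose data requires --data_dimension to be set explicitly
    # (raising here is an assumption for illustration).
    if data_dimension is not None:
        return data_dimension
    if dataset_type == "pianoroll":
        return PIANOROLL_DEFAULT_DATA_DIMENSION
    if dataset_type == "speech":
        return SPEECH_DEFAULT_DATA_DIMENSION
    raise ValueError("--data_dimension is required for dataset_type=%r"
                     % dataset_type)

assert resolve_data_dimension(None, "speech") == 200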
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"The gradient of the icp op.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\n\n\[email protected]('Icp')\ndef _icp_grad(op, grad_transform, grad_residual):\n \"\"\"The gradients for `icp`.\n\n Args:\n op: The `icp` `Operation` that we are differentiating, which we can use\n to find the inputs and outputs of the original op.\n grad_transform: Gradient with respect to `transform` output of the `icp` op.\n grad_residual: Gradient with respect to `residual` output of the\n `icp` op.\n\n Returns:\n Gradients with respect to the inputs of `icp`.\n \"\"\"\n unused_transform = op.outputs[0]\n unused_residual = op.outputs[1]\n unused_source = op.inputs[0]\n unused_ego_motion = op.inputs[1]\n unused_target = op.inputs[2]\n\n grad_p = -grad_residual\n grad_ego_motion = -grad_transform\n\n return [grad_p, grad_ego_motion, None]\n",
"# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Task towers for PixelDA model.\"\"\"\nimport tensorflow as tf\n\nslim = tf.contrib.slim\n\n\ndef add_task_specific_model(images,\n hparams,\n num_classes=10,\n is_training=False,\n reuse_private=False,\n private_scope=None,\n reuse_shared=False,\n shared_scope=None):\n \"\"\"Create a classifier for the given images.\n\n The classifier is composed of a few 'private' layers followed by a few\n 'shared' layers. This lets us account for different image 'style', while\n sharing the last few layers as 'content' layers.\n\n Args:\n images: A `Tensor` of size [batch_size, height, width, 3].\n hparams: model hparams\n num_classes: The number of output classes.\n is_training: whether model is training\n reuse_private: Whether or not to reuse the private weights, which are the\n first few layers in the classifier\n private_scope: The name of the variable_scope for the private (unshared)\n components of the classifier.\n reuse_shared: Whether or not to reuse the shared weights, which are the last\n few layers in the classifier\n shared_scope: The name of the variable_scope for the shared components of\n the classifier.\n\n Returns:\n The logits, a `Tensor` of shape [batch_size, num_classes].\n\n Raises:\n ValueError: If hparams.task_classifier is an unknown value\n \"\"\"\n\n model = hparams.task_tower\n # Make sure the classifier name shows up in graph\n shared_scope = shared_scope or (model + '_shared')\n kwargs = {\n 'num_classes': num_classes,\n 'is_training': is_training,\n 'reuse_private': reuse_private,\n 'reuse_shared': reuse_shared,\n }\n\n if private_scope:\n kwargs['private_scope'] = private_scope\n if shared_scope:\n kwargs['shared_scope'] = shared_scope\n\n quaternion_pred = None\n with slim.arg_scope(\n [slim.conv2d, slim.fully_connected],\n activation_fn=tf.nn.relu,\n weights_regularizer=tf.contrib.layers.l2_regularizer(\n hparams.weight_decay_task_classifier)):\n with slim.arg_scope([slim.conv2d], padding='SAME'):\n if model == 'doubling_pose_estimator':\n logits, quaternion_pred = doubling_cnn_class_and_quaternion(\n images, num_private_layers=hparams.num_private_layers, **kwargs)\n elif model == 'mnist':\n logits, _ = mnist_classifier(images, **kwargs)\n elif model == 'svhn':\n logits, _ = svhn_classifier(images, **kwargs)\n elif model == 'gtsrb':\n logits, _ = gtsrb_classifier(images, **kwargs)\n elif model == 'pose_mini':\n logits, quaternion_pred = pose_mini_tower(images, **kwargs)\n else:\n raise ValueError('Unknown task classifier %s' % model)\n\n return logits, quaternion_pred\n\n\n#####################################\n# Classifiers used in the DSN paper #\n#####################################\n\n\ndef mnist_classifier(images,\n is_training=False,\n num_classes=10,\n reuse_private=False,\n private_scope='mnist',\n reuse_shared=False,\n shared_scope='task_model'):\n \"\"\"Creates the convolutional MNIST model from the gradient reversal paper.\n\n Note that since the output is a set of 
'logits', the values fall in the\n interval of (-infinity, infinity). Consequently, to convert the outputs to a\n probability distribution over the characters, one will need to convert them\n using the softmax function:\n logits, endpoints = conv_mnist(images, is_training=False)\n predictions = tf.nn.softmax(logits)\n\n Args:\n images: the MNIST digits, a tensor of size [batch_size, 28, 28, 1].\n is_training: specifies whether or not we're currently training the model.\n This variable will determine the behaviour of the dropout layer.\n num_classes: the number of output classes to use.\n\n Returns:\n the output logits, a tensor of size [batch_size, num_classes].\n a dictionary with key/values the layer names and tensors.\n \"\"\"\n\n net = {}\n\n with tf.variable_scope(private_scope, reuse=reuse_private):\n net['conv1'] = slim.conv2d(images, 32, [5, 5], scope='conv1')\n net['pool1'] = slim.max_pool2d(net['conv1'], [2, 2], 2, scope='pool1')\n\n with tf.variable_scope(shared_scope, reuse=reuse_shared):\n net['conv2'] = slim.conv2d(net['pool1'], 48, [5, 5], scope='conv2')\n net['pool2'] = slim.max_pool2d(net['conv2'], [2, 2], 2, scope='pool2')\n net['fc3'] = slim.fully_connected(\n slim.flatten(net['pool2']), 100, scope='fc3')\n net['fc4'] = slim.fully_connected(\n slim.flatten(net['fc3']), 100, scope='fc4')\n logits = slim.fully_connected(\n net['fc4'], num_classes, activation_fn=None, scope='fc5')\n return logits, net\n\n\ndef svhn_classifier(images,\n is_training=False,\n num_classes=10,\n reuse_private=False,\n private_scope=None,\n reuse_shared=False,\n shared_scope='task_model'):\n \"\"\"Creates the convolutional SVHN model from the gradient reversal paper.\n\n Note that since the output is a set of 'logits', the values fall in the\n interval of (-infinity, infinity). 
Consequently, to convert the outputs to a\n probability distribution over the characters, one will need to convert them\n using the softmax function:\n logits = mnist.Mnist(images, is_training=False)\n predictions = tf.nn.softmax(logits)\n\n Args:\n images: the SVHN digits, a tensor of size [batch_size, 40, 40, 3].\n is_training: specifies whether or not we're currently training the model.\n This variable will determine the behaviour of the dropout layer.\n num_classes: the number of output classes to use.\n\n Returns:\n the output logits, a tensor of size [batch_size, num_classes].\n a dictionary with key/values the layer names and tensors.\n \"\"\"\n\n net = {}\n\n with tf.variable_scope(private_scope, reuse=reuse_private):\n net['conv1'] = slim.conv2d(images, 64, [5, 5], scope='conv1')\n net['pool1'] = slim.max_pool2d(net['conv1'], [3, 3], 2, scope='pool1')\n\n with tf.variable_scope(shared_scope, reuse=reuse_shared):\n net['conv2'] = slim.conv2d(net['pool1'], 64, [5, 5], scope='conv2')\n net['pool2'] = slim.max_pool2d(net['conv2'], [3, 3], 2, scope='pool2')\n net['conv3'] = slim.conv2d(net['pool2'], 128, [5, 5], scope='conv3')\n\n net['fc3'] = slim.fully_connected(\n slim.flatten(net['conv3']), 3072, scope='fc3')\n net['fc4'] = slim.fully_connected(\n slim.flatten(net['fc3']), 2048, scope='fc4')\n\n logits = slim.fully_connected(\n net['fc4'], num_classes, activation_fn=None, scope='fc5')\n\n return logits, net\n\n\ndef gtsrb_classifier(images,\n is_training=False,\n num_classes=43,\n reuse_private=False,\n private_scope='gtsrb',\n reuse_shared=False,\n shared_scope='task_model'):\n \"\"\"Creates the convolutional GTSRB model from the gradient reversal paper.\n\n Note that since the output is a set of 'logits', the values fall in the\n interval of (-infinity, infinity). 
Consequently, to convert the outputs to a\n probability distribution over the characters, one will need to convert them\n using the softmax function:\n logits = mnist.Mnist(images, is_training=False)\n predictions = tf.nn.softmax(logits)\n\n Args:\n images: the SVHN digits, a tensor of size [batch_size, 40, 40, 3].\n is_training: specifies whether or not we're currently training the model.\n This variable will determine the behaviour of the dropout layer.\n num_classes: the number of output classes to use.\n reuse_private: Whether or not to reuse the private components of the model.\n private_scope: The name of the private scope.\n reuse_shared: Whether or not to reuse the shared components of the model.\n shared_scope: The name of the shared scope.\n\n Returns:\n the output logits, a tensor of size [batch_size, num_classes].\n a dictionary with key/values the layer names and tensors.\n \"\"\"\n\n net = {}\n\n with tf.variable_scope(private_scope, reuse=reuse_private):\n net['conv1'] = slim.conv2d(images, 96, [5, 5], scope='conv1')\n net['pool1'] = slim.max_pool2d(net['conv1'], [2, 2], 2, scope='pool1')\n with tf.variable_scope(shared_scope, reuse=reuse_shared):\n net['conv2'] = slim.conv2d(net['pool1'], 144, [3, 3], scope='conv2')\n net['pool2'] = slim.max_pool2d(net['conv2'], [2, 2], 2, scope='pool2')\n net['conv3'] = slim.conv2d(net['pool2'], 256, [5, 5], scope='conv3')\n net['pool3'] = slim.max_pool2d(net['conv3'], [2, 2], 2, scope='pool3')\n\n net['fc3'] = slim.fully_connected(\n slim.flatten(net['pool3']), 512, scope='fc3')\n logits = slim.fully_connected(\n net['fc3'], num_classes, activation_fn=None, scope='fc4')\n\n return logits, net\n\n\n#########################\n# pose_mini task towers #\n#########################\n\n\ndef pose_mini_tower(images,\n num_classes=11,\n is_training=False,\n reuse_private=False,\n private_scope='pose_mini',\n reuse_shared=False,\n shared_scope='task_model'):\n \"\"\"Task tower for the pose_mini dataset.\"\"\"\n\n with tf.variable_scope(private_scope, reuse=reuse_private):\n net = slim.conv2d(images, 32, [5, 5], scope='conv1')\n net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool1')\n with tf.variable_scope(shared_scope, reuse=reuse_shared):\n net = slim.conv2d(net, 64, [5, 5], scope='conv2')\n net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool2')\n net = slim.flatten(net)\n\n net = slim.fully_connected(net, 128, scope='fc3')\n net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout')\n with tf.variable_scope('quaternion_prediction'):\n quaternion_pred = slim.fully_connected(\n net, 4, activation_fn=tf.tanh, scope='fc_q')\n quaternion_pred = tf.nn.l2_normalize(quaternion_pred, 1)\n\n logits = slim.fully_connected(\n net, num_classes, activation_fn=None, scope='fc4')\n\n return logits, quaternion_pred\n\n\ndef doubling_cnn_class_and_quaternion(images,\n num_private_layers=1,\n num_classes=10,\n is_training=False,\n reuse_private=False,\n private_scope='doubling_cnn',\n reuse_shared=False,\n shared_scope='task_model'):\n \"\"\"Alternate conv, pool while doubling filter count.\"\"\"\n net = images\n depth = 32\n layer_id = 1\n\n with tf.variable_scope(private_scope, reuse=reuse_private):\n while num_private_layers > 0 and net.shape.as_list()[1] > 5:\n net = slim.conv2d(net, depth, [3, 3], scope='conv%s' % layer_id)\n net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool%s' % layer_id)\n depth *= 2\n layer_id += 1\n num_private_layers -= 1\n\n with tf.variable_scope(shared_scope, reuse=reuse_shared):\n while 
net.shape.as_list()[1] > 5:\n net = slim.conv2d(net, depth, [3, 3], scope='conv%s' % layer_id)\n net = slim.max_pool2d(net, [2, 2], stride=2, scope='pool%s' % layer_id)\n depth *= 2\n layer_id += 1\n\n net = slim.flatten(net)\n net = slim.fully_connected(net, 100, scope='fc1')\n net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout')\n quaternion_pred = slim.fully_connected(\n net, 4, activation_fn=tf.tanh, scope='fc_q')\n quaternion_pred = tf.nn.l2_normalize(quaternion_pred, 1)\n\n logits = slim.fully_connected(\n net, num_classes, activation_fn=None, scope='fc_logits')\n\n return logits, quaternion_pred\n",
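The filter/size progression of the doubling tower above can be traced in a few lines; a sketch assuming 64x64 inputs:

def doubling_schedule(input_size, start_depth=32, min_size=5):
    # Traces the conv/pool loop of doubling_cnn_class_and_quaternion: each
    # stride-2 pool halves the spatial size while the filter count doubles,
    # stopping once the feature map is min_size pixels or smaller.
    size, depth, layers = input_size, start_depth, []
    while size > min_size:
        layers.append((size, depth))
        size //= 2
        depth *= 2
    return layers

# For assumed 64x64 inputs: convs at sizes 64, 32, 16, 8 with depths
# 32, 64, 128, 256 before the tower flattens into the fully connected head.
print(doubling_schedule(64))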
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Define flags are common for both train.py and eval.py scripts.\"\"\"\nimport sys\n\nfrom tensorflow.python.platform import flags\nimport logging\n\nimport datasets\nimport model\n\nFLAGS = flags.FLAGS\n\nlogging.basicConfig(\n level=logging.DEBUG,\n stream=sys.stderr,\n format='%(levelname)s '\n '%(asctime)s.%(msecs)06d: '\n '%(filename)s: '\n '%(lineno)d '\n '%(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n\ndef define():\n \"\"\"Define common flags.\"\"\"\n # yapf: disable\n flags.DEFINE_integer('batch_size', 32,\n 'Batch size.')\n\n flags.DEFINE_integer('crop_width', None,\n 'Width of the central crop for images.')\n\n flags.DEFINE_integer('crop_height', None,\n 'Height of the central crop for images.')\n\n flags.DEFINE_string('train_log_dir', '/tmp/attention_ocr/train',\n 'Directory where to write event logs.')\n\n flags.DEFINE_string('dataset_name', 'fsns',\n 'Name of the dataset. Supported: fsns')\n\n flags.DEFINE_string('split_name', 'train',\n 'Dataset split name to run evaluation for: test,train.')\n\n flags.DEFINE_string('dataset_dir', None,\n 'Dataset root folder.')\n\n flags.DEFINE_string('checkpoint', '',\n 'Path for checkpoint to restore weights from.')\n\n flags.DEFINE_string('master',\n '',\n 'BNS name of the TensorFlow master to use.')\n\n # Model hyper parameters\n flags.DEFINE_float('learning_rate', 0.004,\n 'learning rate')\n\n flags.DEFINE_string('optimizer', 'momentum',\n 'the optimizer to use')\n\n flags.DEFINE_float('momentum', 0.9,\n 'momentum value for the momentum optimizer if used')\n\n flags.DEFINE_bool('use_augment_input', True,\n 'If True will use image augmentation')\n\n # Method hyper parameters\n # conv_tower_fn\n flags.DEFINE_string('final_endpoint', 'Mixed_5d',\n 'Endpoint to cut inception tower')\n\n # sequence_logit_fn\n flags.DEFINE_bool('use_attention', True,\n 'If True will use the attention mechanism')\n\n flags.DEFINE_bool('use_autoregression', True,\n 'If True will use autoregression (a feedback link)')\n\n flags.DEFINE_integer('num_lstm_units', 256,\n 'number of LSTM units for sequence LSTM')\n\n flags.DEFINE_float('weight_decay', 0.00004,\n 'weight decay for char prediction FC layers')\n\n flags.DEFINE_float('lstm_state_clip_value', 10.0,\n 'cell state is clipped by this value prior to the cell'\n ' output activation')\n\n # 'sequence_loss_fn'\n flags.DEFINE_float('label_smoothing', 0.1,\n 'weight for label smoothing')\n\n flags.DEFINE_bool('ignore_nulls', True,\n 'ignore null characters for computing the loss')\n\n flags.DEFINE_bool('average_across_timesteps', False,\n 'divide the returned cost by the total label weight')\n # yapf: enable\n\n\ndef get_crop_size():\n if FLAGS.crop_width and FLAGS.crop_height:\n return (FLAGS.crop_width, FLAGS.crop_height)\n else:\n return None\n\n\ndef create_dataset(split_name):\n ds_module = getattr(datasets, 
FLAGS.dataset_name)\n return ds_module.get_split(split_name, dataset_dir=FLAGS.dataset_dir)\n\n\ndef create_mparams():\n return {\n 'conv_tower_fn':\n model.ConvTowerParams(final_endpoint=FLAGS.final_endpoint),\n 'sequence_logit_fn':\n model.SequenceLogitsParams(\n use_attention=FLAGS.use_attention,\n use_autoregression=FLAGS.use_autoregression,\n num_lstm_units=FLAGS.num_lstm_units,\n weight_decay=FLAGS.weight_decay,\n lstm_state_clip_value=FLAGS.lstm_state_clip_value),\n 'sequence_loss_fn':\n model.SequenceLossParams(\n label_smoothing=FLAGS.label_smoothing,\n ignore_nulls=FLAGS.ignore_nulls,\n average_across_timesteps=FLAGS.average_across_timesteps)\n }\n\n\ndef create_model(*args, **kwargs):\n ocr_model = model.Model(mparams=create_mparams(), *args, **kwargs)\n return ocr_model\n",
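create_mparams simply packages flag values into per-stage parameter tuples; a minimal sketch with hypothetical namedtuple stand-ins for the real model.*Params types:

import collections

# Hypothetical stand-ins for model.ConvTowerParams and model.SequenceLossParams,
# only to show the shape of the dict that create_mparams() assembles from flags.
ConvTowerParams = collections.namedtuple('ConvTowerParams', ['final_endpoint'])
SequenceLossParams = collections.namedtuple(
    'SequenceLossParams',
    ['label_smoothing', 'ignore_nulls', 'average_across_timesteps'])

mparams = {
    'conv_tower_fn': ConvTowerParams(final_endpoint='Mixed_5d'),
    'sequence_loss_fn': SequenceLossParams(
        label_smoothing=0.1, ignore_nulls=True,
        average_across_timesteps=False),
}
print(mparams['conv_tower_fn'].final_endpoint)  # Mixed_5d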
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Functions to build DetectionModel training optimizers.\"\"\"\n\nimport tensorflow as tf\n\n\nfrom object_detection.utils import learning_schedules\n\n\ndef build_optimizers_tf_v1(optimizer_config, global_step=None):\n \"\"\"Create a TF v1 compatible optimizer based on config.\n\n Args:\n optimizer_config: A Optimizer proto message.\n global_step: A variable representing the current step.\n If None, defaults to tf.train.get_or_create_global_step()\n\n Returns:\n An optimizer and a list of variables for summary.\n\n Raises:\n ValueError: when using an unsupported input data type.\n \"\"\"\n optimizer_type = optimizer_config.WhichOneof('optimizer')\n optimizer = None\n\n summary_vars = []\n if optimizer_type == 'rms_prop_optimizer':\n config = optimizer_config.rms_prop_optimizer\n learning_rate = _create_learning_rate(config.learning_rate,\n global_step=global_step)\n summary_vars.append(learning_rate)\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate,\n decay=config.decay,\n momentum=config.momentum_optimizer_value,\n epsilon=config.epsilon)\n\n if optimizer_type == 'momentum_optimizer':\n config = optimizer_config.momentum_optimizer\n learning_rate = _create_learning_rate(config.learning_rate,\n global_step=global_step)\n summary_vars.append(learning_rate)\n optimizer = tf.train.MomentumOptimizer(\n learning_rate,\n momentum=config.momentum_optimizer_value)\n\n if optimizer_type == 'adam_optimizer':\n config = optimizer_config.adam_optimizer\n learning_rate = _create_learning_rate(config.learning_rate,\n global_step=global_step)\n summary_vars.append(learning_rate)\n optimizer = tf.train.AdamOptimizer(learning_rate)\n\n\n if optimizer is None:\n raise ValueError('Optimizer %s not supported.' 
% optimizer_type)\n\n if optimizer_config.use_moving_average:\n optimizer = tf.contrib.opt.MovingAverageOptimizer(\n optimizer, average_decay=optimizer_config.moving_average_decay)\n\n return optimizer, summary_vars\n\n\ndef build_optimizers_tf_v2(optimizer_config, global_step=None):\n \"\"\"Create a TF v2 compatible optimizer based on config.\n\n Args:\n optimizer_config: A Optimizer proto message.\n global_step: A variable representing the current step.\n If None, defaults to tf.train.get_or_create_global_step()\n\n Returns:\n An optimizer and a list of variables for summary.\n\n Raises:\n ValueError: when using an unsupported input data type.\n \"\"\"\n optimizer_type = optimizer_config.WhichOneof('optimizer')\n optimizer = None\n\n summary_vars = []\n if optimizer_type == 'rms_prop_optimizer':\n config = optimizer_config.rms_prop_optimizer\n learning_rate = _create_learning_rate(config.learning_rate,\n global_step=global_step)\n summary_vars.append(learning_rate)\n optimizer = tf.keras.optimizers.RMSprop(\n learning_rate,\n decay=config.decay,\n momentum=config.momentum_optimizer_value,\n epsilon=config.epsilon)\n\n if optimizer_type == 'momentum_optimizer':\n config = optimizer_config.momentum_optimizer\n learning_rate = _create_learning_rate(config.learning_rate,\n global_step=global_step)\n summary_vars.append(learning_rate)\n optimizer = tf.keras.optimizers.SGD(\n learning_rate,\n momentum=config.momentum_optimizer_value)\n\n if optimizer_type == 'adam_optimizer':\n config = optimizer_config.adam_optimizer\n learning_rate = _create_learning_rate(config.learning_rate,\n global_step=global_step)\n summary_vars.append(learning_rate)\n optimizer = tf.keras.optimizers.Adam(learning_rate)\n\n if optimizer is None:\n raise ValueError('Optimizer %s not supported.' 
% optimizer_type)\n\n if optimizer_config.use_moving_average:\n raise ValueError('Moving average not supported in eager mode.')\n\n return optimizer, summary_vars\n\n\ndef build(config, global_step=None):\n\n if tf.executing_eagerly():\n return build_optimizers_tf_v2(config, global_step)\n else:\n return build_optimizers_tf_v1(config, global_step)\n\n\ndef _create_learning_rate(learning_rate_config, global_step=None):\n \"\"\"Create optimizer learning rate based on config.\n\n Args:\n learning_rate_config: A LearningRate proto message.\n global_step: A variable representing the current step.\n If None, defaults to tf.train.get_or_create_global_step()\n\n Returns:\n A learning rate.\n\n Raises:\n ValueError: when using an unsupported input data type.\n \"\"\"\n if global_step is None:\n global_step = tf.train.get_or_create_global_step()\n learning_rate = None\n learning_rate_type = learning_rate_config.WhichOneof('learning_rate')\n if learning_rate_type == 'constant_learning_rate':\n config = learning_rate_config.constant_learning_rate\n learning_rate = tf.constant(config.learning_rate, dtype=tf.float32,\n name='learning_rate')\n\n if learning_rate_type == 'exponential_decay_learning_rate':\n config = learning_rate_config.exponential_decay_learning_rate\n learning_rate = learning_schedules.exponential_decay_with_burnin(\n global_step,\n config.initial_learning_rate,\n config.decay_steps,\n config.decay_factor,\n burnin_learning_rate=config.burnin_learning_rate,\n burnin_steps=config.burnin_steps,\n min_learning_rate=config.min_learning_rate,\n staircase=config.staircase)\n\n if learning_rate_type == 'manual_step_learning_rate':\n config = learning_rate_config.manual_step_learning_rate\n if not config.schedule:\n raise ValueError('Empty learning rate schedule.')\n learning_rate_step_boundaries = [x.step for x in config.schedule]\n learning_rate_sequence = [config.initial_learning_rate]\n learning_rate_sequence += [x.learning_rate for x in config.schedule]\n learning_rate = learning_schedules.manual_stepping(\n global_step, learning_rate_step_boundaries,\n learning_rate_sequence, config.warmup)\n\n if learning_rate_type == 'cosine_decay_learning_rate':\n config = learning_rate_config.cosine_decay_learning_rate\n learning_rate = learning_schedules.cosine_decay_with_warmup(\n global_step,\n config.learning_rate_base,\n config.total_steps,\n config.warmup_learning_rate,\n config.warmup_steps,\n config.hold_base_rate_steps)\n\n if learning_rate is None:\n raise ValueError('Learning_rate %s not supported.' % learning_rate_type)\n\n return learning_rate\n",
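For intuition, the manual-step branch reduces to a piecewise-constant lookup once boundaries and rates are unpacked from the schedule; a pure-Python sketch with assumed values (warmup omitted):

def manual_step_lr(global_step, boundaries, rates):
    # Piecewise-constant schedule as built for 'manual_step_learning_rate':
    # rates[0] applies before boundaries[0], rates[i] before boundaries[i],
    # and rates[-1] from the last boundary onward.
    assert len(rates) == len(boundaries) + 1
    for boundary, rate in zip(boundaries, rates):
        if global_step < boundary:
            return rate
    return rates[-1]

# Assumed schedule: start at 1e-2, drop at steps 1000 and 2000.
for step in (0, 999, 1000, 2500):
    print(step, manual_step_lr(step, [1000, 2000], [1e-2, 1e-3, 1e-4]))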
"\n# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Depth and Ego-Motion networks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nslim = tf.contrib.slim\n\nSIMPLE = 'simple'\nRESNET = 'resnet'\nARCHITECTURES = [SIMPLE, RESNET]\n\nSCALE_TRANSLATION = 0.001\nSCALE_ROTATION = 0.01\n\n# Disparity (inverse depth) values range from 0.01 to 10. Note that effectively,\n# this is undone if depth normalization is used, which scales the values to\n# have a mean of 1.\nDISP_SCALING = 10\nMIN_DISP = 0.01\nWEIGHT_DECAY_KEY = 'WEIGHT_DECAY'\nEGOMOTION_VEC_SIZE = 6\n\n\ndef egomotion_net(image_stack, disp_bottleneck_stack, joint_encoder, seq_length,\n weight_reg):\n \"\"\"Predict ego-motion vectors from a stack of frames or embeddings.\n\n Args:\n image_stack: Input tensor with shape [B, h, w, seq_length * 3] in order.\n disp_bottleneck_stack: Input tensor with shape [B, h_hidden, w_hidden,\n seq_length * c_hidden] in order.\n joint_encoder: Determines if the same encoder is used for computing the\n bottleneck layer of both the egomotion and the depth prediction\n network. If enabled, disp_bottleneck_stack is used as input, and the\n encoding steps are skipped. If disabled, a separate encoder is defined\n on image_stack.\n seq_length: The sequence length used.\n weight_reg: The amount of weight regularization.\n\n Returns:\n Egomotion vectors with shape [B, seq_length - 1, 6].\n \"\"\"\n num_egomotion_vecs = seq_length - 1\n with tf.variable_scope('pose_exp_net') as sc:\n end_points_collection = sc.original_name_scope + '_end_points'\n with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],\n normalizer_fn=None,\n weights_regularizer=slim.l2_regularizer(weight_reg),\n normalizer_params=None,\n activation_fn=tf.nn.relu,\n outputs_collections=end_points_collection):\n if not joint_encoder:\n # Define separate encoder. 
If sharing, we can skip the encoding step,\n # as the bottleneck layer will already be passed as input.\n cnv1 = slim.conv2d(image_stack, 16, [7, 7], stride=2, scope='cnv1')\n cnv2 = slim.conv2d(cnv1, 32, [5, 5], stride=2, scope='cnv2')\n cnv3 = slim.conv2d(cnv2, 64, [3, 3], stride=2, scope='cnv3')\n cnv4 = slim.conv2d(cnv3, 128, [3, 3], stride=2, scope='cnv4')\n cnv5 = slim.conv2d(cnv4, 256, [3, 3], stride=2, scope='cnv5')\n\n with tf.variable_scope('pose'):\n inputs = disp_bottleneck_stack if joint_encoder else cnv5\n cnv6 = slim.conv2d(inputs, 256, [3, 3], stride=2, scope='cnv6')\n cnv7 = slim.conv2d(cnv6, 256, [3, 3], stride=2, scope='cnv7')\n pred_channels = EGOMOTION_VEC_SIZE * num_egomotion_vecs\n egomotion_pred = slim.conv2d(cnv7, pred_channels, [1, 1], scope='pred',\n stride=1, normalizer_fn=None,\n activation_fn=None)\n egomotion_avg = tf.reduce_mean(egomotion_pred, [1, 2])\n egomotion_res = tf.reshape(\n egomotion_avg, [-1, num_egomotion_vecs, EGOMOTION_VEC_SIZE])\n # Tinghui found that scaling by a small constant facilitates training.\n egomotion_scaled = tf.concat([egomotion_res[:, 0:3] * SCALE_TRANSLATION,\n egomotion_res[:, 3:6] * SCALE_ROTATION],\n axis=1)\n return egomotion_scaled\n\n\ndef objectmotion_net(image_stack, disp_bottleneck_stack, joint_encoder,\n seq_length, weight_reg):\n \"\"\"Predict object-motion vectors from a stack of frames or embeddings.\n\n Args:\n image_stack: Input tensor with shape [B, h, w, seq_length * 3] in order.\n disp_bottleneck_stack: Input tensor with shape [B, h_hidden, w_hidden,\n seq_length * c_hidden] in order.\n joint_encoder: Determines if the same encoder is used for computing the\n bottleneck layer of both the egomotion and the depth prediction\n network. If enabled, disp_bottleneck_stack is used as input, and the\n encoding steps are skipped. If disabled, a separate encoder is defined\n on image_stack.\n seq_length: The sequence length used.\n weight_reg: The amount of weight regularization.\n\n Returns:\n Egomotion vectors with shape [B, seq_length - 1, 6].\n \"\"\"\n num_egomotion_vecs = seq_length - 1\n with tf.variable_scope('pose_exp_net') as sc:\n end_points_collection = sc.original_name_scope + '_end_points'\n with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],\n normalizer_fn=None,\n weights_regularizer=slim.l2_regularizer(weight_reg),\n normalizer_params=None,\n activation_fn=tf.nn.relu,\n outputs_collections=end_points_collection):\n if not joint_encoder:\n # Define separate encoder. 
If sharing, we can skip the encoding step,\n # as the bottleneck layer will already be passed as input.\n cnv1 = slim.conv2d(image_stack, 16, [7, 7], stride=2, scope='cnv1')\n cnv2 = slim.conv2d(cnv1, 32, [5, 5], stride=2, scope='cnv2')\n cnv3 = slim.conv2d(cnv2, 64, [3, 3], stride=2, scope='cnv3')\n cnv4 = slim.conv2d(cnv3, 128, [3, 3], stride=2, scope='cnv4')\n cnv5 = slim.conv2d(cnv4, 256, [3, 3], stride=2, scope='cnv5')\n\n with tf.variable_scope('pose'):\n inputs = disp_bottleneck_stack if joint_encoder else cnv5\n cnv6 = slim.conv2d(inputs, 256, [3, 3], stride=2, scope='cnv6')\n cnv7 = slim.conv2d(cnv6, 256, [3, 3], stride=2, scope='cnv7')\n pred_channels = EGOMOTION_VEC_SIZE * num_egomotion_vecs\n egomotion_pred = slim.conv2d(cnv7, pred_channels, [1, 1], scope='pred',\n stride=1, normalizer_fn=None,\n activation_fn=None)\n egomotion_avg = tf.reduce_mean(egomotion_pred, [1, 2])\n egomotion_res = tf.reshape(\n egomotion_avg, [-1, num_egomotion_vecs, EGOMOTION_VEC_SIZE])\n # Tinghui found that scaling by a small constant facilitates training.\n egomotion_scaled = tf.concat([egomotion_res[:, 0:3] * SCALE_TRANSLATION,\n egomotion_res[:, 3:6] * SCALE_ROTATION],\n axis=1)\n return egomotion_scaled\n\n\ndef disp_net(architecture, image, use_skip, weight_reg, is_training):\n \"\"\"Defines an encoder-decoder architecture for depth prediction.\"\"\"\n if architecture not in ARCHITECTURES:\n raise ValueError('Unknown architecture.')\n encoder_selected = encoder(architecture)\n decoder_selected = decoder(architecture)\n\n # Encode image.\n bottleneck, skip_connections = encoder_selected(image, weight_reg,\n is_training)\n # Decode to depth.\n multiscale_disps_i = decoder_selected(target_image=image,\n bottleneck=bottleneck,\n weight_reg=weight_reg,\n use_skip=use_skip,\n skip_connections=skip_connections)\n return multiscale_disps_i, bottleneck\n\n\ndef encoder(architecture):\n return encoder_resnet if architecture == RESNET else encoder_simple\n\n\ndef decoder(architecture):\n return decoder_resnet if architecture == RESNET else decoder_simple\n\n\ndef encoder_simple(target_image, weight_reg, is_training):\n \"\"\"Defines the old encoding architecture.\"\"\"\n del is_training\n with slim.arg_scope([slim.conv2d],\n normalizer_fn=None,\n normalizer_params=None,\n weights_regularizer=slim.l2_regularizer(weight_reg),\n activation_fn=tf.nn.relu):\n # Define (joint) encoder.\n cnv1 = slim.conv2d(target_image, 32, [7, 7], stride=2, scope='cnv1')\n cnv1b = slim.conv2d(cnv1, 32, [7, 7], stride=1, scope='cnv1b')\n cnv2 = slim.conv2d(cnv1b, 64, [5, 5], stride=2, scope='cnv2')\n cnv2b = slim.conv2d(cnv2, 64, [5, 5], stride=1, scope='cnv2b')\n cnv3 = slim.conv2d(cnv2b, 128, [3, 3], stride=2, scope='cnv3')\n cnv3b = slim.conv2d(cnv3, 128, [3, 3], stride=1, scope='cnv3b')\n cnv4 = slim.conv2d(cnv3b, 256, [3, 3], stride=2, scope='cnv4')\n cnv4b = slim.conv2d(cnv4, 256, [3, 3], stride=1, scope='cnv4b')\n cnv5 = slim.conv2d(cnv4b, 512, [3, 3], stride=2, scope='cnv5')\n cnv5b = slim.conv2d(cnv5, 512, [3, 3], stride=1, scope='cnv5b')\n cnv6 = slim.conv2d(cnv5b, 512, [3, 3], stride=2, scope='cnv6')\n cnv6b = slim.conv2d(cnv6, 512, [3, 3], stride=1, scope='cnv6b')\n cnv7 = slim.conv2d(cnv6b, 512, [3, 3], stride=2, scope='cnv7')\n cnv7b = slim.conv2d(cnv7, 512, [3, 3], stride=1, scope='cnv7b')\n return cnv7b, (cnv6b, cnv5b, cnv4b, cnv3b, cnv2b, cnv1b)\n\n\ndef decoder_simple(target_image, bottleneck, weight_reg, use_skip,\n skip_connections):\n \"\"\"Defines the old depth decoder architecture.\"\"\"\n h = 
target_image.get_shape()[1].value\n w = target_image.get_shape()[2].value\n (cnv6b, cnv5b, cnv4b, cnv3b, cnv2b, cnv1b) = skip_connections\n with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],\n normalizer_fn=None,\n normalizer_params=None,\n weights_regularizer=slim.l2_regularizer(weight_reg),\n activation_fn=tf.nn.relu):\n up7 = slim.conv2d_transpose(bottleneck, 512, [3, 3], stride=2,\n scope='upcnv7')\n up7 = _resize_like(up7, cnv6b)\n if use_skip:\n i7_in = tf.concat([up7, cnv6b], axis=3)\n else:\n i7_in = up7\n icnv7 = slim.conv2d(i7_in, 512, [3, 3], stride=1, scope='icnv7')\n\n up6 = slim.conv2d_transpose(icnv7, 512, [3, 3], stride=2, scope='upcnv6')\n up6 = _resize_like(up6, cnv5b)\n if use_skip:\n i6_in = tf.concat([up6, cnv5b], axis=3)\n else:\n i6_in = up6\n icnv6 = slim.conv2d(i6_in, 512, [3, 3], stride=1, scope='icnv6')\n\n up5 = slim.conv2d_transpose(icnv6, 256, [3, 3], stride=2, scope='upcnv5')\n up5 = _resize_like(up5, cnv4b)\n if use_skip:\n i5_in = tf.concat([up5, cnv4b], axis=3)\n else:\n i5_in = up5\n icnv5 = slim.conv2d(i5_in, 256, [3, 3], stride=1, scope='icnv5')\n\n up4 = slim.conv2d_transpose(icnv5, 128, [3, 3], stride=2, scope='upcnv4')\n up4 = _resize_like(up4, cnv3b)\n if use_skip:\n i4_in = tf.concat([up4, cnv3b], axis=3)\n else:\n i4_in = up4\n icnv4 = slim.conv2d(i4_in, 128, [3, 3], stride=1, scope='icnv4')\n disp4 = (slim.conv2d(icnv4, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,\n normalizer_fn=None, scope='disp4')\n * DISP_SCALING + MIN_DISP)\n disp4_up = tf.image.resize_bilinear(disp4, [np.int(h / 4), np.int(w / 4)],\n align_corners=True)\n\n up3 = slim.conv2d_transpose(icnv4, 64, [3, 3], stride=2, scope='upcnv3')\n up3 = _resize_like(up3, cnv2b)\n if use_skip:\n i3_in = tf.concat([up3, cnv2b, disp4_up], axis=3)\n else:\n i3_in = tf.concat([up3, disp4_up])\n icnv3 = slim.conv2d(i3_in, 64, [3, 3], stride=1, scope='icnv3')\n disp3 = (slim.conv2d(icnv3, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,\n normalizer_fn=None, scope='disp3')\n * DISP_SCALING + MIN_DISP)\n disp3_up = tf.image.resize_bilinear(disp3, [np.int(h / 2), np.int(w / 2)],\n align_corners=True)\n\n up2 = slim.conv2d_transpose(icnv3, 32, [3, 3], stride=2, scope='upcnv2')\n up2 = _resize_like(up2, cnv1b)\n if use_skip:\n i2_in = tf.concat([up2, cnv1b, disp3_up], axis=3)\n else:\n i2_in = tf.concat([up2, disp3_up])\n icnv2 = slim.conv2d(i2_in, 32, [3, 3], stride=1, scope='icnv2')\n disp2 = (slim.conv2d(icnv2, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,\n normalizer_fn=None, scope='disp2')\n * DISP_SCALING + MIN_DISP)\n disp2_up = tf.image.resize_bilinear(disp2, [h, w], align_corners=True)\n\n up1 = slim.conv2d_transpose(icnv2, 16, [3, 3], stride=2, scope='upcnv1')\n i1_in = tf.concat([up1, disp2_up], axis=3)\n icnv1 = slim.conv2d(i1_in, 16, [3, 3], stride=1, scope='icnv1')\n disp1 = (slim.conv2d(icnv1, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,\n normalizer_fn=None, scope='disp1')\n * DISP_SCALING + MIN_DISP)\n return [disp1, disp2, disp3, disp4]\n\n\ndef encoder_resnet(target_image, weight_reg, is_training):\n \"\"\"Defines a ResNet18-based encoding architecture.\n\n This implementation follows Juyong Kim's implementation of ResNet18 on GitHub:\n https://github.com/dalgu90/resnet-18-tensorflow\n\n Args:\n target_image: Input tensor with shape [B, h, w, 3] to encode.\n weight_reg: Parameter ignored.\n is_training: Whether the model is being trained or not.\n\n Returns:\n Tuple of tensors, with the first being the bottleneck layer as tensor of\n size [B, h_hid, w_hid, c_hid], and 
others being intermediate layers\n for building skip-connections.\n \"\"\"\n del weight_reg\n encoder_filters = [64, 64, 128, 256, 512]\n stride = 2\n\n # conv1\n with tf.variable_scope('conv1'):\n x = _conv(target_image, 7, encoder_filters[0], stride)\n x = _bn(x, is_train=is_training)\n econv1 = _relu(x)\n x = tf.nn.max_pool(econv1, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME')\n\n # conv2_x\n x = _residual_block(x, is_training, name='conv2_1')\n econv2 = _residual_block(x, is_training, name='conv2_2')\n\n # conv3_x\n x = _residual_block_first(econv2, is_training, encoder_filters[2], stride,\n name='conv3_1')\n econv3 = _residual_block(x, is_training, name='conv3_2')\n\n # conv4_x\n x = _residual_block_first(econv3, is_training, encoder_filters[3], stride,\n name='conv4_1')\n econv4 = _residual_block(x, is_training, name='conv4_2')\n\n # conv5_x\n x = _residual_block_first(econv4, is_training, encoder_filters[4], stride,\n name='conv5_1')\n econv5 = _residual_block(x, is_training, name='conv5_2')\n return econv5, (econv4, econv3, econv2, econv1)\n\n\ndef decoder_resnet(target_image, bottleneck, weight_reg, use_skip,\n skip_connections):\n \"\"\"Defines the depth decoder architecture.\n\n Args:\n target_image: The original encoder input tensor with shape [B, h, w, 3].\n Just the shape information is used here.\n bottleneck: Bottleneck layer to be decoded.\n weight_reg: The amount of weight regularization.\n use_skip: Whether the passed skip connections econv1, econv2, econv3 and\n econv4 should be used.\n skip_connections: Tensors for building skip-connections.\n\n Returns:\n Disparities at 4 different scales.\n \"\"\"\n (econv4, econv3, econv2, econv1) = skip_connections\n decoder_filters = [16, 32, 64, 128, 256]\n default_pad = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])\n reg = slim.l2_regularizer(weight_reg) if weight_reg > 0.0 else None\n with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],\n normalizer_fn=None,\n normalizer_params=None,\n activation_fn=tf.nn.relu,\n weights_regularizer=reg):\n upconv5 = slim.conv2d_transpose(bottleneck, decoder_filters[4], [3, 3],\n stride=2, scope='upconv5')\n upconv5 = _resize_like(upconv5, econv4)\n if use_skip:\n i5_in = tf.concat([upconv5, econv4], axis=3)\n else:\n i5_in = upconv5\n i5_in = tf.pad(i5_in, default_pad, mode='REFLECT')\n iconv5 = slim.conv2d(i5_in, decoder_filters[4], [3, 3], stride=1,\n scope='iconv5', padding='VALID')\n\n upconv4 = slim.conv2d_transpose(iconv5, decoder_filters[3], [3, 3],\n stride=2, scope='upconv4')\n upconv4 = _resize_like(upconv4, econv3)\n if use_skip:\n i4_in = tf.concat([upconv4, econv3], axis=3)\n else:\n i4_in = upconv4\n i4_in = tf.pad(i4_in, default_pad, mode='REFLECT')\n iconv4 = slim.conv2d(i4_in, decoder_filters[3], [3, 3], stride=1,\n scope='iconv4', padding='VALID')\n\n disp4_input = tf.pad(iconv4, default_pad, mode='REFLECT')\n disp4 = (slim.conv2d(disp4_input, 1, [3, 3], stride=1,\n activation_fn=tf.sigmoid, normalizer_fn=None,\n scope='disp4', padding='VALID')\n * DISP_SCALING + MIN_DISP)\n\n upconv3 = slim.conv2d_transpose(iconv4, decoder_filters[2], [3, 3],\n stride=2, scope='upconv3')\n upconv3 = _resize_like(upconv3, econv2)\n if use_skip:\n i3_in = tf.concat([upconv3, econv2], axis=3)\n else:\n i3_in = upconv3\n i3_in = tf.pad(i3_in, default_pad, mode='REFLECT')\n iconv3 = slim.conv2d(i3_in, decoder_filters[2], [3, 3], stride=1,\n scope='iconv3', padding='VALID')\n disp3_input = tf.pad(iconv3, default_pad, mode='REFLECT')\n disp3 = (slim.conv2d(disp3_input, 1, [3, 3], stride=1,\n 
activation_fn=tf.sigmoid, normalizer_fn=None,\n scope='disp3', padding='VALID')\n * DISP_SCALING + MIN_DISP)\n\n upconv2 = slim.conv2d_transpose(iconv3, decoder_filters[1], [3, 3],\n stride=2, scope='upconv2')\n upconv2 = _resize_like(upconv2, econv1)\n if use_skip:\n i2_in = tf.concat([upconv2, econv1], axis=3)\n else:\n i2_in = upconv2\n i2_in = tf.pad(i2_in, default_pad, mode='REFLECT')\n iconv2 = slim.conv2d(i2_in, decoder_filters[1], [3, 3], stride=1,\n scope='iconv2', padding='VALID')\n disp2_input = tf.pad(iconv2, default_pad, mode='REFLECT')\n disp2 = (slim.conv2d(disp2_input, 1, [3, 3], stride=1,\n activation_fn=tf.sigmoid, normalizer_fn=None,\n scope='disp2', padding='VALID')\n * DISP_SCALING + MIN_DISP)\n\n upconv1 = slim.conv2d_transpose(iconv2, decoder_filters[0], [3, 3],\n stride=2, scope='upconv1')\n upconv1 = _resize_like(upconv1, target_image)\n upconv1 = tf.pad(upconv1, default_pad, mode='REFLECT')\n iconv1 = slim.conv2d(upconv1, decoder_filters[0], [3, 3], stride=1,\n scope='iconv1', padding='VALID')\n disp1_input = tf.pad(iconv1, default_pad, mode='REFLECT')\n disp1 = (slim.conv2d(disp1_input, 1, [3, 3], stride=1,\n activation_fn=tf.sigmoid, normalizer_fn=None,\n scope='disp1', padding='VALID')\n * DISP_SCALING + MIN_DISP)\n\n return [disp1, disp2, disp3, disp4]\n\n\ndef _residual_block_first(x, is_training, out_channel, strides, name='unit'):\n \"\"\"Helper function for defining ResNet architecture.\"\"\"\n in_channel = x.get_shape().as_list()[-1]\n with tf.variable_scope(name):\n # Shortcut connection\n if in_channel == out_channel:\n if strides == 1:\n shortcut = tf.identity(x)\n else:\n shortcut = tf.nn.max_pool(x, [1, strides, strides, 1],\n [1, strides, strides, 1], 'VALID')\n else:\n shortcut = _conv(x, 1, out_channel, strides, name='shortcut')\n # Residual\n x = _conv(x, 3, out_channel, strides, name='conv_1')\n x = _bn(x, is_train=is_training, name='bn_1')\n x = _relu(x, name='relu_1')\n x = _conv(x, 3, out_channel, 1, name='conv_2')\n x = _bn(x, is_train=is_training, name='bn_2')\n # Merge\n x = x + shortcut\n x = _relu(x, name='relu_2')\n return x\n\n\ndef _residual_block(x, is_training, input_q=None, output_q=None, name='unit'):\n \"\"\"Helper function for defining ResNet architecture.\"\"\"\n num_channel = x.get_shape().as_list()[-1]\n with tf.variable_scope(name):\n shortcut = x # Shortcut connection\n # Residual\n x = _conv(x, 3, num_channel, 1, input_q=input_q, output_q=output_q,\n name='conv_1')\n x = _bn(x, is_train=is_training, name='bn_1')\n x = _relu(x, name='relu_1')\n x = _conv(x, 3, num_channel, 1, input_q=output_q, output_q=output_q,\n name='conv_2')\n x = _bn(x, is_train=is_training, name='bn_2')\n # Merge\n x = x + shortcut\n x = _relu(x, name='relu_2')\n return x\n\n\ndef _conv(x, filter_size, out_channel, stride, pad='SAME', input_q=None,\n output_q=None, name='conv'):\n \"\"\"Helper function for defining ResNet architecture.\"\"\"\n if (input_q is None) ^ (output_q is None):\n raise ValueError('Input/Output splits are not correctly given.')\n\n in_shape = x.get_shape()\n with tf.variable_scope(name):\n # Main operation: conv2d\n with tf.device('/CPU:0'):\n kernel = tf.get_variable(\n 'kernel', [filter_size, filter_size, in_shape[3], out_channel],\n tf.float32, initializer=tf.random_normal_initializer(\n stddev=np.sqrt(2.0/filter_size/filter_size/out_channel)))\n if kernel not in tf.get_collection(WEIGHT_DECAY_KEY):\n tf.add_to_collection(WEIGHT_DECAY_KEY, kernel)\n conv = tf.nn.conv2d(x, kernel, [1, stride, stride, 1], pad)\n return 
conv\n\n\ndef _bn(x, is_train, name='bn'):\n \"\"\"Helper function for defining ResNet architecture.\"\"\"\n bn = tf.layers.batch_normalization(x, training=is_train, name=name)\n return bn\n\n\ndef _relu(x, name=None, leakness=0.0):\n \"\"\"Helper function for defining ResNet architecture.\"\"\"\n if leakness > 0.0:\n name = 'lrelu' if name is None else name\n return tf.maximum(x, x*leakness, name=name)\n else:\n name = 'relu' if name is None else name\n return tf.nn.relu(x, name=name)\n\n\ndef _resize_like(inputs, ref):\n i_h, i_w = inputs.get_shape()[1], inputs.get_shape()[2]\n r_h, r_w = ref.get_shape()[1], ref.get_shape()[2]\n if i_h == r_h and i_w == r_w:\n return inputs\n else:\n # TODO(casser): Other interpolation methods could be explored here.\n return tf.image.resize_bilinear(inputs, [r_h.value, r_w.value],\n align_corners=True)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"SSDFeatureExtractor for MobilenetV2 features.\"\"\"\n\nimport tensorflow as tf\n\nfrom object_detection.meta_architectures import ssd_meta_arch\nfrom object_detection.models import feature_map_generators\nfrom object_detection.models.keras_models import mobilenet_v2\nfrom object_detection.utils import ops\nfrom object_detection.utils import shape_utils\n\n\nclass SSDMobileNetV2KerasFeatureExtractor(\n ssd_meta_arch.SSDKerasFeatureExtractor):\n \"\"\"SSD Feature Extractor using MobilenetV2 features.\"\"\"\n\n def __init__(self,\n is_training,\n depth_multiplier,\n min_depth,\n pad_to_multiple,\n conv_hyperparams,\n freeze_batchnorm,\n inplace_batchnorm_update,\n use_explicit_padding=False,\n use_depthwise=False,\n num_layers=6,\n override_base_feature_extractor_hyperparams=False,\n name=None):\n \"\"\"MobileNetV2 Feature Extractor for SSD Models.\n\n Mobilenet v2 (experimental), designed by sandler@. More details can be found\n in //knowledge/cerebra/brain/compression/mobilenet/mobilenet_experimental.py\n\n Args:\n is_training: whether the network is in training mode.\n depth_multiplier: float depth multiplier for feature extractor (Functions\n as a width multiplier for the mobilenet_v2 network itself).\n min_depth: minimum feature extractor depth.\n pad_to_multiple: the nearest multiple to zero pad the input height and\n width dimensions to.\n conv_hyperparams: `hyperparams_builder.KerasLayerHyperparams` object\n containing convolution hyperparameters for the layers added on top of\n the base feature extractor.\n freeze_batchnorm: Whether to freeze batch norm parameters during\n training or not. When training with a small batch size (e.g. 1), it is\n desirable to freeze batch norm update and use pretrained batch norm\n params.\n inplace_batchnorm_update: Whether to update batch norm moving average\n values inplace. When this is false train op must add a control\n dependency on tf.graphkeys.UPDATE_OPS collection in order to update\n batch norm statistics.\n use_explicit_padding: Whether to use explicit padding when extracting\n features. Default is False.\n use_depthwise: Whether to use depthwise convolutions. Default is False.\n num_layers: Number of SSD layers.\n override_base_feature_extractor_hyperparams: Whether to override\n hyperparameters of the base feature extractor with the one from\n `conv_hyperparams_fn`.\n name: A string name scope to assign to the model. 
If 'None', Keras\n will auto-generate one from the class name.\n \"\"\"\n super(SSDMobileNetV2KerasFeatureExtractor, self).__init__(\n is_training=is_training,\n depth_multiplier=depth_multiplier,\n min_depth=min_depth,\n pad_to_multiple=pad_to_multiple,\n conv_hyperparams=conv_hyperparams,\n freeze_batchnorm=freeze_batchnorm,\n inplace_batchnorm_update=inplace_batchnorm_update,\n use_explicit_padding=use_explicit_padding,\n use_depthwise=use_depthwise,\n num_layers=num_layers,\n override_base_feature_extractor_hyperparams=\n override_base_feature_extractor_hyperparams,\n name=name)\n self._feature_map_layout = {\n 'from_layer': ['layer_15/expansion_output', 'layer_19', '', '', '', ''\n ][:self._num_layers],\n 'layer_depth': [-1, -1, 512, 256, 256, 128][:self._num_layers],\n 'use_depthwise': self._use_depthwise,\n 'use_explicit_padding': self._use_explicit_padding,\n }\n\n self.mobilenet_v2 = None\n self.feature_map_generator = None\n\n def build(self, input_shape):\n full_mobilenet_v2 = mobilenet_v2.mobilenet_v2(\n batchnorm_training=(self._is_training and not self._freeze_batchnorm),\n conv_hyperparams=(self._conv_hyperparams\n if self._override_base_feature_extractor_hyperparams\n else None),\n weights=None,\n use_explicit_padding=self._use_explicit_padding,\n alpha=self._depth_multiplier,\n min_depth=self._min_depth,\n include_top=False)\n conv2d_11_pointwise = full_mobilenet_v2.get_layer(\n name='block_13_expand_relu').output\n conv2d_13_pointwise = full_mobilenet_v2.get_layer(name='out_relu').output\n self.mobilenet_v2 = tf.keras.Model(\n inputs=full_mobilenet_v2.inputs,\n outputs=[conv2d_11_pointwise, conv2d_13_pointwise])\n self.feature_map_generator = (\n feature_map_generators.KerasMultiResolutionFeatureMaps(\n feature_map_layout=self._feature_map_layout,\n depth_multiplier=self._depth_multiplier,\n min_depth=self._min_depth,\n insert_1x1_conv=True,\n is_training=self._is_training,\n conv_hyperparams=self._conv_hyperparams,\n freeze_batchnorm=self._freeze_batchnorm,\n name='FeatureMaps'))\n self.built = True\n\n def preprocess(self, resized_inputs):\n \"\"\"SSD preprocessing.\n\n Maps pixel values to the range [-1, 1].\n\n Args:\n resized_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n\n Returns:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n \"\"\"\n return (2.0 / 255.0) * resized_inputs - 1.0\n\n def _extract_features(self, preprocessed_inputs):\n \"\"\"Extract features from preprocessed inputs.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n\n Returns:\n feature_maps: a list of tensors where the ith tensor has shape\n [batch, height_i, width_i, depth_i]\n \"\"\"\n preprocessed_inputs = shape_utils.check_min_image_dim(\n 33, preprocessed_inputs)\n\n image_features = self.mobilenet_v2(\n ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))\n\n feature_maps = self.feature_map_generator({\n 'layer_15/expansion_output': image_features[0],\n 'layer_19': image_features[1]})\n\n return feature_maps.values()\n"
] | [
[
"tensorflow.saved_model.loader.load"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.train.latest_checkpoint",
"tensorflow.range",
"tensorflow.io.gfile.exists",
"tensorflow.train.Checkpoint",
"tensorflow.compat.v2.summary.create_file_writer",
"tensorflow.io.gfile.makedirs",
"tensorflow.compat.v2.summary.create_noop_writer",
"tensorflow.keras.optimizers.Adam",
"tensorflow.name_scope",
"tensorflow.keras.metrics.Mean",
"tensorflow.GradientTape"
],
[
"tensorflow.compat.v1.not_equal",
"tensorflow.compat.v1.metrics.mean",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.py_func",
"tensorflow.compat.v1.shape",
"numpy.mean",
"tensorflow.compat.v1.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.to_int32",
"tensorflow.compat.v1.nn.top_k",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.maximum",
"numpy.float32",
"tensorflow.compat.v1.zeros_like",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.name_scope",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.log",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.argmax",
"tensorflow.compat.v1.to_float",
"tensorflow.compat.v1.pad"
],
[
"numpy.clip",
"tensorflow.gfile.Open",
"matplotlib.use",
"tensorflow.global_variables",
"matplotlib.pyplot.get_cmap",
"numpy.percentile",
"numpy.delete",
"tensorflow.trainable_variables",
"numpy.array",
"tensorflow.contrib.framework.list_variables"
],
[
"tensorflow.Graph",
"tensorflow.gfile.DeleteRecursively",
"tensorflow.gfile.Exists",
"tensorflow.gfile.GFile",
"tensorflow.placeholder",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.gfile.MakeDirs",
"tensorflow.gfile.Remove",
"tensorflow.Session",
"tensorflow.image.decode_jpeg"
],
[
"tensorflow.variable_scope",
"tensorflow.concat"
],
[
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.reduce_mean",
"tensorflow.compat.v2.GradientTape"
],
[
"tensorflow.distribute.experimental.TPUStrategy",
"numpy.argmax",
"numpy.shape",
"numpy.equal",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"numpy.array",
"tensorflow.distribute.MirroredStrategy"
],
[
"tensorflow.import_graph_def",
"tensorflow.train.latest_checkpoint",
"tensorflow.gfile.FastGFile",
"tensorflow.train.SaverDef",
"numpy.linalg.norm",
"tensorflow.logging.info",
"tensorflow.train.Saver",
"tensorflow.gfile.IsDirectory",
"numpy.array",
"numpy.zeros",
"tensorflow.GraphDef"
],
[
"tensorflow.io.gfile.GFile",
"tensorflow.test.main"
],
[
"tensorflow.io.TFRecordWriter",
"tensorflow.io.gfile.GFile",
"tensorflow.io.gfile.glob",
"tensorflow.train.Features"
],
[
"tensorflow.trainable_variables"
],
[
"tensorflow.global_variables_initializer",
"tensorflow.placeholder",
"tensorflow.test.main",
"tensorflow.zeros"
],
[
"numpy.random.rand",
"tensorflow.random_uniform",
"tensorflow.test.main"
],
[
"tensorflow.test.main",
"tensorflow.test.get_temp_dir"
],
[
"tensorflow.python.platform.app.run",
"numpy.expand_dims",
"numpy.all",
"numpy.concatenate",
"numpy.max",
"tensorflow.python.platform.flags.DEFINE_integer",
"numpy.zeros_like",
"numpy.any",
"tensorflow.python.platform.flags.DEFINE_string",
"tensorflow.python.platform.gfile.Remove",
"numpy.where",
"tensorflow.contrib.slim.evaluation.wait_for_new_checkpoint",
"numpy.reshape",
"numpy.arange",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.min",
"tensorflow.python.platform.gfile.Copy",
"matplotlib.pyplot.savefig",
"matplotlib.animation.FuncAnimation",
"numpy.random.RandomState",
"numpy.array",
"matplotlib.use",
"tensorflow.python.platform.flags.DEFINE_bool",
"numpy.sort",
"matplotlib.pyplot.set_cmap",
"matplotlib.pyplot.setp",
"numpy.mod"
],
[
"numpy.square",
"numpy.ones_like",
"numpy.sqrt",
"tensorflow.as_dtype",
"numpy.abs",
"numpy.ones",
"numpy.round",
"numpy.random.uniform",
"numpy.array",
"tensorflow.tile"
],
[
"tensorflow.variable_scope"
],
[
"tensorflow.cond",
"tensorflow.reduce_max",
"tensorflow.shape",
"tensorflow.reduce_mean",
"tensorflow.reduce_sum",
"tensorflow.zeros",
"tensorflow.reshape",
"tensorflow.equal",
"tensorflow.stop_gradient",
"tensorflow.is_inf",
"tensorflow.zeros_like",
"tensorflow.reduce_logsumexp",
"tensorflow.log",
"tensorflow.square",
"tensorflow.to_float",
"tensorflow.tile"
],
[
"tensorflow.Graph",
"tensorflow.transpose",
"tensorflow.compat.v1.global_variables",
"tensorflow.random.uniform",
"tensorflow.test.main",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.placeholder",
"numpy.random.uniform",
"tensorflow.argmax",
"tensorflow.compat.v1.reset_default_graph"
],
[
"tensorflow.test.main",
"tensorflow.gfile.Open"
],
[
"numpy.abs",
"numpy.ones",
"numpy.max",
"numpy.argpartition",
"numpy.zeros",
"numpy.sum"
],
[
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.to_int32",
"tensorflow.linspace",
"tensorflow.add_n",
"tensorflow.floor",
"tensorflow.gather",
"tensorflow.to_float",
"tensorflow.matmul",
"tensorflow.shape",
"tensorflow.less_equal",
"tensorflow.clip_by_value",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.slice",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.variable_scope",
"tensorflow.greater_equal"
],
[
"tensorflow.constant",
"tensorflow.test.main"
],
[
"tensorflow.Graph",
"tensorflow.random_uniform",
"tensorflow.test.main"
],
[
"numpy.dtype"
],
[
"numpy.split",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.stack",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.train.AdamOptimizer",
"tensorflow.where",
"tensorflow.group",
"tensorflow.add_n",
"tensorflow.Variable",
"numpy.clip",
"tensorflow.get_collection",
"tensorflow.check_numerics",
"tensorflow.gradients",
"tensorflow.squeeze",
"tensorflow.stop_gradient",
"scipy.misc.logsumexp",
"tensorflow.to_float",
"tensorflow.square",
"tensorflow.tile",
"numpy.log",
"tensorflow.nn.sigmoid",
"tensorflow.contrib.slim.arg_scope",
"tensorflow.shape",
"tensorflow.exp",
"tensorflow.placeholder",
"tensorflow.contrib.slim.fully_connected",
"tensorflow.no_op",
"tensorflow.split",
"tensorflow.contrib.training.HParams",
"tensorflow.clip_by_value",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.python.ops.init_ops.zeros_initializer",
"tensorflow.expand_dims",
"tensorflow.random_uniform",
"tensorflow.contrib.slim.variance_scaling_initializer"
],
[
"tensorflow.train.AdamOptimizer",
"tensorflow.estimator.RunConfig",
"tensorflow.group",
"tensorflow.keras.backend.ctc_label_dense_to_sparse",
"tensorflow.get_collection",
"tensorflow.squeeze",
"tensorflow.train.get_or_create_global_step",
"tensorflow.logging.set_verbosity",
"tensorflow.to_float",
"tensorflow.argmax",
"tensorflow.estimator.Estimator",
"tensorflow.shape",
"tensorflow.logging.info",
"tensorflow.set_random_seed",
"tensorflow.nn.ctc_loss",
"tensorflow.nn.softmax",
"tensorflow.multiply",
"tensorflow.transpose",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.keras.backend.epsilon"
],
[
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.app.flags.DEFINE_enum",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.logging.set_verbosity",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.app.run"
],
[
"tensorflow.python.framework.ops.RegisterGradient"
],
[
"tensorflow.variable_scope",
"tensorflow.nn.l2_normalize",
"tensorflow.contrib.layers.l2_regularizer"
],
[
"tensorflow.python.platform.flags.DEFINE_float",
"tensorflow.python.platform.flags.DEFINE_string",
"tensorflow.python.platform.flags.DEFINE_integer",
"tensorflow.python.platform.flags.DEFINE_bool"
],
[
"tensorflow.executing_eagerly",
"tensorflow.constant",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.train.get_or_create_global_step",
"tensorflow.contrib.opt.MovingAverageOptimizer",
"tensorflow.train.MomentumOptimizer",
"tensorflow.keras.optimizers.Adam",
"tensorflow.train.AdamOptimizer",
"tensorflow.keras.optimizers.SGD"
],
[
"tensorflow.nn.relu",
"tensorflow.image.resize_bilinear",
"tensorflow.device",
"tensorflow.layers.batch_normalization",
"tensorflow.constant",
"tensorflow.concat",
"tensorflow.reduce_mean",
"tensorflow.nn.max_pool",
"tensorflow.maximum",
"tensorflow.get_collection",
"tensorflow.reshape",
"tensorflow.identity",
"numpy.sqrt",
"numpy.int",
"tensorflow.pad",
"tensorflow.variable_scope",
"tensorflow.add_to_collection",
"tensorflow.nn.conv2d"
],
[
"tensorflow.keras.Model"
]
] |
tonyduan/ge-vae | [
"fe3325cb643900d09536b3e1d964443d25625781"
] | [
"src/models/ep.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torch.distributions import Bernoulli\nfrom src.modules.attn import MAB, PMA, SAB, ISAB, ISABStack\nfrom src.utils import *\nfrom src.modules.mlp import *\n\n\nclass EdgePredictor(nn.Module):\n\n def __init__(self, embedding_dim, device):\n super().__init__()\n self.pairwise_query = ISABStack(8, embedding_dim, 256, num_heads = 4, \n num_inds = 16, device = device)\n self.device = device\n self.baseline = nn.Parameter(torch.zeros(1, device = device))\n self.scale1 = nn.Parameter(torch.zeros(1, device = device))\n\n def forward(self, E, V):\n mask = construct_embedding_mask(V).byte()\n Z1 = self.pairwise_query(E, mask)\n F = Z1 @ Z1.transpose(1, 2)\n return F * torch.exp(self.scale1) + self.baseline\n\n def log_prob_per_edge(self, E, A, V):\n mask = construct_adjacency_mask(V)\n counts = V * (V - 1) / 2\n loss = Bernoulli(logits = self.forward(E, V)).log_prob(A)\n loss = torch.sum(torch.triu(loss, diagonal = 1) * mask, dim = (1, 2))\n return loss #/ counts\n\n"
] | [
[
"torch.exp",
"torch.triu",
"torch.zeros"
]
] |
kunalghosh/Multi_Fidelity_Prediction_GP | [
"c858554f5c1f0c4aafa12cf7c441bd2d56b115f5",
"c858554f5c1f0c4aafa12cf7c441bd2d56b115f5"
] | [
"mfgp/task2/init_train_idxs.py",
"mfgp/task2/train_gp.py"
] | [
"# Run `init_train_idxs.py <int: dataset size> <int: initial training set size>`:\n# Creates a `train_idxs.npz` file with the initial set of training indices.\n# e.g. `python init_train_idxs.py 64000 1000`\n\nimport sys\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\ndataset_size = int(sys.argv[1])\ninit_trainset_size = int(sys.argv[2])\nvalidation_set_size = 500 # usually training set is much larger so 500 is reasonable\n\nnp.random.seed(1)\n\ntrain_idxs, remaining_idxs = train_test_split(range(dataset_size), train_size = init_trainset_size, random_state=0)\nvalid_idxs, test_idxs = train_test_split(remaining_idxs, train_size = validation_set_size, random_state=0)\n\n# save the values in train_idxs.npz\nnp.savez(\"train_idxs.npz\", train_idxs=train_idxs, valid_idxs=valid_idxs, test_idxs=test_idxs)\n",
"# Run `train_gp.py train_idxs.npz` : Trains an exact GP on the entire list of \n# indices in `train_idxs.npz`. If there are multiple rows it is flattened and\n# the training is done on the entire dataset. \n# This also generates a new file `predictive_means_and_vars_iter{i}.npy`\n\nimport numpy as np\nnp.random.seed(1)\n\n\n"
] | [
[
"numpy.savez",
"sklearn.model_selection.train_test_split",
"numpy.random.seed"
],
[
"numpy.random.seed"
]
] |
hhy-ee/PedestrianDetection-NohNMS | [
"482078a6bd0ff8cf03fbf7f6988e475f75c56e57",
"482078a6bd0ff8cf03fbf7f6988e475f75c56e57",
"482078a6bd0ff8cf03fbf7f6988e475f75c56e57"
] | [
"tools/visualize_json_results.py",
"detectron2/modeling/proposal_generator/rpn.py",
"dqrf/criterion.py"
] | [
"#!/usr/bin/env python\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport argparse\nimport json\nimport numpy as np\nimport os\nfrom collections import defaultdict\nimport cv2\nimport tqdm\nfrom fvcore.common.file_io import PathManager\n\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nfrom detectron2.structures import Boxes, BoxMode, Instances\nfrom detectron2.utils.logger import setup_logger\nfrom detectron2.utils.visualizer import Visualizer\n\n\ndef create_instances(predictions, image_size):\n ret = Instances(image_size)\n\n score = np.asarray([x[\"score\"] for x in predictions])\n chosen = (score > args.conf_threshold).nonzero()[0]\n if chosen.shape[0] == 0:\n return None\n score = score[chosen]\n bbox = np.asarray([predictions[i][\"bbox\"] for i in chosen])\n bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)\n\n labels = np.asarray([dataset_id_map(predictions[i][\"category_id\"]) for i in chosen])\n\n ret.scores = score\n ret.pred_boxes = Boxes(bbox)\n ret.pred_classes = labels\n\n try:\n ret.pred_masks = [predictions[i][\"segmentation\"] for i in chosen]\n except KeyError:\n pass\n return ret\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"A script that visualizes the json predictions from COCO or LVIS dataset.\"\n )\n parser.add_argument(\"--input\", required=True, help=\"JSON file produced by the model\")\n parser.add_argument(\"--output\", required=True, help=\"output directory\")\n parser.add_argument(\"--dataset\", help=\"name of the dataset\", default=\"coco_2017_val\")\n parser.add_argument(\"--conf-threshold\", default=0.5, type=float, help=\"confidence threshold\")\n args = parser.parse_args()\n\n logger = setup_logger()\n\n with PathManager.open(args.input, \"r\") as f:\n predictions = json.load(f)\n\n pred_by_image = defaultdict(list)\n for p in predictions:\n pred_by_image[p[\"image_id\"]].append(p)\n\n dicts = list(DatasetCatalog.get(args.dataset))\n metadata = MetadataCatalog.get(args.dataset)\n if hasattr(metadata, \"thing_dataset_id_to_contiguous_id\"):\n\n def dataset_id_map(ds_id):\n return metadata.thing_dataset_id_to_contiguous_id[ds_id]\n\n elif \"lvis\" in args.dataset:\n # LVIS results are in the same format as COCO results, but have a different\n # mapping from dataset category id to contiguous category id in [0, #categories - 1]\n def dataset_id_map(ds_id):\n return ds_id - 1\n\n else:\n raise ValueError(\"Unsupported dataset: {}\".format(args.dataset))\n\n os.makedirs(args.output, exist_ok=True)\n\n for dic in tqdm.tqdm(dicts):\n img = cv2.imread(dic[\"file_name\"], cv2.IMREAD_COLOR)[:, :, ::-1]\n basename = os.path.basename(dic[\"file_name\"])\n\n predictions = create_instances(pred_by_image[dic[\"image_id\"]], img.shape[:2])\n if predictions is not None:\n vis = Visualizer(img, metadata)\n vis_pred = vis.draw_instance_predictions(predictions).get_image()\n else:\n vis_pred = img\n\n vis = Visualizer(img, metadata)\n vis_gt = vis.draw_dataset_dict(dic).get_image()\n\n concat = np.concatenate((vis_pred, vis_gt), axis=0)\n cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1])\n",
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom typing import Dict, List\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom detectron2.layers import ShapeSpec\nfrom detectron2.utils.registry import Registry\n\nfrom ..anchor_generator import build_anchor_generator\nfrom ..box_regression import Box2BoxTransform\nfrom ..matcher import Matcher\nfrom .build import PROPOSAL_GENERATOR_REGISTRY\nfrom .rpn_outputs import RPNOutputs, find_top_rpn_proposals, find_top_rpn_proposals_all_level\n\nRPN_HEAD_REGISTRY = Registry(\"RPN_HEAD\")\n\"\"\"\nRegistry for RPN heads, which take feature maps and perform\nobjectness classification and bounding box regression for anchors.\n\nThe registered object will be called with `obj(cfg, input_shape)`.\nThe call should return a `nn.Module` object.\n\"\"\"\n\n\ndef build_rpn_head(cfg, input_shape):\n \"\"\"\n Build an RPN head defined by `cfg.MODEL.RPN.HEAD_NAME`.\n \"\"\"\n name = cfg.MODEL.RPN.HEAD_NAME\n return RPN_HEAD_REGISTRY.get(name)(cfg, input_shape)\n\n\n@RPN_HEAD_REGISTRY.register()\nclass StandardRPNHead(nn.Module):\n \"\"\"\n RPN classification and regression heads. Uses a 3x3 conv to produce a shared\n hidden state from which one 1x1 conv predicts objectness logits for each anchor\n and a second 1x1 conv predicts bounding-box deltas specifying how to deform\n each anchor into an object proposal.\n \"\"\"\n\n def __init__(self, cfg, input_shape: List[ShapeSpec]):\n super().__init__()\n\n # Standard RPN is shared across levels:\n in_channels = [s.channels for s in input_shape]\n assert len(set(in_channels)) == 1, \"Each level must have the same channel!\"\n in_channels = in_channels[0]\n\n # RPNHead should take the same input as anchor generator\n # NOTE: it assumes that creating an anchor generator does not have unwanted side effect.\n anchor_generator = build_anchor_generator(cfg, input_shape)\n num_cell_anchors = anchor_generator.num_cell_anchors\n box_dim = anchor_generator.box_dim\n assert (\n len(set(num_cell_anchors)) == 1\n ), \"Each level must have the same number of cell anchors\"\n num_cell_anchors = num_cell_anchors[0]\n\n # 3x3 conv for the hidden representation\n self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)\n # 1x1 conv for predicting objectness logits\n self.objectness_logits = nn.Conv2d(in_channels, num_cell_anchors, kernel_size=1, stride=1)\n # 1x1 conv for predicting box2box transform deltas\n self.anchor_deltas = nn.Conv2d(\n in_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1\n )\n\n for l in [self.conv, self.objectness_logits, self.anchor_deltas]:\n nn.init.normal_(l.weight, std=0.01)\n nn.init.constant_(l.bias, 0)\n\n def forward(self, features):\n \"\"\"\n Args:\n features (list[Tensor]): list of feature maps\n \"\"\"\n pred_objectness_logits = []\n pred_anchor_deltas = []\n for x in features:\n t = F.relu(self.conv(x))\n pred_objectness_logits.append(self.objectness_logits(t))\n pred_anchor_deltas.append(self.anchor_deltas(t))\n return pred_objectness_logits, pred_anchor_deltas\n\n\n@RPN_HEAD_REGISTRY.register()\nclass SeparateRPNHead(nn.Module):\n \"\"\"\n RPN classification and regression heads. 
Uses a 3x3 conv to produce a shared\n hidden state from which one 1x1 conv predicts objectness logits for each anchor\n and a second 1x1 conv predicts bounding-box deltas specifying how to deform\n each anchor into an object proposal.\n \"\"\"\n\n def __init__(self, cfg, input_shape: List[ShapeSpec]):\n super().__init__()\n\n # Standard RPN is shared across levels:\n in_channels = [s.channels for s in input_shape]\n assert len(set(in_channels)) == 1, \"Each level must have the same channel!\"\n in_channels = in_channels[0]\n\n # RPNHead should take the same input as anchor generator\n # NOTE: it assumes that creating an anchor generator does not have unwanted side effect.\n anchor_generator = build_anchor_generator(cfg, input_shape)\n num_cell_anchors = anchor_generator.num_cell_anchors\n box_dim = anchor_generator.box_dim\n assert (\n len(set(num_cell_anchors)) == 1\n ), \"Each level must have the same number of cell anchors\"\n num_cell_anchors = num_cell_anchors[0]\n\n # 3x3 conv for the hidden representation\n self.cls_conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)\n self.reg_conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)\n # 1x1 conv for predicting objectness logits\n self.objectness_logits = nn.Conv2d(in_channels, num_cell_anchors, kernel_size=1, stride=1)\n # 1x1 conv for predicting box2box transform deltas\n self.anchor_deltas = nn.Conv2d(\n in_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1\n )\n\n for l in [self.cls_conv, self.reg_conv, self.objectness_logits, self.anchor_deltas]:\n nn.init.normal_(l.weight, std=0.01)\n nn.init.constant_(l.bias, 0)\n\n def forward(self, cls_features, reg_features):\n \"\"\"\n Args:\n features (list[Tensor]): list of feature maps\n \"\"\"\n pred_objectness_logits = []\n pred_anchor_deltas = []\n for cls_x, reg_x in zip(cls_features, reg_features):\n cls_t = F.relu(self.cls_conv(cls_x))\n reg_t = F.relu(self.reg_conv(reg_x))\n pred_objectness_logits.append(self.objectness_logits(cls_t))\n pred_anchor_deltas.append(self.anchor_deltas(reg_t))\n return pred_objectness_logits, pred_anchor_deltas\n\n\n@PROPOSAL_GENERATOR_REGISTRY.register()\nclass RPN(nn.Module):\n \"\"\"\n Region Proposal Network, introduced by the Faster R-CNN paper.\n \"\"\"\n\n def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):\n super().__init__()\n\n # fmt: off\n self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE\n self.in_features = cfg.MODEL.RPN.IN_FEATURES\n self.nms_thresh = cfg.MODEL.RPN.NMS_THRESH\n self.batch_size_per_image = cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE\n self.positive_fraction = cfg.MODEL.RPN.POSITIVE_FRACTION\n self.smooth_l1_beta = cfg.MODEL.RPN.SMOOTH_L1_BETA\n self.loss_weight = cfg.MODEL.RPN.LOSS_WEIGHT\n self.ignore_ioa = cfg.MODEL.RPN.IGNORE_IOA\n self.allow_oob = cfg.MODEL.ALLOW_BOX_OUT_OF_BOUNDARY\n self.top_proposals_all_level = cfg.MODEL.RPN.TOP_PROPOSALS_ALL_LEVEL\n self.update_matches = cfg.MODEL.RPN.UPDATE_MATCHES\n self.get_gt_per_level = cfg.MODEL.RPN.GET_GT_PER_LEVEL\n self.ignore_ambiguous_sample = cfg.MODEL.RPN.IGNORE_AMBIGUOUS_SAMPLE\n # fmt: on\n\n # Map from self.training state to train/test settings\n self.pre_nms_topk = {\n True: cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN,\n False: cfg.MODEL.RPN.PRE_NMS_TOPK_TEST,\n }\n self.post_nms_topk = {\n True: cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN,\n False: cfg.MODEL.RPN.POST_NMS_TOPK_TEST,\n }\n self.boundary_threshold = cfg.MODEL.RPN.BOUNDARY_THRESH\n\n self.anchor_generator = build_anchor_generator(\n cfg, 
[input_shape[f] for f in self.in_features]\n )\n self.box2box_transform = Box2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)\n self.anchor_matcher = Matcher(\n cfg.MODEL.RPN.IOU_THRESHOLDS,\n cfg.MODEL.RPN.IOU_LABELS,\n allow_low_quality_matches=True,\n update_matches=self.update_matches,\n )\n self.rpn_head = build_rpn_head(cfg, [input_shape[f] for f in self.in_features])\n\n def forward(self, images, features, gt_instances=None):\n \"\"\"\n Args:\n images (ImageList): input images of length `N`\n features (dict[str: Tensor]): input data as a mapping from feature\n map name to tensor. Axis 0 represents the number of images `N` in\n the input data; axes 1-3 are channels, height, and width, which may\n vary between feature maps (e.g., if a feature pyramid is used).\n gt_instances (list[Instances], optional): a length `N` list of `Instances`s.\n Each `Instances` stores ground-truth instances for the corresponding image.\n\n Returns:\n proposals: list[Instances]: contains fields \"proposal_boxes\", \"objectness_logits\"\n loss: dict[Tensor] or None\n \"\"\"\n # gt_boxes = [x.gt_boxes for x in gt_instances] if gt_instances is not None else None\n gt_boxes = (\n [x.gt_boxes[x.gt_classes != -1] for x in gt_instances]\n if gt_instances is not None\n else None\n )\n ignore_gt_boxes = (\n [x.gt_boxes[x.gt_classes == -1] for x in gt_instances]\n if gt_instances is not None\n else None\n )\n del gt_instances\n if isinstance(features, dict):\n features = [features[f] for f in self.in_features]\n pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)\n anchors = self.anchor_generator(features)\n else:\n cls_features, reg_features = features\n cls_features = [cls_features[f] for f in self.in_features]\n reg_features = [reg_features[f] for f in self.in_features]\n pred_objectness_logits, pred_anchor_deltas = self.rpn_head(cls_features, reg_features)\n anchors = self.anchor_generator(cls_features)\n # TODO: The anchors only depend on the feature map shape; there's probably\n # an opportunity for some optimizations (e.g., caching anchors).\n outputs = RPNOutputs(\n self.box2box_transform,\n self.anchor_matcher,\n self.batch_size_per_image,\n self.positive_fraction,\n images,\n pred_objectness_logits,\n pred_anchor_deltas,\n anchors,\n self.boundary_threshold,\n gt_boxes,\n self.smooth_l1_beta,\n ignore_gt_boxes,\n ignore_ioa=self.ignore_ioa,\n get_gt_per_level=self.get_gt_per_level,\n ignore_ambiguous_sample=self.ignore_ambiguous_sample,\n )\n\n if self.training:\n losses = {k: v * self.loss_weight for k, v in outputs.losses().items()}\n else:\n losses = {}\n with torch.no_grad():\n # Find the top proposals by applying NMS and removing boxes that\n # are too small. The proposals are treated as fixed for approximate\n # joint training with roi heads. This approach ignores the derivative\n # w.r.t. 
the proposal boxes’ coordinates that are also network\n # responses, so is approximate.\n if self.top_proposals_all_level:\n proposals = find_top_rpn_proposals_all_level(\n outputs.predict_proposals(),\n outputs.predict_objectness_logits(),\n images,\n self.nms_thresh,\n self.pre_nms_topk[self.training],\n self.post_nms_topk[self.training],\n self.min_box_side_len,\n self.training,\n allow_oob=self.allow_oob,\n )\n else:\n proposals = find_top_rpn_proposals(\n outputs.predict_proposals(),\n outputs.predict_objectness_logits(),\n images,\n self.nms_thresh,\n self.pre_nms_topk[self.training],\n self.post_nms_topk[self.training],\n self.min_box_side_len,\n self.training,\n allow_oob=self.allow_oob,\n )\n\n return proposals, losses\n",
"# ------------------------------------------------------------------------\n# Modified by Matthieu Lin\n# Modified from Deformable-DETR (https://github.com/fundamentalvision/Deformable-DETR)\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\nfrom torch import nn\nimport torch\nimport copy\nimport torch.nn.functional as F\nfrom dqrf.utils.box_ops import generalized_box_iou, box_cxcywh_to_xyxy\nfrom dqrf.utils.utils import accuracy, is_dist_avail_and_initialized\nimport detectron2.utils.comm as comm\nfrom detectron2.utils.events import get_event_storage\n\n\ndef sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n return loss.mean(1).sum() / num_boxes\n\nclass SetCriterion(nn.Module):\n \"\"\"\n only changed cost class and cls loss coeff to 2\n \"\"\"\n\n def __init__(self, cfg, matcher):\n \"\"\" Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. 
See get_loss for list of available losses.\n \"\"\"\n super().__init__()\n self.num_classes = cfg.MODEL.DQRF_DETR.NUM_CLASSES\n self.matcher = matcher\n self.weight_dict = {'loss_ce': cfg.MODEL.DQRF_DETR.COST_CLASS,\n 'loss_bbox': cfg.MODEL.DQRF_DETR.COST_BBOX,\n 'loss_giou': cfg.MODEL.DQRF_DETR.COST_GIOU,\n }\n # TODO this is a hack\n if cfg.MODEL.DQRF_DETR.AUX_LOSS:\n aux_weight_dict = {}\n for i in range(cfg.MODEL.DQRF_DETR.NUM_DECODER_LAYERS - 1):\n aux_weight_dict.update({k + f'_{i}': v for k, v in self.weight_dict.items()})\n self.weight_dict.update(aux_weight_dict)\n self.losses = ['labels', 'boxes', 'cardinality']\n self.focal_alpha = cfg.MODEL.DQRF_DETR.FOCAL_ALPHA\n self.gammma = cfg.MODEL.DQRF_DETR.GAMMA\n\n def loss_labels(self, outputs, targets, indices, num_boxes, log=True):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert 'pred_logits' in outputs\n src_logits = outputs['pred_logits'] # [bs, nquery, #class]\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(src_logits.shape[:2], self.num_classes, # bs, nquery\n dtype=torch.int64, device=src_logits.device)\n target_classes[idx] = target_classes_o\n\n\n target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],\n dtype=src_logits.dtype, layout=src_logits.layout,\n device=src_logits.device)\n target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)\n\n target_classes_onehot = target_classes_onehot[:, :, :-1]\n loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha,\n gamma=2) * src_logits.shape[1]\n\n losses = {'loss_ce': loss_ce}\n\n if log:\n # TODO this should probably be a separate loss, not hacked in this one here\n losses['loss_class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]\n return losses\n\n @torch.no_grad()\n def loss_cardinality(self, outputs, targets, indices, num_boxes):\n \"\"\" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes\n This is not really a loss, it is intended for logging purposes only. 
It doesn't propagate gradients\n \"\"\"\n pred_logits = outputs['pred_logits']\n device = pred_logits.device\n tgt_lengths = torch.as_tensor([len(v[\"labels\"]) for v in targets], device=device)\n # Count the number of predictions that are NOT \"no-object\" (which is the last class)\n card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)\n card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())\n losses = {'loss_cardinality_error': card_err} # must add loss for detectron2 to log it\n return losses\n\n def loss_boxes(self, outputs, targets, indices, num_boxes):\n \"\"\"Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n targets dicts must contain the key \"boxes\" containing a tensor of dim [nb_target_boxes, 4]\n The target boxes are expected in format (center_x, center_y, h, w), normalized by the image size.\n \"\"\"\n assert 'pred_boxes' in outputs\n idx = self._get_src_permutation_idx(indices)\n src_boxes = outputs['pred_boxes'][idx] # [#matched query, 4] in order\n target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) # [#boxes, 4] in order\n # print(src_boxes.size())\n loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')\n\n losses = {}\n losses['loss_bbox'] = loss_bbox.sum() / num_boxes\n\n # diag since we have already matched each src boxes to its target\n loss_giou = 1 - torch.diag(generalized_box_iou( # [#matched query, #boxes]\n box_cxcywh_to_xyxy(src_boxes),\n box_cxcywh_to_xyxy(target_boxes)))\n\n losses['loss_giou'] = loss_giou.sum() / num_boxes\n\n return losses\n\n def _get_src_permutation_idx(self, indices):\n # permute predictions following indices\n batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n def _get_tgt_permutation_idx(self, indices):\n # permute targets following indices\n batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n return batch_idx, tgt_idx\n\n def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):\n loss_map = {\n 'labels': self.loss_labels,\n 'cardinality': self.loss_cardinality,\n 'boxes': self.loss_boxes,\n }\n assert loss in loss_map, f'do you really want to compute {loss} loss?'\n return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)\n\n def forward(self, outputs, targets):\n \"\"\" This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n \"\"\"\n\n outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs' and k != 'enc_outputs'}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / comm.get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n 
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if 'aux_outputs' in outputs:\n for i, aux_outputs in enumerate(outputs['aux_outputs']):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n if loss == 'masks':\n # Intermediate masks losses are too costly to compute, we ignore them.\n continue\n kwargs = {}\n if loss == 'labels':\n # Logging is enabled only for the last layer\n kwargs = {'log': False}\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n l_dict = {k + f'_{i}': v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses\n\n\n"
] | [
[
"numpy.asarray",
"numpy.concatenate"
],
[
"torch.nn.init.constant_",
"torch.no_grad",
"torch.nn.Conv2d",
"torch.nn.init.normal_"
],
[
"torch.nn.functional.l1_loss",
"torch.full",
"torch.zeros",
"torch.cat",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.no_grad",
"torch.full_like",
"torch.distributed.all_reduce"
]
] |
joshchang1112/gcnn-survey-paper | [
"591af8d6c4374378831cab2cdec79575e2540d79"
] | [
"utils/data_utils.py"
] | [
"#Copyright 2018 Google LLC\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\n\n\"\"\"Utils functions to load and process citation data.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport pickle as pkl\nimport sys\n\nimport networkx as nx\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy.sparse.linalg.eigen.arpack import eigsh\nimport tensorflow as tf\nfrom third_party.gcn.gcn.utils import normalize_adj\nfrom third_party.gcn.gcn.utils import parse_index_file\nfrom third_party.gcn.gcn.utils import sample_mask\nfrom third_party.gcn.gcn.utils import sparse_to_tuple\nfrom third_party.gcn.gcn.utils import preprocess_features\n\n\ndef load_test_edge_mask(dataset_str, data_path, drop_edge_prop):\n \"\"\"Remove test edges by loading edge masks.\"\"\"\n edge_mask_path = os.path.join(\n data_path, 'emask.{}.remove{}.npz'.format(dataset_str, drop_edge_prop))\n with tf.gfile.Open(edge_mask_path) as f:\n mask = sp.load_npz(f)\n return mask\n\n\ndef load_edge_masks(dataset_str, data_path, adj_true, drop_edge_prop):\n \"\"\"Loads adjacency matrix as sparse matrix and masks for val & test links.\n\n Args:\n dataset_str: dataset to use\n data_path: path to data folder\n adj_true: true adjacency matrix in dense format,\n drop_edge_prop: proportion of edges to remove.\n\n Returns:\n adj_matrix: adjacency matrix\n train_mask: mask for train edges\n val_mask: mask for val edges\n test_mask: mask for test edges\n \"\"\"\n edge_mask_path = os.path.join(\n data_path, 'emask.{}.remove{}.'.format(dataset_str, drop_edge_prop))\n val_mask = sp.load_npz(edge_mask_path + 'val.npz')\n test_mask = sp.load_npz(edge_mask_path + 'test.npz')\n train_mask = 1. - val_mask.todense() - test_mask.todense()\n # remove val and test edges from true A\n adj_train = np.multiply(adj_true, train_mask)\n train_mask -= np.eye(train_mask.shape[0])\n return adj_train, sparse_to_tuple(val_mask), sparse_to_tuple(\n val_mask), sparse_to_tuple(test_mask)\n\n\ndef add_top_k_edges(data, edge_mask_path, gae_scores_path, topk, nb_nodes,\n norm_adj):\n \"\"\"Loads GAE scores and adds topK edges to train adjacency.\"\"\"\n test_mask = sp.load_npz(os.path.join(edge_mask_path, 'test_mask.npz'))\n train_mask = 1. 
- test_mask.todense()\n # remove val and test edges from true A\n adj_train_curr = np.multiply(data['adj_true'], train_mask)\n # Predict test edges using precomputed scores\n scores = np.load(os.path.join(gae_scores_path, 'gae_scores.npy'))\n # scores_mask = 1 - np.eye(nb_nodes)\n scores_mask = np.zeros((nb_nodes, nb_nodes))\n scores_mask[:140, 140:] = 1.\n scores_mask[140:, :140] = 1.\n scores = np.multiply(scores, scores_mask).reshape((-1,))\n threshold = scores[np.argsort(-scores)[topk]]\n adj_train_curr += 1 * (scores > threshold).reshape((nb_nodes, nb_nodes))\n adj_train_curr = 1 * (adj_train_curr > 0)\n if norm_adj:\n adj_train_norm = normalize_adj(data['adj_train'])\n else:\n adj_train_norm = sp.coo_matrix(data['adj_train'])\n return adj_train_curr, sparse_to_tuple(adj_train_norm)\n\n\ndef process_adj(adj, model_name):\n \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n if model_name == 'Cheby':\n laplacian = sp.eye(adj.shape[0]) - normalize_adj(adj - sp.eye(adj.shape[0]))\n # TODO(chamii): compare with\n # normalize_adj(adj)\n largest_eigval, _ = eigsh(laplacian, 1, which='LM')\n laplacian_norm = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])\n return laplacian_norm\n else:\n return normalize_adj(adj)\n\n\ndef load_data(dataset_str, data_path):\n if dataset_str in ['cora', 'citeseer', 'pubmed']:\n return load_citation_data(dataset_str, data_path)\n else:\n return load_ppi_data(data_path)\n\n\ndef load_ppi_data(data_path):\n \"\"\"Load PPI dataset.\"\"\"\n with tf.gfile.Open(os.path.join(data_path, 'ppi.edges.npz')) as f:\n adj = sp.load_npz(f)\n\n with tf.gfile.Open(os.path.join(data_path, 'ppi.features.norm.npy')) as f:\n features = np.load(f)\n\n with tf.gfile.Open(os.path.join(data_path, 'ppi.labels.npz')) as f:\n labels = sp.load_npz(f).todense()\n\n train_mask = np.load(\n tf.gfile.Open(os.path.join(data_path, 'ppi.train_mask.npy'))) > 0\n # NOTE: the validation mask is loaded from the test-mask file here,\n # so the val and test splits coincide for PPI.\n val_mask = np.load(\n tf.gfile.Open(os.path.join(data_path, 'ppi.test_mask.npy'))) > 0\n test_mask = np.load(\n tf.gfile.Open(os.path.join(data_path, 'ppi.test_mask.npy'))) > 0\n\n return adj, features, labels, train_mask, val_mask, test_mask\n\n\ndef load_citation_data(dataset_str, data_path):\n \"\"\"Load data.\"\"\"\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = {}\n for name in names:\n with tf.gfile.Open(\n os.path.join(data_path, 'ind.{}.{}'.format(dataset_str, name)),\n 'rb') as f:\n if sys.version_info > (3, 0):\n objects[name] = pkl.load(f) # , encoding='latin1') comment to pass lint\n else:\n objects[name] = pkl.load(f)\n\n test_idx_reorder = parse_index_file(\n os.path.join(data_path, 'ind.{}.test.index'.format(dataset_str)))\n test_idx_range = np.sort(test_idx_reorder)\n\n if dataset_str == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph)\n # Find isolated nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(\n min(test_idx_reorder),\n max(test_idx_reorder) + 1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full),\n objects['x'].shape[1]))\n tx_extended[test_idx_range - min(test_idx_range), :] = objects['tx']\n objects['tx'] = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full),\n objects['y'].shape[1]))\n ty_extended[test_idx_range - min(test_idx_range), :] = objects['ty']\n objects['ty'] = ty_extended\n\n features = sp.vstack((objects['allx'], objects['tx'])).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(objects['graph']))\n\n labels = 
np.vstack((objects['ally'], objects['ty']))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(objects['y']))\n idx_val = range(len(objects['y']), len(objects['y']) + 500)\n\n train_mask = sample_mask(idx_train, labels.shape[0])\n val_mask = sample_mask(idx_val, labels.shape[0])\n test_mask = sample_mask(idx_test, labels.shape[0])\n\n features = preprocess_features(features)\n return adj, features, labels, train_mask, val_mask, test_mask\n\n\ndef construct_feed_dict(adj_normalized, adj, features, placeholders):\n # construct feed dictionary\n feed_dict = dict()\n feed_dict.update({placeholders['features']: features})\n feed_dict.update({placeholders['adj']: adj_normalized})\n feed_dict.update({placeholders['adj_orig']: adj})\n return feed_dict\n\n\ndef mask_val_test_edges(adj, prop):\n \"\"\"Function to mask test and val edges.\"\"\"\n # NOTE: Splits are randomized and results might slightly\n # deviate from reported numbers in the paper.\n\n # Remove diagonal elements\n adj = adj - sp.dia_matrix(\n (adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)\n adj.eliminate_zeros()\n # Check that diag is zero:\n assert np.diag(adj.todense()).sum() == 0\n\n adj_triu = sp.triu(adj)\n adj_tuple = sparse_to_tuple(adj_triu)\n edges = adj_tuple[0]\n edges_all = sparse_to_tuple(adj)[0]\n num_test = int(np.floor(edges.shape[0] * prop))\n # num_val = int(np.floor(edges.shape[0] * 0.05)) # we keep 5% for validation\n # we keep 5% of the remaining edges for validation\n num_val = int(np.floor((edges.shape[0] - num_test) * 0.05))\n\n # list() so np.random.shuffle can permute the indices in place (Python 3)\n all_edge_idx = list(range(edges.shape[0]))\n np.random.shuffle(all_edge_idx)\n val_edge_idx = all_edge_idx[:num_val]\n test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]\n test_edges = edges[test_edge_idx]\n val_edges = edges[val_edge_idx]\n train_edges = np.delete(\n edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)\n\n def ismember(a, b, tol=5):\n rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)\n return np.any(rows_close)\n\n test_edges_false = []\n while len(test_edges_false) < len(test_edges):\n idx_i = np.random.randint(0, adj.shape[0])\n idx_j = np.random.randint(0, adj.shape[0])\n if idx_i == idx_j:\n continue\n if ismember([idx_i, idx_j], edges_all):\n continue\n if test_edges_false:\n if ismember([idx_j, idx_i], np.array(test_edges_false)):\n continue\n if ismember([idx_i, idx_j], np.array(test_edges_false)):\n continue\n test_edges_false.append([idx_i, idx_j])\n\n val_edges_false = []\n while len(val_edges_false) < len(val_edges):\n idx_i = np.random.randint(0, adj.shape[0])\n idx_j = np.random.randint(0, adj.shape[0])\n if idx_i == idx_j:\n continue\n if ismember([idx_i, idx_j], train_edges):\n continue\n if ismember([idx_j, idx_i], train_edges):\n continue\n if ismember([idx_i, idx_j], val_edges):\n continue\n if ismember([idx_j, idx_i], val_edges):\n continue\n if val_edges_false:\n if ismember([idx_j, idx_i], np.array(val_edges_false)):\n continue\n if ismember([idx_i, idx_j], np.array(val_edges_false)):\n continue\n val_edges_false.append([idx_i, idx_j])\n\n assert ~ismember(test_edges_false, edges_all)\n assert ~ismember(val_edges_false, edges_all)\n assert ~ismember(val_edges, train_edges)\n assert ~ismember(test_edges, train_edges)\n assert ~ismember(val_edges, test_edges)\n\n data = np.ones(train_edges.shape[0])\n\n # Re-build adj matrix\n adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])),\n shape=adj.shape)\n adj_train = adj_train + 
adj_train.T\n\n # NOTE: these edge lists only contain single direction of edge!\n num_nodes = adj.shape[0]\n val_mask = np.zeros((num_nodes, num_nodes))\n for i, j in val_edges:\n val_mask[i, j] = 1\n val_mask[j, i] = 1\n for i, j in val_edges_false:\n val_mask[i, j] = 1\n val_mask[j, i] = 1\n test_mask = np.zeros((num_nodes, num_nodes))\n for i, j in test_edges:\n test_mask[i, j] = 1\n test_mask[j, i] = 1\n for i, j in test_edges_false:\n test_mask[i, j] = 1\n test_mask[j, i] = 1\n return adj_train, sparse_to_tuple(val_mask), sparse_to_tuple(test_mask)\n\n\ndef mask_test_edges(adj, prop):\n \"\"\"Function to mask test edges.\n\n Args:\n adj: scipy sparse matrix\n prop: proportion of edges to remove (float in [0, 1])\n\n Returns:\n adj_train: adjacency with edges removed\n test_edges: list of positive and negative test edges\n \"\"\"\n # Remove diagonal elements\n adj = adj - sp.dia_matrix(\n (adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)\n adj.eliminate_zeros()\n # Check that diag is zero:\n assert np.diag(adj.todense()).sum() == 0\n\n adj_triu = sp.triu(adj)\n adj_tuple = sparse_to_tuple(adj_triu)\n edges = adj_tuple[0]\n edges_all = sparse_to_tuple(adj)[0]\n num_test = int(np.floor(edges.shape[0] * prop))\n\n # list() so np.random.shuffle can permute the indices in place (Python 3)\n all_edge_idx = list(range(edges.shape[0]))\n np.random.shuffle(all_edge_idx)\n test_edge_idx = all_edge_idx[:num_test]\n test_edges = edges[test_edge_idx]\n train_edges = np.delete(edges, test_edge_idx, axis=0)\n\n def ismember(a, b, tol=5):\n rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)\n return np.any(rows_close)\n\n test_edges_false = []\n while len(test_edges_false) < len(test_edges):\n idx_i = np.random.randint(0, adj.shape[0])\n idx_j = np.random.randint(0, adj.shape[0])\n if idx_i == idx_j:\n continue\n if ismember([idx_i, idx_j], edges_all):\n continue\n if test_edges_false:\n if ismember([idx_j, idx_i], np.array(test_edges_false)):\n continue\n if ismember([idx_i, idx_j], np.array(test_edges_false)):\n continue\n test_edges_false.append([idx_i, idx_j])\n\n assert ~ismember(test_edges_false, edges_all)\n assert ~ismember(test_edges, train_edges)\n\n data = np.ones(train_edges.shape[0])\n\n # Re-build adj matrix\n adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])),\n shape=adj.shape)\n adj_train = adj_train + adj_train.T\n\n # NOTE: these edge lists only contain single direction of edge!\n num_nodes = adj.shape[0]\n test_mask = np.zeros((num_nodes, num_nodes))\n for i, j in test_edges:\n test_mask[i, j] = 1\n test_mask[j, i] = 1\n for i, j in test_edges_false:\n test_mask[i, j] = 1\n test_mask[j, i] = 1\n return adj_train, sparse_to_tuple(test_mask)\n"
] | [
[
"scipy.sparse.load_npz",
"numpy.round",
"numpy.any",
"scipy.sparse.vstack",
"numpy.random.randint",
"numpy.hstack",
"scipy.sparse.coo_matrix",
"numpy.eye",
"numpy.load",
"numpy.zeros",
"numpy.multiply",
"tensorflow.gfile.Open",
"scipy.sparse.csr_matrix",
"scipy.sparse.linalg.eigen.arpack.eigsh",
"numpy.delete",
"scipy.sparse.triu",
"numpy.floor",
"numpy.argsort",
"numpy.array",
"scipy.sparse.eye",
"numpy.sort",
"numpy.random.shuffle",
"numpy.ones",
"numpy.vstack"
]
] |
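The record above ends with two edge-masking routines (`mask_val_test_edges`, `mask_test_edges`) that hold out a fraction of graph edges, sample an equal number of non-edges as negatives, and rebuild a symmetric training adjacency for link prediction. The sketch below distills that recipe on a toy graph; it is a minimal illustration under stated assumptions, not the record's API, and `split_edges` plus the 4-node cycle are invented for the example.

```python
# Minimal sketch of the edge-splitting recipe used above: keep one
# direction of each undirected edge, hold out a fraction as positive
# test edges, and sample the same number of non-edges as negatives.
import numpy as np
import scipy.sparse as sp


def split_edges(adj, prop=0.25, seed=0):
    rng = np.random.RandomState(seed)
    # upper triangle only, so each undirected edge appears once
    edges = np.array(sp.triu(adj, k=1).nonzero()).T
    rng.shuffle(edges)
    num_test = int(np.floor(len(edges) * prop))
    test_edges, train_edges = edges[:num_test], edges[num_test:]

    # rejection-sample negative (absent, off-diagonal) edges
    dense = adj.toarray()
    negatives = set()
    while len(negatives) < num_test:
        i, j = rng.randint(0, adj.shape[0], size=2)
        if i != j and dense[i, j] == 0:
            negatives.add((min(i, j), max(i, j)))

    # rebuild a symmetric training adjacency, as adj_train + adj_train.T does above
    data = np.ones(len(train_edges))
    adj_train = sp.csr_matrix(
        (data, (train_edges[:, 0], train_edges[:, 1])), shape=adj.shape
    )
    adj_train = adj_train + adj_train.T
    return adj_train, test_edges, sorted(negatives)


# toy 4-node cycle graph
adj = sp.csr_matrix(
    np.array([[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]])
)
adj_train, test_pos, test_neg = split_edges(adj)
print(adj_train.toarray())
print("positive test edges:", test_pos)
print("negative test edges:", test_neg)
```

The rejection loop mirrors the `ismember` checks in the record but uses a set of canonical `(min, max)` pairs, which is sufficient for a small symmetric graph.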
yexianyi/AI_Practice | [
"80499ab3a06ac055641aa069fe1e37864c9e41c4"
] | [
"MachineLearning/DecisionTree/loan_delinquency.py"
] | [
"'''\nDecision Tree\nPredict if it is possible to default on the loan\n'''\nimport numpy as np\nfrom sklearn import tree\n\ndata = np.genfromtxt(\"exercise.csv\", delimiter=\",\")\n# get train data set\nx_data = data[1:, 1:-1]\n# get test data set\ny_data = data[1:, -1]\n\nprint(x_data)\nprint(y_data)\n\n# Create decision tree\ndtree = tree.DecisionTreeClassifier(min_samples_leaf=5)\ndtree.fit(x_data, y_data)\nprint(dtree.score(x_data, y_data))\n"
] | [
[
"sklearn.tree.DecisionTreeClassifier",
"numpy.genfromtxt"
]
] |
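The loan-delinquency record above scores the tree on the same rows it was fitted on, so the printed number is training accuracy. Below is a hedged, self-contained variant: `exercise.csv` is not available here, so synthetic features and labels stand in, and a held-out split is added via scikit-learn's `train_test_split`; all data and names are illustrative.

```python
# Self-contained variant of the record above with a held-out test set.
import numpy as np
from sklearn import tree
from sklearn.model_selection import train_test_split

rng = np.random.RandomState(0)
x_data = rng.rand(200, 4)  # 200 synthetic borrowers, 4 features each
# synthetic label: "defaults" when the first two features sum past 1.0
y_data = (x_data[:, 0] + x_data[:, 1] > 1.0).astype(int)

x_train, x_test, y_train, y_test = train_test_split(
    x_data, y_data, test_size=0.25, random_state=0
)

dtree = tree.DecisionTreeClassifier(min_samples_leaf=5)
dtree.fit(x_train, y_train)
print("train accuracy:", dtree.score(x_train, y_train))
print("test accuracy:", dtree.score(x_test, y_test))
```

Reporting both scores makes any overfitting of the `min_samples_leaf=5` tree visible, which a single training-set score cannot show.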
ffilotto/meshio | [
"4413be41e6a63e33273665986f42dab80d585d10"
] | [
"test/test_flac3d.py"
] | [
"import copy\nimport pathlib\nimport sys\n\nimport helpers\nimport numpy\nimport pytest\n\nimport meshio\n\n\[email protected](\n \"mesh, binary, data\",\n [\n (helpers.tet_mesh, False, []),\n (helpers.hex_mesh, False, []),\n (helpers.tet_mesh, False, [1, 2]),\n (helpers.tet_mesh, True, []),\n (helpers.hex_mesh, True, []),\n (helpers.tet_mesh, True, [1, 2]),\n ],\n)\ndef test(mesh, binary, data):\n if data:\n mesh = copy.deepcopy(mesh)\n mesh.cell_data[\"flac3d:zone\"] = [numpy.array(data)]\n helpers.write_read(\n lambda f, m: meshio.flac3d.write(f, m, binary=binary),\n meshio.flac3d.read,\n mesh,\n 1.0e-15,\n )\n\n\n# the failure perhaps has to do with dictionary ordering\[email protected](sys.version_info < (3, 6), reason=\"Fails with 3.5\")\[email protected](\n \"filename\", [\"flac3d_mesh_ex.f3grid\", \"flac3d_mesh_ex_bin.f3grid\"],\n)\ndef test_reference_file(filename):\n this_dir = pathlib.Path(__file__).resolve().parent\n filename = this_dir / \"meshes\" / \"flac3d\" / filename\n\n mesh = meshio.read(filename)\n\n # points\n assert numpy.isclose(mesh.points.sum(), 307.0)\n\n # cells\n ref_num_cells = [\n (\"hexahedron\", 45),\n (\"pyramid\", 9),\n (\"hexahedron\", 18),\n (\"wedge\", 9),\n (\"hexahedron\", 6),\n (\"wedge\", 3),\n (\"hexahedron\", 6),\n (\"wedge\", 3),\n (\"pyramid\", 6),\n (\"tetra\", 3),\n ]\n assert [(k, len(v)) for k, v in mesh.cells] == ref_num_cells\n # Cell data\n ref_sum_cell_data = [45, 9, 18, 9, 6, 3, 6, 3, 6, 3]\n assert [len(arr) for arr in mesh.cell_data[\"flac3d:zone\"]] == ref_sum_cell_data\n"
] | [
[
"numpy.array"
]
] |
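The meshio test above delegates the actual write/read/compare cycle to `helpers.write_read`. A minimal sketch of that round-trip on a one-tetrahedron mesh follows, using only the `meshio.flac3d.write` and `meshio.read` calls that appear in the test itself; the temporary file name is an assumption, and the `for _, v in mesh.cells` unpacking assumes the same CellBlock layout the test relies on.

```python
# Round-trip sketch: build a tiny mesh, write it as ASCII FLAC3D,
# read it back, and check that points and connectivity survive.
import numpy
import meshio

points = numpy.array(
    [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
)
cells = [("tetra", numpy.array([[0, 1, 2, 3]]))]
mesh = meshio.Mesh(points, cells)

meshio.flac3d.write("tmp.f3grid", mesh, binary=False)  # file name is illustrative
mesh2 = meshio.read("tmp.f3grid")

# order-insensitive checks, in the spirit of the reference-file test above
assert numpy.isclose(mesh.points.sum(), mesh2.points.sum())
assert sum(len(v) for _, v in mesh2.cells) == 1  # one tetrahedron back
print("round-trip OK")
```

This is the same pattern the parametrized `test` applies across tetra and hex meshes, with `helpers.write_read` additionally checking tolerances and cell data.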
aha66/xarray | [
"3cbd21aa8fd3a57c0dd324f2a276d83829518331"
] | [
"xarray/core/dataset.py"
] | [
"import copy\nimport datetime\nimport functools\nimport inspect\nimport sys\nimport warnings\nfrom collections import defaultdict\nfrom distutils.version import LooseVersion\nfrom html import escape\nfrom numbers import Number\nfrom operator import methodcaller\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n DefaultDict,\n Dict,\n Hashable,\n Iterable,\n Iterator,\n List,\n Mapping,\n MutableMapping,\n Optional,\n Sequence,\n Set,\n Tuple,\n TypeVar,\n Union,\n cast,\n overload,\n)\n\nimport numpy as np\nimport pandas as pd\n\nimport xarray as xr\n\nfrom ..coding.cftimeindex import _parse_array_of_cftime_strings\nfrom ..plot.dataset_plot import _Dataset_PlotMethods\nfrom . import (\n alignment,\n dtypes,\n duck_array_ops,\n formatting,\n formatting_html,\n groupby,\n ops,\n resample,\n rolling,\n utils,\n weighted,\n)\nfrom .alignment import _broadcast_helper, _get_broadcast_dims_map_common_coords, align\nfrom .common import (\n DataWithCoords,\n ImplementsDatasetReduce,\n _contains_datetime_like_objects,\n)\nfrom .coordinates import (\n DatasetCoordinates,\n assert_coordinate_consistent,\n remap_label_indexers,\n)\nfrom .duck_array_ops import datetime_to_numeric\nfrom .indexes import (\n Indexes,\n default_indexes,\n isel_variable_and_index,\n propagate_indexes,\n remove_unused_levels_categories,\n roll_index,\n)\nfrom .indexing import is_fancy_indexer\nfrom .merge import (\n dataset_merge_method,\n dataset_update_method,\n merge_coordinates_without_align,\n merge_data_and_coords,\n)\nfrom .missing import get_clean_interp_index\nfrom .options import OPTIONS, _get_keep_attrs\nfrom .pycompat import is_duck_dask_array, sparse_array_type\nfrom .utils import (\n Default,\n Frozen,\n HybridMappingProxy,\n SortedKeysDict,\n _default,\n decode_numpy_dict_values,\n drop_dims_from_indexers,\n either_dict_or_kwargs,\n hashable,\n infix_dims,\n is_dict_like,\n is_scalar,\n maybe_wrap_array,\n)\nfrom .variable import (\n IndexVariable,\n Variable,\n as_variable,\n assert_unique_multiindex_level_names,\n broadcast_variables,\n)\n\nif TYPE_CHECKING:\n from ..backends import AbstractDataStore, ZarrStore\n from .dataarray import DataArray\n from .merge import CoercibleMapping\n\n T_DSorDA = TypeVar(\"T_DSorDA\", DataArray, \"Dataset\")\n\n try:\n from dask.delayed import Delayed\n except ImportError:\n Delayed = None\n\n\n# list of attributes of pd.DatetimeIndex that are ndarrays of time info\n_DATETIMEINDEX_COMPONENTS = [\n \"year\",\n \"month\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"microsecond\",\n \"nanosecond\",\n \"date\",\n \"time\",\n \"dayofyear\",\n \"weekofyear\",\n \"dayofweek\",\n \"quarter\",\n]\n\n\ndef _get_virtual_variable(\n variables, key: Hashable, level_vars: Mapping = None, dim_sizes: Mapping = None\n) -> Tuple[Hashable, Hashable, Variable]:\n \"\"\"Get a virtual variable (e.g., 'time.year' or a MultiIndex level)\n from a dict of xarray.Variable objects (if possible)\n \"\"\"\n if level_vars is None:\n level_vars = {}\n if dim_sizes is None:\n dim_sizes = {}\n\n if key in dim_sizes:\n data = pd.Index(range(dim_sizes[key]), name=key)\n variable = IndexVariable((key,), data)\n return key, key, variable\n\n if not isinstance(key, str):\n raise KeyError(key)\n\n split_key = key.split(\".\", 1)\n var_name: Optional[str]\n if len(split_key) == 2:\n ref_name, var_name = split_key\n elif len(split_key) == 1:\n ref_name, var_name = key, None\n else:\n raise KeyError(key)\n\n if ref_name in level_vars:\n dim_var = 
variables[level_vars[ref_name]]\n ref_var = dim_var.to_index_variable().get_level_variable(ref_name)\n else:\n ref_var = variables[ref_name]\n\n if var_name is None:\n virtual_var = ref_var\n var_name = key\n else:\n if _contains_datetime_like_objects(ref_var):\n ref_var = xr.DataArray(ref_var)\n data = getattr(ref_var.dt, var_name).data\n else:\n data = getattr(ref_var, var_name).data\n virtual_var = Variable(ref_var.dims, data)\n\n return ref_name, var_name, virtual_var\n\n\ndef calculate_dimensions(variables: Mapping[Hashable, Variable]) -> Dict[Hashable, int]:\n \"\"\"Calculate the dimensions corresponding to a set of variables.\n\n Returns dictionary mapping from dimension names to sizes. Raises ValueError\n if any of the dimension sizes conflict.\n \"\"\"\n dims: Dict[Hashable, int] = {}\n last_used = {}\n scalar_vars = {k for k, v in variables.items() if not v.dims}\n for k, var in variables.items():\n for dim, size in zip(var.dims, var.shape):\n if dim in scalar_vars:\n raise ValueError(\n \"dimension %r already exists as a scalar variable\" % dim\n )\n if dim not in dims:\n dims[dim] = size\n last_used[dim] = k\n elif dims[dim] != size:\n raise ValueError(\n \"conflicting sizes for dimension %r: \"\n \"length %s on %r and length %s on %r\"\n % (dim, size, k, dims[dim], last_used[dim])\n )\n return dims\n\n\ndef merge_indexes(\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]],\n variables: Mapping[Hashable, Variable],\n coord_names: Set[Hashable],\n append: bool = False,\n) -> Tuple[Dict[Hashable, Variable], Set[Hashable]]:\n \"\"\"Merge variables into multi-indexes.\n\n Not public API. Used in Dataset and DataArray set_index\n methods.\n \"\"\"\n vars_to_replace: Dict[Hashable, Variable] = {}\n vars_to_remove: List[Hashable] = []\n dims_to_replace: Dict[Hashable, Hashable] = {}\n error_msg = \"{} is not the name of an existing variable.\"\n\n for dim, var_names in indexes.items():\n if isinstance(var_names, str) or not isinstance(var_names, Sequence):\n var_names = [var_names]\n\n names: List[Hashable] = []\n codes: List[List[int]] = []\n levels: List[List[int]] = []\n current_index_variable = variables.get(dim)\n\n for n in var_names:\n try:\n var = variables[n]\n except KeyError:\n raise ValueError(error_msg.format(n))\n if (\n current_index_variable is not None\n and var.dims != current_index_variable.dims\n ):\n raise ValueError(\n \"dimension mismatch between %r %s and %r %s\"\n % (dim, current_index_variable.dims, n, var.dims)\n )\n\n if current_index_variable is not None and append:\n current_index = current_index_variable.to_index()\n if isinstance(current_index, pd.MultiIndex):\n names.extend(current_index.names)\n codes.extend(current_index.codes)\n levels.extend(current_index.levels)\n else:\n names.append(\"%s_level_0\" % dim)\n cat = pd.Categorical(current_index.values, ordered=True)\n codes.append(cat.codes)\n levels.append(cat.categories)\n\n if not len(names) and len(var_names) == 1:\n idx = pd.Index(variables[var_names[0]].values)\n\n else: # MultiIndex\n for n in var_names:\n try:\n var = variables[n]\n except KeyError:\n raise ValueError(error_msg.format(n))\n names.append(n)\n cat = pd.Categorical(var.values, ordered=True)\n codes.append(cat.codes)\n levels.append(cat.categories)\n\n idx = pd.MultiIndex(levels, codes, names=names)\n for n in names:\n dims_to_replace[n] = dim\n\n vars_to_replace[dim] = IndexVariable(dim, idx)\n vars_to_remove.extend(var_names)\n\n new_variables = {k: v for k, v in variables.items() if k not in vars_to_remove}\n 
new_variables.update(vars_to_replace)\n\n # update dimensions if necessary, GH: 3512\n for k, v in new_variables.items():\n if any(d in dims_to_replace for d in v.dims):\n new_dims = [dims_to_replace.get(d, d) for d in v.dims]\n new_variables[k] = v._replace(dims=new_dims)\n new_coord_names = coord_names | set(vars_to_replace)\n new_coord_names -= set(vars_to_remove)\n return new_variables, new_coord_names\n\n\ndef split_indexes(\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n variables: Mapping[Hashable, Variable],\n coord_names: Set[Hashable],\n level_coords: Mapping[Hashable, Hashable],\n drop: bool = False,\n) -> Tuple[Dict[Hashable, Variable], Set[Hashable]]:\n \"\"\"Extract (multi-)indexes (levels) as variables.\n\n Not public API. Used in Dataset and DataArray reset_index\n methods.\n \"\"\"\n if isinstance(dims_or_levels, str) or not isinstance(dims_or_levels, Sequence):\n dims_or_levels = [dims_or_levels]\n\n dim_levels: DefaultDict[Any, List[Hashable]] = defaultdict(list)\n dims = []\n for k in dims_or_levels:\n if k in level_coords:\n dim_levels[level_coords[k]].append(k)\n else:\n dims.append(k)\n\n vars_to_replace = {}\n vars_to_create: Dict[Hashable, Variable] = {}\n vars_to_remove = []\n\n for d in dims:\n index = variables[d].to_index()\n if isinstance(index, pd.MultiIndex):\n dim_levels[d] = index.names\n else:\n vars_to_remove.append(d)\n if not drop:\n vars_to_create[str(d) + \"_\"] = Variable(d, index, variables[d].attrs)\n\n for d, levs in dim_levels.items():\n index = variables[d].to_index()\n if len(levs) == index.nlevels:\n vars_to_remove.append(d)\n else:\n vars_to_replace[d] = IndexVariable(d, index.droplevel(levs))\n\n if not drop:\n for lev in levs:\n idx = index.get_level_values(lev)\n vars_to_create[idx.name] = Variable(d, idx, variables[d].attrs)\n\n new_variables = dict(variables)\n for v in set(vars_to_remove):\n del new_variables[v]\n new_variables.update(vars_to_replace)\n new_variables.update(vars_to_create)\n new_coord_names = (coord_names | set(vars_to_create)) - set(vars_to_remove)\n\n return new_variables, new_coord_names\n\n\ndef _assert_empty(args: tuple, msg: str = \"%s\") -> None:\n if args:\n raise ValueError(msg % args)\n\n\ndef _check_chunks_compatibility(var, chunks, preferred_chunks):\n for dim in var.dims:\n if dim not in chunks or (dim not in preferred_chunks):\n continue\n\n preferred_chunks_dim = preferred_chunks.get(dim)\n chunks_dim = chunks.get(dim)\n\n if isinstance(chunks_dim, int):\n chunks_dim = (chunks_dim,)\n else:\n chunks_dim = chunks_dim[:-1]\n\n if any(s % preferred_chunks_dim for s in chunks_dim):\n warnings.warn(\n f\"Specified Dask chunks {chunks[dim]} would separate \"\n f\"the on-disk chunk shape {preferred_chunks[dim]} for dimension {dim}. \"\n \"This could degrade performance. 
\"\n \"Consider rechunking after loading instead.\",\n stacklevel=2,\n )\n\n\ndef _get_chunk(var, chunks):\n # chunks need to be explicitly computed to correctly take into account\n # the backend's preferred chunking\n import dask.array as da\n\n if isinstance(var, IndexVariable):\n return {}\n\n if isinstance(chunks, int) or (chunks == \"auto\"):\n chunks = dict.fromkeys(var.dims, chunks)\n\n preferred_chunks = var.encoding.get(\"preferred_chunks\", {})\n preferred_chunks_list = [\n preferred_chunks.get(dim, shape) for dim, shape in zip(var.dims, var.shape)\n ]\n\n chunks_list = [\n chunks.get(dim, None) or preferred_chunks.get(dim, None) for dim in var.dims\n ]\n\n output_chunks_list = da.core.normalize_chunks(\n chunks_list,\n shape=var.shape,\n dtype=var.dtype,\n previous_chunks=preferred_chunks_list,\n )\n\n output_chunks = dict(zip(var.dims, output_chunks_list))\n _check_chunks_compatibility(var, output_chunks, preferred_chunks)\n\n return output_chunks\n\n\ndef _maybe_chunk(\n name,\n var,\n chunks,\n token=None,\n lock=None,\n name_prefix=\"xarray-\",\n overwrite_encoded_chunks=False,\n):\n from dask.base import tokenize\n\n if chunks is not None:\n chunks = {dim: chunks[dim] for dim in var.dims if dim in chunks}\n if var.ndim:\n # when rechunking by different amounts, make sure dask names change\n # by providing chunks as an input to tokenize.\n # subtle bugs result otherwise. see GH3350\n token2 = tokenize(name, token if token else var._data, chunks)\n name2 = f\"{name_prefix}{name}-{token2}\"\n var = var.chunk(chunks, name=name2, lock=lock)\n\n if overwrite_encoded_chunks and var.chunks is not None:\n var.encoding[\"chunks\"] = tuple(x[0] for x in var.chunks)\n return var\n else:\n return var\n\n\ndef as_dataset(obj: Any) -> \"Dataset\":\n \"\"\"Cast the given object to a Dataset.\n\n Handles Datasets, DataArrays and dictionaries of variables. A new Dataset\n object is only created if the provided object is not already one.\n \"\"\"\n if hasattr(obj, \"to_dataset\"):\n obj = obj.to_dataset()\n if not isinstance(obj, Dataset):\n obj = Dataset(obj)\n return obj\n\n\ndef _get_func_args(func, param_names):\n \"\"\"Use `inspect.signature` to try accessing `func` args. 
Otherwise, ensure\n they are provided by user.\n \"\"\"\n try:\n func_args = inspect.signature(func).parameters\n except ValueError:\n func_args = {}\n if not param_names:\n raise ValueError(\n \"Unable to inspect `func` signature, and `param_names` was not provided.\"\n )\n if param_names:\n params = param_names\n else:\n params = list(func_args)[1:]\n if any(\n [(p.kind in [p.VAR_POSITIONAL, p.VAR_KEYWORD]) for p in func_args.values()]\n ):\n raise ValueError(\n \"`param_names` must be provided because `func` takes variable length arguments.\"\n )\n return params, func_args\n\n\ndef _initialize_curvefit_params(params, p0, bounds, func_args):\n \"\"\"Set initial guess and bounds for curvefit.\n Priority: 1) passed args 2) func signature 3) scipy defaults\n \"\"\"\n\n def _initialize_feasible(lb, ub):\n # Mimics functionality of scipy.optimize.minpack._initialize_feasible\n lb_finite = np.isfinite(lb)\n ub_finite = np.isfinite(ub)\n p0 = np.nansum(\n [\n 0.5 * (lb + ub) * int(lb_finite & ub_finite),\n (lb + 1) * int(lb_finite & ~ub_finite),\n (ub - 1) * int(~lb_finite & ub_finite),\n ]\n )\n return p0\n\n param_defaults = {p: 1 for p in params}\n bounds_defaults = {p: (-np.inf, np.inf) for p in params}\n for p in params:\n if p in func_args and func_args[p].default is not func_args[p].empty:\n param_defaults[p] = func_args[p].default\n if p in bounds:\n bounds_defaults[p] = tuple(bounds[p])\n if param_defaults[p] < bounds[p][0] or param_defaults[p] > bounds[p][1]:\n param_defaults[p] = _initialize_feasible(bounds[p][0], bounds[p][1])\n if p in p0:\n param_defaults[p] = p0[p]\n return param_defaults, bounds_defaults\n\n\nclass DataVariables(Mapping[Hashable, \"DataArray\"]):\n __slots__ = (\"_dataset\",)\n\n def __init__(self, dataset: \"Dataset\"):\n self._dataset = dataset\n\n def __iter__(self) -> Iterator[Hashable]:\n return (\n key\n for key in self._dataset._variables\n if key not in self._dataset._coord_names\n )\n\n def __len__(self) -> int:\n return len(self._dataset._variables) - len(self._dataset._coord_names)\n\n def __contains__(self, key: Hashable) -> bool:\n return key in self._dataset._variables and key not in self._dataset._coord_names\n\n def __getitem__(self, key: Hashable) -> \"DataArray\":\n if key not in self._dataset._coord_names:\n return cast(\"DataArray\", self._dataset[key])\n raise KeyError(key)\n\n def __repr__(self) -> str:\n return formatting.data_vars_repr(self)\n\n @property\n def variables(self) -> Mapping[Hashable, Variable]:\n all_variables = self._dataset.variables\n return Frozen({k: all_variables[k] for k in self})\n\n def _ipython_key_completions_(self):\n \"\"\"Provide method for the key-autocompletions in IPython. 
\"\"\"\n return [\n key\n for key in self._dataset._ipython_key_completions_()\n if key not in self._dataset._coord_names\n ]\n\n\nclass _LocIndexer:\n __slots__ = (\"dataset\",)\n\n def __init__(self, dataset: \"Dataset\"):\n self.dataset = dataset\n\n def __getitem__(self, key: Mapping[Hashable, Any]) -> \"Dataset\":\n if not utils.is_dict_like(key):\n raise TypeError(\"can only lookup dictionaries from Dataset.loc\")\n return self.dataset.sel(key)\n\n\nclass Dataset(Mapping, ImplementsDatasetReduce, DataWithCoords):\n \"\"\"A multi-dimensional, in memory, array database.\n\n A dataset resembles an in-memory representation of a NetCDF file,\n and consists of variables, coordinates and attributes which\n together form a self describing dataset.\n\n Dataset implements the mapping interface with keys given by variable\n names and values given by DataArray objects for each variable name.\n\n One dimensional variables with name equal to their dimension are\n index coordinates used for label based indexing.\n\n To load data from a file or file-like object, use the `open_dataset`\n function.\n\n Parameters\n ----------\n data_vars : dict-like, optional\n A mapping from variable names to :py:class:`~xarray.DataArray`\n objects, :py:class:`~xarray.Variable` objects or to tuples of\n the form ``(dims, data[, attrs])`` which can be used as\n arguments to create a new ``Variable``. Each dimension must\n have the same length in all variables in which it appears.\n\n The following notations are accepted:\n\n - mapping {var name: DataArray}\n - mapping {var name: Variable}\n - mapping {var name: (dimension name, array-like)}\n - mapping {var name: (tuple of dimension names, array-like)}\n - mapping {dimension name: array-like}\n (it will be automatically moved to coords, see below)\n\n Each dimension must have the same length in all variables in\n which it appears.\n coords : dict-like, optional\n Another mapping in similar form as the `data_vars` argument,\n except the each item is saved on the dataset as a \"coordinate\".\n These variables have an associated meaning: they describe\n constant/fixed/independent quantities, unlike the\n varying/measured/dependent quantities that belong in\n `variables`. Coordinates values may be given by 1-dimensional\n arrays or scalars, in which case `dims` do not need to be\n supplied: 1D arrays will be assumed to give index values along\n the dimension with the same name.\n\n The following notations are accepted:\n\n - mapping {coord name: DataArray}\n - mapping {coord name: Variable}\n - mapping {coord name: (dimension name, array-like)}\n - mapping {coord name: (tuple of dimension names, array-like)}\n - mapping {dimension name: array-like}\n (the dimension name is implicitly set to be the same as the\n coord name)\n\n The last notation implies that the coord name is the same as\n the dimension name.\n\n attrs : dict-like, optional\n Global attributes to save on this dataset.\n\n Examples\n --------\n Create data:\n\n >>> np.random.seed(0)\n >>> temperature = 15 + 8 * np.random.randn(2, 2, 3)\n >>> precipitation = 10 * np.random.rand(2, 2, 3)\n >>> lon = [[-99.83, -99.32], [-99.79, -99.23]]\n >>> lat = [[42.25, 42.21], [42.63, 42.59]]\n >>> time = pd.date_range(\"2014-09-06\", periods=3)\n >>> reference_time = pd.Timestamp(\"2014-09-05\")\n\n Initialize a dataset with multiple dimensions:\n\n >>> ds = xr.Dataset(\n ... data_vars=dict(\n ... temperature=([\"x\", \"y\", \"time\"], temperature),\n ... precipitation=([\"x\", \"y\", \"time\"], precipitation),\n ... 
),\n ... coords=dict(\n ... lon=([\"x\", \"y\"], lon),\n ... lat=([\"x\", \"y\"], lat),\n ... time=time,\n ... reference_time=reference_time,\n ... ),\n ... attrs=dict(description=\"Weather related data.\"),\n ... )\n >>> ds\n <xarray.Dataset>\n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n reference_time datetime64[ns] 2014-09-05\n Dimensions without coordinates: x, y\n Data variables:\n temperature (x, y, time) float64 29.11 18.2 22.83 ... 18.28 16.15 26.63\n precipitation (x, y, time) float64 5.68 9.256 0.7104 ... 7.992 4.615 7.805\n Attributes:\n description: Weather related data.\n\n Find out where the coldest temperature was and what values the\n other variables had:\n\n >>> ds.isel(ds.temperature.argmin(...))\n <xarray.Dataset>\n Dimensions: ()\n Coordinates:\n lon float64 -99.32\n lat float64 42.21\n time datetime64[ns] 2014-09-08\n reference_time datetime64[ns] 2014-09-05\n Data variables:\n temperature float64 7.182\n precipitation float64 8.326\n Attributes:\n description: Weather related data.\n \"\"\"\n\n _attrs: Optional[Dict[Hashable, Any]]\n _cache: Dict[str, Any]\n _coord_names: Set[Hashable]\n _dims: Dict[Hashable, int]\n _encoding: Optional[Dict[Hashable, Any]]\n _close: Optional[Callable[[], None]]\n _indexes: Optional[Dict[Hashable, pd.Index]]\n _variables: Dict[Hashable, Variable]\n\n __slots__ = (\n \"_attrs\",\n \"_cache\",\n \"_coord_names\",\n \"_dims\",\n \"_encoding\",\n \"_close\",\n \"_indexes\",\n \"_variables\",\n \"__weakref__\",\n )\n\n _groupby_cls = groupby.DatasetGroupBy\n _rolling_cls = rolling.DatasetRolling\n _coarsen_cls = rolling.DatasetCoarsen\n _resample_cls = resample.DatasetResample\n _weighted_cls = weighted.DatasetWeighted\n\n def __init__(\n self,\n # could make a VariableArgs to use more generally, and refine these\n # categories\n data_vars: Mapping[Hashable, Any] = None,\n coords: Mapping[Hashable, Any] = None,\n attrs: Mapping[Hashable, Any] = None,\n ):\n # TODO(shoyer): expose indexes as a public argument in __init__\n\n if data_vars is None:\n data_vars = {}\n if coords is None:\n coords = {}\n\n both_data_and_coords = set(data_vars) & set(coords)\n if both_data_and_coords:\n raise ValueError(\n \"variables %r are found in both data_vars and coords\"\n % both_data_and_coords\n )\n\n if isinstance(coords, Dataset):\n coords = coords.variables\n\n variables, coord_names, dims, indexes, _ = merge_data_and_coords(\n data_vars, coords, compat=\"broadcast_equals\"\n )\n\n self._attrs = dict(attrs) if attrs is not None else None\n self._close = None\n self._encoding = None\n self._variables = variables\n self._coord_names = coord_names\n self._dims = dims\n self._indexes = indexes\n\n @classmethod\n def load_store(cls, store, decoder=None) -> \"Dataset\":\n \"\"\"Create a new dataset from the contents of a backends.*DataStore\n object\n \"\"\"\n variables, attributes = store.load()\n if decoder:\n variables, attributes = decoder(variables, attributes)\n obj = cls(variables, attrs=attributes)\n obj.set_close(store.close)\n return obj\n\n @property\n def variables(self) -> Mapping[Hashable, Variable]:\n \"\"\"Low level interface to Dataset contents as dict of Variable objects.\n\n This ordered dictionary is frozen to prevent mutation that could\n violate Dataset invariants. 
It contains all variable objects\n constituting the Dataset, including both data variables and\n coordinates.\n \"\"\"\n return Frozen(self._variables)\n\n @property\n def attrs(self) -> Dict[Hashable, Any]:\n \"\"\"Dictionary of global attributes on this dataset\"\"\"\n if self._attrs is None:\n self._attrs = {}\n return self._attrs\n\n @attrs.setter\n def attrs(self, value: Mapping[Hashable, Any]) -> None:\n self._attrs = dict(value)\n\n @property\n def encoding(self) -> Dict:\n \"\"\"Dictionary of global encoding attributes on this dataset\"\"\"\n if self._encoding is None:\n self._encoding = {}\n return self._encoding\n\n @encoding.setter\n def encoding(self, value: Mapping) -> None:\n self._encoding = dict(value)\n\n @property\n def dims(self) -> Mapping[Hashable, int]:\n \"\"\"Mapping from dimension names to lengths.\n\n Cannot be modified directly, but is updated when adding new variables.\n\n Note that type of this object differs from `DataArray.dims`.\n See `Dataset.sizes` and `DataArray.sizes` for consistently named\n properties.\n \"\"\"\n return Frozen(SortedKeysDict(self._dims))\n\n @property\n def sizes(self) -> Mapping[Hashable, int]:\n \"\"\"Mapping from dimension names to lengths.\n\n Cannot be modified directly, but is updated when adding new variables.\n\n This is an alias for `Dataset.dims` provided for the benefit of\n consistency with `DataArray.sizes`.\n\n See Also\n --------\n DataArray.sizes\n \"\"\"\n return self.dims\n\n def load(self, **kwargs) -> \"Dataset\":\n \"\"\"Manually trigger loading and/or computation of this dataset's data\n from disk or a remote source into memory and return this dataset.\n Unlike compute, the original dataset is modified and returned.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. 
However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.compute``.\n\n See Also\n --------\n dask.compute\n \"\"\"\n # access .data to coerce everything to numpy or dask arrays\n lazy_data = {\n k: v._data for k, v in self.variables.items() if is_duck_dask_array(v._data)\n }\n if lazy_data:\n import dask.array as da\n\n # evaluate all the dask arrays simultaneously\n evaluated_data = da.compute(*lazy_data.values(), **kwargs)\n\n for k, data in zip(lazy_data, evaluated_data):\n self.variables[k].data = data\n\n # load everything else sequentially\n for k, v in self.variables.items():\n if k not in lazy_data:\n v.load()\n\n return self\n\n def __dask_tokenize__(self):\n from dask.base import normalize_token\n\n return normalize_token(\n (type(self), self._variables, self._coord_names, self._attrs)\n )\n\n def __dask_graph__(self):\n graphs = {k: v.__dask_graph__() for k, v in self.variables.items()}\n graphs = {k: v for k, v in graphs.items() if v is not None}\n if not graphs:\n return None\n else:\n try:\n from dask.highlevelgraph import HighLevelGraph\n\n return HighLevelGraph.merge(*graphs.values())\n except ImportError:\n from dask import sharedict\n\n return sharedict.merge(*graphs.values())\n\n def __dask_keys__(self):\n import dask\n\n return [\n v.__dask_keys__()\n for v in self.variables.values()\n if dask.is_dask_collection(v)\n ]\n\n def __dask_layers__(self):\n import dask\n\n return sum(\n [\n v.__dask_layers__()\n for v in self.variables.values()\n if dask.is_dask_collection(v)\n ],\n (),\n )\n\n @property\n def __dask_optimize__(self):\n import dask.array as da\n\n return da.Array.__dask_optimize__\n\n @property\n def __dask_scheduler__(self):\n import dask.array as da\n\n return da.Array.__dask_scheduler__\n\n def __dask_postcompute__(self):\n return self._dask_postcompute, ()\n\n def __dask_postpersist__(self):\n return self._dask_postpersist, ()\n\n def _dask_postcompute(self, results: \"Iterable[Variable]\") -> \"Dataset\":\n import dask\n\n variables = {}\n results_iter = iter(results)\n\n for k, v in self._variables.items():\n if dask.is_dask_collection(v):\n rebuild, args = v.__dask_postcompute__()\n v = rebuild(next(results_iter), *args)\n variables[k] = v\n\n return Dataset._construct_direct(\n variables,\n self._coord_names,\n self._dims,\n self._attrs,\n self._indexes,\n self._encoding,\n self._close,\n )\n\n def _dask_postpersist(\n self, dsk: Mapping, *, rename: Mapping[str, str] = None\n ) -> \"Dataset\":\n from dask import is_dask_collection\n from dask.highlevelgraph import HighLevelGraph\n from dask.optimization import cull\n\n variables = {}\n\n for k, v in self._variables.items():\n if not is_dask_collection(v):\n variables[k] = v\n continue\n\n if isinstance(dsk, HighLevelGraph):\n # dask >= 2021.3\n # __dask_postpersist__() was called by dask.highlevelgraph.\n # Don't use dsk.cull(), as we need to prevent partial layers:\n # https://github.com/dask/dask/issues/7137\n layers = v.__dask_layers__()\n if rename:\n layers = [rename.get(k, k) for k in layers]\n dsk2 = dsk.cull_layers(layers)\n elif rename: # pragma: nocover\n # At the moment of writing, this is only for forward compatibility.\n # replace_name_in_key requires dask >= 2021.3.\n from dask.base import flatten, replace_name_in_key\n\n keys = [\n replace_name_in_key(k, rename) for k in flatten(v.__dask_keys__())\n ]\n dsk2, _ = cull(dsk, keys)\n else:\n # __dask_postpersist__() 
was called by dask.optimize or dask.persist\n dsk2, _ = cull(dsk, v.__dask_keys__())\n\n rebuild, args = v.__dask_postpersist__()\n # rename was added in dask 2021.3\n kwargs = {\"rename\": rename} if rename else {}\n variables[k] = rebuild(dsk2, *args, **kwargs)\n\n return Dataset._construct_direct(\n variables,\n self._coord_names,\n self._dims,\n self._attrs,\n self._indexes,\n self._encoding,\n self._close,\n )\n\n def compute(self, **kwargs) -> \"Dataset\":\n \"\"\"Manually trigger loading and/or computation of this dataset's data\n from disk or a remote source into memory and return a new dataset.\n Unlike load, the original dataset is left unaltered.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.compute``.\n\n See Also\n --------\n dask.compute\n \"\"\"\n new = self.copy(deep=False)\n return new.load(**kwargs)\n\n def _persist_inplace(self, **kwargs) -> \"Dataset\":\n \"\"\"Persist all Dask arrays in memory\"\"\"\n # access .data to coerce everything to numpy or dask arrays\n lazy_data = {\n k: v._data for k, v in self.variables.items() if is_duck_dask_array(v._data)\n }\n if lazy_data:\n import dask\n\n # evaluate all the dask arrays simultaneously\n evaluated_data = dask.persist(*lazy_data.values(), **kwargs)\n\n for k, data in zip(lazy_data, evaluated_data):\n self.variables[k].data = data\n\n return self\n\n def persist(self, **kwargs) -> \"Dataset\":\n \"\"\"Trigger computation, keeping data as dask arrays\n\n This operation can be used to trigger computation on underlying dask\n arrays, similar to ``.compute()`` or ``.load()``. However this\n operation keeps the data as dask arrays. This is particularly useful\n when using the dask.distributed scheduler and you want to load a large\n amount of data into distributed memory.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.persist``.\n\n See Also\n --------\n dask.persist\n \"\"\"\n new = self.copy(deep=False)\n return new._persist_inplace(**kwargs)\n\n @classmethod\n def _construct_direct(\n cls,\n variables,\n coord_names,\n dims=None,\n attrs=None,\n indexes=None,\n encoding=None,\n close=None,\n ):\n \"\"\"Shortcut around __init__ for internal use when we want to skip\n costly validation\n \"\"\"\n if dims is None:\n dims = calculate_dimensions(variables)\n obj = object.__new__(cls)\n obj._variables = variables\n obj._coord_names = coord_names\n obj._dims = dims\n obj._indexes = indexes\n obj._attrs = attrs\n obj._close = close\n obj._encoding = encoding\n return obj\n\n def _replace(\n self,\n variables: Dict[Hashable, Variable] = None,\n coord_names: Set[Hashable] = None,\n dims: Dict[Any, int] = None,\n attrs: Union[Dict[Hashable, Any], None, Default] = _default,\n indexes: Union[Dict[Any, pd.Index], None, Default] = _default,\n encoding: Union[dict, None, Default] = _default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Fastpath constructor for internal use.\n\n Returns an object with optionally with replaced attributes.\n\n Explicitly passed arguments are *not* copied when placed on the new\n dataset. 
It is up to the caller to ensure that they have the right type\n and are not used elsewhere.\n \"\"\"\n if inplace:\n if variables is not None:\n self._variables = variables\n if coord_names is not None:\n self._coord_names = coord_names\n if dims is not None:\n self._dims = dims\n if attrs is not _default:\n self._attrs = attrs\n if indexes is not _default:\n self._indexes = indexes\n if encoding is not _default:\n self._encoding = encoding\n obj = self\n else:\n if variables is None:\n variables = self._variables.copy()\n if coord_names is None:\n coord_names = self._coord_names.copy()\n if dims is None:\n dims = self._dims.copy()\n if attrs is _default:\n attrs = copy.copy(self._attrs)\n if indexes is _default:\n indexes = copy.copy(self._indexes)\n if encoding is _default:\n encoding = copy.copy(self._encoding)\n obj = self._construct_direct(\n variables, coord_names, dims, attrs, indexes, encoding\n )\n return obj\n\n def _replace_with_new_dims(\n self,\n variables: Dict[Hashable, Variable],\n coord_names: set = None,\n attrs: Union[Dict[Hashable, Any], None, Default] = _default,\n indexes: Union[Dict[Hashable, pd.Index], None, Default] = _default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Replace variables with recalculated dimensions.\"\"\"\n dims = calculate_dimensions(variables)\n return self._replace(\n variables, coord_names, dims, attrs, indexes, inplace=inplace\n )\n\n def _replace_vars_and_dims(\n self,\n variables: Dict[Hashable, Variable],\n coord_names: set = None,\n dims: Dict[Hashable, int] = None,\n attrs: Union[Dict[Hashable, Any], None, Default] = _default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Deprecated version of _replace_with_new_dims().\n\n Unlike _replace_with_new_dims(), this method always recalculates\n indexes from variables.\n \"\"\"\n if dims is None:\n dims = calculate_dimensions(variables)\n return self._replace(\n variables, coord_names, dims, attrs, indexes=None, inplace=inplace\n )\n\n def _overwrite_indexes(self, indexes: Mapping[Any, pd.Index]) -> \"Dataset\":\n if not indexes:\n return self\n\n variables = self._variables.copy()\n new_indexes = dict(self.indexes)\n for name, idx in indexes.items():\n variables[name] = IndexVariable(name, idx)\n new_indexes[name] = idx\n obj = self._replace(variables, indexes=new_indexes)\n\n # switch from dimension to level names, if necessary\n dim_names: Dict[Hashable, str] = {}\n for dim, idx in indexes.items():\n if not isinstance(idx, pd.MultiIndex) and idx.name != dim:\n dim_names[dim] = idx.name\n if dim_names:\n obj = obj.rename(dim_names)\n return obj\n\n def copy(self, deep: bool = False, data: Mapping = None) -> \"Dataset\":\n \"\"\"Returns a copy of this dataset.\n\n If `deep=True`, a deep copy is made of each of the component variables.\n Otherwise, a shallow copy of each of the component variable is made, so\n that the underlying memory region of the new dataset is the same as in\n the original dataset.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Whether each component variable is loaded into memory and copied onto\n the new object. Default is False.\n data : dict-like, optional\n Data to use in the new object. Each item in `data` must have same\n shape as corresponding data variable in original. 
When `data` is\n used, `deep` is ignored for the data variables and only used for\n coords.\n\n Returns\n -------\n object : Dataset\n New object with dimensions, attributes, coordinates, name, encoding,\n and optionally data copied from original.\n\n Examples\n --------\n Shallow copy versus deep copy\n\n >>> da = xr.DataArray(np.random.randn(2, 3))\n >>> ds = xr.Dataset(\n ... {\"foo\": da, \"bar\": (\"x\", [-1, 2])},\n ... coords={\"x\": [\"one\", \"two\"]},\n ... )\n >>> ds.copy()\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) <U3 'one' 'two'\n Dimensions without coordinates: dim_0, dim_1\n Data variables:\n foo (dim_0, dim_1) float64 1.764 0.4002 0.9787 2.241 1.868 -0.9773\n bar (x) int64 -1 2\n\n >>> ds_0 = ds.copy(deep=False)\n >>> ds_0[\"foo\"][0, 0] = 7\n >>> ds_0\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) <U3 'one' 'two'\n Dimensions without coordinates: dim_0, dim_1\n Data variables:\n foo (dim_0, dim_1) float64 7.0 0.4002 0.9787 2.241 1.868 -0.9773\n bar (x) int64 -1 2\n\n >>> ds\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) <U3 'one' 'two'\n Dimensions without coordinates: dim_0, dim_1\n Data variables:\n foo (dim_0, dim_1) float64 7.0 0.4002 0.9787 2.241 1.868 -0.9773\n bar (x) int64 -1 2\n\n Changing the data using the ``data`` argument maintains the\n structure of the original object, but with the new data. Original\n object is unaffected.\n\n >>> ds.copy(data={\"foo\": np.arange(6).reshape(2, 3), \"bar\": [\"a\", \"b\"]})\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) <U3 'one' 'two'\n Dimensions without coordinates: dim_0, dim_1\n Data variables:\n foo (dim_0, dim_1) int64 0 1 2 3 4 5\n bar (x) <U1 'a' 'b'\n\n >>> ds\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) <U3 'one' 'two'\n Dimensions without coordinates: dim_0, dim_1\n Data variables:\n foo (dim_0, dim_1) float64 7.0 0.4002 0.9787 2.241 1.868 -0.9773\n bar (x) int64 -1 2\n\n See Also\n --------\n pandas.DataFrame.copy\n \"\"\"\n if data is None:\n variables = {k: v.copy(deep=deep) for k, v in self._variables.items()}\n elif not utils.is_dict_like(data):\n raise ValueError(\"Data must be dict-like\")\n else:\n var_keys = set(self.data_vars.keys())\n data_keys = set(data.keys())\n keys_not_in_vars = data_keys - var_keys\n if keys_not_in_vars:\n raise ValueError(\n \"Data must only contain variables in original \"\n \"dataset. Extra variables: {}\".format(keys_not_in_vars)\n )\n keys_missing_from_data = var_keys - data_keys\n if keys_missing_from_data:\n raise ValueError(\n \"Data must contain all variables in original \"\n \"dataset. 
Data is missing {}\".format(keys_missing_from_data)\n )\n variables = {\n k: v.copy(deep=deep, data=data.get(k))\n for k, v in self._variables.items()\n }\n\n attrs = copy.deepcopy(self._attrs) if deep else copy.copy(self._attrs)\n\n return self._replace(variables, attrs=attrs)\n\n @property\n def _level_coords(self) -> Dict[str, Hashable]:\n \"\"\"Return a mapping of all MultiIndex levels and their corresponding\n coordinate name.\n \"\"\"\n level_coords: Dict[str, Hashable] = {}\n for name, index in self.indexes.items():\n if isinstance(index, pd.MultiIndex):\n level_names = index.names\n (dim,) = self.variables[name].dims\n level_coords.update({lname: dim for lname in level_names})\n return level_coords\n\n def _copy_listed(self, names: Iterable[Hashable]) -> \"Dataset\":\n \"\"\"Create a new Dataset with the listed variables from this dataset and\n the all relevant coordinates. Skips all validation.\n \"\"\"\n variables: Dict[Hashable, Variable] = {}\n coord_names = set()\n indexes: Dict[Hashable, pd.Index] = {}\n\n for name in names:\n try:\n variables[name] = self._variables[name]\n except KeyError:\n ref_name, var_name, var = _get_virtual_variable(\n self._variables, name, self._level_coords, self.dims\n )\n variables[var_name] = var\n if ref_name in self._coord_names or ref_name in self.dims:\n coord_names.add(var_name)\n if (var_name,) == var.dims:\n indexes[var_name] = var.to_index()\n\n needed_dims: Set[Hashable] = set()\n for v in variables.values():\n needed_dims.update(v.dims)\n\n dims = {k: self.dims[k] for k in needed_dims}\n\n # preserves ordering of coordinates\n for k in self._variables:\n if k not in self._coord_names:\n continue\n\n if set(self.variables[k].dims) <= needed_dims:\n variables[k] = self._variables[k]\n coord_names.add(k)\n if k in self.indexes:\n indexes[k] = self.indexes[k]\n\n return self._replace(variables, coord_names, dims, indexes=indexes)\n\n def _construct_dataarray(self, name: Hashable) -> \"DataArray\":\n \"\"\"Construct a DataArray by indexing this dataset\"\"\"\n from .dataarray import DataArray\n\n try:\n variable = self._variables[name]\n except KeyError:\n _, name, variable = _get_virtual_variable(\n self._variables, name, self._level_coords, self.dims\n )\n\n needed_dims = set(variable.dims)\n\n coords: Dict[Hashable, Variable] = {}\n # preserve ordering\n for k in self._variables:\n if k in self._coord_names and set(self.variables[k].dims) <= needed_dims:\n coords[k] = self.variables[k]\n\n if self._indexes is None:\n indexes = None\n else:\n indexes = {k: v for k, v in self._indexes.items() if k in coords}\n\n return DataArray(variable, coords, name=name, indexes=indexes, fastpath=True)\n\n def __copy__(self) -> \"Dataset\":\n return self.copy(deep=False)\n\n def __deepcopy__(self, memo=None) -> \"Dataset\":\n # memo does nothing but is required for compatibility with\n # copy.deepcopy\n return self.copy(deep=True)\n\n @property\n def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]:\n \"\"\"Places to look-up items for attribute-style access\"\"\"\n yield from self._item_sources\n yield self.attrs\n\n @property\n def _item_sources(self) -> Iterable[Mapping[Hashable, Any]]:\n \"\"\"Places to look-up items for key-completion\"\"\"\n yield self.data_vars\n yield HybridMappingProxy(keys=self._coord_names, mapping=self.coords)\n\n # virtual coordinates\n yield HybridMappingProxy(keys=self.dims, mapping=self)\n\n # uses empty dict -- everything here can already be found in self.coords.\n yield HybridMappingProxy(keys=self._level_coords, 
mapping={})\n\n def __contains__(self, key: object) -> bool:\n \"\"\"The 'in' operator will return true or false depending on whether\n 'key' is an array in the dataset or not.\n \"\"\"\n return key in self._variables\n\n def __len__(self) -> int:\n return len(self.data_vars)\n\n def __bool__(self) -> bool:\n return bool(self.data_vars)\n\n def __iter__(self) -> Iterator[Hashable]:\n return iter(self.data_vars)\n\n def __array__(self, dtype=None):\n raise TypeError(\n \"cannot directly convert an xarray.Dataset into a \"\n \"numpy array. Instead, create an xarray.DataArray \"\n \"first, either with indexing on the Dataset or by \"\n \"invoking the `to_array()` method.\"\n )\n\n @property\n def nbytes(self) -> int:\n return sum(v.nbytes for v in self.variables.values())\n\n @property\n def loc(self) -> _LocIndexer:\n \"\"\"Attribute for location based indexing. Only supports __getitem__,\n and only when the key is a dict of the form {dim: labels}.\n \"\"\"\n return _LocIndexer(self)\n\n # FIXME https://github.com/python/mypy/issues/7328\n @overload\n def __getitem__(self, key: Mapping) -> \"Dataset\": # type: ignore[misc]\n ...\n\n @overload\n def __getitem__(self, key: Hashable) -> \"DataArray\": # type: ignore[misc]\n ...\n\n @overload\n def __getitem__(self, key: Any) -> \"Dataset\":\n ...\n\n def __getitem__(self, key):\n \"\"\"Access variables or coordinates this dataset as a\n :py:class:`~xarray.DataArray`.\n\n Indexing with a list of names will return a new ``Dataset`` object.\n \"\"\"\n if utils.is_dict_like(key):\n return self.isel(**cast(Mapping, key))\n\n if hashable(key):\n return self._construct_dataarray(key)\n else:\n return self._copy_listed(np.asarray(key))\n\n def __setitem__(self, key: Hashable, value) -> None:\n \"\"\"Add an array to this dataset.\n\n If value is a `DataArray`, call its `select_vars()` method, rename it\n to `key` and merge the contents of the resulting dataset into this\n dataset.\n\n If value is an `Variable` object (or tuple of form\n ``(dims, data[, attrs])``), add it to this dataset as a new\n variable.\n \"\"\"\n if utils.is_dict_like(key):\n raise NotImplementedError(\n \"cannot yet use a dictionary as a key to set Dataset values\"\n )\n\n self.update({key: value})\n\n def __delitem__(self, key: Hashable) -> None:\n \"\"\"Remove a variable from this dataset.\"\"\"\n del self._variables[key]\n self._coord_names.discard(key)\n if key in self.indexes:\n assert self._indexes is not None\n del self._indexes[key]\n self._dims = calculate_dimensions(self._variables)\n\n # mutable objects should not be hashable\n # https://github.com/python/mypy/issues/4266\n __hash__ = None # type: ignore[assignment]\n\n def _all_compat(self, other: \"Dataset\", compat_str: str) -> bool:\n \"\"\"Helper function for equals and identical\"\"\"\n\n # some stores (e.g., scipy) do not seem to preserve order, so don't\n # require matching order for equality\n def compat(x: Variable, y: Variable) -> bool:\n return getattr(x, compat_str)(y)\n\n return self._coord_names == other._coord_names and utils.dict_equiv(\n self._variables, other._variables, compat=compat\n )\n\n def broadcast_equals(self, other: \"Dataset\") -> bool:\n \"\"\"Two Datasets are broadcast equal if they are equal after\n broadcasting all variables against each other.\n\n For example, variables that are scalar in one dataset but non-scalar in\n the other dataset can still be broadcast equal if the the non-scalar\n variable is a constant.\n\n See Also\n --------\n Dataset.equals\n Dataset.identical\n \"\"\"\n 
try:\n return self._all_compat(other, \"broadcast_equals\")\n except (TypeError, AttributeError):\n return False\n\n def equals(self, other: \"Dataset\") -> bool:\n \"\"\"Two Datasets are equal if they have matching variables and\n coordinates, all of which are equal.\n\n Datasets can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n\n This method is necessary because `v1 == v2` for ``Dataset``\n does element-wise comparisons (like numpy.ndarrays).\n\n See Also\n --------\n Dataset.broadcast_equals\n Dataset.identical\n \"\"\"\n try:\n return self._all_compat(other, \"equals\")\n except (TypeError, AttributeError):\n return False\n\n def identical(self, other: \"Dataset\") -> bool:\n \"\"\"Like equals, but also checks all dataset attributes and the\n attributes on all variables and coordinates.\n\n See Also\n --------\n Dataset.broadcast_equals\n Dataset.equals\n \"\"\"\n try:\n return utils.dict_equiv(self.attrs, other.attrs) and self._all_compat(\n other, \"identical\"\n )\n except (TypeError, AttributeError):\n return False\n\n @property\n def indexes(self) -> Indexes:\n \"\"\"Mapping of pandas.Index objects used for label based indexing\"\"\"\n if self._indexes is None:\n self._indexes = default_indexes(self._variables, self._dims)\n return Indexes(self._indexes)\n\n @property\n def coords(self) -> DatasetCoordinates:\n \"\"\"Dictionary of xarray.DataArray objects corresponding to coordinate\n variables\n \"\"\"\n return DatasetCoordinates(self)\n\n @property\n def data_vars(self) -> DataVariables:\n \"\"\"Dictionary of DataArray objects corresponding to data variables\"\"\"\n return DataVariables(self)\n\n def set_coords(self, names: \"Union[Hashable, Iterable[Hashable]]\") -> \"Dataset\":\n \"\"\"Given names of one or more variables, set them as coordinates\n\n Parameters\n ----------\n names : hashable or iterable of hashable\n Name(s) of variables in this dataset to convert into coordinates.\n\n Returns\n -------\n Dataset\n\n See Also\n --------\n Dataset.swap_dims\n \"\"\"\n # TODO: allow inserting new coordinates with this method, like\n # DataFrame.set_index?\n # nb. check in self._variables, not self.data_vars to insure that the\n # operation is idempotent\n if isinstance(names, str) or not isinstance(names, Iterable):\n names = [names]\n else:\n names = list(names)\n self._assert_all_in_dataset(names)\n obj = self.copy()\n obj._coord_names.update(names)\n return obj\n\n def reset_coords(\n self,\n names: \"Union[Hashable, Iterable[Hashable], None]\" = None,\n drop: bool = False,\n ) -> \"Dataset\":\n \"\"\"Given names of coordinates, reset them to become variables\n\n Parameters\n ----------\n names : hashable or iterable of hashable, optional\n Name(s) of non-index coordinates in this dataset to reset into\n variables. 
By default, all non-index coordinates are reset.\n drop : bool, optional\n If True, remove coordinates instead of converting them into\n variables.\n\n Returns\n -------\n Dataset\n \"\"\"\n if names is None:\n names = self._coord_names - set(self.dims)\n else:\n if isinstance(names, str) or not isinstance(names, Iterable):\n names = [names]\n else:\n names = list(names)\n self._assert_all_in_dataset(names)\n bad_coords = set(names) & set(self.dims)\n if bad_coords:\n raise ValueError(\n \"cannot remove index coordinates with reset_coords: %s\" % bad_coords\n )\n obj = self.copy()\n obj._coord_names.difference_update(names)\n if drop:\n for name in names:\n del obj._variables[name]\n return obj\n\n def dump_to_store(self, store: \"AbstractDataStore\", **kwargs) -> None:\n \"\"\"Store dataset contents to a backends.*DataStore object.\"\"\"\n from ..backends.api import dump_to_store\n\n # TODO: rename and/or cleanup this method to make it more consistent\n # with to_netcdf()\n dump_to_store(self, store, **kwargs)\n\n def to_netcdf(\n self,\n path=None,\n mode: str = \"w\",\n format: str = None,\n group: str = None,\n engine: str = None,\n encoding: Mapping = None,\n unlimited_dims: Iterable[Hashable] = None,\n compute: bool = True,\n invalid_netcdf: bool = False,\n ) -> Union[bytes, \"Delayed\", None]:\n \"\"\"Write dataset contents to a netCDF file.\n\n Parameters\n ----------\n path : str, Path or file-like, optional\n Path to which to save this dataset. File-like objects are only\n supported by the scipy engine. If no path is provided, this\n function returns the resulting netCDF file as bytes; in this case,\n we need to use scipy, which does not support netCDF version 4 (the\n default format becomes NETCDF3_64BIT).\n mode : {\"w\", \"a\"}, default: \"w\"\n Write ('w') or append ('a') mode. If mode='w', any existing file at\n this location will be overwritten. If mode='a', existing variables\n will be overwritten.\n format : {\"NETCDF4\", \"NETCDF4_CLASSIC\", \"NETCDF3_64BIT\", \\\n \"NETCDF3_CLASSIC\"}, optional\n File format for the resulting netCDF file:\n\n * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API\n features.\n * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only\n netCDF 3 compatible API features.\n * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,\n which fully supports 2+ GB files, but is only compatible with\n clients linked against netCDF version 3.6.0 or later.\n * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not\n handle 2+ GB files very well.\n\n All formats are supported by the netCDF4-python library.\n scipy.io.netcdf only supports the last two formats.\n\n The default format is NETCDF4 if you are saving a file to disk and\n have the netCDF4-python library available. Otherwise, xarray falls\n back to using scipy to write netCDF files and defaults to the\n NETCDF3_64BIT format (scipy does not support netCDF4).\n group : str, optional\n Path to the netCDF4 group in the given file to open (only works for\n format='NETCDF4'). The group(s) will be created if necessary.\n engine : {\"netcdf4\", \"scipy\", \"h5netcdf\"}, optional\n Engine to use when writing netCDF files. 
If not provided, the\n default engine is chosen based on available dependencies, with a\n preference for 'netcdf4' if writing to a file on disk.\n encoding : dict, optional\n Nested dictionary with variable names as keys and dictionaries of\n variable specific encodings as values, e.g.,\n ``{\"my_variable\": {\"dtype\": \"int16\", \"scale_factor\": 0.1,\n \"zlib\": True}, ...}``\n\n The `h5netcdf` engine supports both the NetCDF4-style compression\n encoding parameters ``{\"zlib\": True, \"complevel\": 9}`` and the h5py\n ones ``{\"compression\": \"gzip\", \"compression_opts\": 9}``.\n This allows using any compression plugin installed in the HDF5\n library, e.g. LZF.\n\n unlimited_dims : iterable of hashable, optional\n Dimension(s) that should be serialized as unlimited dimensions.\n By default, no dimensions are treated as unlimited dimensions.\n Note that unlimited_dims may also be set via\n ``dataset.encoding[\"unlimited_dims\"]``.\n compute: bool, default: True\n If true compute immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed later.\n invalid_netcdf: bool, default: False\n Only valid along with ``engine=\"h5netcdf\"``. If True, allow writing\n hdf5 files which are invalid netcdf as described in\n https://github.com/shoyer/h5netcdf.\n \"\"\"\n if encoding is None:\n encoding = {}\n from ..backends.api import to_netcdf\n\n return to_netcdf(\n self,\n path,\n mode,\n format=format,\n group=group,\n engine=engine,\n encoding=encoding,\n unlimited_dims=unlimited_dims,\n compute=compute,\n invalid_netcdf=invalid_netcdf,\n )\n\n def to_zarr(\n self,\n store: Union[MutableMapping, str, Path] = None,\n chunk_store: Union[MutableMapping, str, Path] = None,\n mode: str = None,\n synchronizer=None,\n group: str = None,\n encoding: Mapping = None,\n compute: bool = True,\n consolidated: bool = False,\n append_dim: Hashable = None,\n region: Mapping[str, slice] = None,\n ) -> \"ZarrStore\":\n \"\"\"Write dataset contents to a zarr group.\n\n .. note:: Experimental\n The Zarr backend is new and experimental. Please report any\n unexpected behavior via github issues.\n\n Parameters\n ----------\n store : MutableMapping, str or Path, optional\n Store or path to directory in file system.\n chunk_store : MutableMapping, str or Path, optional\n Store or path to directory in file system only for Zarr array chunks.\n Requires zarr-python v2.4.0 or later.\n mode : {\"w\", \"w-\", \"a\", None}, optional\n Persistence mode: \"w\" means create (overwrite if exists);\n \"w-\" means create (fail if exists);\n \"a\" means override existing variables (create if does not exist).\n If ``append_dim`` is set, ``mode`` can be omitted as it is\n internally set to ``\"a\"``. Otherwise, ``mode`` will default to\n `w-` if not set.\n synchronizer : object, optional\n Zarr array synchronizer.\n group : str, optional\n Group path. (a.k.a. `path` in zarr terminology.)\n encoding : dict, optional\n Nested dictionary with variable names as keys and dictionaries of\n variable specific encodings as values, e.g.,\n ``{\"my_variable\": {\"dtype\": \"int16\", \"scale_factor\": 0.1,}, ...}``\n compute : bool, optional\n If True write array data immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed to write\n array data later. 
Metadata is always updated eagerly.\n consolidated : bool, optional\n If True, apply zarr's `consolidate_metadata` function to the store\n after writing metadata.\n append_dim : hashable, optional\n If set, the dimension along which the data will be appended. All\n other dimensions on overridden variables must remain the same size.\n region : dict, optional\n Optional mapping from dimension names to integer slices along\n dataset dimensions to indicate the region of existing zarr array(s)\n in which to write this dataset's data. For example,\n ``{'x': slice(0, 1000), 'y': slice(10000, 11000)}`` would indicate\n that values should be written to the region ``0:1000`` along ``x``\n and ``10000:11000`` along ``y``.\n\n Two restrictions apply to the use of ``region``:\n\n - If ``region`` is set, _all_ variables in a dataset must have at\n least one dimension in common with the region. Other variables\n should be written in a separate call to ``to_zarr()``.\n - Dimensions cannot be included in both ``region`` and\n ``append_dim`` at the same time. To create empty arrays to fill\n in with ``region``, use a separate call to ``to_zarr()`` with\n ``compute=False``. See "Appending to existing Zarr stores" in\n the reference documentation for full details.\n\n References\n ----------\n https://zarr.readthedocs.io/\n\n Notes\n -----\n Zarr chunking behavior:\n If chunks are found in the encoding argument or attribute\n corresponding to any DataArray, those chunks are used.\n If a DataArray is a dask array, it is written with those chunks.\n If no other chunks are found, Zarr uses its own heuristics to\n choose automatic chunk sizes.\n """\n from ..backends.api import to_zarr\n\n if encoding is None:\n encoding = {}\n\n return to_zarr(\n self,\n store=store,\n chunk_store=chunk_store,\n mode=mode,\n synchronizer=synchronizer,\n group=group,\n encoding=encoding,\n compute=compute,\n consolidated=consolidated,\n append_dim=append_dim,\n region=region,\n )\n\n def __repr__(self) -> str:\n return formatting.dataset_repr(self)\n\n def _repr_html_(self):\n if OPTIONS["display_style"] == "text":\n return f"<pre>{escape(repr(self))}</pre>"\n return formatting_html.dataset_repr(self)\n\n def info(self, buf=None) -> None:\n """\n Concise summary of a Dataset's variables and attributes.\n\n Parameters\n ----------\n buf : file-like, default: sys.stdout\n writable buffer\n\n See Also\n --------\n pandas.DataFrame.info\n ncdump : netCDF's ncdump\n """\n if buf is None: # pragma: no cover\n buf = sys.stdout\n\n lines = []\n lines.append("xarray.Dataset {")\n lines.append("dimensions:")\n for name, size in self.dims.items():\n lines.append(f"\\t{name} = {size} ;")\n lines.append("\\nvariables:")\n for name, da in self.variables.items():\n dims = ", ".join(da.dims)\n lines.append(f"\\t{da.dtype} {name}({dims}) ;")\n for k, v in da.attrs.items():\n lines.append(f"\\t\\t{name}:{k} = {v} ;")\n lines.append("\\n// global attributes:")\n for k, v in self.attrs.items():\n lines.append(f"\\t:{k} = {v} ;")\n lines.append("}")\n\n buf.write("\\n".join(lines))\n\n @property\n def chunks(self) -> Mapping[Hashable, Tuple[int, ...]]:\n """Block dimensions for this dataset's data or None if it's not a dask\n array.\n """\n chunks: Dict[Hashable, Tuple[int, ...]] = {}\n for v in self.variables.values():\n if v.chunks is not None:\n for dim, c in zip(v.dims, v.chunks):\n if dim in chunks and c != chunks[dim]:\n raise ValueError(\n f"Object has inconsistent chunks along dimension {dim}. 
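# --- Illustrative usage sketch (not part of the module source above). A
# text summary via info() and a zarr round trip; assumes the ``zarr``
# package is installed and "out.zarr" is a writable path.
import numpy as np
import xarray as xr

ds = xr.Dataset({"t": ("x", np.arange(4.0))}, coords={"x": [10, 20, 30, 40]})
ds.info()  # ncdump-style summary written to stdout
ds.to_zarr("out.zarr", mode="w", consolidated=True)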
\"\n \"This can be fixed by calling unify_chunks().\"\n )\n chunks[dim] = c\n return Frozen(SortedKeysDict(chunks))\n\n def chunk(\n self,\n chunks: Union[\n Number,\n str,\n Mapping[Hashable, Union[None, Number, str, Tuple[Number, ...]]],\n ] = {}, # {} even though it's technically unsafe, is being used intentionally here (#4667)\n name_prefix: str = \"xarray-\",\n token: str = None,\n lock: bool = False,\n ) -> \"Dataset\":\n \"\"\"Coerce all arrays in this dataset into dask arrays with the given\n chunks.\n\n Non-dask arrays in this dataset will be converted to dask arrays. Dask\n arrays will be rechunked to the given chunk sizes.\n\n If neither chunks is not provided for one or more dimensions, chunk\n sizes along that dimension will not be updated; non-dask arrays will be\n converted into dask arrays with a single block.\n\n Parameters\n ----------\n chunks : int, 'auto' or mapping, optional\n Chunk sizes along each dimension, e.g., ``5`` or\n ``{\"x\": 5, \"y\": 5}``.\n name_prefix : str, optional\n Prefix for the name of any new dask arrays.\n token : str, optional\n Token uniquely identifying this dataset.\n lock : optional\n Passed on to :py:func:`dask.array.from_array`, if the array is not\n already as dask array.\n\n Returns\n -------\n chunked : xarray.Dataset\n \"\"\"\n if chunks is None:\n warnings.warn(\n \"None value for 'chunks' is deprecated. \"\n \"It will raise an error in the future. Use instead '{}'\",\n category=FutureWarning,\n )\n chunks = {}\n\n if isinstance(chunks, (Number, str)):\n chunks = dict.fromkeys(self.dims, chunks)\n\n bad_dims = chunks.keys() - self.dims.keys()\n if bad_dims:\n raise ValueError(\n \"some chunks keys are not dimensions on this \" \"object: %s\" % bad_dims\n )\n\n variables = {\n k: _maybe_chunk(k, v, chunks, token, lock, name_prefix)\n for k, v in self.variables.items()\n }\n return self._replace(variables)\n\n def _validate_indexers(\n self, indexers: Mapping[Hashable, Any], missing_dims: str = \"raise\"\n ) -> Iterator[Tuple[Hashable, Union[int, slice, np.ndarray, Variable]]]:\n \"\"\"Here we make sure\n + indexer has a valid keys\n + indexer is in a valid data type\n + string indexers are cast to the appropriate date type if the\n associated index is a DatetimeIndex or CFTimeIndex\n \"\"\"\n from .dataarray import DataArray\n\n indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)\n\n # all indexers should be int, slice, np.ndarrays, or Variable\n for k, v in indexers.items():\n if isinstance(v, (int, slice, Variable)):\n yield k, v\n elif isinstance(v, DataArray):\n yield k, v.variable\n elif isinstance(v, tuple):\n yield k, as_variable(v)\n elif isinstance(v, Dataset):\n raise TypeError(\"cannot use a Dataset as an indexer\")\n elif isinstance(v, Sequence) and len(v) == 0:\n yield k, np.empty((0,), dtype=\"int64\")\n else:\n v = np.asarray(v)\n\n if v.dtype.kind in \"US\":\n index = self.indexes[k]\n if isinstance(index, pd.DatetimeIndex):\n v = v.astype(\"datetime64[ns]\")\n elif isinstance(index, xr.CFTimeIndex):\n v = _parse_array_of_cftime_strings(v, index.date_type)\n\n if v.ndim > 1:\n raise IndexError(\n \"Unlabeled multi-dimensional array cannot be \"\n \"used for indexing: {}\".format(k)\n )\n yield k, v\n\n def _validate_interp_indexers(\n self, indexers: Mapping[Hashable, Any]\n ) -> Iterator[Tuple[Hashable, Variable]]:\n \"\"\"Variant of _validate_indexers to be used for interpolation\"\"\"\n for k, v in self._validate_indexers(indexers):\n if isinstance(v, Variable):\n if v.ndim == 1:\n yield k, 
v.to_index_variable()\n else:\n yield k, v\n elif isinstance(v, int):\n yield k, Variable((), v)\n elif isinstance(v, np.ndarray):\n if v.ndim == 0:\n yield k, Variable((), v)\n elif v.ndim == 1:\n yield k, IndexVariable((k,), v)\n else:\n raise AssertionError() # Already tested by _validate_indexers\n else:\n raise TypeError(type(v))\n\n def _get_indexers_coords_and_indexes(self, indexers):\n """Extract coordinates and indexes from indexers.\n\n Only coordinates with names different from any of self.variables will\n be attached.\n """\n from .dataarray import DataArray\n\n coords_list = []\n for k, v in indexers.items():\n if isinstance(v, DataArray):\n if v.dtype.kind == "b":\n if v.ndim != 1: # we only support 1-d boolean arrays\n raise ValueError(\n "{:d}d-boolean array is used for indexing along "\n "dimension {!r}, but only 1d boolean arrays are "\n "supported.".format(v.ndim, k)\n )\n # In the case of a boolean DataArray, make sure its\n # coordinate is also indexed.\n v_coords = v[v.values.nonzero()[0]].coords\n else:\n v_coords = v.coords\n coords_list.append(v_coords)\n\n # we don't need to call align() explicitly or check indexes for\n # alignment, because merge_variables already checks for exact alignment\n # between dimension coordinates\n coords, indexes = merge_coordinates_without_align(coords_list)\n assert_coordinate_consistent(self, coords)\n\n # silently drop the conflicted variables.\n attached_coords = {k: v for k, v in coords.items() if k not in self._variables}\n attached_indexes = {\n k: v for k, v in indexes.items() if k not in self._variables\n }\n return attached_coords, attached_indexes\n\n def isel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n drop: bool = False,\n missing_dims: str = "raise",\n **indexers_kwargs: Any,\n ) -> "Dataset":\n """Returns a new dataset with each array indexed along the specified\n dimension(s).\n\n This method selects values from each array using its `__getitem__`\n method, except this method does not require knowing the order of\n each array's dimensions.\n\n Parameters\n ----------\n indexers : dict, optional\n A dict with keys matching dimensions and values given\n by integers, slice objects or arrays.\n An indexer can be an integer, slice, array-like or DataArray.\n If DataArrays are passed as indexers, xarray-style indexing will be\n carried out. 
See :ref:`indexing` for the details.\n One of indexers or indexers_kwargs must be provided.\n drop : bool, optional\n If ``drop=True``, drop coordinate variables indexed by integers\n instead of making them scalar.\n missing_dims : {"raise", "warn", "ignore"}, default: "raise"\n What to do if dimensions that should be selected from are not present in the\n Dataset:\n - "raise": raise an exception\n - "warn": raise a warning, and ignore the missing dimensions\n - "ignore": ignore the missing dimensions\n **indexers_kwargs : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n array and dimension is indexed by the appropriate indexers.\n If indexer DataArrays have coordinates that do not conflict with\n this object, then these coordinates will be attached.\n In general, each array's data will be a view of the array's data\n in this dataset, unless vectorized indexing was triggered by using\n an array indexer, in which case the data will be a copy.\n\n See Also\n --------\n Dataset.sel\n DataArray.isel\n """\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")\n if any(is_fancy_indexer(idx) for idx in indexers.values()):\n return self._isel_fancy(indexers, drop=drop, missing_dims=missing_dims)\n\n # Much faster algorithm for when all indexers are ints, slices, one-dimensional\n # lists, or zero or one-dimensional np.ndarray's\n indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)\n\n variables = {}\n dims: Dict[Hashable, int] = {}\n coord_names = self._coord_names.copy()\n indexes = self._indexes.copy() if self._indexes is not None else None\n\n for var_name, var_value in self._variables.items():\n var_indexers = {k: v for k, v in indexers.items() if k in var_value.dims}\n if var_indexers:\n var_value = var_value.isel(var_indexers)\n if drop and var_value.ndim == 0 and var_name in coord_names:\n coord_names.remove(var_name)\n if indexes:\n indexes.pop(var_name, None)\n continue\n if indexes and var_name in indexes:\n if var_value.ndim == 1:\n indexes[var_name] = var_value.to_index()\n else:\n del indexes[var_name]\n variables[var_name] = var_value\n dims.update(zip(var_value.dims, var_value.shape))\n\n return self._construct_direct(\n variables=variables,\n coord_names=coord_names,\n dims=dims,\n attrs=self._attrs,\n indexes=indexes,\n encoding=self._encoding,\n close=self._close,\n )\n\n def _isel_fancy(\n self,\n indexers: Mapping[Hashable, Any],\n *,\n drop: bool,\n missing_dims: str = "raise",\n ) -> "Dataset":\n # Note: we need to preserve the original indexers variable in order to merge the\n # coords below\n indexers_list = list(self._validate_indexers(indexers, missing_dims))\n\n variables: Dict[Hashable, Variable] = {}\n indexes: Dict[Hashable, pd.Index] = {}\n\n for name, var in self.variables.items():\n var_indexers = {k: v for k, v in indexers_list if k in var.dims}\n if drop and name in var_indexers:\n continue # drop this variable\n\n if name in self.indexes:\n new_var, new_index = isel_variable_and_index(\n name, var, self.indexes[name], var_indexers\n )\n if new_index is not None:\n indexes[name] = new_index\n elif var_indexers:\n new_var = var.isel(indexers=var_indexers)\n else:\n new_var = var.copy(deep=False)\n\n variables[name] = new_var\n\n coord_names = self._coord_names & variables.keys()\n selected = 
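# --- Illustrative usage sketch (not part of the module source above).
# Positional indexing with isel(), including drop=True to discard the
# scalar coordinate that results from integer indexing.
import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"t": (("x", "y"), np.arange(12).reshape(3, 4))},
    coords={"x": [10, 20, 30]},
)
print(ds.isel(x=slice(0, 2)))   # first two rows; coordinates are kept
print(ds.isel(x=0, drop=True))  # the scalar "x" coordinate is dropped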
self._replace_with_new_dims(variables, coord_names, indexes)\n\n # Extract coordinates from indexers\n coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(indexers)\n variables.update(coord_vars)\n indexes.update(new_indexes)\n coord_names = self._coord_names & variables.keys() | coord_vars.keys()\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def sel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n drop: bool = False,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed by tick labels\n along the specified dimension(s).\n\n In contrast to `Dataset.isel`, indexers for this method should use\n labels instead of integers.\n\n Under the hood, this method is powered by using pandas's powerful Index\n objects. This makes label based indexing essentially just as fast as\n using integer indexing.\n\n It also means this method uses pandas's (well documented) logic for\n indexing. This means you can use string shortcuts for datetime indexes\n (e.g., '2000-01' to select all values in January 2000). It also means\n that slices are treated as inclusive of both the start and stop values,\n unlike normal Python indexing.\n\n Parameters\n ----------\n indexers : dict, optional\n A dict with keys matching dimensions and values given\n by scalars, slices or arrays of tick labels. For dimensions with\n multi-index, the indexer may also be a dict-like object with keys\n matching index level names.\n If DataArrays are passed as indexers, xarray-style indexing will be\n carried out. See :ref:`indexing` for the details.\n One of indexers or indexers_kwargs must be provided.\n method : {None, \"nearest\", \"pad\", \"ffill\", \"backfill\", \"bfill\"}, optional\n Method to use for inexact matches:\n\n * None (default): only exact matches\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. 
The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n drop : bool, optional\n If ``drop=True``, drop coordinates variables in `indexers` instead\n of making them scalar.\n **indexers_kwargs : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n variable and dimension is indexed by the appropriate indexers.\n If indexer DataArrays have coordinates that do not conflict with\n this object, then these coordinates will be attached.\n In general, each array's data will be a view of the array's data\n in this dataset, unless vectorized indexing was triggered by using\n an array indexer, in which case the data will be a copy.\n\n See Also\n --------\n Dataset.isel\n DataArray.sel\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"sel\")\n pos_indexers, new_indexes = remap_label_indexers(\n self, indexers=indexers, method=method, tolerance=tolerance\n )\n result = self.isel(indexers=pos_indexers, drop=drop)\n return result._overwrite_indexes(new_indexes)\n\n def head(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with the first `n` values of each array\n for the specified dimension(s).\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n See Also\n --------\n Dataset.tail\n Dataset.thin\n DataArray.head\n \"\"\"\n if not indexers_kwargs:\n if indexers is None:\n indexers = 5\n if not isinstance(indexers, int) and not is_dict_like(indexers):\n raise TypeError(\"indexers must be either dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"head\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n indexers_slices = {k: slice(val) for k, val in indexers.items()}\n return self.isel(indexers_slices)\n\n def tail(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with the last `n` values of each array\n for the specified dimension(s).\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n See Also\n --------\n Dataset.head\n Dataset.thin\n DataArray.tail\n \"\"\"\n if not indexers_kwargs:\n if indexers is None:\n indexers = 5\n if not isinstance(indexers, int) and not is_dict_like(indexers):\n raise TypeError(\"indexers must be either 
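# --- Illustrative usage sketch (not part of the module source above).
# Label-based selection with sel(), using method="nearest" for inexact
# matches, plus head() for a quick preview.
import numpy as np
import xarray as xr

ds = xr.Dataset({"t": ("x", np.arange(5.0))}, coords={"x": [0.0, 0.5, 1.0, 1.5, 2.0]})
print(ds.sel(x=0.6, method="nearest"))  # snaps to the nearest label, x=0.5
print(ds.sel(x=slice(0.5, 1.5)))        # label slices include both endpoints
print(ds.head(x=3))                     # first three values along x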
dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"tail\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n indexers_slices = {\n k: slice(-val, None) if val != 0 else slice(val)\n for k, val in indexers.items()\n }\n return self.isel(indexers_slices)\n\n def thin(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed along every `n`-th\n value for the specified dimension(s)\n\n Parameters\n ----------\n indexers : dict or int\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n See Also\n --------\n Dataset.head\n Dataset.tail\n DataArray.thin\n \"\"\"\n if (\n not indexers_kwargs\n and not isinstance(indexers, int)\n and not is_dict_like(indexers)\n ):\n raise TypeError(\"indexers must be either dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"thin\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n elif v == 0:\n raise ValueError(\"step cannot be zero\")\n indexers_slices = {k: slice(None, None, val) for k, val in indexers.items()}\n return self.isel(indexers_slices)\n\n def broadcast_like(\n self, other: Union[\"Dataset\", \"DataArray\"], exclude: Iterable[Hashable] = None\n ) -> \"Dataset\":\n \"\"\"Broadcast this DataArray against another Dataset or DataArray.\n This is equivalent to xr.broadcast(other, self)[1]\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object against which to broadcast this array.\n exclude : iterable of hashable, optional\n Dimensions that must not be broadcasted\n\n \"\"\"\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n args = align(other, self, join=\"outer\", copy=False, exclude=exclude)\n\n dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)\n\n return _broadcast_helper(args[1], exclude, dims_map, common_coords)\n\n def reindex_like(\n self,\n other: Union[\"Dataset\", \"DataArray\"],\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: Any = dtypes.NA,\n ) -> \"Dataset\":\n \"\"\"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to pandas.Index objects, which provides coordinates upon\n which to index the variables in this dataset. The indexes on this\n other object need not be the same as the indexes on this\n dataset. 
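# --- Illustrative usage sketch (not part of the module source above).
# tail() and thin() complement head(); thin() keeps every n-th value along
# the given dimension(s).
import numpy as np
import xarray as xr

ds = xr.Dataset({"t": ("x", np.arange(10))})
print(ds.tail(x=3))  # last three values along x
print(ds.thin(x=2))  # every second value along x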
Any mis-matched index values will be filled in with\n NaN, and any mis-matched dimension names will simply be ignored.\n method : {None, \"nearest\", \"pad\", \"ffill\", \"backfill\", \"bfill\"}, optional\n Method to use for filling index values from other not found in this\n dataset:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. In either case, a new xarray object is always returned.\n fill_value : scalar or dict-like, optional\n Value to use for newly missing values. If a dict-like maps\n variable names to fill values.\n\n Returns\n -------\n reindexed : Dataset\n Another dataset, with this dataset's data but coordinates from the\n other object.\n\n See Also\n --------\n Dataset.reindex\n align\n \"\"\"\n indexers = alignment.reindex_like_indexers(self, other)\n return self.reindex(\n indexers=indexers,\n method=method,\n copy=copy,\n fill_value=fill_value,\n tolerance=tolerance,\n )\n\n def reindex(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: Any = dtypes.NA,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Conform this object onto a new set of indexes, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n indexers : dict, optional\n Dictionary with keys given by dimension names and values given by\n arrays of coordinates tick labels. Any mis-matched coordinate\n values will be filled in with NaN, and any mis-matched dimension\n names will simply be ignored.\n One of indexers or indexers_kwargs must be provided.\n method : {None, \"nearest\", \"pad\", \"ffill\", \"backfill\", \"bfill\"}, optional\n Method to use for filling index values in ``indexers`` not found in\n this dataset:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. In either case, a new xarray object is always returned.\n fill_value : scalar or dict-like, optional\n Value to use for newly missing values. 
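# --- Illustrative usage sketch (not part of the module source above).
# Conforming one dataset onto another's coordinates with reindex_like();
# labels absent from the source are filled with NaN by default.
import xarray as xr

a = xr.Dataset({"t": ("x", [1.0, 2.0, 3.0])}, coords={"x": [0, 1, 2]})
b = xr.Dataset({"u": ("x", [9.0, 9.0])}, coords={"x": [1, 3]})
print(a.reindex_like(b))  # x=1 is kept, x=3 is introduced as NaN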
If a dict-like,\n maps variable names (including coordinates) to fill values.\n **indexers_kwargs : {dim: indexer, ...}, optional\n Keyword arguments in the same form as ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n reindexed : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n See Also\n --------\n Dataset.reindex_like\n align\n pandas.Index.get_indexer\n\n Examples\n --------\n Create a dataset with some fictional data.\n\n >>> import xarray as xr\n >>> import pandas as pd\n >>> x = xr.Dataset(\n ... {\n ... "temperature": ("station", 20 * np.random.rand(4)),\n ... "pressure": ("station", 500 * np.random.rand(4)),\n ... },\n ... coords={"station": ["boston", "nyc", "seattle", "denver"]},\n ... )\n >>> x\n <xarray.Dataset>\n Dimensions: (station: 4)\n Coordinates:\n * station (station) <U7 'boston' 'nyc' 'seattle' 'denver'\n Data variables:\n temperature (station) float64 10.98 14.3 12.06 10.9\n pressure (station) float64 211.8 322.9 218.8 445.9\n >>> x.indexes\n station: Index(['boston', 'nyc', 'seattle', 'denver'], dtype='object', name='station')\n\n Create a new index and reindex the dataset. By default values in the new index that\n do not have corresponding records in the dataset are assigned `NaN`.\n\n >>> new_index = ["boston", "austin", "seattle", "lincoln"]\n >>> x.reindex({"station": new_index})\n <xarray.Dataset>\n Dimensions: (station: 4)\n Coordinates:\n * station (station) <U7 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 10.98 nan 12.06 nan\n pressure (station) float64 211.8 nan 218.8 nan\n\n We can fill in the missing values by passing a value to the keyword `fill_value`.\n\n >>> x.reindex({"station": new_index}, fill_value=0)\n <xarray.Dataset>\n Dimensions: (station: 4)\n Coordinates:\n * station (station) <U7 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 10.98 0.0 12.06 0.0\n pressure (station) float64 211.8 0.0 218.8 0.0\n\n We can also use different fill values for each variable.\n\n >>> x.reindex(\n ... {"station": new_index}, fill_value={"temperature": 0, "pressure": 100}\n ... )\n <xarray.Dataset>\n Dimensions: (station: 4)\n Coordinates:\n * station (station) <U7 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 10.98 0.0 12.06 0.0\n pressure (station) float64 211.8 100.0 218.8 100.0\n\n Because the index is not monotonically increasing or decreasing, we cannot use arguments\n to the keyword method to fill the `NaN` values.\n\n >>> x.reindex({"station": new_index}, method="nearest")\n Traceback (most recent call last):\n ...\n raise ValueError('index must be monotonic increasing or decreasing')\n ValueError: index must be monotonic increasing or decreasing\n\n To further illustrate the filling functionality in reindex, we will create a\n dataset with a monotonically increasing index (for example, a sequence of dates).\n\n >>> x2 = xr.Dataset(\n ... {\n ... "temperature": (\n ... "time",\n ... [15.57, 12.77, np.nan, 0.3081, 16.59, 15.12],\n ... ),\n ... "pressure": ("time", 500 * np.random.rand(6)),\n ... },\n ... coords={"time": pd.date_range("01/01/2019", periods=6, freq="D")},\n ... )\n >>> x2\n <xarray.Dataset>\n Dimensions: (time: 6)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-01 2019-01-02 ... 
2019-01-06\n Data variables:\n temperature (time) float64 15.57 12.77 nan 0.3081 16.59 15.12\n pressure (time) float64 481.8 191.7 395.9 264.4 284.0 462.8\n\n Suppose we decide to expand the dataset to cover a wider date range.\n\n >>> time_index2 = pd.date_range(\"12/29/2018\", periods=10, freq=\"D\")\n >>> x2.reindex({\"time\": time_index2})\n <xarray.Dataset>\n Dimensions: (time: 10)\n Coordinates:\n * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07\n Data variables:\n temperature (time) float64 nan nan nan 15.57 ... 0.3081 16.59 15.12 nan\n pressure (time) float64 nan nan nan 481.8 ... 264.4 284.0 462.8 nan\n\n The index entries that did not have a value in the original data frame (for example, `2018-12-29`)\n are by default filled with NaN. If desired, we can fill in the missing values using one of several options.\n\n For example, to back-propagate the last valid value to fill the `NaN` values,\n pass `bfill` as an argument to the `method` keyword.\n\n >>> x3 = x2.reindex({\"time\": time_index2}, method=\"bfill\")\n >>> x3\n <xarray.Dataset>\n Dimensions: (time: 10)\n Coordinates:\n * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07\n Data variables:\n temperature (time) float64 15.57 15.57 15.57 15.57 ... 16.59 15.12 nan\n pressure (time) float64 481.8 481.8 481.8 481.8 ... 284.0 462.8 nan\n\n Please note that the `NaN` value present in the original dataset (at index value `2019-01-03`)\n will not be filled by any of the value propagation schemes.\n\n >>> x2.where(x2.temperature.isnull(), drop=True)\n <xarray.Dataset>\n Dimensions: (time: 1)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-03\n Data variables:\n temperature (time) float64 nan\n pressure (time) float64 395.9\n >>> x3.where(x3.temperature.isnull(), drop=True)\n <xarray.Dataset>\n Dimensions: (time: 2)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-03 2019-01-07\n Data variables:\n temperature (time) float64 nan nan\n pressure (time) float64 395.9 nan\n\n This is because filling while reindexing does not look at dataset values, but only compares\n the original and desired indexes. 
If you do want to fill in the `NaN` values present in the\n original dataset, use the :py:meth:`~Dataset.fillna()` method.\n\n """\n return self._reindex(\n indexers,\n method,\n tolerance,\n copy,\n fill_value,\n sparse=False,\n **indexers_kwargs,\n )\n\n def _reindex(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: Any = dtypes.NA,\n sparse: bool = False,\n **indexers_kwargs: Any,\n ) -> "Dataset":\n """\n Same as reindex, but also supports the sparse option.\n """\n indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, "reindex")\n\n bad_dims = [d for d in indexers if d not in self.dims]\n if bad_dims:\n raise ValueError("invalid reindex dimensions: %s" % bad_dims)\n\n variables, indexes = alignment.reindex_variables(\n self.variables,\n self.sizes,\n self.indexes,\n indexers,\n method,\n tolerance,\n copy=copy,\n fill_value=fill_value,\n sparse=sparse,\n )\n coord_names = set(self._coord_names)\n coord_names.update(indexers)\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def interp(\n self,\n coords: Mapping[Hashable, Any] = None,\n method: str = "linear",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n **coords_kwargs: Any,\n ) -> "Dataset":\n """Multidimensional interpolation of Dataset.\n\n Parameters\n ----------\n coords : dict, optional\n Mapping from dimension names to the new coordinates.\n New coordinate can be a scalar, array-like or DataArray.\n If DataArrays are passed as new coordinates, their dimensions are\n used for the broadcasting. Missing values are skipped.\n method : str, optional\n {"linear", "nearest"} for multidimensional array,\n {"linear", "nearest", "zero", "slinear", "quadratic", "cubic"}\n for 1-dimensional array. "linear" is used by default.\n assume_sorted : bool, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs : dict, optional\n Additional keyword arguments passed to scipy's interpolator. Valid\n options and their behavior depend on whether 1-dimensional or\n multi-dimensional interpolation is used.\n **coords_kwargs : {dim: coordinate, ...}, optional\n The keyword arguments form of ``coords``.\n One of coords or coords_kwargs must be provided.\n\n Returns\n -------\n interpolated : Dataset\n New dataset on the new coordinates.\n\n Notes\n -----\n scipy is required.\n\n See Also\n --------\n scipy.interpolate.interp1d\n scipy.interpolate.interpn\n\n Examples\n --------\n >>> ds = xr.Dataset(\n ... data_vars={\n ... "a": ("x", [5, 7, 4]),\n ... "b": (\n ... ("x", "y"),\n ... [[1, 4, 2, 9], [2, 7, 6, np.nan], [6, np.nan, 5, 8]],\n ... ),\n ... },\n ... coords={"x": [0, 1, 2], "y": [10, 12, 14, 16]},\n ... )\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 3, y: 4)\n Coordinates:\n * x (x) int64 0 1 2\n * y (y) int64 10 12 14 16\n Data variables:\n a (x) int64 5 7 4\n b (x, y) float64 1.0 4.0 2.0 9.0 2.0 7.0 6.0 nan 6.0 nan 5.0 8.0\n\n 1D interpolation with the default method (linear):\n\n >>> ds.interp(x=[0, 0.75, 1.25, 1.75])\n <xarray.Dataset>\n Dimensions: (x: 4, y: 4)\n Coordinates:\n * y (y) int64 10 12 14 16\n * x (x) float64 0.0 0.75 1.25 1.75\n Data variables:\n a (x) float64 5.0 6.5 6.25 4.75\n b (x, y) float64 1.0 4.0 2.0 nan 1.75 6.25 ... 
nan 5.0 nan 5.25 nan\n\n 1D interpolation with a different method:\n\n >>> ds.interp(x=[0, 0.75, 1.25, 1.75], method=\"nearest\")\n <xarray.Dataset>\n Dimensions: (x: 4, y: 4)\n Coordinates:\n * y (y) int64 10 12 14 16\n * x (x) float64 0.0 0.75 1.25 1.75\n Data variables:\n a (x) float64 5.0 7.0 7.0 4.0\n b (x, y) float64 1.0 4.0 2.0 9.0 2.0 7.0 ... 6.0 nan 6.0 nan 5.0 8.0\n\n 1D extrapolation:\n\n >>> ds.interp(\n ... x=[1, 1.5, 2.5, 3.5],\n ... method=\"linear\",\n ... kwargs={\"fill_value\": \"extrapolate\"},\n ... )\n <xarray.Dataset>\n Dimensions: (x: 4, y: 4)\n Coordinates:\n * y (y) int64 10 12 14 16\n * x (x) float64 1.0 1.5 2.5 3.5\n Data variables:\n a (x) float64 7.0 5.5 2.5 -0.5\n b (x, y) float64 2.0 7.0 6.0 nan 4.0 nan ... 4.5 nan 12.0 nan 3.5 nan\n\n 2D interpolation:\n\n >>> ds.interp(x=[0, 0.75, 1.25, 1.75], y=[11, 13, 15], method=\"linear\")\n <xarray.Dataset>\n Dimensions: (x: 4, y: 3)\n Coordinates:\n * x (x) float64 0.0 0.75 1.25 1.75\n * y (y) int64 11 13 15\n Data variables:\n a (x) float64 5.0 6.5 6.25 4.75\n b (x, y) float64 2.5 3.0 nan 4.0 5.625 nan nan nan nan nan nan nan\n \"\"\"\n from . import missing\n\n if kwargs is None:\n kwargs = {}\n\n coords = either_dict_or_kwargs(coords, coords_kwargs, \"interp\")\n indexers = dict(self._validate_interp_indexers(coords))\n\n if coords:\n # This avoids broadcasting over coordinates that are both in\n # the original array AND in the indexing array. It essentially\n # forces interpolation along the shared coordinates.\n sdims = (\n set(self.dims)\n .intersection(*[set(nx.dims) for nx in indexers.values()])\n .difference(coords.keys())\n )\n indexers.update({d: self.variables[d] for d in sdims})\n\n obj = self if assume_sorted else self.sortby([k for k in coords])\n\n def maybe_variable(obj, k):\n # workaround to get variable for dimension without coordinate.\n try:\n return obj._variables[k]\n except KeyError:\n return as_variable((k, range(obj.dims[k])))\n\n def _validate_interp_indexer(x, new_x):\n # In the case of datetimes, the restrictions placed on indexers\n # used with interp are stronger than those which are placed on\n # isel, so we need an additional check after _validate_indexers.\n if _contains_datetime_like_objects(\n x\n ) and not _contains_datetime_like_objects(new_x):\n raise TypeError(\n \"When interpolating over a datetime-like \"\n \"coordinate, the coordinates to \"\n \"interpolate to must be either datetime \"\n \"strings or datetimes. 
\"\n \"Instead got\\n{}\".format(new_x)\n )\n return x, new_x\n\n variables: Dict[Hashable, Variable] = {}\n for name, var in obj._variables.items():\n if name in indexers:\n continue\n\n if var.dtype.kind in \"uifc\":\n var_indexers = {\n k: _validate_interp_indexer(maybe_variable(obj, k), v)\n for k, v in indexers.items()\n if k in var.dims\n }\n variables[name] = missing.interp(var, var_indexers, method, **kwargs)\n elif all(d not in indexers for d in var.dims):\n # keep unrelated object array\n variables[name] = var\n\n coord_names = obj._coord_names & variables.keys()\n indexes = {k: v for k, v in obj.indexes.items() if k not in indexers}\n selected = self._replace_with_new_dims(\n variables.copy(), coord_names, indexes=indexes\n )\n\n # attach indexer as coordinate\n variables.update(indexers)\n for k, v in indexers.items():\n assert isinstance(v, Variable)\n if v.dims == (k,):\n indexes[k] = v.to_index()\n\n # Extract coordinates from indexers\n coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(coords)\n variables.update(coord_vars)\n indexes.update(new_indexes)\n\n coord_names = obj._coord_names & variables.keys() | coord_vars.keys()\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def interp_like(\n self,\n other: Union[\"Dataset\", \"DataArray\"],\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n ) -> \"Dataset\":\n \"\"\"Interpolate this object onto the coordinates of another object,\n filling the out of range values with NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to an 1d array-like, which provides coordinates upon\n which to index the variables in this dataset. Missing values are skipped.\n method : str, optional\n {\"linear\", \"nearest\"} for multidimensional array,\n {\"linear\", \"nearest\", \"zero\", \"slinear\", \"quadratic\", \"cubic\"}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted : bool, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. 
If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs : dict, optional\n Additional keyword arguments passed to scipy's interpolator.\n\n Returns\n -------\n interpolated : Dataset\n Another dataset, produced by interpolating this dataset's data along\n the coordinates of the other object.\n\n Notes\n -----\n scipy is required.\n If the dataset has object-type coordinates, reindex is used for these\n coordinates instead of the interpolation.\n\n See Also\n --------\n Dataset.interp\n Dataset.reindex_like\n """\n if kwargs is None:\n kwargs = {}\n coords = alignment.reindex_like_indexers(self, other)\n\n numeric_coords: Dict[Hashable, pd.Index] = {}\n object_coords: Dict[Hashable, pd.Index] = {}\n for k, v in coords.items():\n if v.dtype.kind in "uifcMm":\n numeric_coords[k] = v\n else:\n object_coords[k] = v\n\n ds = self\n if object_coords:\n # We do not support interpolation along an object coordinate;\n # reindex instead.\n ds = self.reindex(object_coords)\n return ds.interp(numeric_coords, method, assume_sorted, kwargs)\n\n # Helper methods for rename()\n def _rename_vars(self, name_dict, dims_dict):\n variables = {}\n coord_names = set()\n for k, v in self.variables.items():\n var = v.copy(deep=False)\n var.dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)\n name = name_dict.get(k, k)\n if name in variables:\n raise ValueError(f"the new name {name!r} conflicts")\n variables[name] = var\n if k in self._coord_names:\n coord_names.add(name)\n return variables, coord_names\n\n def _rename_dims(self, name_dict):\n return {name_dict.get(k, k): v for k, v in self.dims.items()}\n\n def _rename_indexes(self, name_dict, dims_set):\n if self._indexes is None:\n return None\n indexes = {}\n for k, v in self.indexes.items():\n new_name = name_dict.get(k, k)\n if new_name not in dims_set:\n continue\n if isinstance(v, pd.MultiIndex):\n new_names = [name_dict.get(k, k) for k in v.names]\n index = v.rename(names=new_names)\n else:\n index = v.rename(new_name)\n indexes[new_name] = index\n return indexes\n\n def _rename_all(self, name_dict, dims_dict):\n variables, coord_names = self._rename_vars(name_dict, dims_dict)\n dims = self._rename_dims(dims_dict)\n indexes = self._rename_indexes(name_dict, dims.keys())\n return variables, coord_names, dims, indexes\n\n def rename(\n self,\n name_dict: Mapping[Hashable, Hashable] = None,\n **names: Hashable,\n ) -> "Dataset":\n """Returns a new object with renamed variables and dimensions.\n\n Parameters\n ----------\n name_dict : dict-like, optional\n Dictionary whose keys are current variable or dimension names and\n whose values are the desired names.\n **names : optional\n Keyword form of ``name_dict``.\n One of name_dict or names must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed variables and dimensions.\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename_vars\n Dataset.rename_dims\n DataArray.rename\n """\n name_dict = either_dict_or_kwargs(name_dict, names, "rename")\n for k in name_dict.keys():\n if k not in self and k not in self.dims:\n raise ValueError(\n "cannot rename %r because it is not a "\n "variable or dimension in this dataset" % k\n )\n\n variables, coord_names, dims, indexes = self._rename_all(\n name_dict=name_dict, dims_dict=name_dict\n )\n assert_unique_multiindex_level_names(variables)\n return self._replace(variables, coord_names, dims=dims, indexes=indexes)\n\n def rename_dims(\n self, dims_dict: Mapping[Hashable, Hashable] = None, 
**dims: Hashable\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed dimensions only.\n\n Parameters\n ----------\n dims_dict : dict-like, optional\n Dictionary whose keys are current dimension names and\n whose values are the desired names. The desired names must\n not be the name of an existing dimension or Variable in the Dataset.\n **dims : optional\n Keyword form of ``dims_dict``.\n One of dims_dict or dims must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed dimensions.\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename\n Dataset.rename_vars\n DataArray.rename\n \"\"\"\n dims_dict = either_dict_or_kwargs(dims_dict, dims, \"rename_dims\")\n for k, v in dims_dict.items():\n if k not in self.dims:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"dimension in this dataset\" % k\n )\n if v in self.dims or v in self:\n raise ValueError(\n f\"Cannot rename {k} to {v} because {v} already exists. \"\n \"Try using swap_dims instead.\"\n )\n\n variables, coord_names, sizes, indexes = self._rename_all(\n name_dict={}, dims_dict=dims_dict\n )\n return self._replace(variables, coord_names, dims=sizes, indexes=indexes)\n\n def rename_vars(\n self, name_dict: Mapping[Hashable, Hashable] = None, **names: Hashable\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed variables including coordinates\n\n Parameters\n ----------\n name_dict : dict-like, optional\n Dictionary whose keys are current variable or coordinate names and\n whose values are the desired names.\n **names : optional\n Keyword form of ``name_dict``.\n One of name_dict or names must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed variables including coordinates\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename\n Dataset.rename_dims\n DataArray.rename\n \"\"\"\n name_dict = either_dict_or_kwargs(name_dict, names, \"rename_vars\")\n for k in name_dict:\n if k not in self:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"variable or coordinate in this dataset\" % k\n )\n variables, coord_names, dims, indexes = self._rename_all(\n name_dict=name_dict, dims_dict={}\n )\n return self._replace(variables, coord_names, dims=dims, indexes=indexes)\n\n def swap_dims(\n self, dims_dict: Mapping[Hashable, Hashable] = None, **dims_kwargs\n ) -> \"Dataset\":\n \"\"\"Returns a new object with swapped dimensions.\n\n Parameters\n ----------\n dims_dict : dict-like\n Dictionary whose keys are current dimension names and whose values\n are new names.\n **dims_kwargs : {existing_dim: new_dim, ...}, optional\n The keyword arguments form of ``dims_dict``.\n One of dims_dict or dims_kwargs must be provided.\n\n Returns\n -------\n swapped : Dataset\n Dataset with swapped dimensions.\n\n Examples\n --------\n >>> ds = xr.Dataset(\n ... data_vars={\"a\": (\"x\", [5, 7]), \"b\": (\"x\", [0.1, 2.4])},\n ... coords={\"x\": [\"a\", \"b\"], \"y\": (\"x\", [0, 1])},\n ... 
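# --- Illustrative usage sketch (not part of the module source above).
# rename() handles variables and dimensions together, while rename_vars()
# and rename_dims() restrict what may be renamed.
import xarray as xr

ds = xr.Dataset({"v": ("x", [1, 2])})
print(ds.rename({"v": "w", "x": "col"}))  # renames both in one call
print(ds.rename_vars({"v": "w"}))         # variables/coordinates only
print(ds.rename_dims({"x": "col"}))       # dimensions only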
)\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 2)\n Coordinates:\n * x (x) <U1 'a' 'b'\n y (x) int64 0 1\n Data variables:\n a (x) int64 5 7\n b (x) float64 0.1 2.4\n\n >>> ds.swap_dims({\"x\": \"y\"})\n <xarray.Dataset>\n Dimensions: (y: 2)\n Coordinates:\n x (y) <U1 'a' 'b'\n * y (y) int64 0 1\n Data variables:\n a (y) int64 5 7\n b (y) float64 0.1 2.4\n\n >>> ds.swap_dims({\"x\": \"z\"})\n <xarray.Dataset>\n Dimensions: (z: 2)\n Coordinates:\n x (z) <U1 'a' 'b'\n y (z) int64 0 1\n Dimensions without coordinates: z\n Data variables:\n a (z) int64 5 7\n b (z) float64 0.1 2.4\n\n See Also\n --------\n Dataset.rename\n DataArray.swap_dims\n \"\"\"\n # TODO: deprecate this method in favor of a (less confusing)\n # rename_dims() method that only renames dimensions.\n\n dims_dict = either_dict_or_kwargs(dims_dict, dims_kwargs, \"swap_dims\")\n for k, v in dims_dict.items():\n if k not in self.dims:\n raise ValueError(\n \"cannot swap from dimension %r because it is \"\n \"not an existing dimension\" % k\n )\n if v in self.variables and self.variables[v].dims != (k,):\n raise ValueError(\n \"replacement dimension %r is not a 1D \"\n \"variable along the old dimension %r\" % (v, k)\n )\n\n result_dims = {dims_dict.get(dim, dim) for dim in self.dims}\n\n coord_names = self._coord_names.copy()\n coord_names.update({dim for dim in dims_dict.values() if dim in self.variables})\n\n variables: Dict[Hashable, Variable] = {}\n indexes: Dict[Hashable, pd.Index] = {}\n for k, v in self.variables.items():\n dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)\n if k in result_dims:\n var = v.to_index_variable()\n if k in self.indexes:\n indexes[k] = self.indexes[k]\n else:\n new_index = var.to_index()\n if new_index.nlevels == 1:\n # make sure index name matches dimension name\n new_index = new_index.rename(k)\n indexes[k] = new_index\n else:\n var = v.to_base_variable()\n var.dims = dims\n variables[k] = var\n\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def expand_dims(\n self,\n dim: Union[None, Hashable, Sequence[Hashable], Mapping[Hashable, Any]] = None,\n axis: Union[None, int, Sequence[int]] = None,\n **dim_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Return a new object with an additional axis (or axes) inserted at\n the corresponding position in the array shape. The new object is a\n view into the underlying array, not a copy.\n\n If dim is already a scalar coordinate, it will be promoted to a 1D\n coordinate consisting of a single value.\n\n Parameters\n ----------\n dim : hashable, sequence of hashable, mapping, or None\n Dimensions to include on the new variable. If provided as hashable\n or sequence of hashable, then dimensions are inserted with length\n 1. If provided as a mapping, then the keys are the new dimensions\n and the values are either integers (giving the length of the new\n dimensions) or array-like (giving the coordinates of the new\n dimensions).\n axis : int, sequence of int, or None\n Axis position(s) where new axis is to be inserted (position(s) on\n the result array). If a list (or tuple) of integers is passed,\n multiple axes are inserted. In this case, dim arguments should be\n same length list. If axis=None is passed, all the axes will be\n inserted to the start of the result array.\n **dim_kwargs : int or sequence or ndarray\n The keywords are arbitrary dimensions being inserted and the values\n are either the lengths of the new dims (if int is given), or their\n coordinates. 
Note, this is an alternative to passing a dict to the\n dim kwarg and will only be used if dim is None.\n\n Returns\n -------\n expanded : same type as caller\n This object, but with an additional dimension(s).\n \"\"\"\n if dim is None:\n pass\n elif isinstance(dim, Mapping):\n # We're later going to modify dim in place; don't tamper with\n # the input\n dim = dict(dim)\n elif isinstance(dim, int):\n raise TypeError(\n \"dim should be hashable or sequence of hashables or mapping\"\n )\n elif isinstance(dim, str) or not isinstance(dim, Sequence):\n dim = {dim: 1}\n elif isinstance(dim, Sequence):\n if len(dim) != len(set(dim)):\n raise ValueError(\"dims should not contain duplicate values.\")\n dim = {d: 1 for d in dim}\n\n dim = either_dict_or_kwargs(dim, dim_kwargs, \"expand_dims\")\n assert isinstance(dim, MutableMapping)\n\n if axis is None:\n axis = list(range(len(dim)))\n elif not isinstance(axis, Sequence):\n axis = [axis]\n\n if len(dim) != len(axis):\n raise ValueError(\"lengths of dim and axis should be identical.\")\n for d in dim:\n if d in self.dims:\n raise ValueError(f\"Dimension {d} already exists.\")\n if d in self._variables and not utils.is_scalar(self._variables[d]):\n raise ValueError(\n \"{dim} already exists as coordinate or\"\n \" variable name.\".format(dim=d)\n )\n\n variables: Dict[Hashable, Variable] = {}\n coord_names = self._coord_names.copy()\n # If dim is a dict, then ensure that the values are either integers\n # or iterables.\n for k, v in dim.items():\n if hasattr(v, \"__iter__\"):\n # If the value for the new dimension is an iterable, then\n # save the coordinates to the variables dict, and set the\n # value within the dim dict to the length of the iterable\n # for later use.\n variables[k] = xr.IndexVariable((k,), v)\n coord_names.add(k)\n dim[k] = variables[k].size\n elif isinstance(v, int):\n pass # Do nothing if the dimensions value is just an int\n else:\n raise TypeError(\n \"The value of new dimension {k} must be \"\n \"an iterable or an int\".format(k=k)\n )\n\n for k, v in self._variables.items():\n if k not in dim:\n if k in coord_names: # Do not change coordinates\n variables[k] = v\n else:\n result_ndim = len(v.dims) + len(axis)\n for a in axis:\n if a < -result_ndim or result_ndim - 1 < a:\n raise IndexError(\n f\"Axis {a} of variable {k} is out of bounds of the \"\n f\"expanded dimension size {result_ndim}\"\n )\n\n axis_pos = [a if a >= 0 else result_ndim + a for a in axis]\n if len(axis_pos) != len(set(axis_pos)):\n raise ValueError(\"axis should not contain duplicate values\")\n # We need to sort them to make sure `axis` equals to the\n # axis positions of the result array.\n zip_axis_dim = sorted(zip(axis_pos, dim.items()))\n\n all_dims = list(zip(v.dims, v.shape))\n for d, c in zip_axis_dim:\n all_dims.insert(d, c)\n variables[k] = v.set_dims(dict(all_dims))\n else:\n # If dims includes a label of a non-dimension coordinate,\n # it will be promoted to a 1D coordinate with a single value.\n variables[k] = v.set_dims(k).to_index_variable()\n\n new_dims = self._dims.copy()\n new_dims.update(dim)\n\n return self._replace_vars_and_dims(\n variables, dims=new_dims, coord_names=coord_names\n )\n\n def set_index(\n self,\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]] = None,\n append: bool = False,\n **indexes_kwargs: Union[Hashable, Sequence[Hashable]],\n ) -> \"Dataset\":\n \"\"\"Set Dataset (multi-)indexes using one or more existing coordinates\n or variables.\n\n Parameters\n ----------\n indexes : {dim: index, ...}\n 
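# --- Illustrative usage sketch (not part of the module source above).
# expand_dims() can insert a length-1 dimension, or a new dimension with
# explicit coordinate values.
import xarray as xr

ds = xr.Dataset({"t": ("x", [1.0, 2.0])})
print(ds.expand_dims("batch"))              # new leading dimension of size 1
print(ds.expand_dims({"time": [0, 1, 2]}))  # new dimension with coordinates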
Mapping from names matching dimensions and values given\n by (lists of) the names of existing coordinates or variables to set\n as new (multi-)index.\n append : bool, optional\n If True, append the supplied index(es) to the existing index(es).\n Otherwise replace the existing index(es) (default).\n **indexes_kwargs : optional\n The keyword arguments form of ``indexes``.\n One of indexes or indexes_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n Examples\n --------\n >>> arr = xr.DataArray(\n ... data=np.ones((2, 3)),\n ... dims=[\"x\", \"y\"],\n ... coords={\"x\": range(2), \"y\": range(3), \"a\": (\"x\", [3, 4])},\n ... )\n >>> ds = xr.Dataset({\"v\": arr})\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) int64 0 1\n * y (y) int64 0 1 2\n a (x) int64 3 4\n Data variables:\n v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0\n >>> ds.set_index(x=\"a\")\n <xarray.Dataset>\n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) int64 3 4\n * y (y) int64 0 1 2\n Data variables:\n v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0\n\n See Also\n --------\n Dataset.reset_index\n Dataset.swap_dims\n \"\"\"\n indexes = either_dict_or_kwargs(indexes, indexes_kwargs, \"set_index\")\n variables, coord_names = merge_indexes(\n indexes, self._variables, self._coord_names, append=append\n )\n return self._replace_vars_and_dims(variables, coord_names=coord_names)\n\n def reset_index(\n self,\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n drop: bool = False,\n ) -> \"Dataset\":\n \"\"\"Reset the specified index(es) or multi-index level(s).\n\n Parameters\n ----------\n dims_or_levels : str or list\n Name(s) of the dimension(s) and/or multi-index level(s) that will\n be reset.\n drop : bool, optional\n If True, remove the specified indexes and/or multi-index levels\n instead of extracting them as new coordinates (default: False).\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n See Also\n --------\n Dataset.set_index\n \"\"\"\n variables, coord_names = split_indexes(\n dims_or_levels,\n self._variables,\n self._coord_names,\n cast(Mapping[Hashable, Hashable], self._level_coords),\n drop=drop,\n )\n return self._replace_vars_and_dims(variables, coord_names=coord_names)\n\n def reorder_levels(\n self,\n dim_order: Mapping[Hashable, Sequence[int]] = None,\n **dim_order_kwargs: Sequence[int],\n ) -> \"Dataset\":\n \"\"\"Rearrange index levels using input order.\n\n Parameters\n ----------\n dim_order : optional\n Mapping from names matching dimensions and values given\n by lists representing new level orders. 
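# --- Illustrative usage sketch (not part of the module source above).
# set_index() and reset_index() move a coordinate in and out of the index
# along a dimension.
import xarray as xr

ds = xr.Dataset(
    {"v": ("x", [10.0, 20.0])},
    coords={"x": [0, 1], "a": ("x", [3, 4])},
)
indexed = ds.set_index(x="a")    # "a" becomes the index along x
print(indexed.indexes["x"])      # e.g. Int64Index([3, 4], ..., name='x')
print(indexed.reset_index("x"))  # demote the index back to a plain coordinate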
Every given dimension\n must have a multi-index.\n **dim_order_kwargs : optional\n The keyword arguments form of ``dim_order``.\n One of dim_order or dim_order_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced\n coordinates.\n \"\"\"\n dim_order = either_dict_or_kwargs(dim_order, dim_order_kwargs, \"reorder_levels\")\n variables = self._variables.copy()\n indexes = dict(self.indexes)\n for dim, order in dim_order.items():\n coord = self._variables[dim]\n index = self.indexes[dim]\n if not isinstance(index, pd.MultiIndex):\n raise ValueError(f\"coordinate {dim} has no MultiIndex\")\n new_index = index.reorder_levels(order)\n variables[dim] = IndexVariable(coord.dims, new_index)\n indexes[dim] = new_index\n\n return self._replace(variables, indexes=indexes)\n\n def _stack_once(self, dims, new_dim):\n if ... in dims:\n dims = list(infix_dims(dims, self.dims))\n variables = {}\n for name, var in self.variables.items():\n if name not in dims:\n if any(d in var.dims for d in dims):\n add_dims = [d for d in dims if d not in var.dims]\n vdims = list(var.dims) + add_dims\n shape = [self.dims[d] for d in vdims]\n exp_var = var.set_dims(vdims, shape)\n stacked_var = exp_var.stack(**{new_dim: dims})\n variables[name] = stacked_var\n else:\n variables[name] = var.copy(deep=False)\n\n # consider dropping levels that are unused?\n levels = [self.get_index(dim) for dim in dims]\n idx = utils.multiindex_from_product_levels(levels, names=dims)\n variables[new_dim] = IndexVariable(new_dim, idx)\n\n coord_names = set(self._coord_names) - set(dims) | {new_dim}\n\n indexes = {k: v for k, v in self.indexes.items() if k not in dims}\n indexes[new_dim] = idx\n\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def stack(\n self,\n dimensions: Mapping[Hashable, Sequence[Hashable]] = None,\n **dimensions_kwargs: Sequence[Hashable],\n ) -> \"Dataset\":\n \"\"\"\n Stack any number of existing dimensions into a single new dimension.\n\n New dimensions will be added at the end, and the corresponding\n coordinate variables will be combined into a MultiIndex.\n\n Parameters\n ----------\n dimensions : mapping of hashable to sequence of hashable\n Mapping of the form `new_name=(dim1, dim2, ...)`. Names of new\n dimensions, and the existing dimensions that they replace. An\n ellipsis (`...`) will be replaced by all unlisted dimensions.\n Passing a list containing an ellipsis (`stacked_dim=[...]`) will stack over\n all dimensions.\n **dimensions_kwargs\n The keyword arguments form of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n Returns\n -------\n stacked : Dataset\n Dataset with stacked data.\n\n See Also\n --------\n Dataset.unstack\n \"\"\"\n dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, \"stack\")\n result = self\n for new_dim, dims in dimensions.items():\n result = result._stack_once(dims, new_dim)\n return result\n\n def to_stacked_array(\n self,\n new_dim: Hashable,\n sample_dims: Sequence[Hashable],\n variable_dim: str = \"variable\",\n name: Hashable = None,\n ) -> \"DataArray\":\n \"\"\"Combine variables of differing dimensionality into a DataArray\n without broadcasting.\n\n This method is similar to Dataset.to_array but does not broadcast the\n variables.\n\n Parameters\n ----------\n new_dim : hashable\n Name of the new stacked coordinate\n sample_dims : sequence of hashable\n Dimensions that **will not** be stacked. 
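# --- Illustrative usage sketch (not part of the module source above).
# stack() folds several dimensions into one MultiIndex dimension;
# unstack() reverses the operation.
import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"v": (("x", "y"), np.arange(6).reshape(2, 3))},
    coords={"x": ["a", "b"], "y": [0, 1, 2]},
)
stacked = ds.stack(z=("x", "y"))  # z is a MultiIndex over (x, y)
print(stacked["v"].values)        # flattened to length 6
print(stacked.unstack("z"))       # round-trips back to (x, y)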
Each array in the dataset\n must share these dimensions. For machine learning applications,\n these define the dimensions over which samples are drawn.\n variable_dim : str, optional\n Name of the level in the stacked coordinate which corresponds to\n the variables.\n name : str, optional\n Name of the new data array.\n\n Returns\n -------\n stacked : DataArray\n DataArray with the specified dimensions and data variables\n stacked together. The stacked coordinate is named ``new_dim``\n and represented by a MultiIndex object with a level containing the\n data variable names. The name of this level is controlled using\n the ``variable_dim`` argument.\n\n See Also\n --------\n Dataset.to_array\n Dataset.stack\n DataArray.to_unstacked_dataset\n\n Examples\n --------\n >>> data = xr.Dataset(\n ... data_vars={\n ... \"a\": ((\"x\", \"y\"), [[0, 1, 2], [3, 4, 5]]),\n ... \"b\": (\"x\", [6, 7]),\n ... },\n ... coords={\"y\": [\"u\", \"v\", \"w\"]},\n ... )\n\n >>> data\n <xarray.Dataset>\n Dimensions: (x: 2, y: 3)\n Coordinates:\n * y (y) <U1 'u' 'v' 'w'\n Dimensions without coordinates: x\n Data variables:\n a (x, y) int64 0 1 2 3 4 5\n b (x) int64 6 7\n\n >>> data.to_stacked_array(\"z\", sample_dims=[\"x\"])\n <xarray.DataArray 'a' (x: 2, z: 4)>\n array([[0, 1, 2, 6],\n [3, 4, 5, 7]])\n Coordinates:\n * z (z) MultiIndex\n - variable (z) object 'a' 'a' 'a' 'b'\n - y (z) object 'u' 'v' 'w' nan\n Dimensions without coordinates: x\n\n \"\"\"\n stacking_dims = tuple(dim for dim in self.dims if dim not in sample_dims)\n\n for variable in self:\n dims = self[variable].dims\n dims_include_sample_dims = set(sample_dims) <= set(dims)\n if not dims_include_sample_dims:\n raise ValueError(\n \"All variables in the dataset must contain the \"\n \"dimensions {}.\".format(dims)\n )\n\n def ensure_stackable(val):\n assign_coords = {variable_dim: val.name}\n for dim in stacking_dims:\n if dim not in val.dims:\n assign_coords[dim] = None\n\n expand_dims = set(stacking_dims).difference(set(val.dims))\n expand_dims.add(variable_dim)\n # must be list for .expand_dims\n expand_dims = list(expand_dims)\n\n return (\n val.assign_coords(**assign_coords)\n .expand_dims(expand_dims)\n .stack({new_dim: (variable_dim,) + stacking_dims})\n )\n\n # concatenate the arrays\n stackable_vars = [ensure_stackable(self[key]) for key in self.data_vars]\n data_array = xr.concat(stackable_vars, dim=new_dim)\n\n # coerce the levels of the MultiIndex to have the same type as the\n # input dimensions. 
This code is messy, so it might be better to just\n # input a dummy value for the singleton dimension.\n idx = data_array.indexes[new_dim]\n levels = [idx.levels[0]] + [\n level.astype(self[level.name].dtype) for level in idx.levels[1:]\n ]\n new_idx = idx.set_levels(levels)\n data_array[new_dim] = IndexVariable(new_dim, new_idx)\n\n if name is not None:\n data_array.name = name\n\n return data_array\n\n def _unstack_once(self, dim: Hashable, fill_value) -> \"Dataset\":\n index = self.get_index(dim)\n index = remove_unused_levels_categories(index)\n\n variables: Dict[Hashable, Variable] = {}\n indexes = {k: v for k, v in self.indexes.items() if k != dim}\n\n for name, var in self.variables.items():\n if name != dim:\n if dim in var.dims:\n if isinstance(fill_value, Mapping):\n fill_value_ = fill_value[name]\n else:\n fill_value_ = fill_value\n\n variables[name] = var._unstack_once(\n index=index, dim=dim, fill_value=fill_value_\n )\n else:\n variables[name] = var\n\n for name, lev in zip(index.names, index.levels):\n variables[name] = IndexVariable(name, lev)\n indexes[name] = lev\n\n coord_names = set(self._coord_names) - {dim} | set(index.names)\n\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def _unstack_full_reindex(\n self, dim: Hashable, fill_value, sparse: bool\n ) -> \"Dataset\":\n index = self.get_index(dim)\n index = remove_unused_levels_categories(index)\n full_idx = pd.MultiIndex.from_product(index.levels, names=index.names)\n\n # take a shortcut in case the MultiIndex was not modified.\n if index.equals(full_idx):\n obj = self\n else:\n obj = self._reindex(\n {dim: full_idx}, copy=False, fill_value=fill_value, sparse=sparse\n )\n\n new_dim_names = index.names\n new_dim_sizes = [lev.size for lev in index.levels]\n\n variables: Dict[Hashable, Variable] = {}\n indexes = {k: v for k, v in self.indexes.items() if k != dim}\n\n for name, var in obj.variables.items():\n if name != dim:\n if dim in var.dims:\n new_dims = dict(zip(new_dim_names, new_dim_sizes))\n variables[name] = var.unstack({dim: new_dims})\n else:\n variables[name] = var\n\n for name, lev in zip(new_dim_names, index.levels):\n variables[name] = IndexVariable(name, lev)\n indexes[name] = lev\n\n coord_names = set(self._coord_names) - {dim} | set(new_dim_names)\n\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def unstack(\n self,\n dim: Union[Hashable, Iterable[Hashable]] = None,\n fill_value: Any = dtypes.NA,\n sparse: bool = False,\n ) -> \"Dataset\":\n \"\"\"\n Unstack existing dimensions corresponding to MultiIndexes into\n multiple new dimensions.\n\n New dimensions will be added at the end.\n\n Parameters\n ----------\n dim : hashable or iterable of hashable, optional\n Dimension(s) over which to unstack. By default unstacks all\n MultiIndexes.\n fill_value : scalar or dict-like, default: nan\n value to be filled. If a dict-like, maps variable names to\n fill values. 
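For example, ``fill_value={\"a\": 0.0}`` would fill a hypothetical variable ``a`` with zeros while other variables fall back to the default. 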
If not provided or if the dict-like does not\n contain all variables, the dtype's NA value will be used.\n sparse : bool, default: False\n use a sparse array if True\n\n Returns\n -------\n unstacked : Dataset\n Dataset with unstacked data.\n\n See Also\n --------\n Dataset.stack\n \"\"\"\n if dim is None:\n dims = [\n d for d in self.dims if isinstance(self.get_index(d), pd.MultiIndex)\n ]\n else:\n if isinstance(dim, str) or not isinstance(dim, Iterable):\n dims = [dim]\n else:\n dims = list(dim)\n\n missing_dims = [d for d in dims if d not in self.dims]\n if missing_dims:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dims\n )\n\n non_multi_dims = [\n d for d in dims if not isinstance(self.get_index(d), pd.MultiIndex)\n ]\n if non_multi_dims:\n raise ValueError(\n \"cannot unstack dimensions that do not \"\n \"have a MultiIndex: %s\" % non_multi_dims\n )\n\n result = self.copy(deep=False)\n for dim in dims:\n\n if (\n # Dask arrays don't support assignment by index, which the fast unstack\n # function requires.\n # https://github.com/pydata/xarray/pull/4746#issuecomment-753282125\n any(is_duck_dask_array(v.data) for v in self.variables.values())\n # Sparse doesn't currently support (though we could special-case\n # it)\n # https://github.com/pydata/sparse/issues/422\n or any(\n isinstance(v.data, sparse_array_type)\n for v in self.variables.values()\n )\n or sparse\n # numpy full_like only added `shape` in 1.17\n or LooseVersion(np.__version__) < LooseVersion(\"1.17\")\n # Until https://github.com/pydata/xarray/pull/4751 is resolved,\n # we check explicitly whether it's a numpy array. Once that is\n # resolved, explicitly exclude pint arrays.\n # # pint doesn't implement `np.full_like` in a way that's\n # # currently compatible.\n # # https://github.com/pydata/xarray/pull/4746#issuecomment-753425173\n # # or any(\n # # isinstance(v.data, pint_array_type) for v in self.variables.values()\n # # )\n or any(\n not isinstance(v.data, np.ndarray) for v in self.variables.values()\n )\n ):\n result = result._unstack_full_reindex(dim, fill_value, sparse)\n else:\n result = result._unstack_once(dim, fill_value)\n return result\n\n def update(self, other: \"CoercibleMapping\") -> \"Dataset\":\n \"\"\"Update this dataset's variables with those from another dataset.\n\n Just like :py:meth:`dict.update` this is an in-place operation.\n\n Parameters\n ----------\n other : Dataset or mapping\n Variables with which to update this dataset. One of:\n\n - Dataset\n - mapping {var name: DataArray}\n - mapping {var name: Variable}\n - mapping {var name: (dimension name, array-like)}\n - mapping {var name: (tuple of dimension names, array-like)}\n\n Returns\n -------\n updated : Dataset\n Updated dataset. 
Note that since the update is in-place this is the input\n dataset.\n\n It is deprecated since version 0.17 and scheduled to be removed in 0.19.\n\n Raises\n ------\n ValueError\n If any dimensions would have inconsistent sizes in the updated\n dataset.\n\n See Also\n --------\n Dataset.assign\n \"\"\"\n merge_result = dataset_update_method(self, other)\n return self._replace(inplace=True, **merge_result._asdict())\n\n def merge(\n self,\n other: Union[\"CoercibleMapping\", \"DataArray\"],\n overwrite_vars: Union[Hashable, Iterable[Hashable]] = frozenset(),\n compat: str = \"no_conflicts\",\n join: str = \"outer\",\n fill_value: Any = dtypes.NA,\n combine_attrs: str = \"override\",\n ) -> \"Dataset\":\n \"\"\"Merge the arrays of two datasets into a single dataset.\n\n This method generally does not allow for overriding data, with the\n exception of attributes, which are ignored on the second dataset.\n Variables with the same name are checked for conflicts via the equals\n or identical methods.\n\n Parameters\n ----------\n other : Dataset or mapping\n Dataset or variables to merge with this dataset.\n overwrite_vars : hashable or iterable of hashable, optional\n If provided, update variables of these name(s) without checking for\n conflicts in this dataset.\n compat : {\"broadcast_equals\", \"equals\", \"identical\", \\\n \"no_conflicts\"}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n join : {\"outer\", \"inner\", \"left\", \"right\", \"exact\"}, optional\n Method for joining ``self`` and ``other`` along shared dimensions:\n\n - 'outer': use the union of the indexes\n - 'inner': use the intersection of the indexes\n - 'left': use indexes from ``self``\n - 'right': use indexes from ``other``\n - 'exact': error instead of aligning non-equal indexes\n fill_value : scalar or dict-like, optional\n Value to use for newly missing values. 
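For example, ``fill_value=0`` would fill every newly introduced gap with zero. 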
If a dict-like, maps\n variable names (including coordinates) to fill values.\n combine_attrs : {\"drop\", \"identical\", \"no_conflicts\", \"drop_conflicts\", \\\n \"override\"}, default: \"override\"\n String indicating how to combine attrs of the objects being merged:\n\n - \"drop\": empty attrs on returned Dataset.\n - \"identical\": all attrs must be the same on every object.\n - \"no_conflicts\": attrs from all objects are combined, any that have\n the same name must also have the same value.\n - \"drop_conflicts\": attrs from all objects are combined, any that have\n the same name but different values are dropped.\n - \"override\": skip comparing and copy attrs from the first dataset to\n the result.\n\n Returns\n -------\n merged : Dataset\n Merged dataset.\n\n Raises\n ------\n MergeError\n If any variables conflict (see ``compat``).\n \"\"\"\n other = other.to_dataset() if isinstance(other, xr.DataArray) else other\n merge_result = dataset_merge_method(\n self,\n other,\n overwrite_vars=overwrite_vars,\n compat=compat,\n join=join,\n fill_value=fill_value,\n combine_attrs=combine_attrs,\n )\n return self._replace(**merge_result._asdict())\n\n def _assert_all_in_dataset(\n self, names: Iterable[Hashable], virtual_okay: bool = False\n ) -> None:\n bad_names = set(names) - set(self._variables)\n if virtual_okay:\n bad_names -= self.virtual_variables\n if bad_names:\n raise ValueError(\n \"One or more of the specified variables \"\n \"cannot be found in this dataset\"\n )\n\n def drop_vars(\n self, names: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"Dataset\":\n \"\"\"Drop variables from this dataset.\n\n Parameters\n ----------\n names : hashable or iterable of hashable\n Name(s) of variables to drop.\n errors : {\"raise\", \"ignore\"}, optional\n If 'raise' (default), raises a ValueError if any of the variables\n passed are not in the dataset. 
If 'ignore', any given names that are in the\n dataset are dropped and no error is raised.\n\n Returns\n -------\n dropped : Dataset\n\n \"\"\"\n # the Iterable check is required for mypy\n if is_scalar(names) or not isinstance(names, Iterable):\n names = {names}\n else:\n names = set(names)\n if errors == \"raise\":\n self._assert_all_in_dataset(names)\n\n variables = {k: v for k, v in self._variables.items() if k not in names}\n coord_names = {k for k in self._coord_names if k in variables}\n indexes = {k: v for k, v in self.indexes.items() if k not in names}\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def drop(self, labels=None, dim=None, *, errors=\"raise\", **labels_kwargs):\n \"\"\"Backward compatible method based on `drop_vars` and `drop_sel`\n\n Using either `drop_vars` or `drop_sel` is encouraged\n\n See Also\n --------\n Dataset.drop_vars\n Dataset.drop_sel\n \"\"\"\n if errors not in [\"raise\", \"ignore\"]:\n raise ValueError('errors must be either \"raise\" or \"ignore\"')\n\n if is_dict_like(labels) and not isinstance(labels, dict):\n warnings.warn(\n \"dropping coordinates using `drop` is deprecated; use drop_vars.\",\n FutureWarning,\n stacklevel=2,\n )\n return self.drop_vars(labels, errors=errors)\n\n if labels_kwargs or isinstance(labels, dict):\n if dim is not None:\n raise ValueError(\"cannot specify dim and dict-like arguments.\")\n labels = either_dict_or_kwargs(labels, labels_kwargs, \"drop\")\n\n if dim is None and (is_scalar(labels) or isinstance(labels, Iterable)):\n warnings.warn(\n \"dropping variables using `drop` will be deprecated; using drop_vars is encouraged.\",\n PendingDeprecationWarning,\n stacklevel=2,\n )\n return self.drop_vars(labels, errors=errors)\n if dim is not None:\n warnings.warn(\n \"dropping labels using list-like labels is deprecated; using \"\n \"dict-like arguments with `drop_sel`, e.g. `ds.drop_sel(dim=[labels])`.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return self.drop_sel({dim: labels}, errors=errors, **labels_kwargs)\n\n warnings.warn(\n \"dropping labels using `drop` will be deprecated; using drop_sel is encouraged.\",\n PendingDeprecationWarning,\n stacklevel=2,\n )\n return self.drop_sel(labels, errors=errors)\n\n def drop_sel(self, labels=None, *, errors=\"raise\", **labels_kwargs):\n \"\"\"Drop index labels from this dataset.\n\n Parameters\n ----------\n labels : mapping of hashable to Any\n Index labels to drop\n errors : {\"raise\", \"ignore\"}, optional\n If 'raise' (default), raises a ValueError if\n any of the index labels passed are not\n in the dataset. 
If 'ignore', any given labels that are in the\n dataset are dropped and no error is raised.\n **labels_kwargs : {dim: label, ...}, optional\n The keyword arguments form of ``dim`` and ``labels``\n\n Returns\n -------\n dropped : Dataset\n\n Examples\n --------\n >>> data = np.arange(6).reshape(2, 3)\n >>> labels = [\"a\", \"b\", \"c\"]\n >>> ds = xr.Dataset({\"A\": ([\"x\", \"y\"], data), \"y\": labels})\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 2, y: 3)\n Coordinates:\n * y (y) <U1 'a' 'b' 'c'\n Dimensions without coordinates: x\n Data variables:\n A (x, y) int64 0 1 2 3 4 5\n >>> ds.drop_sel(y=[\"a\", \"c\"])\n <xarray.Dataset>\n Dimensions: (x: 2, y: 1)\n Coordinates:\n * y (y) <U1 'b'\n Dimensions without coordinates: x\n Data variables:\n A (x, y) int64 1 4\n >>> ds.drop_sel(y=\"b\")\n <xarray.Dataset>\n Dimensions: (x: 2, y: 2)\n Coordinates:\n * y (y) <U1 'a' 'c'\n Dimensions without coordinates: x\n Data variables:\n A (x, y) int64 0 2 3 5\n \"\"\"\n if errors not in [\"raise\", \"ignore\"]:\n raise ValueError('errors must be either \"raise\" or \"ignore\"')\n\n labels = either_dict_or_kwargs(labels, labels_kwargs, \"drop_sel\")\n\n ds = self\n for dim, labels_for_dim in labels.items():\n # Don't cast to set, as it would harm performance when labels\n # is a large numpy array\n if utils.is_scalar(labels_for_dim):\n labels_for_dim = [labels_for_dim]\n labels_for_dim = np.asarray(labels_for_dim)\n try:\n index = self.get_index(dim)\n except KeyError:\n raise ValueError(\"dimension %r does not have coordinate labels\" % dim)\n new_index = index.drop(labels_for_dim, errors=errors)\n ds = ds.loc[{dim: new_index}]\n return ds\n\n def drop_isel(self, indexers=None, **indexers_kwargs):\n \"\"\"Drop index positions from this Dataset.\n\n Parameters\n ----------\n indexers : mapping of hashable to Any\n Index locations to drop\n **indexers_kwargs : {dim: position, ...}, optional\n The keyword arguments form of ``dim`` and ``positions``\n\n Returns\n -------\n dropped : Dataset\n\n Raises\n ------\n IndexError\n\n Examples\n --------\n >>> data = np.arange(6).reshape(2, 3)\n >>> labels = [\"a\", \"b\", \"c\"]\n >>> ds = xr.Dataset({\"A\": ([\"x\", \"y\"], data), \"y\": labels})\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 2, y: 3)\n Coordinates:\n * y (y) <U1 'a' 'b' 'c'\n Dimensions without coordinates: x\n Data variables:\n A (x, y) int64 0 1 2 3 4 5\n >>> ds.drop_isel(y=[0, 2])\n <xarray.Dataset>\n Dimensions: (x: 2, y: 1)\n Coordinates:\n * y (y) <U1 'b'\n Dimensions without coordinates: x\n Data variables:\n A (x, y) int64 1 4\n >>> ds.drop_isel(y=1)\n <xarray.Dataset>\n Dimensions: (x: 2, y: 2)\n Coordinates:\n * y (y) <U1 'a' 'c'\n Dimensions without coordinates: x\n Data variables:\n A (x, y) int64 0 2 3 5\n \"\"\"\n\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"drop_isel\")\n\n ds = self\n dimension_index = {}\n for dim, pos_for_dim in indexers.items():\n # Don't cast to set, as it would harm performance when labels\n # is a large numpy array\n if utils.is_scalar(pos_for_dim):\n pos_for_dim = [pos_for_dim]\n pos_for_dim = np.asarray(pos_for_dim)\n index = self.get_index(dim)\n new_index = index.delete(pos_for_dim)\n dimension_index[dim] = new_index\n ds = ds.loc[dimension_index]\n return ds\n\n def drop_dims(\n self, drop_dims: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"Dataset\":\n \"\"\"Drop dimensions and associated variables from this dataset.\n\n Parameters\n ----------\n drop_dims : hashable or iterable of hashable\n 
Dimension or dimensions to drop.\n errors : {\"raise\", \"ignore\"}, optional\n If 'raise' (default), raises a ValueError if any of the\n dimensions passed are not in the dataset. If 'ignore', any given\n dimensions that are in the dataset are dropped and no error is raised.\n\n Returns\n -------\n obj : Dataset\n The dataset without the given dimensions (or any variables\n containing those dimensions).\n \"\"\"\n if errors not in [\"raise\", \"ignore\"]:\n raise ValueError('errors must be either \"raise\" or \"ignore\"')\n\n if isinstance(drop_dims, str) or not isinstance(drop_dims, Iterable):\n drop_dims = {drop_dims}\n else:\n drop_dims = set(drop_dims)\n\n if errors == \"raise\":\n missing_dims = drop_dims - set(self.dims)\n if missing_dims:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dims\n )\n\n drop_vars = {k for k, v in self._variables.items() if set(v.dims) & drop_dims}\n return self.drop_vars(drop_vars)\n\n def transpose(self, *dims: Hashable) -> \"Dataset\":\n \"\"\"Return a new Dataset object with all array dimensions transposed.\n\n Although the order of dimensions on each array will change, the dataset\n dimensions themselves will remain in fixed (sorted) order.\n\n Parameters\n ----------\n *dims : hashable, optional\n By default, reverse the dimensions on each array. Otherwise,\n reorder the dimensions to this order.\n\n Returns\n -------\n transposed : Dataset\n Each array in the dataset (including coordinates) will be\n transposed to the given order.\n\n Notes\n -----\n This operation returns a view of each array's data. It is\n lazy for dask-backed DataArrays but not for numpy-backed DataArrays\n -- the data will be fully loaded into memory.\n\n See Also\n --------\n numpy.transpose\n DataArray.transpose\n \"\"\"\n if dims:\n if set(dims) ^ set(self.dims) and ... not in dims:\n raise ValueError(\n \"arguments to transpose (%s) must be \"\n \"permuted dataset dimensions (%s)\" % (dims, tuple(self.dims))\n )\n ds = self.copy()\n for name, var in self._variables.items():\n var_dims = tuple(dim for dim in dims if dim in (var.dims + (...,)))\n ds._variables[name] = var.transpose(*var_dims)\n return ds\n\n def dropna(\n self,\n dim: Hashable,\n how: str = \"any\",\n thresh: int = None,\n subset: Iterable[Hashable] = None,\n ):\n \"\"\"Returns a new dataset with dropped labels for missing values along\n the provided dimension.\n\n Parameters\n ----------\n dim : hashable\n Dimension along which to drop missing values. Dropping along\n multiple dimensions simultaneously is not yet supported.\n how : {\"any\", \"all\"}, default: \"any\"\n * any : if any NA values are present, drop that label\n * all : if all values are NA, drop that label\n thresh : int, default: None\n If supplied, require this many non-NA values.\n subset : iterable of hashable, optional\n Which variables to check for missing values. By default, all\n variables in the dataset are checked.\n\n Returns\n -------\n Dataset\n \"\"\"\n # TODO: consider supporting multiple dimensions? 
Or not, given that\n # there are some ugly edge cases, e.g., pandas's dropna differs\n # depending on the order of the supplied axes.\n\n if dim not in self.dims:\n raise ValueError(\"%s must be a single dataset dimension\" % dim)\n\n if subset is None:\n subset = iter(self.data_vars)\n\n count = np.zeros(self.dims[dim], dtype=np.int64)\n size = np.int_(0) # for type checking\n\n for k in subset:\n array = self._variables[k]\n if dim in array.dims:\n dims = [d for d in array.dims if d != dim]\n count += np.asarray(array.count(dims)) # type: ignore[attr-defined]\n size += np.prod([self.dims[d] for d in dims])\n\n if thresh is not None:\n mask = count >= thresh\n elif how == \"any\":\n mask = count == size\n elif how == \"all\":\n mask = count > 0\n elif how is not None:\n raise ValueError(\"invalid how option: %s\" % how)\n else:\n raise TypeError(\"must specify how or thresh\")\n\n return self.isel({dim: mask})\n\n def fillna(self, value: Any) -> \"Dataset\":\n \"\"\"Fill missing values in this object.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : scalar, ndarray, DataArray, dict or Dataset\n Used to fill all matching missing values in this dataset's data\n variables. Scalars, ndarrays or DataArrays arguments are used to\n fill all data with aligned coordinates (for DataArrays).\n Dictionaries or datasets match data variables and then align\n coordinates if necessary.\n\n Returns\n -------\n Dataset\n\n Examples\n --------\n >>> import numpy as np\n >>> import xarray as xr\n >>> ds = xr.Dataset(\n ... {\n ... \"A\": (\"x\", [np.nan, 2, np.nan, 0]),\n ... \"B\": (\"x\", [3, 4, np.nan, 1]),\n ... \"C\": (\"x\", [np.nan, np.nan, np.nan, 5]),\n ... \"D\": (\"x\", [np.nan, 3, np.nan, 4]),\n ... },\n ... coords={\"x\": [0, 1, 2, 3]},\n ... 
)\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 nan 2.0 nan 0.0\n B (x) float64 3.0 4.0 nan 1.0\n C (x) float64 nan nan nan 5.0\n D (x) float64 nan 3.0 nan 4.0\n\n Replace all `NaN` values with 0s.\n\n >>> ds.fillna(0)\n <xarray.Dataset>\n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 0.0 2.0 0.0 0.0\n B (x) float64 3.0 4.0 0.0 1.0\n C (x) float64 0.0 0.0 0.0 5.0\n D (x) float64 0.0 3.0 0.0 4.0\n\n Replace all `NaN` elements in columns ‘A’, ‘B’, ‘C’, and ‘D’ with 0, 1, 2, and 3 respectively.\n\n >>> values = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n >>> ds.fillna(value=values)\n <xarray.Dataset>\n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 0.0 2.0 0.0 0.0\n B (x) float64 3.0 4.0 1.0 1.0\n C (x) float64 2.0 2.0 2.0 5.0\n D (x) float64 3.0 3.0 3.0 4.0\n \"\"\"\n if utils.is_dict_like(value):\n value_keys = getattr(value, \"data_vars\", value).keys()\n if not set(value_keys) <= set(self.data_vars.keys()):\n raise ValueError(\n \"all variables in the argument to `fillna` \"\n \"must be contained in the original dataset\"\n )\n out = ops.fillna(self, value)\n return out\n\n def interpolate_na(\n self,\n dim: Hashable = None,\n method: str = \"linear\",\n limit: int = None,\n use_coordinate: Union[bool, Hashable] = True,\n max_gap: Union[\n int, float, str, pd.Timedelta, np.timedelta64, datetime.timedelta\n ] = None,\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Fill in NaNs by interpolating according to different methods.\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to interpolate.\n method : str, optional\n String indicating which method to use for interpolation:\n\n - 'linear': linear interpolation (Default). Additional keyword\n arguments are passed to :py:func:`numpy.interp`\n - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'polynomial':\n are passed to :py:func:`scipy.interpolate.interp1d`. If\n ``method='polynomial'``, the ``order`` keyword argument must also be\n provided.\n - 'barycentric', 'krog', 'pchip', 'spline', 'akima': use their\n respective :py:class:`scipy.interpolate` classes.\n use_coordinate : bool, str, default: True\n Specifies which index to use as the x values in the interpolation\n formulated as `y = f(x)`. If False, values are treated as if\n equally-spaced along ``dim``. If True, the IndexVariable `dim` is\n used. If ``use_coordinate`` is a string, it specifies the name of a\n coordinate variable to use as the index.\n limit : int, default: None\n Maximum number of consecutive NaNs to fill. Must be greater than 0\n or None for no limit. This filling is done regardless of the size of\n the gap in the data. To only interpolate over gaps less than a given length,\n see ``max_gap``.\n max_gap : int, float, str, pandas.Timedelta, numpy.timedelta64, datetime.timedelta, default: None\n Maximum size of gap, a continuous sequence of NaNs, that will be filled.\n Use None for no limit. When interpolating along a datetime64 dimension\n and ``use_coordinate=True``, ``max_gap`` can be one of the following:\n\n - a string that is valid input for pandas.to_timedelta\n - a :py:class:`numpy.timedelta64` object\n - a :py:class:`pandas.Timedelta` object\n - a :py:class:`datetime.timedelta` object\n\n Otherwise, ``max_gap`` must be an int or a float. Use of ``max_gap`` with unlabeled\n dimensions has not been implemented yet. 
Gap length is defined as the difference\n between coordinate values at the first data point after a gap and the last value\n before a gap. For gaps at the beginning (end), gap length is defined as the difference\n between coordinate values at the first (last) valid data point and the first (last) NaN.\n For example, consider::\n\n <xarray.DataArray (x: 9)>\n array([nan, nan, nan, 1., nan, nan, 4., nan, nan])\n Coordinates:\n * x (x) int64 0 1 2 3 4 5 6 7 8\n\n The gap lengths are 3-0 = 3; 6-3 = 3; and 8-6 = 2 respectively.\n kwargs : dict, optional\n parameters passed verbatim to the underlying interpolation function\n\n Returns\n -------\n interpolated : Dataset\n Filled in Dataset.\n\n See Also\n --------\n numpy.interp\n scipy.interpolate\n\n Examples\n --------\n >>> ds = xr.Dataset(\n ... {\n ... \"A\": (\"x\", [np.nan, 2, 3, np.nan, 0]),\n ... \"B\": (\"x\", [3, 4, np.nan, 1, 7]),\n ... \"C\": (\"x\", [np.nan, np.nan, np.nan, 5, 0]),\n ... \"D\": (\"x\", [np.nan, 3, np.nan, -1, 4]),\n ... },\n ... coords={\"x\": [0, 1, 2, 3, 4]},\n ... )\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 0 1 2 3 4\n Data variables:\n A (x) float64 nan 2.0 3.0 nan 0.0\n B (x) float64 3.0 4.0 nan 1.0 7.0\n C (x) float64 nan nan nan 5.0 0.0\n D (x) float64 nan 3.0 nan -1.0 4.0\n\n >>> ds.interpolate_na(dim=\"x\", method=\"linear\")\n <xarray.Dataset>\n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 0 1 2 3 4\n Data variables:\n A (x) float64 nan 2.0 3.0 1.5 0.0\n B (x) float64 3.0 4.0 2.5 1.0 7.0\n C (x) float64 nan nan nan 5.0 0.0\n D (x) float64 nan 3.0 1.0 -1.0 4.0\n\n >>> ds.interpolate_na(dim=\"x\", method=\"linear\", fill_value=\"extrapolate\")\n <xarray.Dataset>\n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 0 1 2 3 4\n Data variables:\n A (x) float64 1.0 2.0 3.0 1.5 0.0\n B (x) float64 3.0 4.0 2.5 1.0 7.0\n C (x) float64 20.0 15.0 10.0 5.0 0.0\n D (x) float64 5.0 3.0 1.0 -1.0 4.0\n \"\"\"\n from .missing import _apply_over_vars_with_dim, interp_na\n\n new = _apply_over_vars_with_dim(\n interp_na,\n self,\n dim=dim,\n method=method,\n limit=limit,\n use_coordinate=use_coordinate,\n max_gap=max_gap,\n **kwargs,\n )\n return new\n\n def ffill(self, dim: Hashable, limit: int = None) -> \"Dataset\":\n \"\"\"Fill NaN values by propagating values forward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : Hashable\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default: None\n The maximum number of consecutive NaN values to forward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n Dataset\n \"\"\"\n from .missing import _apply_over_vars_with_dim, ffill\n\n new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit)\n return new\n\n def bfill(self, dim: Hashable, limit: int = None) -> \"Dataset\":\n \"\"\"Fill NaN values by propagating values backward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default: None\n The maximum number of consecutive NaN values to backward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. 
Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n Dataset\n \"\"\"\n from .missing import _apply_over_vars_with_dim, bfill\n\n new = _apply_over_vars_with_dim(bfill, self, dim=dim, limit=limit)\n return new\n\n def combine_first(self, other: \"Dataset\") -> \"Dataset\":\n \"\"\"Combine two Datasets, default to data_vars of self.\n\n The new coordinates follow the normal broadcasting and alignment rules\n of ``join='outer'``. Vacant cells in the expanded coordinates are\n filled with np.nan.\n\n Parameters\n ----------\n other : Dataset\n Used to fill all matching missing values in this array.\n\n Returns\n -------\n Dataset\n \"\"\"\n out = ops.fillna(self, other, join=\"outer\", dataset_join=\"outer\")\n return out\n\n def reduce(\n self,\n func: Callable,\n dim: Union[Hashable, Iterable[Hashable]] = None,\n keep_attrs: bool = None,\n keepdims: bool = False,\n numeric_only: bool = False,\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Reduce this dataset by applying `func` along some dimension(s).\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form\n `f(x, axis=axis, **kwargs)` to return the result of reducing an\n np.ndarray over an integer valued axis.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `func`. By default `func` is\n applied over all dimensions.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n keepdims : bool, default: False\n If True, the dimensions which are reduced are left in the result\n as dimensions of size one. Coordinates that use these dimensions\n are removed.\n numeric_only : bool, optional\n If True, only apply ``func`` to variables with a numeric dtype.\n **kwargs : Any\n Additional keyword arguments passed on to ``func``.\n\n Returns\n -------\n reduced : Dataset\n Dataset with this object's DataArrays replaced with new DataArrays\n of summarized data and the indicated dimension(s) removed.\n \"\"\"\n if \"axis\" in kwargs:\n raise ValueError(\n \"passing 'axis' to Dataset reduce methods is ambiguous.\"\n \" Please use 'dim' instead.\"\n )\n\n if dim is None or dim is ...:\n dims = set(self.dims)\n elif isinstance(dim, str) or not isinstance(dim, Iterable):\n dims = {dim}\n else:\n dims = set(dim)\n\n missing_dimensions = [d for d in dims if d not in self.dims]\n if missing_dimensions:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dimensions\n )\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n variables: Dict[Hashable, Variable] = {}\n for name, var in self._variables.items():\n reduce_dims = [d for d in var.dims if d in dims]\n if name in self.coords:\n if not reduce_dims:\n variables[name] = var\n else:\n if (\n not numeric_only\n or np.issubdtype(var.dtype, np.number)\n or (var.dtype == np.bool_)\n ):\n if len(reduce_dims) == 1:\n # unpack dimensions for the benefit of functions\n # like np.argmin which can't handle tuple arguments\n (reduce_dims,) = reduce_dims\n elif len(reduce_dims) == var.ndim:\n # prefer to aggregate over axis=None rather than\n # axis=(0, 1) if they will be equivalent, because\n # the former is often more efficient\n reduce_dims = None # type: ignore[assignment]\n variables[name] = var.reduce(\n func,\n dim=reduce_dims,\n keep_attrs=keep_attrs,\n keepdims=keepdims,\n **kwargs,\n )\n\n coord_names = {k for k in self.coords if k in 
variables}\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n attrs = self.attrs if keep_attrs else None\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, attrs=attrs, indexes=indexes\n )\n\n def map(\n self,\n func: Callable,\n keep_attrs: bool = None,\n args: Iterable[Any] = (),\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Apply a function to each variable in this dataset\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form `func(x, *args, **kwargs)`\n to transform each DataArray `x` in this dataset into another\n DataArray.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False, the new object will\n be returned without attributes.\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs : Any\n Keyword arguments passed on to `func`.\n\n Returns\n -------\n applied : Dataset\n Resulting dataset from applying ``func`` to each data variable.\n\n Examples\n --------\n >>> da = xr.DataArray(np.random.randn(2, 3))\n >>> ds = xr.Dataset({\"foo\": da, \"bar\": (\"x\", [-1, 2])})\n >>> ds\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Dimensions without coordinates: dim_0, dim_1, x\n Data variables:\n foo (dim_0, dim_1) float64 1.764 0.4002 0.9787 2.241 1.868 -0.9773\n bar (x) int64 -1 2\n >>> ds.map(np.fabs)\n <xarray.Dataset>\n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Dimensions without coordinates: dim_0, dim_1, x\n Data variables:\n foo (dim_0, dim_1) float64 1.764 0.4002 0.9787 2.241 1.868 0.9773\n bar (x) float64 1.0 2.0\n \"\"\"\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n variables = {\n k: maybe_wrap_array(v, func(v, *args, **kwargs))\n for k, v in self.data_vars.items()\n }\n if keep_attrs:\n for k, v in variables.items():\n v._copy_attrs_from(self.data_vars[k])\n attrs = self.attrs if keep_attrs else None\n return type(self)(variables, attrs=attrs)\n\n def apply(\n self,\n func: Callable,\n keep_attrs: bool = None,\n args: Iterable[Any] = (),\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"\n Backward compatible implementation of ``map``\n\n See Also\n --------\n Dataset.map\n \"\"\"\n warnings.warn(\n \"Dataset.apply may be deprecated in the future. Using Dataset.map is encouraged\",\n PendingDeprecationWarning,\n stacklevel=2,\n )\n return self.map(func, keep_attrs, args, **kwargs)\n\n def assign(\n self, variables: Mapping[Hashable, Any] = None, **variables_kwargs: Hashable\n ) -> \"Dataset\":\n \"\"\"Assign new data variables to a Dataset, returning a new object\n with all the original variables in addition to the new ones.\n\n Parameters\n ----------\n variables : mapping of hashable to Any\n Mapping from variables names to the new values. If the new values\n are callable, they are computed on the Dataset and assigned to new\n data variables. If the values are not callable, (e.g. a DataArray,\n scalar, or array), they are simply assigned.\n **variables_kwargs\n The keyword arguments form of ``variables``.\n One of variables or variables_kwargs must be provided.\n\n Returns\n -------\n ds : Dataset\n A new Dataset with the new variables in addition to all the\n existing variables.\n\n Notes\n -----\n Since ``kwargs`` is a dictionary, the order of your arguments may not\n be preserved, and so the order of the new variables is not well\n defined. 
Assigning multiple variables within the same ``assign`` is\n possible, but you cannot reference other variables created within the\n same ``assign`` call.\n\n See Also\n --------\n pandas.DataFrame.assign\n\n Examples\n --------\n >>> x = xr.Dataset(\n ... {\n ... \"temperature_c\": (\n ... (\"lat\", \"lon\"),\n ... 20 * np.random.rand(4).reshape(2, 2),\n ... ),\n ... \"precipitation\": ((\"lat\", \"lon\"), np.random.rand(4).reshape(2, 2)),\n ... },\n ... coords={\"lat\": [10, 20], \"lon\": [150, 160]},\n ... )\n >>> x\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9\n precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918\n\n Where the value is a callable, evaluated on dataset:\n\n >>> x.assign(temperature_f=lambda x: x.temperature_c * 9 / 5 + 32)\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9\n precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918\n temperature_f (lat, lon) float64 51.76 57.75 53.7 51.62\n\n Alternatively, the same behavior can be achieved by directly referencing an existing dataarray:\n\n >>> x.assign(temperature_f=x[\"temperature_c\"] * 9 / 5 + 32)\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 10.98 14.3 12.06 10.9\n precipitation (lat, lon) float64 0.4237 0.6459 0.4376 0.8918\n temperature_f (lat, lon) float64 51.76 57.75 53.7 51.62\n\n \"\"\"\n variables = either_dict_or_kwargs(variables, variables_kwargs, \"assign\")\n data = self.copy()\n # do all calculations first...\n results = data._calc_assign_results(variables)\n # ... and then assign\n data.update(results)\n return data\n\n def to_array(self, dim=\"variable\", name=None):\n \"\"\"Convert this dataset into an xarray.DataArray\n\n The data variables of this dataset will be broadcast against each other\n and stacked along the first axis of the new array. 
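For example, a dataset with two data variables ``a`` and ``b`` would become an array with a new leading dimension of length 2, labeled ``\"a\"`` and ``\"b\"`` along ``dim``. 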
All coordinates of\n this dataset will remain coordinates.\n\n Parameters\n ----------\n dim : str, optional\n Name of the new dimension.\n name : str, optional\n Name of the new data array.\n\n Returns\n -------\n array : xarray.DataArray\n \"\"\"\n from .dataarray import DataArray\n\n data_vars = [self.variables[k] for k in self.data_vars]\n broadcast_vars = broadcast_variables(*data_vars)\n data = duck_array_ops.stack([b.data for b in broadcast_vars], axis=0)\n\n coords = dict(self.coords)\n coords[dim] = list(self.data_vars)\n indexes = propagate_indexes(self._indexes)\n\n dims = (dim,) + broadcast_vars[0].dims\n\n return DataArray(\n data, coords, dims, attrs=self.attrs, name=name, indexes=indexes\n )\n\n def _normalize_dim_order(\n self, dim_order: List[Hashable] = None\n ) -> Dict[Hashable, int]:\n \"\"\"\n Check the validity of the provided dimensions if any and return the mapping\n between dimension name and their size.\n\n Parameters\n ----------\n dim_order\n Dimension order to validate (default to the alphabetical order if None).\n\n Returns\n -------\n result\n Validated dimensions mapping.\n\n \"\"\"\n if dim_order is None:\n dim_order = list(self.dims)\n elif set(dim_order) != set(self.dims):\n raise ValueError(\n \"dim_order {} does not match the set of dimensions of this \"\n \"Dataset: {}\".format(dim_order, list(self.dims))\n )\n\n ordered_dims = {k: self.dims[k] for k in dim_order}\n\n return ordered_dims\n\n def _to_dataframe(self, ordered_dims: Mapping[Hashable, int]):\n columns = [k for k in self.variables if k not in self.dims]\n data = [\n self._variables[k].set_dims(ordered_dims).values.reshape(-1)\n for k in columns\n ]\n index = self.coords.to_index([*ordered_dims])\n return pd.DataFrame(dict(zip(columns, data)), index=index)\n\n def to_dataframe(self, dim_order: List[Hashable] = None) -> pd.DataFrame:\n \"\"\"Convert this dataset into a pandas.DataFrame.\n\n Non-index variables in this dataset form the columns of the\n DataFrame. The DataFrame is indexed by the Cartesian product of\n this dataset's indices.\n\n Parameters\n ----------\n dim_order\n Hierarchical dimension order for the resulting dataframe. All\n arrays are transposed to this order and then written out as flat\n vectors in contiguous order, so the last dimension in this list\n will be contiguous in the resulting DataFrame. This has a major\n influence on which operations are efficient on the resulting\n dataframe.\n\n If provided, must include all dimensions of this dataset. By\n default, dimensions are sorted alphabetically.\n\n Returns\n -------\n result\n Dataset as a pandas DataFrame.\n\n \"\"\"\n\n ordered_dims = self._normalize_dim_order(dim_order=dim_order)\n\n return self._to_dataframe(ordered_dims=ordered_dims)\n\n def _set_sparse_data_from_dataframe(\n self, idx: pd.Index, arrays: List[Tuple[Hashable, np.ndarray]], dims: tuple\n ) -> None:\n from sparse import COO\n\n if isinstance(idx, pd.MultiIndex):\n coords = np.stack([np.asarray(code) for code in idx.codes], axis=0)\n is_sorted = idx.is_lexsorted()\n shape = tuple(lev.size for lev in idx.levels)\n else:\n coords = np.arange(idx.size).reshape(1, -1)\n is_sorted = True\n shape = (idx.size,)\n\n for name, values in arrays:\n # In virtually all real use cases, the sparse array will now have\n # missing values and needs a fill_value. 
For consistency, don't\n # special case the rare exceptions (e.g., dtype=int without a\n # MultiIndex).\n dtype, fill_value = dtypes.maybe_promote(values.dtype)\n values = np.asarray(values, dtype=dtype)\n\n data = COO(\n coords,\n values,\n shape,\n has_duplicates=False,\n sorted=is_sorted,\n fill_value=fill_value,\n )\n self[name] = (dims, data)\n\n def _set_numpy_data_from_dataframe(\n self, idx: pd.Index, arrays: List[Tuple[Hashable, np.ndarray]], dims: tuple\n ) -> None:\n if not isinstance(idx, pd.MultiIndex):\n for name, values in arrays:\n self[name] = (dims, values)\n return\n\n # NB: similar, more general logic, now exists in\n # variable.unstack_once; we could consider combining them at some\n # point.\n\n shape = tuple(lev.size for lev in idx.levels)\n indexer = tuple(idx.codes)\n\n # We already verified that the MultiIndex has all unique values, so\n # there are missing values if and only if the size of output arrays is\n # larger than the index.\n missing_values = np.prod(shape) > idx.shape[0]\n\n for name, values in arrays:\n # NumPy indexing is much faster than using DataFrame.reindex() to\n # fill in missing values:\n # https://stackoverflow.com/a/35049899/809705\n if missing_values:\n dtype, fill_value = dtypes.maybe_promote(values.dtype)\n data = np.full(shape, fill_value, dtype)\n else:\n # If there are no missing values, keep the existing dtype\n # instead of promoting to support NA, e.g., keep integer\n # columns as integers.\n # TODO: consider removing this special case, which doesn't\n # exist for sparse=True.\n data = np.zeros(shape, values.dtype)\n data[indexer] = values\n self[name] = (dims, data)\n\n @classmethod\n def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> \"Dataset\":\n \"\"\"Convert a pandas.DataFrame into an xarray.Dataset\n\n Each column will be converted into an independent variable in the\n Dataset. If the dataframe's index is a MultiIndex, it will be expanded\n into a tensor product of one-dimensional indices (filling in missing\n values with NaN). This method will produce a Dataset very similar to\n that on which the 'to_dataframe' method was called, except with\n possibly redundant dimensions (since all dataset variables will have\n the same dimensionality).\n\n Parameters\n ----------\n dataframe : DataFrame\n DataFrame from which to copy data and indices.\n sparse : bool, default: False\n If true, create sparse arrays instead of dense numpy arrays. This\n can potentially save a large amount of memory if the DataFrame has\n a MultiIndex. 
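Only the cells actually present in the frame are then stored, and every missing index combination shares a single fill value. 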
Requires the sparse package (sparse.pydata.org).\n\n Returns\n -------\n New Dataset.\n\n See Also\n --------\n xarray.DataArray.from_series\n pandas.DataFrame.to_xarray\n \"\"\"\n # TODO: Add an option to remove dimensions along which the variables\n # are constant, to enable consistent serialization to/from a dataframe,\n # even if some variables have different dimensionality.\n\n if not dataframe.columns.is_unique:\n raise ValueError(\"cannot convert DataFrame with non-unique columns\")\n\n idx = remove_unused_levels_categories(dataframe.index)\n\n if isinstance(idx, pd.MultiIndex) and not idx.is_unique:\n raise ValueError(\n \"cannot convert a DataFrame with a non-unique MultiIndex into xarray\"\n )\n\n # Cast to a NumPy array first, in case the Series is a pandas Extension\n # array (which doesn't have a valid NumPy dtype)\n # TODO: allow users to control how this casting happens, e.g., by\n # forwarding arguments to pandas.Series.to_numpy?\n arrays = [(k, np.asarray(v)) for k, v in dataframe.items()]\n\n obj = cls()\n\n if isinstance(idx, pd.MultiIndex):\n dims = tuple(\n name if name is not None else \"level_%i\" % n\n for n, name in enumerate(idx.names)\n )\n for dim, lev in zip(dims, idx.levels):\n obj[dim] = (dim, lev)\n else:\n index_name = idx.name if idx.name is not None else \"index\"\n dims = (index_name,)\n obj[index_name] = (dims, idx)\n\n if sparse:\n obj._set_sparse_data_from_dataframe(idx, arrays, dims)\n else:\n obj._set_numpy_data_from_dataframe(idx, arrays, dims)\n return obj\n\n def to_dask_dataframe(self, dim_order=None, set_index=False):\n \"\"\"\n Convert this dataset into a dask.dataframe.DataFrame.\n\n The dimensions, coordinates and data variables in this dataset form\n the columns of the DataFrame.\n\n Parameters\n ----------\n dim_order : list, optional\n Hierarchical dimension order for the resulting dataframe. All\n arrays are transposed to this order and then written out as flat\n vectors in contiguous order, so the last dimension in this list\n will be contiguous in the resulting DataFrame. This has a major\n influence on which operations are efficient on the resulting dask\n dataframe.\n\n If provided, must include all dimensions of this dataset. By\n default, dimensions are sorted alphabetically.\n set_index : bool, optional\n If set_index=True, the dask DataFrame is indexed by this dataset's\n coordinate. 
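For example, a dataset whose only dimension is ``time`` would yield a frame indexed by the ``time`` coordinate. 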
Since dask DataFrames do not support multi-indexes,\n set_index only works if the dataset only contains one dimension.\n\n Returns\n -------\n dask.dataframe.DataFrame\n \"\"\"\n\n import dask.array as da\n import dask.dataframe as dd\n\n ordered_dims = self._normalize_dim_order(dim_order=dim_order)\n\n columns = list(ordered_dims)\n columns.extend(k for k in self.coords if k not in self.dims)\n columns.extend(self.data_vars)\n\n series_list = []\n for name in columns:\n try:\n var = self.variables[name]\n except KeyError:\n # dimension without a matching coordinate\n size = self.dims[name]\n data = da.arange(size, chunks=size, dtype=np.int64)\n var = Variable((name,), data)\n\n # IndexVariable objects have a dummy .chunk() method\n if isinstance(var, IndexVariable):\n var = var.to_base_variable()\n\n dask_array = var.set_dims(ordered_dims).chunk(self.chunks).data\n series = dd.from_array(dask_array.reshape(-1), columns=[name])\n series_list.append(series)\n\n df = dd.concat(series_list, axis=1)\n\n if set_index:\n dim_order = [*ordered_dims]\n\n if len(dim_order) == 1:\n (dim,) = dim_order\n df = df.set_index(dim)\n else:\n # triggers an error about multi-indexes, even if only one\n # dimension is passed\n df = df.set_index(dim_order)\n\n return df\n\n def to_dict(self, data=True):\n \"\"\"\n Convert this dataset to a dictionary following xarray naming\n conventions.\n\n Converts all variables and attributes to native Python objects.\n Useful for converting to json. To avoid datetime incompatibility\n use the decode_times=False kwarg in xarray.open_dataset.\n\n Parameters\n ----------\n data : bool, optional\n Whether to include the actual data in the dictionary. When set to\n False, returns just the schema.\n\n See Also\n --------\n Dataset.from_dict\n \"\"\"\n d = {\n \"coords\": {},\n \"attrs\": decode_numpy_dict_values(self.attrs),\n \"dims\": dict(self.dims),\n \"data_vars\": {},\n }\n for k in self.coords:\n d[\"coords\"].update({k: self[k].variable.to_dict(data=data)})\n for k in self.data_vars:\n d[\"data_vars\"].update({k: self[k].variable.to_dict(data=data)})\n return d\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n Convert a dictionary into an xarray.Dataset.\n\n Input dict can take several forms:\n\n .. 
code:: python\n\n d = {\n \"t\": {\"dims\": (\"t\"), \"data\": t},\n \"a\": {\"dims\": (\"t\"), \"data\": x},\n \"b\": {\"dims\": (\"t\"), \"data\": y},\n }\n\n d = {\n \"coords\": {\"t\": {\"dims\": \"t\", \"data\": t, \"attrs\": {\"units\": \"s\"}}},\n \"attrs\": {\"title\": \"air temperature\"},\n \"dims\": \"t\",\n \"data_vars\": {\n \"a\": {\"dims\": \"t\", \"data\": x},\n \"b\": {\"dims\": \"t\", \"data\": y},\n },\n }\n\n where \"t\" is the name of the dimension, \"a\" and \"b\" are names of data\n variables and t, x, and y are lists, numpy.arrays or pandas objects.\n\n Parameters\n ----------\n d : dict-like\n Mapping with a minimum structure of\n ``{\"var_0\": {\"dims\": [..], \"data\": [..]}, \\\n ...}``\n\n Returns\n -------\n obj : xarray.Dataset\n\n See also\n --------\n Dataset.to_dict\n DataArray.from_dict\n \"\"\"\n\n if not {\"coords\", \"data_vars\"}.issubset(set(d)):\n variables = d.items()\n else:\n import itertools\n\n variables = itertools.chain(\n d.get(\"coords\", {}).items(), d.get(\"data_vars\", {}).items()\n )\n try:\n variable_dict = {\n k: (v[\"dims\"], v[\"data\"], v.get(\"attrs\")) for k, v in variables\n }\n except KeyError as e:\n raise ValueError(\n \"cannot convert dict without the key \"\n \"'{dims_data}'\".format(dims_data=str(e.args[0]))\n )\n obj = cls(variable_dict)\n\n # what if coords aren't dims?\n coords = set(d.get(\"coords\", {})) - set(d.get(\"dims\", {}))\n obj = obj.set_coords(coords)\n\n obj.attrs.update(d.get(\"attrs\", {}))\n\n return obj\n\n @staticmethod\n def _unary_op(f):\n @functools.wraps(f)\n def func(self, *args, **kwargs):\n variables = {}\n keep_attrs = kwargs.pop(\"keep_attrs\", None)\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=True)\n for k, v in self._variables.items():\n if k in self._coord_names:\n variables[k] = v\n else:\n variables[k] = f(v, *args, **kwargs)\n if keep_attrs:\n variables[k].attrs = v._attrs\n attrs = self._attrs if keep_attrs else None\n return self._replace_with_new_dims(variables, attrs=attrs)\n\n return func\n\n @staticmethod\n def _binary_op(f, reflexive=False, join=None):\n @functools.wraps(f)\n def func(self, other):\n from .dataarray import DataArray\n\n if isinstance(other, groupby.GroupBy):\n return NotImplemented\n align_type = OPTIONS[\"arithmetic_join\"] if join is None else join\n if isinstance(other, (DataArray, Dataset)):\n self, other = align(self, other, join=align_type, copy=False)\n g = f if not reflexive else lambda x, y: f(y, x)\n ds = self._calculate_binary_op(g, other, join=align_type)\n return ds\n\n return func\n\n @staticmethod\n def _inplace_binary_op(f):\n @functools.wraps(f)\n def func(self, other):\n from .dataarray import DataArray\n\n if isinstance(other, groupby.GroupBy):\n raise TypeError(\n \"in-place operations between a Dataset and \"\n \"a grouped object are not permitted\"\n )\n # we don't actually modify arrays in-place with in-place Dataset\n # arithmetic -- this lets us automatically align things\n if isinstance(other, (DataArray, Dataset)):\n other = other.reindex_like(self, copy=False)\n g = ops.inplace_to_noninplace_op(f)\n ds = self._calculate_binary_op(g, other, inplace=True)\n self._replace_with_new_dims(\n ds._variables,\n ds._coord_names,\n attrs=ds._attrs,\n indexes=ds._indexes,\n inplace=True,\n )\n return self\n\n return func\n\n def _calculate_binary_op(self, f, other, join=\"inner\", inplace=False):\n def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars):\n if inplace and set(lhs_data_vars) != set(rhs_data_vars):\n 
raise ValueError(\n \"datasets must have the same data variables \"\n \"for in-place arithmetic operations: %s, %s\"\n % (list(lhs_data_vars), list(rhs_data_vars))\n )\n\n dest_vars = {}\n\n for k in lhs_data_vars:\n if k in rhs_data_vars:\n dest_vars[k] = f(lhs_vars[k], rhs_vars[k])\n elif join in [\"left\", \"outer\"]:\n dest_vars[k] = f(lhs_vars[k], np.nan)\n for k in rhs_data_vars:\n if k not in dest_vars and join in [\"right\", \"outer\"]:\n dest_vars[k] = f(rhs_vars[k], np.nan)\n return dest_vars\n\n if utils.is_dict_like(other) and not isinstance(other, Dataset):\n # can't use our shortcut of doing the binary operation with\n # Variable objects, so apply over our data vars instead.\n new_data_vars = apply_over_both(\n self.data_vars, other, self.data_vars, other\n )\n return Dataset(new_data_vars)\n\n other_coords = getattr(other, \"coords\", None)\n ds = self.coords.merge(other_coords)\n\n if isinstance(other, Dataset):\n new_vars = apply_over_both(\n self.data_vars, other.data_vars, self.variables, other.variables\n )\n else:\n other_variable = getattr(other, \"variable\", other)\n new_vars = {k: f(self.variables[k], other_variable) for k in self.data_vars}\n ds._variables.update(new_vars)\n ds._dims = calculate_dimensions(ds._variables)\n return ds\n\n def _copy_attrs_from(self, other):\n self.attrs = other.attrs\n for v in other.variables:\n if v in self.variables:\n self.variables[v].attrs = other.variables[v].attrs\n\n def diff(self, dim, n=1, label=\"upper\"):\n \"\"\"Calculate the n-th order discrete difference along given axis.\n\n Parameters\n ----------\n dim : str\n Dimension over which to calculate the finite difference.\n n : int, optional\n The number of times values are differenced.\n label : str, optional\n The new coordinate in dimension ``dim`` will have the\n values of either the minuend's or subtrahend's coordinate\n for values 'upper' and 'lower', respectively. Other\n values are not supported.\n\n Returns\n -------\n difference : same type as caller\n The n-th order finite difference of this object.\n .. 
note::\n `n` matches numpy's behavior and is different from pandas' first\n argument named `periods`.\n\n Examples\n --------\n >>> ds = xr.Dataset({\"foo\": (\"x\", [5, 5, 6, 6])})\n >>> ds.diff(\"x\")\n <xarray.Dataset>\n Dimensions: (x: 3)\n Dimensions without coordinates: x\n Data variables:\n foo (x) int64 0 1 0\n >>> ds.diff(\"x\", 2)\n <xarray.Dataset>\n Dimensions: (x: 2)\n Dimensions without coordinates: x\n Data variables:\n foo (x) int64 1 -1\n\n See Also\n --------\n Dataset.differentiate\n \"\"\"\n if n == 0:\n return self\n if n < 0:\n raise ValueError(f\"order `n` must be non-negative but got {n}\")\n\n # prepare slices\n kwargs_start = {dim: slice(None, -1)}\n kwargs_end = {dim: slice(1, None)}\n\n # prepare new coordinate\n if label == \"upper\":\n kwargs_new = kwargs_end\n elif label == \"lower\":\n kwargs_new = kwargs_start\n else:\n raise ValueError(\"The 'label' argument has to be either 'upper' or 'lower'\")\n\n variables = {}\n\n for name, var in self.variables.items():\n if dim in var.dims:\n if name in self.data_vars:\n variables[name] = var.isel(**kwargs_end) - var.isel(**kwargs_start)\n else:\n variables[name] = var.isel(**kwargs_new)\n else:\n variables[name] = var\n\n indexes = dict(self.indexes)\n if dim in indexes:\n indexes[dim] = indexes[dim][kwargs_new[dim]]\n\n difference = self._replace_with_new_dims(variables, indexes=indexes)\n\n if n > 1:\n return difference.diff(dim, n - 1)\n else:\n return difference\n\n def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):\n \"\"\"Shift this dataset by an offset along one or more dimensions.\n\n Only data variables are moved; coordinates stay in place. This is\n consistent with the behavior of ``shift`` in pandas.\n\n Parameters\n ----------\n shifts : mapping of hashable to int\n Integer offset to shift along each of the given dimensions.\n Positive offsets shift to the right; negative offsets shift to the\n left.\n fill_value : scalar or dict-like, optional\n Value to use for newly missing values. If a dict-like, maps\n variable names (including coordinates) to fill values.\n **shifts_kwargs\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwargs must be provided.\n\n Returns\n -------\n shifted : Dataset\n Dataset with the same coordinates and attributes but shifted data\n variables.\n\n See Also\n --------\n roll\n\n Examples\n --------\n >>> ds = xr.Dataset({\"foo\": (\"x\", list(\"abcde\"))})\n >>> ds.shift(x=2)\n <xarray.Dataset>\n Dimensions: (x: 5)\n Dimensions without coordinates: x\n Data variables:\n foo (x) object nan nan 'a' 'b' 'c'\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"shift\")\n invalid = [k for k in shifts if k not in self.dims]\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n variables = {}\n for name, var in self.variables.items():\n if name in self.data_vars:\n fill_value_ = (\n fill_value.get(name, dtypes.NA)\n if isinstance(fill_value, dict)\n else fill_value\n )\n\n var_shifts = {k: v for k, v in shifts.items() if k in var.dims}\n variables[name] = var.shift(fill_value=fill_value_, shifts=var_shifts)\n else:\n variables[name] = var\n\n return self._replace(variables)\n\n def roll(self, shifts=None, roll_coords=None, **shifts_kwargs):\n \"\"\"Roll this dataset by an offset along one or more dimensions.\n\n Unlike shift, roll may rotate all variables, including coordinates\n if specified. 
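For example, ``ds.roll(x=2, roll_coords=False)`` would rotate each data variable two positions along ``x`` while leaving the ``x`` coordinate untouched. 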
The direction of rotation is consistent with\n :py:func:`numpy.roll`.\n\n Parameters\n ----------\n shifts : dict, optional\n A dict with keys matching dimensions and values given\n by integers to rotate each of the given dimensions. Positive\n offsets roll to the right; negative offsets roll to the left.\n roll_coords : bool\n Indicates whether to roll the coordinates by the offset\n The current default of roll_coords (None, equivalent to True) is\n deprecated and will change to False in a future version.\n Explicitly pass roll_coords to silence the warning.\n **shifts_kwargs : {dim: offset, ...}, optional\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwargs must be provided.\n Returns\n -------\n rolled : Dataset\n Dataset with the same coordinates and attributes but rolled\n variables.\n\n See Also\n --------\n shift\n\n Examples\n --------\n >>> ds = xr.Dataset({\"foo\": (\"x\", list(\"abcde\"))})\n >>> ds.roll(x=2)\n <xarray.Dataset>\n Dimensions: (x: 5)\n Dimensions without coordinates: x\n Data variables:\n foo (x) <U1 'd' 'e' 'a' 'b' 'c'\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"roll\")\n invalid = [k for k in shifts if k not in self.dims]\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n if roll_coords is None:\n warnings.warn(\n \"roll_coords will be set to False in the future.\"\n \" Explicitly set roll_coords to silence warning.\",\n FutureWarning,\n stacklevel=2,\n )\n roll_coords = True\n\n unrolled_vars = () if roll_coords else self.coords\n\n variables = {}\n for k, v in self.variables.items():\n if k not in unrolled_vars:\n variables[k] = v.roll(\n **{k: s for k, s in shifts.items() if k in v.dims}\n )\n else:\n variables[k] = v\n\n if roll_coords:\n indexes = {}\n for k, v in self.indexes.items():\n (dim,) = self.variables[k].dims\n if dim in shifts:\n indexes[k] = roll_index(v, shifts[dim])\n else:\n indexes[k] = v\n else:\n indexes = dict(self.indexes)\n\n return self._replace(variables, indexes=indexes)\n\n def sortby(self, variables, ascending=True):\n \"\"\"\n Sort object by labels or values (along an axis).\n\n Sorts the dataset, either along specified dimensions,\n or according to values of 1-D dataarrays that share dimension\n with calling object.\n\n If the input variables are dataarrays, then the dataarrays are aligned\n (via left-join) to the calling object prior to sorting by cell values.\n NaNs are sorted to the end, following Numpy convention.\n\n If multiple sorts along the same dimension is\n given, numpy's lexsort is performed along that dimension:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html\n and the FIRST key in the sequence is used as the primary sort key,\n followed by the 2nd key, etc.\n\n Parameters\n ----------\n variables : str, DataArray, or list of str or DataArray\n 1D DataArray objects or name(s) of 1D variable(s) in\n coords/data_vars whose values are used to sort the dataset.\n ascending : bool, optional\n Whether to sort by ascending or descending order.\n\n Returns\n -------\n sorted : Dataset\n A new dataset where all the specified dims are sorted by dim\n labels.\n \"\"\"\n from .dataarray import DataArray\n\n if not isinstance(variables, list):\n variables = [variables]\n else:\n variables = variables\n variables = [v if isinstance(v, DataArray) else self[v] for v in variables]\n aligned_vars = align(self, *variables, join=\"left\")\n aligned_self = aligned_vars[0]\n aligned_other_vars = aligned_vars[1:]\n vars_by_dim = 
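# --- Added sketch of the single-key sortby case documented above; the
# dataset contents are invented for illustration.
import xarray as xr

ds = xr.Dataset({"v": ("x", [30, 10, 20])}, coords={"x": [3, 1, 2]})
print(ds.sortby("x"))  # reordered so x ascends: v becomes [10, 20, 30]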
defaultdict(list)\n for data_array in aligned_other_vars:\n if data_array.ndim != 1:\n raise ValueError(\"Input DataArray is not 1-D.\")\n (key,) = data_array.dims\n vars_by_dim[key].append(data_array)\n\n indices = {}\n for key, arrays in vars_by_dim.items():\n order = np.lexsort(tuple(reversed(arrays)))\n indices[key] = order if ascending else order[::-1]\n return aligned_self.isel(**indices)\n\n def quantile(\n self,\n q,\n dim=None,\n interpolation=\"linear\",\n numeric_only=False,\n keep_attrs=None,\n skipna=True,\n ):\n \"\"\"Compute the qth quantile of the data along the specified dimension.\n\n Returns the qth quantiles(s) of the array elements for each variable\n in the Dataset.\n\n Parameters\n ----------\n q : float or array-like of float\n Quantile to compute, which must be between 0 and 1 inclusive.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply quantile.\n interpolation : {\"linear\", \"lower\", \"higher\", \"midpoint\", \"nearest\"}, default: \"linear\"\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n\n * linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n * lower: ``i``.\n * higher: ``j``.\n * nearest: ``i`` or ``j``, whichever is nearest.\n * midpoint: ``(i + j) / 2``.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n numeric_only : bool, optional\n If True, only apply ``func`` to variables with a numeric dtype.\n skipna : bool, optional\n Whether to skip missing values when aggregating.\n\n Returns\n -------\n quantiles : Dataset\n If `q` is a single quantile, then the result is a scalar for each\n variable in data_vars. If multiple percentiles are given, first\n axis of the result corresponds to the quantile and a quantile\n dimension is added to the return Dataset. The other dimensions are\n the dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanquantile, numpy.quantile, pandas.Series.quantile, DataArray.quantile\n\n Examples\n --------\n >>> ds = xr.Dataset(\n ... {\"a\": ((\"x\", \"y\"), [[0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]])},\n ... coords={\"x\": [7, 9], \"y\": [1, 1.5, 2, 2.5]},\n ... )\n >>> ds.quantile(0) # or ds.quantile(0, dim=...)\n <xarray.Dataset>\n Dimensions: ()\n Coordinates:\n quantile float64 0.0\n Data variables:\n a float64 0.7\n >>> ds.quantile(0, dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 4)\n Coordinates:\n * y (y) float64 1.0 1.5 2.0 2.5\n quantile float64 0.0\n Data variables:\n a (y) float64 0.7 4.2 2.6 1.5\n >>> ds.quantile([0, 0.5, 1])\n <xarray.Dataset>\n Dimensions: (quantile: 3)\n Coordinates:\n * quantile (quantile) float64 0.0 0.5 1.0\n Data variables:\n a (quantile) float64 0.7 3.4 9.4\n >>> ds.quantile([0, 0.5, 1], dim=\"x\")\n <xarray.Dataset>\n Dimensions: (quantile: 3, y: 4)\n Coordinates:\n * y (y) float64 1.0 1.5 2.0 2.5\n * quantile (quantile) float64 0.0 0.5 1.0\n Data variables:\n a (quantile, y) float64 0.7 4.2 2.6 1.5 3.6 ... 
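# --- Added sketch of the multi-key branch above (np.lexsort): when several
# 1-D DataArrays share a dimension, the FIRST is the primary sort key.
# Example data is made up.
import xarray as xr

ds = xr.Dataset({"v": ("x", [1, 2, 3, 4])})
key1 = xr.DataArray([1, 1, 0, 0], dims="x")
key2 = xr.DataArray([1, 0, 1, 0], dims="x")
print(ds.sortby([key1, key2]))  # sorted by key1, ties broken by key2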
1.7 6.5 7.3 9.4 1.9\n \"\"\"\n\n if isinstance(dim, str):\n dims = {dim}\n elif dim in [None, ...]:\n dims = set(self.dims)\n else:\n dims = set(dim)\n\n _assert_empty(\n [d for d in dims if d not in self.dims],\n \"Dataset does not contain the dimensions: %s\",\n )\n\n q = np.asarray(q, dtype=np.float64)\n\n variables = {}\n for name, var in self.variables.items():\n reduce_dims = [d for d in var.dims if d in dims]\n if reduce_dims or not var.dims:\n if name not in self.coords:\n if (\n not numeric_only\n or np.issubdtype(var.dtype, np.number)\n or var.dtype == np.bool_\n ):\n if len(reduce_dims) == var.ndim:\n # prefer to aggregate over axis=None rather than\n # axis=(0, 1) if they will be equivalent, because\n # the former is often more efficient\n reduce_dims = None\n variables[name] = var.quantile(\n q,\n dim=reduce_dims,\n interpolation=interpolation,\n keep_attrs=keep_attrs,\n skipna=skipna,\n )\n\n else:\n variables[name] = var\n\n # construct the new dataset\n coord_names = {k for k in self.coords if k in variables}\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self.attrs if keep_attrs else None\n new = self._replace_with_new_dims(\n variables, coord_names=coord_names, attrs=attrs, indexes=indexes\n )\n return new.assign_coords(quantile=q)\n\n def rank(self, dim, pct=False, keep_attrs=None):\n \"\"\"Ranks the data.\n\n Equal values are assigned a rank that is the average of the ranks that\n would have been otherwise assigned to all of the values within\n that set.\n Ranks begin at 1, not 0. If pct is True, computes percentage ranks.\n\n NaNs in the input array are returned as NaNs.\n\n The `bottleneck` library is required.\n\n Parameters\n ----------\n dim : str\n Dimension over which to compute rank.\n pct : bool, optional\n If True, compute percentage ranks, otherwise compute integer ranks.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n ranked : Dataset\n Variables that do not depend on `dim` are dropped.\n \"\"\"\n if dim not in self.dims:\n raise ValueError(\"Dataset does not contain the dimension: %s\" % dim)\n\n variables = {}\n for name, var in self.variables.items():\n if name in self.data_vars:\n if dim in var.dims:\n variables[name] = var.rank(dim, pct=pct)\n else:\n variables[name] = var\n\n coord_names = set(self.coords)\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self.attrs if keep_attrs else None\n return self._replace(variables, coord_names, attrs=attrs)\n\n def differentiate(self, coord, edge_order=1, datetime_unit=None):\n \"\"\" Differentiate with the second order accurate central\n differences.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n coord : str\n The coordinate to be used to compute the gradient.\n edge_order : {1, 2}, default: 1\n N-th order accurate differences at the boundaries.\n datetime_unit : None or {\"Y\", \"M\", \"W\", \"D\", \"h\", \"m\", \"s\", \"ms\", \\\n \"us\", \"ns\", \"ps\", \"fs\", \"as\"}, default: None\n Unit to compute gradient. 
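# --- Added minimal rank() sketch per the docstring above; note that the
# bottleneck library is assumed to be installed.
import xarray as xr

ds = xr.Dataset({"v": ("x", [30.0, 10.0, 20.0])})
print(ds.rank("x"))  # v becomes [3.0, 1.0, 2.0]; ranks start at 1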
Only valid for datetime coordinate.\n\n Returns\n -------\n differentiated: Dataset\n\n See also\n --------\n numpy.gradient: corresponding numpy function\n \"\"\"\n from .variable import Variable\n\n if coord not in self.variables and coord not in self.dims:\n raise ValueError(f\"Coordinate {coord} does not exist.\")\n\n coord_var = self[coord].variable\n if coord_var.ndim != 1:\n raise ValueError(\n \"Coordinate {} must be 1 dimensional but is {}\"\n \" dimensional\".format(coord, coord_var.ndim)\n )\n\n dim = coord_var.dims[0]\n if _contains_datetime_like_objects(coord_var):\n if coord_var.dtype.kind in \"mM\" and datetime_unit is None:\n datetime_unit, _ = np.datetime_data(coord_var.dtype)\n elif datetime_unit is None:\n datetime_unit = \"s\" # Default to seconds for cftime objects\n coord_var = coord_var._to_numeric(datetime_unit=datetime_unit)\n\n variables = {}\n for k, v in self.variables.items():\n if k in self.data_vars and dim in v.dims and k not in self.coords:\n if _contains_datetime_like_objects(v):\n v = v._to_numeric(datetime_unit=datetime_unit)\n grad = duck_array_ops.gradient(\n v.data, coord_var, edge_order=edge_order, axis=v.get_axis_num(dim)\n )\n variables[k] = Variable(v.dims, grad)\n else:\n variables[k] = v\n return self._replace(variables)\n\n def integrate(\n self, coord: Union[Hashable, Sequence[Hashable]], datetime_unit: str = None\n ) -> \"Dataset\":\n \"\"\"Integrate along the given coordinate using the trapezoidal rule.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n coord : hashable, or sequence of hashable\n Coordinate(s) used for the integration.\n datetime_unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \\\n 'ps', 'fs', 'as'}, optional\n Specify the unit if datetime coordinate is used.\n\n Returns\n -------\n integrated : Dataset\n\n See also\n --------\n DataArray.integrate\n numpy.trapz : corresponding numpy function\n\n Examples\n --------\n >>> ds = xr.Dataset(\n ... data_vars={\"a\": (\"x\", [5, 5, 6, 6]), \"b\": (\"x\", [1, 2, 1, 0])},\n ... coords={\"x\": [0, 1, 2, 3], \"y\": (\"x\", [1, 7, 3, 5])},\n ... 
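# --- Added hedged example of differentiate(): second-order central
# differences along a 1-D coordinate, mirroring numpy.gradient as noted
# above. Data values are invented.
import xarray as xr

ds = xr.Dataset({"y": ("x", [0.0, 1.0, 4.0, 9.0])},
                coords={"x": [0.0, 1.0, 2.0, 3.0]})
print(ds.differentiate("x"))  # y becomes [1.0, 2.0, 4.0, 5.0]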
)\n >>> ds\n <xarray.Dataset>\n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n y (x) int64 1 7 3 5\n Data variables:\n a (x) int64 5 5 6 6\n b (x) int64 1 2 1 0\n >>> ds.integrate(\"x\")\n <xarray.Dataset>\n Dimensions: ()\n Data variables:\n a float64 16.5\n b float64 3.5\n >>> ds.integrate(\"y\")\n <xarray.Dataset>\n Dimensions: ()\n Data variables:\n a float64 20.0\n b float64 4.0\n \"\"\"\n if not isinstance(coord, (list, tuple)):\n coord = (coord,)\n result = self\n for c in coord:\n result = result._integrate_one(c, datetime_unit=datetime_unit)\n return result\n\n def _integrate_one(self, coord, datetime_unit=None):\n from .variable import Variable\n\n if coord not in self.variables and coord not in self.dims:\n raise ValueError(f\"Coordinate {coord} does not exist.\")\n\n coord_var = self[coord].variable\n if coord_var.ndim != 1:\n raise ValueError(\n \"Coordinate {} must be 1 dimensional but is {}\"\n \" dimensional\".format(coord, coord_var.ndim)\n )\n\n dim = coord_var.dims[0]\n if _contains_datetime_like_objects(coord_var):\n if coord_var.dtype.kind in \"mM\" and datetime_unit is None:\n datetime_unit, _ = np.datetime_data(coord_var.dtype)\n elif datetime_unit is None:\n datetime_unit = \"s\" # Default to seconds for cftime objects\n coord_var = coord_var._replace(\n data=datetime_to_numeric(coord_var.data, datetime_unit=datetime_unit)\n )\n\n variables = {}\n coord_names = set()\n for k, v in self.variables.items():\n if k in self.coords:\n if dim not in v.dims:\n variables[k] = v\n coord_names.add(k)\n else:\n if k in self.data_vars and dim in v.dims:\n if _contains_datetime_like_objects(v):\n v = datetime_to_numeric(v, datetime_unit=datetime_unit)\n integ = duck_array_ops.trapz(\n v.data, coord_var.data, axis=v.get_axis_num(dim)\n )\n v_dims = list(v.dims)\n v_dims.remove(dim)\n variables[k] = Variable(v_dims, integ)\n else:\n variables[k] = v\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n @property\n def real(self):\n return self.map(lambda x: x.real, keep_attrs=True)\n\n @property\n def imag(self):\n return self.map(lambda x: x.imag, keep_attrs=True)\n\n plot = utils.UncachedAccessor(_Dataset_PlotMethods)\n\n def filter_by_attrs(self, **kwargs):\n \"\"\"Returns a ``Dataset`` with variables that match specific conditions.\n\n Can pass in ``key=value`` or ``key=callable``. A Dataset is returned\n containing only the variables for which all the filter tests pass.\n These tests are either ``key=value`` for which the attribute ``key``\n has the exact value ``value`` or the callable passed into\n ``key=callable`` returns True. 
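# --- Added quick sketch of the .real/.imag accessors defined above, which
# map the corresponding numpy attribute over every data variable.
import numpy as np
import xarray as xr

ds = xr.Dataset({"z": ("x", np.array([1 + 2j, 3 - 4j]))})
print(ds.real["z"].values)  # [1.  3.]
print(ds.imag["z"].values)  # [ 2. -4.]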
The callable will be passed a single\n value, either the value of the attribute ``key`` or ``None`` if the\n DataArray does not have an attribute with the name ``key``.\n\n Parameters\n ----------\n **kwargs\n key : str\n Attribute name.\n value : callable or obj\n If value is a callable, it should return a boolean in the form\n of bool = func(attr) where attr is da.attrs[key].\n Otherwise, value will be compared to the each\n DataArray's attrs[key].\n\n Returns\n -------\n new : Dataset\n New dataset with variables filtered by attribute.\n\n Examples\n --------\n >>> # Create an example dataset:\n >>> temp = 15 + 8 * np.random.randn(2, 2, 3)\n >>> precip = 10 * np.random.rand(2, 2, 3)\n >>> lon = [[-99.83, -99.32], [-99.79, -99.23]]\n >>> lat = [[42.25, 42.21], [42.63, 42.59]]\n >>> dims = [\"x\", \"y\", \"time\"]\n >>> temp_attr = dict(standard_name=\"air_potential_temperature\")\n >>> precip_attr = dict(standard_name=\"convective_precipitation_flux\")\n >>> ds = xr.Dataset(\n ... {\n ... \"temperature\": (dims, temp, temp_attr),\n ... \"precipitation\": (dims, precip, precip_attr),\n ... },\n ... coords={\n ... \"lon\": ([\"x\", \"y\"], lon),\n ... \"lat\": ([\"x\", \"y\"], lat),\n ... \"time\": pd.date_range(\"2014-09-06\", periods=3),\n ... \"reference_time\": pd.Timestamp(\"2014-09-05\"),\n ... },\n ... )\n >>> # Get variables matching a specific standard_name.\n >>> ds.filter_by_attrs(standard_name=\"convective_precipitation_flux\")\n <xarray.Dataset>\n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n reference_time datetime64[ns] 2014-09-05\n Dimensions without coordinates: x, y\n Data variables:\n precipitation (x, y, time) float64 5.68 9.256 0.7104 ... 7.992 4.615 7.805\n >>> # Get all variables that have a standard_name attribute.\n >>> standard_name = lambda v: v is not None\n >>> ds.filter_by_attrs(standard_name=standard_name)\n <xarray.Dataset>\n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n reference_time datetime64[ns] 2014-09-05\n Dimensions without coordinates: x, y\n Data variables:\n temperature (x, y, time) float64 29.11 18.2 22.83 ... 18.28 16.15 26.63\n precipitation (x, y, time) float64 5.68 9.256 0.7104 ... 
7.992 4.615 7.805\n\n \"\"\"\n selection = []\n for var_name, variable in self.variables.items():\n has_value_flag = False\n for attr_name, pattern in kwargs.items():\n attr_value = variable.attrs.get(attr_name)\n if (callable(pattern) and pattern(attr_value)) or attr_value == pattern:\n has_value_flag = True\n else:\n has_value_flag = False\n break\n if has_value_flag is True:\n selection.append(var_name)\n return self[selection]\n\n def unify_chunks(self) -> \"Dataset\":\n \"\"\"Unify chunk size along all chunked dimensions of this Dataset.\n\n Returns\n -------\n Dataset with consistent chunk sizes for all dask-array variables\n\n See Also\n --------\n dask.array.core.unify_chunks\n \"\"\"\n\n try:\n self.chunks\n except ValueError: # \"inconsistent chunks\"\n pass\n else:\n # No variables with dask backend, or all chunks are already aligned\n return self.copy()\n\n # import dask is placed after the quick exit test above to allow\n # running this method if dask isn't installed and there are no chunks\n import dask.array\n\n ds = self.copy()\n\n dims_pos_map = {dim: index for index, dim in enumerate(ds.dims)}\n\n dask_array_names = []\n dask_unify_args = []\n for name, variable in ds.variables.items():\n if isinstance(variable.data, dask.array.Array):\n dims_tuple = [dims_pos_map[dim] for dim in variable.dims]\n dask_array_names.append(name)\n dask_unify_args.append(variable.data)\n dask_unify_args.append(dims_tuple)\n\n _, rechunked_arrays = dask.array.core.unify_chunks(*dask_unify_args)\n\n for name, new_array in zip(dask_array_names, rechunked_arrays):\n ds.variables[name]._data = new_array\n\n return ds\n\n def map_blocks(\n self,\n func: \"Callable[..., T_DSorDA]\",\n args: Sequence[Any] = (),\n kwargs: Mapping[str, Any] = None,\n template: Union[\"DataArray\", \"Dataset\"] = None,\n ) -> \"T_DSorDA\":\n \"\"\"\n Apply a function to each block of this Dataset.\n\n .. warning::\n This method is experimental and its signature may change.\n\n Parameters\n ----------\n func : callable\n User-provided function that accepts a Dataset as its first\n parameter. The function will receive a subset or 'block' of this Dataset (see below),\n corresponding to one chunk along each chunked dimension. ``func`` will be\n executed as ``func(subset_dataset, *subset_args, **kwargs)``.\n\n This function must return either a single DataArray or a single Dataset.\n\n This function cannot add a new chunked dimension.\n args : sequence\n Passed to func after unpacking and subsetting any xarray objects by blocks.\n xarray objects in args must be aligned with obj, otherwise an error is raised.\n kwargs : mapping\n Passed verbatim to func after unpacking. xarray objects, if any, will not be\n subset to blocks. Passing dask collections in kwargs is not allowed.\n template : DataArray or Dataset, optional\n xarray object representing the final result after compute is called. If not provided,\n the function will be first run on mocked-up data, that looks like this object but\n has sizes 0, to determine properties of the returned object such as dtype,\n variable names, attributes, new dimensions and new indexes (if any).\n ``template`` must be provided if the function changes the size of existing dimensions.\n When provided, ``attrs`` on variables in `template` are copied over to the result. 
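# --- Added hedged unify_chunks() sketch (assumes dask is installed): two
# variables chunked differently along "x" get a common chunk scheme.
import xarray as xr

ds = xr.Dataset({"a": ("x", [1.0] * 10), "b": ("x", [2.0] * 10)})
ds["a"] = ds["a"].chunk({"x": 5})
ds["b"] = ds["b"].chunk({"x": 2})
unified = ds.unify_chunks()
print(unified.chunks)  # chunk sizes now consistent along "x"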
Any\n ``attrs`` set by ``func`` will be ignored.\n\n Returns\n -------\n A single DataArray or Dataset with dask backend, reassembled from the outputs of the\n function.\n\n Notes\n -----\n This function is designed for when ``func`` needs to manipulate a whole xarray object\n subset to each block. Each block is loaded into memory. In the more common case where\n ``func`` can work on numpy arrays, it is recommended to use ``apply_ufunc``.\n\n If none of the variables in this object is backed by dask arrays, calling this function is\n equivalent to calling ``func(obj, *args, **kwargs)``.\n\n See Also\n --------\n dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks\n xarray.DataArray.map_blocks\n\n Examples\n --------\n Calculate an anomaly from climatology using ``.groupby()``. Using\n ``xr.map_blocks()`` allows for parallel operations with knowledge of ``xarray``,\n its indices, and its methods like ``.groupby()``.\n\n >>> def calculate_anomaly(da, groupby_type=\"time.month\"):\n ... gb = da.groupby(groupby_type)\n ... clim = gb.mean(dim=\"time\")\n ... return gb - clim\n ...\n >>> time = xr.cftime_range(\"1990-01\", \"1992-01\", freq=\"M\")\n >>> month = xr.DataArray(time.month, coords={\"time\": time}, dims=[\"time\"])\n >>> np.random.seed(123)\n >>> array = xr.DataArray(\n ... np.random.rand(len(time)),\n ... dims=[\"time\"],\n ... coords={\"time\": time, \"month\": month},\n ... ).chunk()\n >>> ds = xr.Dataset({\"a\": array})\n >>> ds.map_blocks(calculate_anomaly, template=ds).compute()\n <xarray.Dataset>\n Dimensions: (time: 24)\n Coordinates:\n * time (time) object 1990-01-31 00:00:00 ... 1991-12-31 00:00:00\n month (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 1 2 3 4 5 6 7 8 9 10 11 12\n Data variables:\n a (time) float64 0.1289 0.1132 -0.0856 ... 0.2287 0.1906 -0.05901\n\n Note that one must explicitly use ``args=[]`` and ``kwargs={}`` to pass arguments\n to the function being applied in ``xr.map_blocks()``:\n\n >>> ds.map_blocks(\n ... calculate_anomaly,\n ... kwargs={\"groupby_type\": \"time.year\"},\n ... template=ds,\n ... )\n <xarray.Dataset>\n Dimensions: (time: 24)\n Coordinates:\n * time (time) object 1990-01-31 00:00:00 ... 
1991-12-31 00:00:00\n month (time) int64 dask.array<chunksize=(24,), meta=np.ndarray>\n Data variables:\n a (time) float64 dask.array<chunksize=(24,), meta=np.ndarray>\n \"\"\"\n from .parallel import map_blocks\n\n return map_blocks(func, self, args, kwargs, template)\n\n def polyfit(\n self,\n dim: Hashable,\n deg: int,\n skipna: bool = None,\n rcond: float = None,\n w: Union[Hashable, Any] = None,\n full: bool = False,\n cov: Union[bool, str] = False,\n ):\n \"\"\"\n Least squares polynomial fit.\n\n This replicates the behaviour of `numpy.polyfit` but differs by skipping\n invalid values when `skipna = True`.\n\n Parameters\n ----------\n dim : hashable\n Coordinate along which to fit the polynomials.\n deg : int\n Degree of the fitting polynomial.\n skipna : bool, optional\n If True, removes all invalid values before fitting each 1D slices of the array.\n Default is True if data is stored in a dask.array or if there is any\n invalid values, False otherwise.\n rcond : float, optional\n Relative condition number to the fit.\n w : hashable or Any, optional\n Weights to apply to the y-coordinate of the sample points.\n Can be an array-like object or the name of a coordinate in the dataset.\n full : bool, optional\n Whether to return the residuals, matrix rank and singular values in addition\n to the coefficients.\n cov : bool or str, optional\n Whether to return to the covariance matrix in addition to the coefficients.\n The matrix is not scaled if `cov='unscaled'`.\n\n Returns\n -------\n polyfit_results : Dataset\n A single dataset which contains (for each \"var\" in the input dataset):\n\n [var]_polyfit_coefficients\n The coefficients of the best fit for each variable in this dataset.\n [var]_polyfit_residuals\n The residuals of the least-square computation for each variable (only included if `full=True`)\n When the matrix rank is deficient, np.nan is returned.\n [dim]_matrix_rank\n The effective rank of the scaled Vandermonde coefficient matrix (only included if `full=True`)\n The rank is computed ignoring the NaN values that might be skipped.\n [dim]_singular_values\n The singular values of the scaled Vandermonde coefficient matrix (only included if `full=True`)\n [var]_polyfit_covariance\n The covariance matrix of the polynomial coefficient estimates (only included if `full=False` and `cov=True`)\n\n Warns\n -----\n RankWarning\n The rank of the coefficient matrix in the least-squares fit is deficient.\n The warning is not raised with in-memory (not dask) data and `full=True`.\n\n See Also\n --------\n numpy.polyfit\n numpy.polyval\n xarray.polyval\n \"\"\"\n variables = {}\n skipna_da = skipna\n\n x = get_clean_interp_index(self, dim, strict=False)\n xname = \"{}_\".format(self[dim].name)\n order = int(deg) + 1\n lhs = np.vander(x, order)\n\n if rcond is None:\n rcond = (\n x.shape[0] * np.core.finfo(x.dtype).eps # type: ignore[attr-defined]\n )\n\n # Weights:\n if w is not None:\n if isinstance(w, Hashable):\n w = self.coords[w]\n w = np.asarray(w)\n if w.ndim != 1:\n raise TypeError(\"Expected a 1-d array for weights.\")\n if w.shape[0] != lhs.shape[0]:\n raise TypeError(\"Expected w and {} to have the same length\".format(dim))\n lhs *= w[:, np.newaxis]\n\n # Scaling\n scale = np.sqrt((lhs * lhs).sum(axis=0))\n lhs /= scale\n\n degree_dim = utils.get_temp_dimname(self.dims, \"degree\")\n\n rank = np.linalg.matrix_rank(lhs)\n\n if full:\n rank = xr.DataArray(rank, name=xname + \"matrix_rank\")\n variables[rank.name] = rank\n sing = np.linalg.svd(lhs, compute_uv=False)\n sing = 
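# --- Added hedged polyfit() example consistent with the parameters above:
# fit a degree-1 polynomial along "x" and read back the coefficients
# (degree coordinate is descending, so the slope comes first).
import xarray as xr

ds = xr.Dataset({"y": ("x", [1.0, 3.0, 5.0, 7.0])}, coords={"x": [0, 1, 2, 3]})
fit = ds.polyfit(dim="x", deg=1)
print(fit["y_polyfit_coefficients"].values)  # ~[2.0, 1.0]: slope, intercept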
xr.DataArray(\n sing,\n dims=(degree_dim,),\n coords={degree_dim: np.arange(rank - 1, -1, -1)},\n name=xname + \"singular_values\",\n )\n variables[sing.name] = sing\n\n for name, da in self.data_vars.items():\n if dim not in da.dims:\n continue\n\n if is_duck_dask_array(da.data) and (\n rank != order or full or skipna is None\n ):\n # Current algorithm with dask and skipna=False neither supports\n # deficient ranks nor does it output the \"full\" info (issue dask/dask#6516)\n skipna_da = True\n elif skipna is None:\n skipna_da = bool(np.any(da.isnull()))\n\n dims_to_stack = [dimname for dimname in da.dims if dimname != dim]\n stacked_coords: Dict[Hashable, DataArray] = {}\n if dims_to_stack:\n stacked_dim = utils.get_temp_dimname(dims_to_stack, \"stacked\")\n rhs = da.transpose(dim, *dims_to_stack).stack(\n {stacked_dim: dims_to_stack}\n )\n stacked_coords = {stacked_dim: rhs[stacked_dim]}\n scale_da = scale[:, np.newaxis]\n else:\n rhs = da\n scale_da = scale\n\n if w is not None:\n rhs *= w[:, np.newaxis]\n\n with warnings.catch_warnings():\n if full: # Copy np.polyfit behavior\n warnings.simplefilter(\"ignore\", np.RankWarning)\n else: # Raise only once per variable\n warnings.simplefilter(\"once\", np.RankWarning)\n\n coeffs, residuals = duck_array_ops.least_squares(\n lhs, rhs.data, rcond=rcond, skipna=skipna_da\n )\n\n if isinstance(name, str):\n name = \"{}_\".format(name)\n else:\n # Thus a ReprObject => polyfit was called on a DataArray\n name = \"\"\n\n coeffs = xr.DataArray(\n coeffs / scale_da,\n dims=[degree_dim] + list(stacked_coords.keys()),\n coords={degree_dim: np.arange(order)[::-1], **stacked_coords},\n name=name + \"polyfit_coefficients\",\n )\n if dims_to_stack:\n coeffs = coeffs.unstack(stacked_dim)\n variables[coeffs.name] = coeffs\n\n if full or (cov is True):\n residuals = xr.DataArray(\n residuals if dims_to_stack else residuals.squeeze(),\n dims=list(stacked_coords.keys()),\n coords=stacked_coords,\n name=name + \"polyfit_residuals\",\n )\n if dims_to_stack:\n residuals = residuals.unstack(stacked_dim)\n variables[residuals.name] = residuals\n\n if cov:\n Vbase = np.linalg.inv(np.dot(lhs.T, lhs))\n Vbase /= np.outer(scale, scale)\n if cov == \"unscaled\":\n fac = 1\n else:\n if x.shape[0] <= order:\n raise ValueError(\n \"The number of data points must exceed order to scale the covariance matrix.\"\n )\n fac = residuals / (x.shape[0] - order)\n covariance = xr.DataArray(Vbase, dims=(\"cov_i\", \"cov_j\")) * fac\n variables[name + \"polyfit_covariance\"] = covariance\n\n return Dataset(data_vars=variables, attrs=self.attrs.copy())\n\n def pad(\n self,\n pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None,\n mode: str = \"constant\",\n stat_length: Union[\n int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]\n ] = None,\n constant_values: Union[\n int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]\n ] = None,\n end_values: Union[\n int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]\n ] = None,\n reflect_type: str = None,\n **pad_width_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Pad this dataset along one or more dimensions.\n\n .. 
warning::\n This function is experimental and its behaviour is likely to change\n especially regarding padding of dimension coordinates (or IndexVariables).\n\n When using one of the modes (\"edge\", \"reflect\", \"symmetric\", \"wrap\"),\n coordinates will be padded with the same mode, otherwise coordinates\n are padded using the \"constant\" mode with fill_value dtypes.NA.\n\n Parameters\n ----------\n pad_width : mapping of hashable to tuple of int\n Mapping with the form of {dim: (pad_before, pad_after)}\n describing the number of values padded along each dimension.\n {dim: pad} is a shortcut for pad_before = pad_after = pad\n mode : str, default: \"constant\"\n One of the following string values (taken from numpy docs).\n\n 'constant' (default)\n Pads with a constant value.\n 'edge'\n Pads with the edge values of array.\n 'linear_ramp'\n Pads with the linear ramp between end_value and the\n array edge value.\n 'maximum'\n Pads with the maximum value of all or part of the\n vector along each axis.\n 'mean'\n Pads with the mean value of all or part of the\n vector along each axis.\n 'median'\n Pads with the median value of all or part of the\n vector along each axis.\n 'minimum'\n Pads with the minimum value of all or part of the\n vector along each axis.\n 'reflect'\n Pads with the reflection of the vector mirrored on\n the first and last values of the vector along each\n axis.\n 'symmetric'\n Pads with the reflection of the vector mirrored\n along the edge of the array.\n 'wrap'\n Pads with the wrap of the vector along the axis.\n The first values are used to pad the end and the\n end values are used to pad the beginning.\n stat_length : int, tuple or mapping of hashable to tuple, default: None\n Used in 'maximum', 'mean', 'median', and 'minimum'. Number of\n values at edge of each axis used to calculate the statistic value.\n {dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)} unique\n statistic lengths along each dimension.\n ((before, after),) yields same before and after statistic lengths\n for each dimension.\n (stat_length,) or int is a shortcut for before = after = statistic\n length for all axes.\n Default is ``None``, to use the entire axis.\n constant_values : scalar, tuple or mapping of hashable to tuple, default: 0\n Used in 'constant'. The values to set the padded values for each\n axis.\n ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique\n pad constants along each dimension.\n ``((before, after),)`` yields same before and after constants for each\n dimension.\n ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for\n all dimensions.\n Default is 0.\n end_values : scalar, tuple or mapping of hashable to tuple, default: 0\n Used in 'linear_ramp'. The values used for the ending value of the\n linear_ramp and that will form the edge of the padded array.\n ``{dim_1: (before_1, after_1), ... dim_N: (before_N, after_N)}`` unique\n end values along each dimension.\n ``((before, after),)`` yields same before and after end values for each\n axis.\n ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for\n all axes.\n Default is 0.\n reflect_type : {\"even\", \"odd\"}, optional\n Used in \"reflect\", and \"symmetric\". The \"even\" style is the\n default with an unaltered reflection around the edge value. 
For\n the \"odd\" style, the extended part of the array is created by\n subtracting the reflected values from two times the edge value.\n **pad_width_kwargs\n The keyword arguments form of ``pad_width``.\n One of ``pad_width`` or ``pad_width_kwargs`` must be provided.\n\n Returns\n -------\n padded : Dataset\n Dataset with the padded coordinates and data.\n\n See Also\n --------\n Dataset.shift, Dataset.roll, Dataset.bfill, Dataset.ffill, numpy.pad, dask.array.pad\n\n Notes\n -----\n By default when ``mode=\"constant\"`` and ``constant_values=None``, integer types will be\n promoted to ``float`` and padded with ``np.nan``. To avoid type promotion\n specify ``constant_values=np.nan``\n\n Examples\n --------\n >>> ds = xr.Dataset({\"foo\": (\"x\", range(5))})\n >>> ds.pad(x=(1, 2))\n <xarray.Dataset>\n Dimensions: (x: 8)\n Dimensions without coordinates: x\n Data variables:\n foo (x) float64 nan 0.0 1.0 2.0 3.0 4.0 nan nan\n \"\"\"\n pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, \"pad\")\n\n if mode in (\"edge\", \"reflect\", \"symmetric\", \"wrap\"):\n coord_pad_mode = mode\n coord_pad_options = {\n \"stat_length\": stat_length,\n \"constant_values\": constant_values,\n \"end_values\": end_values,\n \"reflect_type\": reflect_type,\n }\n else:\n coord_pad_mode = \"constant\"\n coord_pad_options = {}\n\n variables = {}\n for name, var in self.variables.items():\n var_pad_width = {k: v for k, v in pad_width.items() if k in var.dims}\n if not var_pad_width:\n variables[name] = var\n elif name in self.data_vars:\n variables[name] = var.pad(\n pad_width=var_pad_width,\n mode=mode,\n stat_length=stat_length,\n constant_values=constant_values,\n end_values=end_values,\n reflect_type=reflect_type,\n )\n else:\n variables[name] = var.pad(\n pad_width=var_pad_width,\n mode=coord_pad_mode,\n **coord_pad_options, # type: ignore[arg-type]\n )\n\n return self._replace_vars_and_dims(variables)\n\n def idxmin(\n self,\n dim: Hashable = None,\n skipna: bool = None,\n fill_value: Any = dtypes.NA,\n keep_attrs: bool = None,\n ) -> \"Dataset\":\n \"\"\"Return the coordinate label of the minimum value along a dimension.\n\n Returns a new `Dataset` named after the dimension with the values of\n the coordinate labels along that dimension corresponding to minimum\n values along that dimension.\n\n In comparison to :py:meth:`~Dataset.argmin`, this returns the\n coordinate label while :py:meth:`~Dataset.argmin` returns the index.\n\n Parameters\n ----------\n dim : str, optional\n Dimension over which to apply `idxmin`. This is optional for 1D\n variables, but required for variables with 2 or more dimensions.\n skipna : bool or None, default: None\n If True, skip missing values (as marked by NaN). By default, only\n skips missing values for ``float``, ``complex``, and ``object``\n dtypes; other dtypes either do not have a sentinel missing value\n (``int``) or ``skipna=True`` has not been implemented\n (``datetime64`` or ``timedelta64``).\n fill_value : Any, default: NaN\n Value to be filled in case all of the values along a dimension are\n null. By default this is NaN. The fill value and result are\n automatically converted to a compatible dtype if possible.\n Ignored if ``skipna`` is False.\n keep_attrs : bool, default: False\n If True, the attributes (``attrs``) will be copied from the\n original object to the new one. 
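# --- Added pad() sketch with a non-constant mode: per the note above,
# "wrap" pads the dimension coordinate with the same mode as the data.
import xarray as xr

ds = xr.Dataset({"foo": ("x", [1, 2, 3])}, coords={"x": [0, 1, 2]})
padded = ds.pad(x=(1, 1), mode="wrap")
# padded["foo"] is [3, 1, 2, 3, 1] and padded["x"] is [2, 0, 1, 2, 0]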
If False (default), the new object\n will be returned without attributes.\n\n Returns\n -------\n reduced : Dataset\n New `Dataset` object with `idxmin` applied to its data and the\n indicated dimension removed.\n\n See Also\n --------\n DataArray.idxmin, Dataset.idxmax, Dataset.min, Dataset.argmin\n\n Examples\n --------\n >>> array1 = xr.DataArray(\n ... [0, 2, 1, 0, -2], dims=\"x\", coords={\"x\": [\"a\", \"b\", \"c\", \"d\", \"e\"]}\n ... )\n >>> array2 = xr.DataArray(\n ... [\n ... [2.0, 1.0, 2.0, 0.0, -2.0],\n ... [-4.0, np.NaN, 2.0, np.NaN, -2.0],\n ... [np.NaN, np.NaN, 1.0, np.NaN, np.NaN],\n ... ],\n ... dims=[\"y\", \"x\"],\n ... coords={\"y\": [-1, 0, 1], \"x\": [\"a\", \"b\", \"c\", \"d\", \"e\"]},\n ... )\n >>> ds = xr.Dataset({\"int\": array1, \"float\": array2})\n >>> ds.min(dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 3)\n Coordinates:\n * y (y) int64 -1 0 1\n Data variables:\n int int64 -2\n float (y) float64 -2.0 -4.0 1.0\n >>> ds.argmin(dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 3)\n Coordinates:\n * y (y) int64 -1 0 1\n Data variables:\n int int64 4\n float (y) int64 4 0 2\n >>> ds.idxmin(dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 3)\n Coordinates:\n * y (y) int64 -1 0 1\n Data variables:\n int <U1 'e'\n float (y) object 'e' 'a' 'c'\n \"\"\"\n return self.map(\n methodcaller(\n \"idxmin\",\n dim=dim,\n skipna=skipna,\n fill_value=fill_value,\n keep_attrs=keep_attrs,\n )\n )\n\n def idxmax(\n self,\n dim: Hashable = None,\n skipna: bool = None,\n fill_value: Any = dtypes.NA,\n keep_attrs: bool = None,\n ) -> \"Dataset\":\n \"\"\"Return the coordinate label of the maximum value along a dimension.\n\n Returns a new `Dataset` named after the dimension with the values of\n the coordinate labels along that dimension corresponding to maximum\n values along that dimension.\n\n In comparison to :py:meth:`~Dataset.argmax`, this returns the\n coordinate label while :py:meth:`~Dataset.argmax` returns the index.\n\n Parameters\n ----------\n dim : str, optional\n Dimension over which to apply `idxmax`. This is optional for 1D\n variables, but required for variables with 2 or more dimensions.\n skipna : bool or None, default: None\n If True, skip missing values (as marked by NaN). By default, only\n skips missing values for ``float``, ``complex``, and ``object``\n dtypes; other dtypes either do not have a sentinel missing value\n (``int``) or ``skipna=True`` has not been implemented\n (``datetime64`` or ``timedelta64``).\n fill_value : Any, default: NaN\n Value to be filled in case all of the values along a dimension are\n null. By default this is NaN. The fill value and result are\n automatically converted to a compatible dtype if possible.\n Ignored if ``skipna`` is False.\n keep_attrs : bool, default: False\n If True, the attributes (``attrs``) will be copied from the\n original object to the new one. If False (default), the new object\n will be returned without attributes.\n\n Returns\n -------\n reduced : Dataset\n New `Dataset` object with `idxmax` applied to its data and the\n indicated dimension removed.\n\n See Also\n --------\n DataArray.idxmax, Dataset.idxmin, Dataset.max, Dataset.argmax\n\n Examples\n --------\n >>> array1 = xr.DataArray(\n ... [0, 2, 1, 0, -2], dims=\"x\", coords={\"x\": [\"a\", \"b\", \"c\", \"d\", \"e\"]}\n ... )\n >>> array2 = xr.DataArray(\n ... [\n ... [2.0, 1.0, 2.0, 0.0, -2.0],\n ... [-4.0, np.NaN, 2.0, np.NaN, -2.0],\n ... [np.NaN, np.NaN, 1.0, np.NaN, np.NaN],\n ... ],\n ... dims=[\"y\", \"x\"],\n ... 
coords={\"y\": [-1, 0, 1], \"x\": [\"a\", \"b\", \"c\", \"d\", \"e\"]},\n ... )\n >>> ds = xr.Dataset({\"int\": array1, \"float\": array2})\n >>> ds.max(dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 3)\n Coordinates:\n * y (y) int64 -1 0 1\n Data variables:\n int int64 2\n float (y) float64 2.0 2.0 1.0\n >>> ds.argmax(dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 3)\n Coordinates:\n * y (y) int64 -1 0 1\n Data variables:\n int int64 1\n float (y) int64 0 2 2\n >>> ds.idxmax(dim=\"x\")\n <xarray.Dataset>\n Dimensions: (y: 3)\n Coordinates:\n * y (y) int64 -1 0 1\n Data variables:\n int <U1 'b'\n float (y) object 'a' 'c' 'c'\n \"\"\"\n return self.map(\n methodcaller(\n \"idxmax\",\n dim=dim,\n skipna=skipna,\n fill_value=fill_value,\n keep_attrs=keep_attrs,\n )\n )\n\n def argmin(self, dim=None, **kwargs):\n \"\"\"Indices of the minima of the member variables.\n\n If there are multiple minima, the indices of the first one found will be\n returned.\n\n Parameters\n ----------\n dim : str, optional\n The dimension over which to find the minimum. By default, finds minimum over\n all dimensions - for now returning an int for backward compatibility, but\n this is deprecated, in future will be an error, since DataArray.argmin will\n return a dict with indices for all dimensions, which does not make sense for\n a Dataset.\n keep_attrs : bool, optional\n If True, the attributes (`attrs`) will be copied from the original\n object to the new one. If False (default), the new object will be\n returned without attributes.\n skipna : bool, optional\n If True, skip missing values (as marked by NaN). By default, only\n skips missing values for float dtypes; other dtypes either do not\n have a sentinel missing value (int) or skipna=True has not been\n implemented (object, datetime64 or timedelta64).\n\n Returns\n -------\n result : Dataset\n\n See Also\n --------\n DataArray.argmin\n \"\"\"\n if dim is None:\n warnings.warn(\n \"Once the behaviour of DataArray.argmin() and Variable.argmin() without \"\n \"dim changes to return a dict of indices of each dimension, for \"\n \"consistency it will be an error to call Dataset.argmin() with no argument,\"\n \"since we don't return a dict of Datasets.\",\n DeprecationWarning,\n stacklevel=2,\n )\n if (\n dim is None\n or (not isinstance(dim, Sequence) and dim is not ...)\n or isinstance(dim, str)\n ):\n # Return int index if single dimension is passed, and is not part of a\n # sequence\n argmin_func = getattr(duck_array_ops, \"argmin\")\n return self.reduce(argmin_func, dim=dim, **kwargs)\n else:\n raise ValueError(\n \"When dim is a sequence or ..., DataArray.argmin() returns a dict. \"\n \"dicts cannot be contained in a Dataset, so cannot call \"\n \"Dataset.argmin() with a sequence or ... for dim\"\n )\n\n def argmax(self, dim=None, **kwargs):\n \"\"\"Indices of the maxima of the member variables.\n\n If there are multiple maxima, the indices of the first one found will be\n returned.\n\n Parameters\n ----------\n dim : str, optional\n The dimension over which to find the maximum. By default, finds maximum over\n all dimensions - for now returning an int for backward compatibility, but\n this is deprecated, in future will be an error, since DataArray.argmax will\n return a dict with indices for all dimensions, which does not make sense for\n a Dataset.\n keep_attrs : bool, optional\n If True, the attributes (`attrs`) will be copied from the original\n object to the new one. 
If False (default), the new object will be\n returned without attributes.\n skipna : bool, optional\n If True, skip missing values (as marked by NaN). By default, only\n skips missing values for float dtypes; other dtypes either do not\n have a sentinel missing value (int) or skipna=True has not been\n implemented (object, datetime64 or timedelta64).\n\n Returns\n -------\n result : Dataset\n\n See Also\n --------\n DataArray.argmax\n\n \"\"\"\n if dim is None:\n warnings.warn(\n \"Once the behaviour of DataArray.argmin() and Variable.argmin() without \"\n \"dim changes to return a dict of indices of each dimension, for \"\n \"consistency it will be an error to call Dataset.argmin() with no argument,\"\n \"since we don't return a dict of Datasets.\",\n DeprecationWarning,\n stacklevel=2,\n )\n if (\n dim is None\n or (not isinstance(dim, Sequence) and dim is not ...)\n or isinstance(dim, str)\n ):\n # Return int index if single dimension is passed, and is not part of a\n # sequence\n argmax_func = getattr(duck_array_ops, \"argmax\")\n return self.reduce(argmax_func, dim=dim, **kwargs)\n else:\n raise ValueError(\n \"When dim is a sequence or ..., DataArray.argmin() returns a dict. \"\n \"dicts cannot be contained in a Dataset, so cannot call \"\n \"Dataset.argmin() with a sequence or ... for dim\"\n )\n\n def query(\n self,\n queries: Mapping[Hashable, Any] = None,\n parser: str = \"pandas\",\n engine: str = None,\n missing_dims: str = \"raise\",\n **queries_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Return a new dataset with each array indexed along the specified\n dimension(s), where the indexers are given as strings containing\n Python expressions to be evaluated against the data variables in the\n dataset.\n\n Parameters\n ----------\n queries : dict, optional\n A dict with keys matching dimensions and values given by strings\n containing Python expressions to be evaluated against the data variables\n in the dataset. The expressions will be evaluated using the pandas\n eval() function, and can contain any valid Python expressions but cannot\n contain any Python statements.\n parser : {\"pandas\", \"python\"}, default: \"pandas\"\n The parser to use to construct the syntax tree from the expression.\n The default of 'pandas' parses code slightly different than standard\n Python. Alternatively, you can parse an expression using the 'python'\n parser to retain strict Python semantics.\n engine: {\"python\", \"numexpr\", None}, default: None\n The engine used to evaluate the expression. 
Supported engines are:\n - None: tries to use numexpr, falls back to python\n - \"numexpr\": evaluates expressions using numexpr\n - \"python\": performs operations as if you had eval’d in top level python\n missing_dims : {\"raise\", \"warn\", \"ignore\"}, default: \"raise\"\n What to do if dimensions that should be selected from are not present in the\n Dataset:\n - \"raise\": raise an exception\n - \"warning\": raise a warning, and ignore the missing dimensions\n - \"ignore\": ignore the missing dimensions\n **queries_kwargs : {dim: query, ...}, optional\n The keyword arguments form of ``queries``.\n One of queries or queries_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n array and dimension is indexed by the results of the appropriate\n queries.\n\n See Also\n --------\n Dataset.isel\n pandas.eval\n\n \"\"\"\n\n # allow queries to be given either as a dict or as kwargs\n queries = either_dict_or_kwargs(queries, queries_kwargs, \"query\")\n\n # check queries\n for dim, expr in queries.items():\n if not isinstance(expr, str):\n msg = f\"expr for dim {dim} must be a string to be evaluated, {type(expr)} given\"\n raise ValueError(msg)\n\n # evaluate the queries to create the indexers\n indexers = {\n dim: pd.eval(expr, resolvers=[self], parser=parser, engine=engine)\n for dim, expr in queries.items()\n }\n\n # apply the selection\n return self.isel(indexers, missing_dims=missing_dims)\n\n def curvefit(\n self,\n coords: Union[Union[str, \"DataArray\"], Iterable[Union[str, \"DataArray\"]]],\n func: Callable[..., Any],\n reduce_dims: Union[Hashable, Iterable[Hashable]] = None,\n skipna: bool = True,\n p0: Dict[str, Any] = None,\n bounds: Dict[str, Any] = None,\n param_names: Sequence[str] = None,\n kwargs: Dict[str, Any] = None,\n ):\n \"\"\"\n Curve fitting optimization for arbitrary functions.\n\n Wraps `scipy.optimize.curve_fit` with `apply_ufunc`.\n\n Parameters\n ----------\n coords : DataArray, str or sequence of DataArray, str\n Independent coordinate(s) over which to perform the curve fitting. Must share\n at least one dimension with the calling object. When fitting multi-dimensional\n functions, supply `coords` as a sequence in the same order as arguments in\n `func`. To fit along existing dimensions of the calling object, `coords` can\n also be specified as a str or sequence of strs.\n func : callable\n User specified function in the form `f(x, *params)` which returns a numpy\n array of length `len(x)`. `params` are the fittable parameters which are optimized\n by scipy curve_fit. `x` can also be specified as a sequence containing multiple\n coordinates, e.g. `f((x0, x1), *params)`.\n reduce_dims : str or sequence of str\n Additional dimension(s) over which to aggregate while fitting. For example,\n calling `ds.curvefit(coords='time', reduce_dims=['lat', 'lon'], ...)` will\n aggregate all lat and lon points and fit the specified function along the\n time dimension.\n skipna : bool, optional\n Whether to skip missing values when fitting. Default is True.\n p0 : dictionary, optional\n Optional dictionary of parameter names to initial guesses passed to the\n `curve_fit` `p0` arg. If none or only some parameters are passed, the rest will\n be assigned initial values following the default scipy behavior.\n bounds : dictionary, optional\n Optional dictionary of parameter names to bounding values passed to the\n `curve_fit` `bounds` arg. 
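# --- Added hedged query() example: the expression string is evaluated by
# pandas.eval against the data variables, as described above.
import numpy as np
import xarray as xr

ds = xr.Dataset({"a": ("x", np.arange(5))})
print(ds.query(x="a > 2"))  # keeps the x positions where a > 2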
If none or only some parameters are passed, the rest\n will be unbounded following the default scipy behavior.\n param_names: seq, optional\n Sequence of names for the fittable parameters of `func`. If not supplied,\n this will be automatically determined by arguments of `func`. `param_names`\n should be manually supplied when fitting a function that takes a variable\n number of parameters.\n kwargs : dictionary\n Additional keyword arguments to passed to scipy curve_fit.\n\n Returns\n -------\n curvefit_results : Dataset\n A single dataset which contains:\n\n [var]_curvefit_coefficients\n The coefficients of the best fit.\n [var]_curvefit_covariance\n The covariance matrix of the coefficient estimates.\n\n See also\n --------\n Dataset.polyfit\n scipy.optimize.curve_fit\n \"\"\"\n from scipy.optimize import curve_fit\n\n if p0 is None:\n p0 = {}\n if bounds is None:\n bounds = {}\n if kwargs is None:\n kwargs = {}\n\n if not reduce_dims:\n reduce_dims_ = []\n elif isinstance(reduce_dims, str) or not isinstance(reduce_dims, Iterable):\n reduce_dims_ = [reduce_dims]\n else:\n reduce_dims_ = list(reduce_dims)\n\n if (\n isinstance(coords, str)\n or isinstance(coords, xr.DataArray)\n or not isinstance(coords, Iterable)\n ):\n coords = [coords]\n coords_ = [self[coord] if isinstance(coord, str) else coord for coord in coords]\n\n # Determine whether any coords are dims on self\n for coord in coords_:\n reduce_dims_ += [c for c in self.dims if coord.equals(self[c])]\n reduce_dims_ = list(set(reduce_dims_))\n preserved_dims = list(set(self.dims) - set(reduce_dims_))\n if not reduce_dims_:\n raise ValueError(\n \"No arguments to `coords` were identified as a dimension on the calling \"\n \"object, and no dims were supplied to `reduce_dims`. This would result \"\n \"in fitting on scalar data.\"\n )\n\n # Broadcast all coords with each other\n coords_ = xr.broadcast(*coords_)\n coords_ = [\n coord.broadcast_like(self, exclude=preserved_dims) for coord in coords_\n ]\n\n params, func_args = _get_func_args(func, param_names)\n param_defaults, bounds_defaults = _initialize_curvefit_params(\n params, p0, bounds, func_args\n )\n n_params = len(params)\n kwargs.setdefault(\"p0\", [param_defaults[p] for p in params])\n kwargs.setdefault(\n \"bounds\",\n [\n [bounds_defaults[p][0] for p in params],\n [bounds_defaults[p][1] for p in params],\n ],\n )\n\n def _wrapper(Y, *coords_, **kwargs):\n # Wrap curve_fit with raveled coordinates and pointwise NaN handling\n x = np.vstack([c.ravel() for c in coords_])\n y = Y.ravel()\n if skipna:\n mask = np.all([np.any(~np.isnan(x), axis=0), ~np.isnan(y)], axis=0)\n x = x[:, mask]\n y = y[mask]\n if not len(y):\n popt = np.full([n_params], np.nan)\n pcov = np.full([n_params, n_params], np.nan)\n return popt, pcov\n x = np.squeeze(x)\n popt, pcov = curve_fit(func, x, y, **kwargs)\n return popt, pcov\n\n result = xr.Dataset()\n for name, da in self.data_vars.items():\n if name is xr.core.dataarray._THIS_ARRAY:\n name = \"\"\n else:\n name = f\"{str(name)}_\"\n\n popt, pcov = xr.apply_ufunc(\n _wrapper,\n da,\n *coords_,\n vectorize=True,\n dask=\"parallelized\",\n input_core_dims=[reduce_dims_ for d in range(len(coords_) + 1)],\n output_core_dims=[[\"param\"], [\"cov_i\", \"cov_j\"]],\n dask_gufunc_kwargs={\n \"output_sizes\": {\n \"param\": n_params,\n \"cov_i\": n_params,\n \"cov_j\": n_params,\n },\n },\n output_dtypes=(np.float64, np.float64),\n exclude_dims=set(reduce_dims_),\n kwargs=kwargs,\n )\n result[name + \"curvefit_coefficients\"] = popt\n result[name + 
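# --- Added illustrative curvefit() call (assumes scipy is installed): fit
# an exponential decay along "x"; the parameter name "tau" is inferred
# from the function signature, per the machinery above.
import numpy as np
import xarray as xr

def decay(x, tau):
    return np.exp(-x / tau)

x = np.linspace(0, 5, 50)
ds = xr.Dataset({"y": ("x", decay(x, 2.0))}, coords={"x": x})
fit = ds.curvefit("x", decay)
print(fit["y_curvefit_coefficients"].sel(param="tau").values)  # ~2.0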
\"curvefit_covariance\"] = pcov\n\n result = result.assign_coords(\n {\"param\": params, \"cov_i\": params, \"cov_j\": params}\n )\n result.attrs = self.attrs.copy()\n\n return result\n\n\nops.inject_all_ops_and_reduce_methods(Dataset, array_only=False)\n"
] | [
[
"numpy.dot",
"numpy.linalg.matrix_rank",
"numpy.asarray",
"numpy.squeeze",
"numpy.issubdtype",
"scipy.optimize.curve_fit",
"numpy.vander",
"numpy.linalg.svd",
"numpy.datetime_data",
"numpy.arange",
"pandas.Index",
"numpy.full",
"numpy.outer",
"numpy.zeros",
"pandas.MultiIndex",
"numpy.isnan",
"pandas.Categorical",
"numpy.int_",
"numpy.core.finfo",
"pandas.MultiIndex.from_product",
"pandas.eval",
"numpy.isfinite",
"numpy.prod",
"numpy.empty"
]
] |
mimikaTU/pandas | [
"4fb963b6a3261940de5891323a8d217087a2a9a1",
"d2ab4076512f5571b74e6ea2936910841b10dbe2"
] | [
"pandas/util/testing.py",
"pandas/tests/indexes/test_base.py"
] | [
"from __future__ import division\n# pylint: disable-msg=W0402\n\nimport re\nimport string\nimport sys\nimport tempfile\nimport warnings\nimport inspect\nimport os\nimport subprocess\nimport locale\nimport traceback\n\nfrom datetime import datetime\nfrom functools import wraps\nfrom contextlib import contextmanager\n\nfrom numpy.random import randn, rand\nimport numpy as np\n\nimport pandas as pd\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.dtypes.missing import array_equivalent\nfrom pandas.core.dtypes.common import (\n is_datetimelike_v_numeric,\n is_datetimelike_v_object,\n is_number, is_bool,\n needs_i8_conversion,\n is_categorical_dtype,\n is_interval_dtype,\n is_sequence,\n is_list_like)\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.core.algorithms import take_1d\nimport pandas.core.common as com\n\nimport pandas.compat as compat\nfrom pandas.compat import (\n filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,\n raise_with_traceback, httplib, StringIO, PY3)\n\nfrom pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex,\n DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex,\n Index, MultiIndex,\n Series, DataFrame, Panel)\n\nfrom pandas._libs import testing as _testing\nfrom pandas.io.common import urlopen\n\n\nN = 30\nK = 4\n_RAISE_NETWORK_ERROR_DEFAULT = False\n\n# set testing_mode\n_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)\n\n\ndef set_testing_mode():\n # set the testing mode filters\n testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')\n if 'deprecate' in testing_mode:\n warnings.simplefilter('always', _testing_mode_warnings)\n\n\ndef reset_testing_mode():\n # reset the testing mode filters\n testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')\n if 'deprecate' in testing_mode:\n warnings.simplefilter('ignore', _testing_mode_warnings)\n\n\nset_testing_mode()\n\n\ndef reset_display_options():\n \"\"\"\n Reset the display options for printing and representing objects.\n \"\"\"\n\n pd.reset_option('^display.', silent=True)\n\n\ndef round_trip_pickle(obj, path=None):\n \"\"\"\n Pickle an object and then read it again.\n\n Parameters\n ----------\n obj : pandas object\n The object to pickle and then re-read.\n path : str, default None\n The path where the pickled object is written and then read.\n\n Returns\n -------\n round_trip_pickled_object : pandas object\n The original object that was pickled and then re-read.\n \"\"\"\n\n if path is None:\n path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))\n with ensure_clean(path) as path:\n pd.to_pickle(obj, path)\n return pd.read_pickle(path)\n\n\ndef round_trip_pathlib(writer, reader, path=None):\n \"\"\"\n Write an object to file specified by a pathlib.Path and read it back\n\n Parameters\n ----------\n writer : callable bound to pandas object\n IO writing function (e.g. DataFrame.to_csv )\n reader : callable\n IO reading function (e.g. 
pd.read_csv )\n path : str, default None\n The path where the object is written and then read.\n\n Returns\n -------\n round_trip_object : pandas object\n The original object that was serialized and then re-read.\n \"\"\"\n\n import pytest\n Path = pytest.importorskip('pathlib').Path\n if path is None:\n path = '___pathlib___'\n with ensure_clean(path) as path:\n writer(Path(path))\n obj = reader(Path(path))\n return obj\n\n\ndef round_trip_localpath(writer, reader, path=None):\n \"\"\"\n Write an object to file specified by a py.path LocalPath and read it back\n\n Parameters\n ----------\n writer : callable bound to pandas object\n IO writing function (e.g. DataFrame.to_csv )\n reader : callable\n IO reading function (e.g. pd.read_csv )\n path : str, default None\n The path where the object is written and then read.\n\n Returns\n -------\n round_trip_object : pandas object\n The original object that was serialized and then re-read.\n \"\"\"\n import pytest\n LocalPath = pytest.importorskip('py.path').local\n if path is None:\n path = '___localpath___'\n with ensure_clean(path) as path:\n writer(LocalPath(path))\n obj = reader(LocalPath(path))\n return obj\n\n\n@contextmanager\ndef decompress_file(path, compression):\n \"\"\"\n Open a compressed file and return a file object\n\n Parameters\n ----------\n path : str\n The path where the file is read from\n\n compression : {'gzip', 'bz2', 'zip', 'xz', None}\n Name of the decompression to use\n\n Returns\n -------\n f : file object\n \"\"\"\n\n if compression is None:\n f = open(path, 'rb')\n elif compression == 'gzip':\n import gzip\n f = gzip.open(path, 'rb')\n elif compression == 'bz2':\n import bz2\n f = bz2.BZ2File(path, 'rb')\n elif compression == 'xz':\n lzma = compat.import_lzma()\n f = lzma.LZMAFile(path, 'rb')\n elif compression == 'zip':\n import zipfile\n zip_file = zipfile.ZipFile(path)\n zip_names = zip_file.namelist()\n if len(zip_names) == 1:\n f = zip_file.open(zip_names.pop())\n else:\n raise ValueError('ZIP file {} error. Only one file per ZIP.'\n .format(path))\n else:\n msg = 'Unrecognized compression type: {}'.format(compression)\n raise ValueError(msg)\n\n yield f\n f.close()\n\n\ndef assert_almost_equal(left, right, check_exact=False,\n check_dtype='equiv', check_less_precise=False,\n **kwargs):\n \"\"\"\n Check that the left and right objects are approximately equal.\n\n Parameters\n ----------\n left : object\n right : object\n check_exact : bool, default False\n Whether to compare number exactly.\n check_dtype: bool, default True\n check dtype if both a and b are the same type\n check_less_precise : bool or int, default False\n Specify comparison precision. 
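# decompress_file above dispatches on the compression name; the gzip
# branch in isolation, written as a runnable round trip over a temp file:
import gzip
import os
import tempfile

fd, path = tempfile.mkstemp(suffix='.gz')
os.close(fd)
with gzip.open(path, 'wb') as f:
    f.write(b'a,b\n1,2\n')
with gzip.open(path, 'rb') as f:
    assert f.read() == b'a,b\n1,2\n'
os.remove(path)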
Only used when check_exact is False.\n 5 digits (False) or 3 digits (True) after decimal points are compared.\n If int, then specify the digits to compare\n \"\"\"\n if isinstance(left, pd.Index):\n return assert_index_equal(left, right, check_exact=check_exact,\n exact=check_dtype,\n check_less_precise=check_less_precise,\n **kwargs)\n\n elif isinstance(left, pd.Series):\n return assert_series_equal(left, right, check_exact=check_exact,\n check_dtype=check_dtype,\n check_less_precise=check_less_precise,\n **kwargs)\n\n elif isinstance(left, pd.DataFrame):\n return assert_frame_equal(left, right, check_exact=check_exact,\n check_dtype=check_dtype,\n check_less_precise=check_less_precise,\n **kwargs)\n\n else:\n # other sequences\n if check_dtype:\n if is_number(left) and is_number(right):\n # do not compare numeric classes, like np.float64 and float\n pass\n elif is_bool(left) and is_bool(right):\n # do not compare bool classes, like np.bool_ and bool\n pass\n else:\n if (isinstance(left, np.ndarray) or\n isinstance(right, np.ndarray)):\n obj = 'numpy array'\n else:\n obj = 'Input'\n assert_class_equal(left, right, obj=obj)\n return _testing.assert_almost_equal(\n left, right,\n check_dtype=check_dtype,\n check_less_precise=check_less_precise,\n **kwargs)\n\n\ndef _check_isinstance(left, right, cls):\n \"\"\"\n Helper method for our assert_* methods that ensures that\n the two objects being compared have the right type before\n proceeding with the comparison.\n\n Parameters\n ----------\n left : The first object being compared.\n right : The second object being compared.\n cls : The class type to check against.\n\n Raises\n ------\n AssertionError : Either `left` or `right` is not an instance of `cls`.\n \"\"\"\n\n err_msg = \"{name} Expected type {exp_type}, found {act_type} instead\"\n cls_name = cls.__name__\n\n if not isinstance(left, cls):\n raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,\n act_type=type(left)))\n if not isinstance(right, cls):\n raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,\n act_type=type(right)))\n\n\ndef assert_dict_equal(left, right, compare_keys=True):\n\n _check_isinstance(left, right, dict)\n return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)\n\n\ndef randbool(size=(), p=0.5):\n return rand(*size) <= p\n\n\nRANDS_CHARS = np.array(list(string.ascii_letters + string.digits),\n dtype=(np.str_, 1))\nRANDU_CHARS = np.array(list(u(\"\").join(map(unichr, lrange(1488, 1488 + 26))) +\n string.digits), dtype=(np.unicode_, 1))\n\n\ndef rands_array(nchars, size, dtype='O'):\n \"\"\"Generate an array of byte strings.\"\"\"\n retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))\n .view((np.str_, nchars)).reshape(size))\n if dtype is None:\n return retval\n else:\n return retval.astype(dtype)\n\n\ndef randu_array(nchars, size, dtype='O'):\n \"\"\"Generate an array of unicode strings.\"\"\"\n retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))\n .view((np.unicode_, nchars)).reshape(size))\n if dtype is None:\n return retval\n else:\n return retval.astype(dtype)\n\n\ndef rands(nchars):\n \"\"\"\n Generate one random byte string.\n\n See `rands_array` if you want to create an array of random strings.\n\n \"\"\"\n return ''.join(np.random.choice(RANDS_CHARS, nchars))\n\n\ndef randu(nchars):\n \"\"\"\n Generate one random unicode string.\n\n See `randu_array` if you want to create an array of random unicode strings.\n\n \"\"\"\n return ''.join(np.random.choice(RANDU_CHARS, 
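# The fixed-width string trick used by rands_array above, in isolation:
# draw one character at a time with np.random.choice, then reinterpret the
# flat character array as nchars-wide strings via .view (ASCII letters and
# digits assumed, as in RANDS_CHARS).
import string
import numpy as np

chars = np.array(list(string.ascii_letters + string.digits),
                 dtype=(np.str_, 1))
nchars, size = 5, 4
arr = (np.random.choice(chars, size=nchars * size)
       .view((np.str_, nchars)).reshape(size))
# arr now holds four random 5-character strings
assert arr.shape == (4,) and all(len(s) == 5 for s in arr)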
nchars))\n\n\ndef close(fignum=None):\n from matplotlib.pyplot import get_fignums, close as _close\n\n if fignum is None:\n for fignum in get_fignums():\n _close(fignum)\n else:\n _close(fignum)\n\n\n# -----------------------------------------------------------------------------\n# locale utilities\n\n\ndef check_output(*popenargs, **kwargs):\n # shamelessly taken from Python 2.7 source\n r\"\"\"Run command with arguments and return its output as a byte string.\n\n If the exit code was non-zero it raises a CalledProcessError. The\n CalledProcessError object will have the return code in the returncode\n attribute and output in the output attribute.\n\n The arguments are the same as for the Popen constructor. Example:\n\n >>> check_output([\"ls\", \"-l\", \"/dev/null\"])\n 'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\\n'\n\n The stdout argument is not allowed as it is used internally.\n To capture standard error in the result, use stderr=STDOUT.\n\n >>> check_output([\"/bin/sh\", \"-c\",\n ... \"ls -l non_existent_file ; exit 0\"],\n ... stderr=STDOUT)\n 'ls: non_existent_file: No such file or directory\\n'\n \"\"\"\n if 'stdout' in kwargs:\n raise ValueError('stdout argument not allowed, it will be overridden.')\n process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n *popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n raise subprocess.CalledProcessError(retcode, cmd, output=output)\n return output\n\n\ndef _default_locale_getter():\n try:\n raw_locales = check_output(['locale -a'], shell=True)\n except subprocess.CalledProcessError as e:\n raise type(e)(\"{exception}, the 'locale -a' command cannot be found \"\n \"on your system\".format(exception=e))\n return raw_locales\n\n\ndef get_locales(prefix=None, normalize=True,\n locale_getter=_default_locale_getter):\n \"\"\"Get all the locales that are available on the system.\n\n Parameters\n ----------\n prefix : str\n If not ``None`` then return only those locales with the prefix\n provided. For example to get all English language locales (those that\n start with ``\"en\"``), pass ``prefix=\"en\"``.\n normalize : bool\n Call ``locale.normalize`` on the resulting list of available locales.\n If ``True``, only locales that can be set without throwing an\n ``Exception`` are returned.\n locale_getter : callable\n The function to use to retrieve the current locales. This should return\n a string with each locale separated by a newline character.\n\n Returns\n -------\n locales : list of strings\n A list of locale strings that can be set with ``locale.setlocale()``.\n For example::\n\n locale.setlocale(locale.LC_ALL, locale_string)\n\n On error will return None (no locale available, e.g. 
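# check_output above is the Python 2.7 backport; on any modern Python the
# stdlib call behaves the same way (a sketch; assumes a POSIX system with
# an echo binary on PATH):
import subprocess

out = subprocess.check_output(['echo', 'hello'])
assert out.strip() == b'hello'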
Windows)\n\n \"\"\"\n try:\n raw_locales = locale_getter()\n except Exception:\n return None\n\n try:\n # raw_locales is \"\\n\" separated list of locales\n # it may contain non-decodable parts, so split\n # extract what we can and then rejoin.\n raw_locales = raw_locales.split(b'\\n')\n out_locales = []\n for x in raw_locales:\n if PY3:\n out_locales.append(str(\n x, encoding=pd.options.display.encoding))\n else:\n out_locales.append(str(x))\n\n except TypeError:\n pass\n\n if prefix is None:\n return _valid_locales(out_locales, normalize)\n\n found = re.compile('{prefix}.*'.format(prefix=prefix)) \\\n .findall('\\n'.join(out_locales))\n return _valid_locales(found, normalize)\n\n\n@contextmanager\ndef set_locale(new_locale, lc_var=locale.LC_ALL):\n \"\"\"Context manager for temporarily setting a locale.\n\n Parameters\n ----------\n new_locale : str or tuple\n A string of the form <language_country>.<encoding>. For example to set\n the current locale to US English with a UTF8 encoding, you would pass\n \"en_US.UTF-8\".\n\n Notes\n -----\n This is useful when you want to run a particular block of code under a\n particular locale, without globally setting the locale. This probably isn't\n thread-safe.\n \"\"\"\n current_locale = locale.getlocale()\n\n try:\n locale.setlocale(lc_var, new_locale)\n\n try:\n normalized_locale = locale.getlocale()\n except ValueError:\n yield new_locale\n else:\n if com._all_not_none(*normalized_locale):\n yield '.'.join(normalized_locale)\n else:\n yield new_locale\n finally:\n locale.setlocale(lc_var, current_locale)\n\n\ndef _can_set_locale(lc):\n \"\"\"Check to see if we can set a locale without throwing an exception.\n\n Parameters\n ----------\n lc : str\n The locale to attempt to set.\n\n Returns\n -------\n isvalid : bool\n Whether the passed locale can be set\n \"\"\"\n try:\n with set_locale(lc):\n pass\n except locale.Error: # horrible name for a Exception subclass\n return False\n else:\n return True\n\n\ndef _valid_locales(locales, normalize):\n \"\"\"Return a list of normalized locales that do not throw an ``Exception``\n when set.\n\n Parameters\n ----------\n locales : str\n A string where each locale is separated by a newline.\n normalize : bool\n Whether to call ``locale.normalize`` on each locale.\n\n Returns\n -------\n valid_locales : list\n A list of valid locales.\n \"\"\"\n if normalize:\n normalizer = lambda x: locale.normalize(x.strip())\n else:\n normalizer = lambda x: x.strip()\n\n return list(filter(_can_set_locale, map(normalizer, locales)))\n\n# -----------------------------------------------------------------------------\n# Stdout / stderr decorators\n\n\ndef capture_stdout(f):\n \"\"\"\n Decorator to capture stdout in a buffer so that it can be checked\n (or suppressed) during testing.\n\n Parameters\n ----------\n f : callable\n The test that is capturing stdout.\n\n Returns\n -------\n f : callable\n The decorated test ``f``, which captures stdout.\n\n Examples\n --------\n\n >>> from pandas.util.testing import capture_stdout\n >>>\n >>> import sys\n >>>\n >>> @capture_stdout\n ... def test_print_pass():\n ... print(\"foo\")\n ... out = sys.stdout.getvalue()\n ... assert out == \"foo\\n\"\n >>>\n >>> @capture_stdout\n ... def test_print_fail():\n ... print(\"foo\")\n ... out = sys.stdout.getvalue()\n ... 
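# _can_set_locale above probes a locale by trying to set it and restoring
# the previous one; the same probe written standalone (results are
# system-dependent, but 'C' should always survive the filter):
import locale

def can_set(lc):
    try:
        saved = locale.setlocale(locale.LC_ALL)
        locale.setlocale(locale.LC_ALL, lc)
        locale.setlocale(locale.LC_ALL, saved)
        return True
    except locale.Error:
        return False

candidates = ['C', 'en_US.UTF-8', 'definitely_not_a_locale']
valid = [lc for lc in candidates if can_set(lc)]
assert 'C' in valid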
assert out == \"bar\\n\"\n ...\n AssertionError: assert 'foo\\n' == 'bar\\n'\n \"\"\"\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n try:\n sys.stdout = StringIO()\n f(*args, **kwargs)\n finally:\n sys.stdout = sys.__stdout__\n\n return wrapper\n\n\ndef capture_stderr(f):\n \"\"\"\n Decorator to capture stderr in a buffer so that it can be checked\n (or suppressed) during testing.\n\n Parameters\n ----------\n f : callable\n The test that is capturing stderr.\n\n Returns\n -------\n f : callable\n The decorated test ``f``, which captures stderr.\n\n Examples\n --------\n\n >>> from pandas.util.testing import capture_stderr\n >>>\n >>> import sys\n >>>\n >>> @capture_stderr\n ... def test_stderr_pass():\n ... sys.stderr.write(\"foo\")\n ... out = sys.stderr.getvalue()\n ... assert out == \"foo\\n\"\n >>>\n >>> @capture_stderr\n ... def test_stderr_fail():\n ... sys.stderr.write(\"foo\")\n ... out = sys.stderr.getvalue()\n ... assert out == \"bar\\n\"\n ...\n AssertionError: assert 'foo\\n' == 'bar\\n'\n \"\"\"\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n try:\n sys.stderr = StringIO()\n f(*args, **kwargs)\n finally:\n sys.stderr = sys.__stderr__\n\n return wrapper\n\n# -----------------------------------------------------------------------------\n# Console debugging tools\n\n\ndef debug(f, *args, **kwargs):\n from pdb import Pdb as OldPdb\n try:\n from IPython.core.debugger import Pdb\n kw = dict(color_scheme='Linux')\n except ImportError:\n Pdb = OldPdb\n kw = {}\n pdb = Pdb(**kw)\n return pdb.runcall(f, *args, **kwargs)\n\n\ndef pudebug(f, *args, **kwargs):\n import pudb\n return pudb.runcall(f, *args, **kwargs)\n\n\ndef set_trace():\n from IPython.core.debugger import Pdb\n try:\n Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)\n except Exception:\n from pdb import Pdb as OldPdb\n OldPdb().set_trace(sys._getframe().f_back)\n\n# -----------------------------------------------------------------------------\n# contextmanager to ensure the file cleanup\n\n\n@contextmanager\ndef ensure_clean(filename=None, return_filelike=False):\n \"\"\"Gets a temporary path and agrees to remove on close.\n\n Parameters\n ----------\n filename : str (optional)\n if None, creates a temporary file which is then removed when out of\n scope. if passed, creates temporary file with filename as ending.\n return_filelike : bool (default False)\n if True, returns a file-like which is *always* cleaned. 
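# The mechanism behind capture_stdout/capture_stderr above: swap the
# stream for a StringIO, run the body, then restore the real stream in a
# finally block.
import sys
from io import StringIO

def capture_demo():
    saved, sys.stdout = sys.stdout, StringIO()
    try:
        print("foo")
        return sys.stdout.getvalue()
    finally:
        sys.stdout = saved

assert capture_demo() == "foo\n"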
Necessary for\n savefig and other functions which want to append extensions.\n \"\"\"\n filename = filename or ''\n fd = None\n\n if return_filelike:\n f = tempfile.TemporaryFile(suffix=filename)\n try:\n yield f\n finally:\n f.close()\n else:\n # don't generate tempfile if using a path with directory specified\n if len(os.path.dirname(filename)):\n raise ValueError(\"Can't pass a qualified name to ensure_clean()\")\n\n try:\n fd, filename = tempfile.mkstemp(suffix=filename)\n except UnicodeEncodeError:\n import pytest\n pytest.skip('no unicode file names on this system')\n\n try:\n yield filename\n finally:\n try:\n os.close(fd)\n except Exception as e:\n print(\"Couldn't close file descriptor: {fdesc} (file: {fname})\"\n .format(fdesc=fd, fname=filename))\n try:\n if os.path.exists(filename):\n os.remove(filename)\n except Exception as e:\n print(\"Exception on removing file: {error}\".format(error=e))\n\n\ndef get_data_path(f=''):\n \"\"\"Return the path of a data file, these are relative to the current test\n directory.\n \"\"\"\n # get our callers file\n _, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1]\n base_dir = os.path.abspath(os.path.dirname(filename))\n return os.path.join(base_dir, 'data', f)\n\n# -----------------------------------------------------------------------------\n# Comparators\n\n\ndef equalContents(arr1, arr2):\n \"\"\"Checks if the set of unique elements of arr1 and arr2 are equivalent.\n \"\"\"\n return frozenset(arr1) == frozenset(arr2)\n\n\ndef assert_index_equal(left, right, exact='equiv', check_names=True,\n check_less_precise=False, check_exact=True,\n check_categorical=True, obj='Index'):\n \"\"\"Check that left and right Index are equal.\n\n Parameters\n ----------\n left : Index\n right : Index\n exact : bool / string {'equiv'}, default False\n Whether to check the Index class, dtype and inferred_type\n are identical. If 'equiv', then RangeIndex can be substituted for\n Int64Index as well.\n check_names : bool, default True\n Whether to check the names attribute.\n check_less_precise : bool or int, default False\n Specify comparison precision. 
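# ensure_clean above reduces to this contextmanager pattern: hand out a
# mkstemp path, then close the descriptor and remove the file no matter
# how the block exits. A sketch without the pandas-specific error
# reporting; the helper name is hypothetical.
import os
import tempfile
from contextlib import contextmanager

@contextmanager
def ensure_clean_sketch(suffix=''):
    fd, path = tempfile.mkstemp(suffix=suffix)
    try:
        yield path
    finally:
        os.close(fd)
        if os.path.exists(path):
            os.remove(path)

with ensure_clean_sketch('.csv') as p:
    with open(p, 'w') as fh:
        fh.write('x')
assert not os.path.exists(p)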
Only used when check_exact is False.\n 5 digits (False) or 3 digits (True) after decimal points are compared.\n If int, then specify the digits to compare\n check_exact : bool, default True\n Whether to compare number exactly.\n check_categorical : bool, default True\n Whether to compare internal Categorical exactly.\n obj : str, default 'Index'\n Specify object name being compared, internally used to show appropriate\n assertion message\n \"\"\"\n\n def _check_types(l, r, obj='Index'):\n if exact:\n assert_class_equal(left, right, exact=exact, obj=obj)\n assert_attr_equal('dtype', l, r, obj=obj)\n # allow string-like to have different inferred_types\n if l.inferred_type in ('string', 'unicode'):\n assert r.inferred_type in ('string', 'unicode')\n else:\n assert_attr_equal('inferred_type', l, r, obj=obj)\n\n def _get_ilevel_values(index, level):\n # accept level number only\n unique = index.levels[level]\n labels = index.labels[level]\n filled = take_1d(unique.values, labels, fill_value=unique._na_value)\n values = unique._shallow_copy(filled, name=index.names[level])\n return values\n\n # instance validation\n _check_isinstance(left, right, Index)\n\n # class / dtype comparison\n _check_types(left, right, obj=obj)\n\n # level comparison\n if left.nlevels != right.nlevels:\n msg1 = '{obj} levels are different'.format(obj=obj)\n msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)\n msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)\n raise_assert_detail(obj, msg1, msg2, msg3)\n\n # length comparison\n if len(left) != len(right):\n msg1 = '{obj} length are different'.format(obj=obj)\n msg2 = '{length}, {left}'.format(length=len(left), left=left)\n msg3 = '{length}, {right}'.format(length=len(right), right=right)\n raise_assert_detail(obj, msg1, msg2, msg3)\n\n # MultiIndex special comparison for little-friendly error messages\n if left.nlevels > 1:\n for level in range(left.nlevels):\n # cannot use get_level_values here because it can change dtype\n llevel = _get_ilevel_values(left, level)\n rlevel = _get_ilevel_values(right, level)\n\n lobj = 'MultiIndex level [{level}]'.format(level=level)\n assert_index_equal(llevel, rlevel,\n exact=exact, check_names=check_names,\n check_less_precise=check_less_precise,\n check_exact=check_exact, obj=lobj)\n # get_level_values may change dtype\n _check_types(left.levels[level], right.levels[level], obj=obj)\n\n if check_exact:\n if not left.equals(right):\n diff = np.sum((left.values != right.values)\n .astype(int)) * 100.0 / len(left)\n msg = '{obj} values are different ({pct} %)'.format(\n obj=obj, pct=np.round(diff, 5))\n raise_assert_detail(obj, msg, left, right)\n else:\n _testing.assert_almost_equal(left.values, right.values,\n check_less_precise=check_less_precise,\n check_dtype=exact,\n obj=obj, lobj=left, robj=right)\n\n # metadata comparison\n if check_names:\n assert_attr_equal('names', left, right, obj=obj)\n if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):\n assert_attr_equal('freq', left, right, obj=obj)\n if (isinstance(left, pd.IntervalIndex) or\n isinstance(right, pd.IntervalIndex)):\n assert_attr_equal('closed', left, right, obj=obj)\n\n if check_categorical:\n if is_categorical_dtype(left) or is_categorical_dtype(right):\n assert_categorical_equal(left.values, right.values,\n obj='{obj} category'.format(obj=obj))\n\n\ndef assert_class_equal(left, right, exact=True, obj='Input'):\n \"\"\"checks classes are equal.\"\"\"\n\n def repr_class(x):\n if isinstance(x, Index):\n # 
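# The "values are different (pct %)" message assembled in
# assert_index_equal above comes from this computation (illustrative
# arrays):
import numpy as np

left = np.array([1, 2, 3, 4])
right = np.array([1, 2, 0, 4])
diff = np.sum((left != right).astype(int)) * 100.0 / len(left)
assert np.round(diff, 5) == 25.0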
return Index as it is to include values in the error message\n return x\n\n try:\n return x.__class__.__name__\n except AttributeError:\n return repr(type(x))\n\n if exact == 'equiv':\n if type(left) != type(right):\n # allow equivalence of Int64Index/RangeIndex\n types = set([type(left).__name__, type(right).__name__])\n if len(types - set(['Int64Index', 'RangeIndex'])):\n msg = '{obj} classes are not equivalent'.format(obj=obj)\n raise_assert_detail(obj, msg, repr_class(left),\n repr_class(right))\n elif exact:\n if type(left) != type(right):\n msg = '{obj} classes are different'.format(obj=obj)\n raise_assert_detail(obj, msg, repr_class(left),\n repr_class(right))\n\n\ndef assert_attr_equal(attr, left, right, obj='Attributes'):\n \"\"\"checks attributes are equal. Both objects must have attribute.\n\n Parameters\n ----------\n attr : str\n Attribute name being compared.\n left : object\n right : object\n obj : str, default 'Attributes'\n Specify object name being compared, internally used to show appropriate\n assertion message\n \"\"\"\n\n left_attr = getattr(left, attr)\n right_attr = getattr(right, attr)\n\n if left_attr is right_attr:\n return True\n elif (is_number(left_attr) and np.isnan(left_attr) and\n is_number(right_attr) and np.isnan(right_attr)):\n # np.nan\n return True\n\n try:\n result = left_attr == right_attr\n except TypeError:\n # datetimetz on rhs may raise TypeError\n result = False\n if not isinstance(result, bool):\n result = result.all()\n\n if result:\n return True\n else:\n msg = 'Attribute \"{attr}\" are different'.format(attr=attr)\n raise_assert_detail(obj, msg, left_attr, right_attr)\n\n\ndef assert_is_valid_plot_return_object(objs):\n import matplotlib.pyplot as plt\n if isinstance(objs, (pd.Series, np.ndarray)):\n for el in objs.ravel():\n msg = ('one of \\'objs\\' is not a matplotlib Axes instance, type '\n 'encountered {name!r}').format(name=el.__class__.__name__)\n assert isinstance(el, (plt.Axes, dict)), msg\n else:\n assert isinstance(objs, (plt.Artist, tuple, dict)), \\\n ('objs is neither an ndarray of Artist instances nor a '\n 'single Artist instance, tuple, or dict, \"objs\" is a {name!r}'\n ).format(name=objs.__class__.__name__)\n\n\ndef isiterable(obj):\n return hasattr(obj, '__iter__')\n\n\ndef is_sorted(seq):\n if isinstance(seq, (Index, Series)):\n seq = seq.values\n # sorting does not change precisions\n return assert_numpy_array_equal(seq, np.sort(np.array(seq)))\n\n\ndef assert_categorical_equal(left, right, check_dtype=True,\n obj='Categorical', check_category_order=True):\n \"\"\"Test that Categoricals are equivalent.\n\n Parameters\n ----------\n left, right : Categorical\n Categoricals to compare\n check_dtype : bool, default True\n Check that integer dtype of the codes are the same\n obj : str, default 'Categorical'\n Specify object name being compared, internally used to show appropriate\n assertion message\n check_category_order : bool, default True\n Whether the order of the categories should be compared, which\n implies identical integer codes. If False, only the resulting\n values are compared. 
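# assert_attr_equal above short-circuits when both attributes are NaN,
# because a plain comparison of NaN with NaN is False and would report a
# spurious difference:
import numpy as np

left_attr, right_attr = np.nan, float('nan')
assert left_attr != right_attr                       # plain comparison fails
assert np.isnan(left_attr) and np.isnan(right_attr)  # the check used instead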
The ordered attribute is\n checked regardless.\n \"\"\"\n _check_isinstance(left, right, Categorical)\n\n if check_category_order:\n assert_index_equal(left.categories, right.categories,\n obj='{obj}.categories'.format(obj=obj))\n assert_numpy_array_equal(left.codes, right.codes,\n check_dtype=check_dtype,\n obj='{obj}.codes'.format(obj=obj))\n else:\n assert_index_equal(left.categories.sort_values(),\n right.categories.sort_values(),\n obj='{obj}.categories'.format(obj=obj))\n assert_index_equal(left.categories.take(left.codes),\n right.categories.take(right.codes),\n obj='{obj}.values'.format(obj=obj))\n\n assert_attr_equal('ordered', left, right, obj=obj)\n\n\ndef raise_assert_detail(obj, message, left, right, diff=None):\n if isinstance(left, np.ndarray):\n left = pprint_thing(left)\n elif is_categorical_dtype(left):\n left = repr(left)\n if isinstance(right, np.ndarray):\n right = pprint_thing(right)\n elif is_categorical_dtype(right):\n right = repr(right)\n\n msg = \"\"\"{obj} are different\n\n{message}\n[left]: {left}\n[right]: {right}\"\"\".format(obj=obj, message=message, left=left, right=right)\n\n if diff is not None:\n msg += \"\\n[diff]: {diff}\".format(diff=diff)\n\n raise AssertionError(msg)\n\n\ndef assert_numpy_array_equal(left, right, strict_nan=False,\n check_dtype=True, err_msg=None,\n obj='numpy array', check_same=None):\n \"\"\" Checks that 'np.ndarray' is equivalent\n\n Parameters\n ----------\n left : np.ndarray or iterable\n right : np.ndarray or iterable\n strict_nan : bool, default False\n If True, consider NaN and None to be different.\n check_dtype: bool, default True\n check dtype if both a and b are np.ndarray\n err_msg : str, default None\n If provided, used as assertion message\n obj : str, default 'numpy array'\n Specify object name being compared, internally used to show appropriate\n assertion message\n check_same : None|'copy'|'same', default None\n Ensure left and right refer/do not refer to the same memory area\n \"\"\"\n\n # instance validation\n # Show a detailed error message when classes are different\n assert_class_equal(left, right, obj=obj)\n # both classes must be an np.ndarray\n _check_isinstance(left, right, np.ndarray)\n\n def _get_base(obj):\n return obj.base if getattr(obj, 'base', None) is not None else obj\n\n left_base = _get_base(left)\n right_base = _get_base(right)\n\n if check_same == 'same':\n if left_base is not right_base:\n msg = \"{left!r} is not {right!r}\".format(\n left=left_base, right=right_base)\n raise AssertionError(msg)\n elif check_same == 'copy':\n if left_base is right_base:\n msg = \"{left!r} is {right!r}\".format(\n left=left_base, right=right_base)\n raise AssertionError(msg)\n\n def _raise(left, right, err_msg):\n if err_msg is None:\n if left.shape != right.shape:\n raise_assert_detail(obj, '{obj} shapes are different'\n .format(obj=obj), left.shape, right.shape)\n\n diff = 0\n for l, r in zip(left, right):\n # count up differences\n if not array_equivalent(l, r, strict_nan=strict_nan):\n diff += 1\n\n diff = diff * 100.0 / left.size\n msg = '{obj} values are different ({pct} %)'.format(\n obj=obj, pct=np.round(diff, 5))\n raise_assert_detail(obj, msg, left, right)\n\n raise AssertionError(err_msg)\n\n # compare shape and values\n if not array_equivalent(left, right, strict_nan=strict_nan):\n _raise(left, right, err_msg)\n\n if check_dtype:\n if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):\n assert_attr_equal('dtype', left, right, obj=obj)\n\n return True\n\n\ndef 
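# assert_numpy_array_equal above in use: equal values with mismatched
# dtypes still fail, because the dtype attribute is compared after the
# values.
import numpy as np
import pandas.util.testing as tm

tm.assert_numpy_array_equal(np.array([1, 2]), np.array([1, 2]))
try:
    tm.assert_numpy_array_equal(np.array([1, 2]), np.array([1.0, 2.0]))
except AssertionError:
    pass  # int64 vs float64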
assert_extension_array_equal(left, right):\n \"\"\"Check that left and right ExtensionArrays are equal.\n\n Parameters\n ----------\n left, right : ExtensionArray\n The two arrays to compare\n\n Notes\n -----\n Missing values are checked separately from valid values.\n A mask of missing values is computed for each and checked to match.\n The remaining all-valid values are cast to object dtype and checked.\n \"\"\"\n assert isinstance(left, ExtensionArray)\n assert left.dtype == right.dtype\n left_na = left.isna()\n right_na = right.isna()\n assert_numpy_array_equal(left_na, right_na)\n\n left_valid = left[~left_na].astype(object)\n right_valid = right[~right_na].astype(object)\n\n assert_numpy_array_equal(left_valid, right_valid)\n\n\n# This could be refactored to use the NDFrame.equals method\ndef assert_series_equal(left, right, check_dtype=True,\n check_index_type='equiv',\n check_series_type=True,\n check_less_precise=False,\n check_names=True,\n check_exact=False,\n check_datetimelike_compat=False,\n check_categorical=True,\n obj='Series'):\n \"\"\"Check that left and right Series are equal.\n\n Parameters\n ----------\n left : Series\n right : Series\n check_dtype : bool, default True\n Whether to check the Series dtype is identical.\n check_index_type : bool / string {'equiv'}, default 'equiv'\n Whether to check the Index class, dtype and inferred_type\n are identical.\n check_series_type : bool, default True\n Whether to check the Series class is identical.\n check_less_precise : bool or int, default False\n Specify comparison precision. Only used when check_exact is False.\n 5 digits (False) or 3 digits (True) after decimal points are compared.\n If int, then specify the digits to compare\n check_exact : bool, default False\n Whether to compare number exactly.\n check_names : bool, default True\n Whether to check the Series and Index names attribute.\n check_datetimelike_compat : bool, default False\n Compare datetime-like which is comparable ignoring dtype.\n check_categorical : bool, default True\n Whether to compare internal Categorical exactly.\n obj : str, default 'Series'\n Specify object name being compared, internally used to show appropriate\n assertion message\n \"\"\"\n\n # instance validation\n _check_isinstance(left, right, Series)\n\n if check_series_type:\n # ToDo: There are some tests using rhs is sparse\n # lhs is dense. Should use assert_class_equal in future\n assert isinstance(left, type(right))\n # assert_class_equal(left, right, obj=obj)\n\n # length comparison\n if len(left) != len(right):\n msg1 = '{len}, {left}'.format(len=len(left), left=left.index)\n msg2 = '{len}, {right}'.format(len=len(right), right=right.index)\n raise_assert_detail(obj, 'Series length are different', msg1, msg2)\n\n # index comparison\n assert_index_equal(left.index, right.index, exact=check_index_type,\n check_names=check_names,\n check_less_precise=check_less_precise,\n check_exact=check_exact,\n check_categorical=check_categorical,\n obj='{obj}.index'.format(obj=obj))\n\n if check_dtype:\n # We want to skip exact dtype checking when `check_categorical`\n # is False. 
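# assert_extension_array_equal above compares the missing-value masks
# first and then the remaining valid values cast to object; the same idea
# demonstrated with plain Series (illustrative data):
import numpy as np
import pandas as pd

left = pd.Series([1.0, np.nan, 3.0])
right = pd.Series([1.0, np.nan, 3.0])
assert (left.isna() == right.isna()).all()
assert (left[~left.isna()].astype(object)
        == right[~right.isna()].astype(object)).all()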
We'll still raise if only one is a `Categorical`,\n # regardless of `check_categorical`\n if (is_categorical_dtype(left) and is_categorical_dtype(right) and\n not check_categorical):\n pass\n else:\n assert_attr_equal('dtype', left, right)\n\n if check_exact:\n assert_numpy_array_equal(left.get_values(), right.get_values(),\n check_dtype=check_dtype,\n obj='{obj}'.format(obj=obj),)\n elif check_datetimelike_compat:\n # we want to check only if we have compat dtypes\n # e.g. integer and M|m are NOT compat, but we can simply check\n # the values in that case\n if (is_datetimelike_v_numeric(left, right) or\n is_datetimelike_v_object(left, right) or\n needs_i8_conversion(left) or\n needs_i8_conversion(right)):\n\n # datetimelike may have different objects (e.g. datetime.datetime\n # vs Timestamp) but will compare equal\n if not Index(left.values).equals(Index(right.values)):\n msg = ('[datetimelike_compat=True] {left} is not equal to '\n '{right}.').format(left=left.values, right=right.values)\n raise AssertionError(msg)\n else:\n assert_numpy_array_equal(left.get_values(), right.get_values(),\n check_dtype=check_dtype)\n elif is_interval_dtype(left) or is_interval_dtype(right):\n # TODO: big hack here\n left = pd.IntervalIndex(left)\n right = pd.IntervalIndex(right)\n assert_index_equal(left, right, obj='{obj}.index'.format(obj=obj))\n\n else:\n _testing.assert_almost_equal(left.get_values(), right.get_values(),\n check_less_precise=check_less_precise,\n check_dtype=check_dtype,\n obj='{obj}'.format(obj=obj))\n\n # metadata comparison\n if check_names:\n assert_attr_equal('name', left, right, obj=obj)\n\n if check_categorical:\n if is_categorical_dtype(left) or is_categorical_dtype(right):\n assert_categorical_equal(left.values, right.values,\n obj='{obj} category'.format(obj=obj))\n\n\n# This could be refactored to use the NDFrame.equals method\ndef assert_frame_equal(left, right, check_dtype=True,\n check_index_type='equiv',\n check_column_type='equiv',\n check_frame_type=True,\n check_less_precise=False,\n check_names=True,\n by_blocks=False,\n check_exact=False,\n check_datetimelike_compat=False,\n check_categorical=True,\n check_like=False,\n obj='DataFrame'):\n \"\"\"Check that left and right DataFrame are equal.\n\n Parameters\n ----------\n left : DataFrame\n right : DataFrame\n check_dtype : bool, default True\n Whether to check the DataFrame dtype is identical.\n check_index_type : bool / string {'equiv'}, default False\n Whether to check the Index class, dtype and inferred_type\n are identical.\n check_column_type : bool / string {'equiv'}, default False\n Whether to check the columns class, dtype and inferred_type\n are identical.\n check_frame_type : bool, default False\n Whether to check the DataFrame class is identical.\n check_less_precise : bool or int, default False\n Specify comparison precision. Only used when check_exact is False.\n 5 digits (False) or 3 digits (True) after decimal points are compared.\n If int, then specify the digits to compare\n check_names : bool, default True\n Whether to check the Index names attribute.\n by_blocks : bool, default False\n Specify how to compare internal data. 
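# assert_series_equal above in use: by default dtypes must match, but
# check_dtype=False falls back to an approximate value comparison.
import pandas as pd
import pandas.util.testing as tm

tm.assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 3]))
tm.assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1.0, 2.0, 3.0]),
                       check_dtype=False)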
If False, compare by columns.\n If True, compare by blocks.\n check_exact : bool, default False\n Whether to compare number exactly.\n check_datetimelike_compat : bool, default False\n Compare datetime-like which is comparable ignoring dtype.\n check_categorical : bool, default True\n Whether to compare internal Categorical exactly.\n check_like : bool, default False\n If true, ignore the order of rows & columns\n obj : str, default 'DataFrame'\n Specify object name being compared, internally used to show appropriate\n assertion message\n \"\"\"\n\n # instance validation\n _check_isinstance(left, right, DataFrame)\n\n if check_frame_type:\n # ToDo: There are some tests using rhs is SparseDataFrame\n # lhs is DataFrame. Should use assert_class_equal in future\n assert isinstance(left, type(right))\n # assert_class_equal(left, right, obj=obj)\n\n # shape comparison\n if left.shape != right.shape:\n raise_assert_detail(obj,\n 'DataFrame shape mismatch',\n '{shape!r}'.format(shape=left.shape),\n '{shape!r}'.format(shape=right.shape))\n\n if check_like:\n left, right = left.reindex_like(right), right\n\n # index comparison\n assert_index_equal(left.index, right.index, exact=check_index_type,\n check_names=check_names,\n check_less_precise=check_less_precise,\n check_exact=check_exact,\n check_categorical=check_categorical,\n obj='{obj}.index'.format(obj=obj))\n\n # column comparison\n assert_index_equal(left.columns, right.columns, exact=check_column_type,\n check_names=check_names,\n check_less_precise=check_less_precise,\n check_exact=check_exact,\n check_categorical=check_categorical,\n obj='{obj}.columns'.format(obj=obj))\n\n # compare by blocks\n if by_blocks:\n rblocks = right._to_dict_of_blocks()\n lblocks = left._to_dict_of_blocks()\n for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):\n assert dtype in lblocks\n assert dtype in rblocks\n assert_frame_equal(lblocks[dtype], rblocks[dtype],\n check_dtype=check_dtype, obj='DataFrame.blocks')\n\n # compare by columns\n else:\n for i, col in enumerate(left.columns):\n assert col in right\n lcol = left.iloc[:, i]\n rcol = right.iloc[:, i]\n assert_series_equal(\n lcol, rcol, check_dtype=check_dtype,\n check_index_type=check_index_type,\n check_less_precise=check_less_precise,\n check_exact=check_exact, check_names=check_names,\n check_datetimelike_compat=check_datetimelike_compat,\n check_categorical=check_categorical,\n obj='DataFrame.iloc[:, {idx}]'.format(idx=i))\n\n\ndef assert_panel_equal(left, right,\n check_dtype=True,\n check_panel_type=False,\n check_less_precise=False,\n check_names=False,\n by_blocks=False,\n obj='Panel'):\n \"\"\"Check that left and right Panels are equal.\n\n Parameters\n ----------\n left : Panel (or nd)\n right : Panel (or nd)\n check_dtype : bool, default True\n Whether to check the Panel dtype is identical.\n check_panel_type : bool, default False\n Whether to check the Panel class is identical.\n check_less_precise : bool or int, default False\n Specify comparison precision. Only used when check_exact is False.\n 5 digits (False) or 3 digits (True) after decimal points are compared.\n If int, then specify the digits to compare\n check_names : bool, default True\n Whether to check the Index names attribute.\n by_blocks : bool, default False\n Specify how to compare internal data. 
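# check_like=True in assert_frame_equal above reindexes left like right,
# so frames that differ only in row/column order compare equal:
import pandas as pd
import pandas.util.testing as tm

a = pd.DataFrame({'x': [1, 2], 'y': [3, 4]}, index=['r1', 'r2'])
b = a.loc[['r2', 'r1'], ['y', 'x']]  # same content, shuffled
tm.assert_frame_equal(a, b, check_like=True)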
If False, compare by columns.\n If True, compare by blocks.\n obj : str, default 'Panel'\n Specify the object name being compared, internally used to show\n the appropriate assertion message.\n \"\"\"\n\n if check_panel_type:\n assert_class_equal(left, right, obj=obj)\n\n for axis in left._AXIS_ORDERS:\n left_ind = getattr(left, axis)\n right_ind = getattr(right, axis)\n assert_index_equal(left_ind, right_ind, check_names=check_names)\n\n if by_blocks:\n rblocks = right._to_dict_of_blocks()\n lblocks = left._to_dict_of_blocks()\n for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):\n assert dtype in lblocks\n assert dtype in rblocks\n array_equivalent(lblocks[dtype].values, rblocks[dtype].values)\n else:\n\n # can potentially be slow\n for i, item in enumerate(left._get_axis(0)):\n msg = \"non-matching item (right) '{item}'\".format(item=item)\n assert item in right, msg\n litem = left.iloc[i]\n ritem = right.iloc[i]\n assert_frame_equal(litem, ritem,\n check_less_precise=check_less_precise,\n check_names=check_names)\n\n for i, item in enumerate(right._get_axis(0)):\n msg = \"non-matching item (left) '{item}'\".format(item=item)\n assert item in left, msg\n\n\n# -----------------------------------------------------------------------------\n# Sparse\n\n\ndef assert_sp_array_equal(left, right, check_dtype=True):\n \"\"\"Check that the left and right SparseArray are equal.\n\n Parameters\n ----------\n left : SparseArray\n right : SparseArray\n check_dtype : bool, default True\n Whether to check the data dtype is identical.\n \"\"\"\n\n _check_isinstance(left, right, pd.SparseArray)\n\n assert_numpy_array_equal(left.sp_values, right.sp_values,\n check_dtype=check_dtype)\n\n # SparseIndex comparison\n assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)\n assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)\n\n if not left.sp_index.equals(right.sp_index):\n raise_assert_detail('SparseArray.index', 'index are not equal',\n left.sp_index, right.sp_index)\n\n assert_attr_equal('fill_value', left, right)\n if check_dtype:\n assert_attr_equal('dtype', left, right)\n assert_numpy_array_equal(left.values, right.values,\n check_dtype=check_dtype)\n\n\ndef assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,\n check_series_type=True, check_names=True,\n obj='SparseSeries'):\n \"\"\"Check that the left and right SparseSeries are equal.\n\n Parameters\n ----------\n left : SparseSeries\n right : SparseSeries\n check_dtype : bool, default True\n Whether to check the Series dtype is identical.\n exact_indices : bool, default True\n check_series_type : bool, default True\n Whether to check the SparseSeries class is identical.\n check_names : bool, default True\n Whether to check the SparseSeries name attribute.\n obj : str, default 'SparseSeries'\n Specify the object name being compared, internally used to show\n the appropriate assertion message.\n \"\"\"\n _check_isinstance(left, right, pd.SparseSeries)\n\n if check_series_type:\n assert_class_equal(left, right, obj=obj)\n\n assert_index_equal(left.index, right.index,\n obj='{obj}.index'.format(obj=obj))\n\n assert_sp_array_equal(left.block.values, right.block.values)\n\n if check_names:\n assert_attr_equal('name', left, right)\n if check_dtype:\n assert_attr_equal('dtype', left, right)\n\n assert_numpy_array_equal(left.values, right.values)\n\n\ndef assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,\n check_frame_type=True, obj='SparseDataFrame'):\n \"\"\"Check that the left 
and right SparseDataFrame are equal.\n\n Parameters\n ----------\n left : SparseDataFrame\n right : SparseDataFrame\n check_dtype : bool, default True\n Whether to check the Series dtype is identical.\n exact_indices : bool, default True\n SparseSeries SparseIndex objects must be exactly the same,\n otherwise just compare dense representations.\n check_frame_type : bool, default True\n Whether to check the SparseDataFrame class is identical.\n obj : str, default 'SparseDataFrame'\n Specify the object name being compared, internally used to show\n the appropriate assertion message.\n \"\"\"\n _check_isinstance(left, right, pd.SparseDataFrame)\n\n if check_frame_type:\n assert_class_equal(left, right, obj=obj)\n\n assert_index_equal(left.index, right.index,\n obj='{obj}.index'.format(obj=obj))\n assert_index_equal(left.columns, right.columns,\n obj='{obj}.columns'.format(obj=obj))\n\n for col, series in compat.iteritems(left):\n assert (col in right)\n # trade-off?\n\n if exact_indices:\n assert_sp_series_equal(series, right[col],\n check_dtype=check_dtype)\n else:\n assert_series_equal(series.to_dense(), right[col].to_dense(),\n check_dtype=check_dtype)\n\n assert_attr_equal('default_fill_value', left, right, obj=obj)\n\n # do I care?\n # assert(left.default_kind == right.default_kind)\n\n for col in right:\n assert (col in left)\n\n# -----------------------------------------------------------------------------\n# Others\n\n\ndef assert_contains_all(iterable, dic):\n for k in iterable:\n assert k in dic, \"Did not contain item: '{key!r}'\".format(key=k)\n\n\ndef assert_copy(iter1, iter2, **eql_kwargs):\n \"\"\"\n iter1, iter2: iterables that produce elements\n comparable with assert_almost_equal\n\n Checks that the elements are equal, but not\n the same object. 
(Does not check that items\n in sequences are also not the same object)\n \"\"\"\n for elem1, elem2 in zip(iter1, iter2):\n assert_almost_equal(elem1, elem2, **eql_kwargs)\n msg = (\"Expected object {obj1!r} and object {obj2!r} to be \"\n \"different objects, but they were the same object.\"\n ).format(obj1=type(elem1), obj2=type(elem2))\n assert elem1 is not elem2, msg\n\n\ndef getCols(k):\n return string.ascii_uppercase[:k]\n\n\ndef getArangeMat():\n return np.arange(N * K).reshape((N, K))\n\n\n# make index\ndef makeStringIndex(k=10, name=None):\n return Index(rands_array(nchars=10, size=k), name=name)\n\n\ndef makeUnicodeIndex(k=10, name=None):\n return Index(randu_array(nchars=10, size=k), name=name)\n\n\ndef makeCategoricalIndex(k=10, n=3, name=None, **kwargs):\n \"\"\" make a length k index or n categories \"\"\"\n x = rands_array(nchars=4, size=n)\n return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs)\n\n\ndef makeIntervalIndex(k=10, name=None, **kwargs):\n \"\"\" make a length k IntervalIndex \"\"\"\n x = np.linspace(0, 100, num=(k + 1))\n return IntervalIndex.from_breaks(x, name=name, **kwargs)\n\n\ndef makeBoolIndex(k=10, name=None):\n if k == 1:\n return Index([True], name=name)\n elif k == 2:\n return Index([False, True], name=name)\n return Index([False, True] + [False] * (k - 2), name=name)\n\n\ndef makeIntIndex(k=10, name=None):\n return Index(lrange(k), name=name)\n\n\ndef makeUIntIndex(k=10, name=None):\n return Index([2**63 + i for i in lrange(k)], name=name)\n\n\ndef makeRangeIndex(k=10, name=None, **kwargs):\n return RangeIndex(0, k, 1, name=name, **kwargs)\n\n\ndef makeFloatIndex(k=10, name=None):\n values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)\n return Index(values * (10 ** np.random.randint(0, 9)), name=name)\n\n\ndef makeDateIndex(k=10, freq='B', name=None, **kwargs):\n dt = datetime(2000, 1, 1)\n dr = bdate_range(dt, periods=k, freq=freq, name=name)\n return DatetimeIndex(dr, name=name, **kwargs)\n\n\ndef makeTimedeltaIndex(k=10, freq='D', name=None, **kwargs):\n return TimedeltaIndex(start='1 day', periods=k, freq=freq,\n name=name, **kwargs)\n\n\ndef makePeriodIndex(k=10, name=None, **kwargs):\n dt = datetime(2000, 1, 1)\n dr = PeriodIndex(start=dt, periods=k, freq='B', name=name, **kwargs)\n return dr\n\n\ndef makeMultiIndex(k=10, names=None, **kwargs):\n return MultiIndex.from_product(\n (('foo', 'bar'), (1, 2)), names=names, **kwargs)\n\n\ndef all_index_generator(k=10):\n \"\"\"Generator which can be iterated over to get instances of all the various\n index classes.\n\n Parameters\n ----------\n k: length of each of the index instances\n \"\"\"\n all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,\n makeUnicodeIndex, makeDateIndex, makePeriodIndex,\n makeTimedeltaIndex, makeBoolIndex, makeRangeIndex,\n makeIntervalIndex,\n makeCategoricalIndex]\n for make_index_func in all_make_index_funcs:\n yield make_index_func(k=k)\n\n\ndef index_subclass_makers_generator():\n make_index_funcs = [\n makeDateIndex, makePeriodIndex,\n makeTimedeltaIndex, makeRangeIndex,\n makeIntervalIndex, makeCategoricalIndex,\n makeMultiIndex\n ]\n for make_index_func in make_index_funcs:\n yield make_index_func\n\n\ndef all_timeseries_index_generator(k=10):\n \"\"\"Generator which can be iterated over to get instances of all the classes\n which represent time-seires.\n\n Parameters\n ----------\n k: length of each of the index instances\n \"\"\"\n make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]\n for 
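# makeDateIndex above is essentially a named business-day bdate_range;
# the k=5 case spelled out (the index name 'dates' is illustrative):
import pandas as pd
from datetime import datetime

dr = pd.bdate_range(datetime(2000, 1, 1), periods=5, freq='B')
idx = pd.DatetimeIndex(dr, name='dates')
assert len(idx) == 5 and idx.freqstr == 'B'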
make_index_func in make_index_funcs:\n yield make_index_func(k=k)\n\n\n# make series\ndef makeFloatSeries(name=None):\n index = makeStringIndex(N)\n return Series(randn(N), index=index, name=name)\n\n\ndef makeStringSeries(name=None):\n index = makeStringIndex(N)\n return Series(randn(N), index=index, name=name)\n\n\ndef makeObjectSeries(name=None):\n dateIndex = makeDateIndex(N)\n dateIndex = Index(dateIndex, dtype=object)\n index = makeStringIndex(N)\n return Series(dateIndex, index=index, name=name)\n\n\ndef getSeriesData():\n index = makeStringIndex(N)\n return {c: Series(randn(N), index=index) for c in getCols(K)}\n\n\ndef makeTimeSeries(nper=None, freq='B', name=None):\n if nper is None:\n nper = N\n return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)\n\n\ndef makePeriodSeries(nper=None, name=None):\n if nper is None:\n nper = N\n return Series(randn(nper), index=makePeriodIndex(nper), name=name)\n\n\ndef getTimeSeriesData(nper=None, freq='B'):\n return {c: makeTimeSeries(nper, freq) for c in getCols(K)}\n\n\ndef getPeriodData(nper=None):\n return {c: makePeriodSeries(nper) for c in getCols(K)}\n\n\n# make frame\ndef makeTimeDataFrame(nper=None, freq='B'):\n data = getTimeSeriesData(nper, freq)\n return DataFrame(data)\n\n\ndef makeDataFrame():\n data = getSeriesData()\n return DataFrame(data)\n\n\ndef getMixedTypeDict():\n index = Index(['a', 'b', 'c', 'd', 'e'])\n\n data = {\n 'A': [0., 1., 2., 3., 4.],\n 'B': [0., 1., 0., 1., 0.],\n 'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],\n 'D': bdate_range('1/1/2009', periods=5)\n }\n\n return index, data\n\n\ndef makeMixedDataFrame():\n return DataFrame(getMixedTypeDict()[1])\n\n\ndef makePeriodFrame(nper=None):\n data = getPeriodData(nper)\n return DataFrame(data)\n\n\ndef makePanel(nper=None):\n with warnings.catch_warnings(record=True):\n cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]\n data = {c: makeTimeDataFrame(nper) for c in cols}\n return Panel.fromDict(data)\n\n\ndef makePeriodPanel(nper=None):\n with warnings.catch_warnings(record=True):\n cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]\n data = {c: makePeriodFrame(nper) for c in cols}\n return Panel.fromDict(data)\n\n\ndef makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,\n idx_type=None):\n \"\"\"Create an index/multindex with given dimensions, levels, names, etc'\n\n nentries - number of entries in index\n nlevels - number of levels (> 1 produces multindex)\n prefix - a string prefix for labels\n names - (Optional), bool or list of strings. 
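# getSeriesData/makeDataFrame above build an N x K frame of random floats
# keyed by uppercase column letters; the same construction written inline
# (the row labels here are a simple stand-in for makeStringIndex):
import string
import numpy as np
import pandas as pd

N, K = 30, 4
index = pd.Index(['row%02d' % i for i in range(N)])
data = {c: pd.Series(np.random.randn(N), index=index)
        for c in string.ascii_uppercase[:K]}
df = pd.DataFrame(data)
assert df.shape == (N, K) and list(df.columns) == ['A', 'B', 'C', 'D']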
if True will use default\n names, if false will use no names, if a list is given, the name of\n each level in the index will be taken from the list.\n ndupe_l - (Optional), list of ints, the number of rows for which the\n label will repeated at the corresponding level, you can specify just\n the first few, the rest will use the default ndupe_l of 1.\n len(ndupe_l) <= nlevels.\n idx_type - \"i\"/\"f\"/\"s\"/\"u\"/\"dt\"/\"p\"/\"td\".\n If idx_type is not None, `idx_nlevels` must be 1.\n \"i\"/\"f\" creates an integer/float index,\n \"s\"/\"u\" creates a string/unicode index\n \"dt\" create a datetime index.\n \"td\" create a datetime index.\n\n if unspecified, string labels will be generated.\n \"\"\"\n\n if ndupe_l is None:\n ndupe_l = [1] * nlevels\n assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)\n assert (names is None or names is False or\n names is True or len(names) is nlevels)\n assert idx_type is None or \\\n (idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and nlevels == 1)\n\n if names is True:\n # build default names\n names = [prefix + str(i) for i in range(nlevels)]\n if names is False:\n # pass None to index constructor for no name\n names = None\n\n # make singelton case uniform\n if isinstance(names, compat.string_types) and nlevels == 1:\n names = [names]\n\n # specific 1D index type requested?\n idx_func = dict(i=makeIntIndex, f=makeFloatIndex,\n s=makeStringIndex, u=makeUnicodeIndex,\n dt=makeDateIndex, td=makeTimedeltaIndex,\n p=makePeriodIndex).get(idx_type)\n if idx_func:\n idx = idx_func(nentries)\n # but we need to fill in the name\n if names:\n idx.name = names[0]\n return idx\n elif idx_type is not None:\n raise ValueError('\"{idx_type}\" is not a legal value for `idx_type`, '\n 'use \"i\"/\"f\"/\"s\"/\"u\"/\"dt/\"p\"/\"td\".'\n .format(idx_type=idx_type))\n\n if len(ndupe_l) < nlevels:\n ndupe_l.extend([1] * (nlevels - len(ndupe_l)))\n assert len(ndupe_l) == nlevels\n\n assert all(x > 0 for x in ndupe_l)\n\n tuples = []\n for i in range(nlevels):\n def keyfunc(x):\n import re\n numeric_tuple = re.sub(r\"[^\\d_]_?\", \"\", x).split(\"_\")\n return lmap(int, numeric_tuple)\n\n # build a list of lists to create the index from\n div_factor = nentries // ndupe_l[i] + 1\n cnt = Counter()\n for j in range(div_factor):\n label = '{prefix}_l{i}_g{j}'.format(prefix=prefix, i=i, j=j)\n cnt[label] = ndupe_l[i]\n # cute Counter trick\n result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]\n tuples.append(result)\n\n tuples = lzip(*tuples)\n\n # convert tuples to index\n if nentries == 1:\n # we have a single level of tuples, i.e. a regular Index\n index = Index(tuples[0], name=names[0])\n elif nlevels == 1:\n name = None if names is None else names[0]\n index = Index((x[0] for x in tuples), name=name)\n else:\n index = MultiIndex.from_tuples(tuples, names=names)\n return index\n\n\ndef makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,\n c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None,\n c_ndupe_l=None, r_ndupe_l=None, dtype=None,\n c_idx_type=None, r_idx_type=None):\n \"\"\"\n nrows, ncols - number of data rows/cols\n c_idx_names, idx_names - False/True/list of strings, yields No names ,\n default names or uses the provided names for the levels of the\n corresponding index. You can provide a single string when\n c_idx_nlevels ==1.\n c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex\n r_idx_nlevels - number of levels in rows index. 
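# The "cute Counter trick" in makeCustomIndex above: assigning a count
# makes Counter.elements() repeat each label that many times, which is how
# duplicated index labels are produced (illustrative labels):
from collections import Counter

cnt = Counter()
cnt['R_l0_g0'] = 2
cnt['R_l0_g1'] = 2
labels = list(sorted(cnt.elements()))[:3]
assert labels == ['R_l0_g0', 'R_l0_g0', 'R_l0_g1']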
> 1 will yield MultiIndex\n data_gen_f - a function f(row,col) which return the data value\n at that position, the default generator used yields values of the form\n \"RxCy\" based on position.\n c_ndupe_l, r_ndupe_l - list of integers, determines the number\n of duplicates for each label at a given level of the corresponding\n index. The default `None` value produces a multiplicity of 1 across\n all levels, i.e. a unique index. Will accept a partial list of length\n N < idx_nlevels, for just the first N levels. If ndupe doesn't divide\n nrows/ncol, the last label might have lower multiplicity.\n dtype - passed to the DataFrame constructor as is, in case you wish to\n have more control in conjuncion with a custom `data_gen_f`\n r_idx_type, c_idx_type - \"i\"/\"f\"/\"s\"/\"u\"/\"dt\"/\"td\".\n If idx_type is not None, `idx_nlevels` must be 1.\n \"i\"/\"f\" creates an integer/float index,\n \"s\"/\"u\" creates a string/unicode index\n \"dt\" create a datetime index.\n \"td\" create a timedelta index.\n\n if unspecified, string labels will be generated.\n\n Examples:\n\n # 5 row, 3 columns, default names on both, single index on both axis\n >> makeCustomDataframe(5,3)\n\n # make the data a random int between 1 and 100\n >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))\n\n # 2-level multiindex on rows with each label duplicated\n # twice on first level, default names on both axis, single\n # index on both axis\n >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])\n\n # DatetimeIndex on row, index with unicode labels on columns\n # no names on either axis\n >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,\n r_idx_type=\"dt\",c_idx_type=\"u\")\n\n # 4-level multindex on rows with names provided, 2-level multindex\n # on columns with default labels and default names.\n >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,\n r_idx_names=[\"FEE\",\"FI\",\"FO\",\"FAM\"],\n c_idx_nlevels=2)\n\n >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)\n \"\"\"\n\n assert c_idx_nlevels > 0\n assert r_idx_nlevels > 0\n assert r_idx_type is None or \\\n (r_idx_type in ('i', 'f', 's',\n 'u', 'dt', 'p', 'td') and r_idx_nlevels == 1)\n assert c_idx_type is None or \\\n (c_idx_type in ('i', 'f', 's',\n 'u', 'dt', 'p', 'td') and c_idx_nlevels == 1)\n\n columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',\n names=c_idx_names, ndupe_l=c_ndupe_l,\n idx_type=c_idx_type)\n index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R',\n names=r_idx_names, ndupe_l=r_ndupe_l,\n idx_type=r_idx_type)\n\n # by default, generate data based on location\n if data_gen_f is None:\n data_gen_f = lambda r, c: \"R{rows}C{cols}\".format(rows=r, cols=c)\n\n data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]\n\n return DataFrame(data, index, columns, dtype=dtype)\n\n\ndef _create_missing_idx(nrows, ncols, density, random_state=None):\n if random_state is None:\n random_state = np.random\n else:\n random_state = np.random.RandomState(random_state)\n\n # below is cribbed from scipy.sparse\n size = int(np.round((1 - density) * nrows * ncols))\n # generate a few more to ensure unique values\n min_rows = 5\n fac = 1.02\n extra_size = min(size + min_rows, fac * size)\n\n def _gen_unique_rand(rng, _extra_size):\n ind = rng.rand(int(_extra_size))\n return np.unique(np.floor(ind * nrows * ncols))[:size]\n\n ind = _gen_unique_rand(random_state, extra_size)\n while ind.size < size:\n extra_size *= 1.05\n ind = _gen_unique_rand(random_state, extra_size)\n\n j = np.floor(ind * 1. 
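# Running the first docstring example above, assuming this module is
# importable as pandas.util.testing:
import pandas.util.testing as tm

df = tm.makeCustomDataframe(5, 3)
assert df.shape == (5, 3)
assert df.iloc[0, 0] == 'R0C0'  # default data_gen_f yields "RxCy" strings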
/ nrows).astype(int)\n i = (ind - j * nrows).astype(int)\n return i.tolist(), j.tolist()\n\n\ndef makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None,\n c_idx_names=True, r_idx_names=True,\n c_idx_nlevels=1, r_idx_nlevels=1,\n data_gen_f=None,\n c_ndupe_l=None, r_ndupe_l=None, dtype=None,\n c_idx_type=None, r_idx_type=None):\n \"\"\"\n Parameters\n ----------\n Density : float, optional\n Float in (0, 1) that gives the percentage of non-missing numbers in\n the DataFrame.\n random_state : {np.random.RandomState, int}, optional\n Random number generator or random seed.\n\n See makeCustomDataframe for descriptions of the rest of the parameters.\n \"\"\"\n df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names,\n r_idx_names=r_idx_names,\n c_idx_nlevels=c_idx_nlevels,\n r_idx_nlevels=r_idx_nlevels,\n data_gen_f=data_gen_f,\n c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l,\n dtype=dtype, c_idx_type=c_idx_type,\n r_idx_type=r_idx_type)\n\n i, j = _create_missing_idx(nrows, ncols, density, random_state)\n df.values[i, j] = np.nan\n return df\n\n\ndef makeMissingDataframe(density=.9, random_state=None):\n df = makeDataFrame()\n i, j = _create_missing_idx(*df.shape, density=density,\n random_state=random_state)\n df.values[i, j] = np.nan\n return df\n\n\ndef add_nans(panel):\n I, J, N = panel.shape\n for i, item in enumerate(panel.items):\n dm = panel[item]\n for j, col in enumerate(dm.columns):\n dm[col][:i + j] = np.NaN\n return panel\n\n\ndef add_nans_panel4d(panel4d):\n for l, label in enumerate(panel4d.labels):\n panel = panel4d[label]\n add_nans(panel)\n return panel4d\n\n\nclass TestSubDict(dict):\n\n def __init__(self, *args, **kwargs):\n dict.__init__(self, *args, **kwargs)\n\n\ndef optional_args(decorator):\n \"\"\"allows a decorator to take optional positional and keyword arguments.\n Assumes that taking a single, callable, positional argument means that\n it is decorating a function, i.e. 
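# The density arithmetic in _create_missing_idx above: density=0.9 on a
# 30 x 4 frame blanks ~12 of 120 cells; unique flat positions are split
# into row/column indices column-major. np.random.choice stands in here
# for the rejection loop used above.
import numpy as np

nrows, ncols, density = 30, 4, 0.9
size = int(np.round((1 - density) * nrows * ncols))
assert size == 12
flat = np.random.choice(nrows * ncols, size=size, replace=False)
j = (flat // nrows).astype(int)
i = (flat - j * nrows).astype(int)
assert i.max() < nrows and j.max() < ncols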
something like this::\n\n @my_decorator\n def function(): pass\n\n Calls decorator with decorator(f, *args, **kwargs)\"\"\"\n\n @wraps(decorator)\n def wrapper(*args, **kwargs):\n def dec(f):\n return decorator(f, *args, **kwargs)\n\n is_decorating = not kwargs and len(args) == 1 and callable(args[0])\n if is_decorating:\n f = args[0]\n args = []\n return dec(f)\n else:\n return dec\n\n return wrapper\n\n\n# skip tests on exceptions with this message\n_network_error_messages = (\n # 'urlopen error timed out',\n # 'timeout: timed out',\n # 'socket.timeout: timed out',\n 'timed out',\n 'Server Hangup',\n 'HTTP Error 503: Service Unavailable',\n '502: Proxy Error',\n 'HTTP Error 502: internal error',\n 'HTTP Error 502',\n 'HTTP Error 503',\n 'HTTP Error 403',\n 'HTTP Error 400',\n 'Temporary failure in name resolution',\n 'Name or service not known',\n 'Connection refused',\n 'certificate verify',\n)\n\n# or this e.errno/e.reason.errno\n_network_errno_vals = (\n 101, # Network is unreachable\n 111, # Connection refused\n 110, # Connection timed out\n 104, # Connection reset Error\n 54, # Connection reset by peer\n 60, # urllib.error.URLError: [Errno 60] Connection timed out\n)\n\n# Both of the above shouldn't mask real issues such as 404's\n# or refused connections (changed DNS).\n# But some tests (test_data yahoo) contact incredibly flakey\n# servers.\n\n# and conditionally raise on these exception types\n_network_error_classes = (IOError, httplib.HTTPException)\n\nif sys.version_info >= (3, 3):\n _network_error_classes += (TimeoutError,) # noqa\n\n\ndef can_connect(url, error_classes=_network_error_classes):\n \"\"\"Try to connect to the given url. True if succeeds, False if IOError\n raised\n\n Parameters\n ----------\n url : basestring\n The URL to try to connect to\n\n Returns\n -------\n connectable : bool\n Return True if no IOError (unable to connect) or URLError (bad url) was\n raised\n \"\"\"\n try:\n with urlopen(url):\n pass\n except error_classes:\n return False\n else:\n return True\n\n\n@optional_args\ndef network(t, url=\"http://www.google.com\",\n raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,\n check_before_test=False,\n error_classes=_network_error_classes,\n skip_errnos=_network_errno_vals,\n _skip_on_messages=_network_error_messages,\n ):\n \"\"\"\n Label a test as requiring network connection and, if an error is\n encountered, only raise if it does not find a network connection.\n\n In comparison to ``network``, this assumes an added contract to your test:\n you must assert that, under normal conditions, your test will ONLY fail if\n it does not have network connectivity.\n\n You can call this in 3 ways: as a standard decorator, with keyword\n arguments, or with a positional argument that is the url to check.\n\n Parameters\n ----------\n t : callable\n The test requiring network connectivity.\n url : path\n The url to test via ``pandas.io.common.urlopen`` to check\n for connectivity. Defaults to 'http://www.google.com'.\n raise_on_error : bool\n If True, never catches errors.\n check_before_test : bool\n If True, checks connectivity before running the test case.\n error_classes : tuple or Exception\n error classes to ignore. If not in ``error_classes``, raises the error.\n defaults to IOError. 
Be careful about changing the error classes here.\n    skip_errnos : iterable of int\n        Any exception that has .errno or .reason.errno set to one\n        of these values will be skipped with an appropriate\n        message.\n    _skip_on_messages: iterable of string\n        any exception e for which one of the strings is\n        a substring of str(e) will be skipped with an appropriate\n        message. Intended to suppress errors where an errno isn't available.\n\n    Notes\n    -----\n    * ``raise_on_error`` supersedes ``check_before_test``\n\n    Returns\n    -------\n    t : callable\n        The decorated test ``t``, with checks for connectivity errors.\n\n    Example\n    -------\n\n    Tests decorated with @network will fail if it's possible to make a network\n    connection to another URL (defaults to google.com)::\n\n      >>> from pandas.util.testing import network\n      >>> from pandas.io.common import urlopen\n      >>> @network\n      ... def test_network():\n      ...     with urlopen(\"rabbit://bonanza.com\"):\n      ...         pass\n      Traceback\n         ...\n      URLError: <urlopen error unknown url type: rabbit>\n\n    You can specify alternative URLs::\n\n        >>> @network(\"http://www.yahoo.com\")\n        ... def test_something_with_yahoo():\n        ...     raise IOError(\"Failure Message\")\n        >>> test_something_with_yahoo()\n        Traceback (most recent call last):\n            ...\n        IOError: Failure Message\n\n    If you set check_before_test, it will check the url first and not run the\n    test on failure::\n\n        >>> @network(\"failing://url.blaher\", check_before_test=True)\n        ... def test_something():\n        ...     print(\"I ran!\")\n        ...     raise ValueError(\"Failure\")\n        >>> test_something()\n        Traceback (most recent call last):\n            ...\n\n    Errors not related to networking will always be raised.\n    \"\"\"\n    from pytest import skip\n    t.network = True\n\n    @compat.wraps(t)\n    def wrapper(*args, **kwargs):\n        if check_before_test and not raise_on_error:\n            if not can_connect(url, error_classes):\n                skip()\n        try:\n            return t(*args, **kwargs)\n        except Exception as e:\n            errno = getattr(e, 'errno', None)\n            if not errno and hasattr(e, \"reason\"):\n                errno = getattr(e.reason, 'errno', None)\n\n            if errno in skip_errnos:\n                skip(\"Skipping test due to known errno\"\n                     \" and error {error}\".format(error=e))\n\n            try:\n                e_str = traceback.format_exc(e)\n            except Exception:\n                e_str = str(e)\n\n            if any(m.lower() in e_str.lower() for m in _skip_on_messages):\n                skip(\"Skipping test because exception \"\n                     \"message is known and error {error}\".format(error=e))\n\n            if not isinstance(e, error_classes):\n                raise\n\n            if raise_on_error or can_connect(url, error_classes):\n                raise\n            else:\n                skip(\"Skipping test due to lack of connectivity\"\n                     \" and error {error}\".format(error=e))\n\n    return wrapper\n\n\nwith_connectivity_check = network\n\n\nclass SimpleMock(object):\n\n    \"\"\"\n    Poor man's mocking object\n\n    Note: only works for new-style classes, assumes __getattribute__ exists.\n\n    >>> a = type(\"Duck\",(),{})\n    >>> a.attr1,a.attr2 =\"fizz\",\"buzz\"\n    >>> b = SimpleMock(a,\"attr1\",\"bar\")\n    >>> b.attr1 == \"bar\" and b.attr2 == \"buzz\"\n    True\n    >>> a.attr1 == \"fizz\" and a.attr2 == \"buzz\"\n    True\n    \"\"\"\n\n    def __init__(self, obj, *args, **kwds):\n        assert(len(args) % 2 == 0)\n        attrs = kwds.get(\"attrs\", {})\n        for k, v in zip(args[::2], args[1::2]):\n            # dict comprehensions break 2.6\n            attrs[k] = v\n        self.attrs = attrs\n        self.obj = obj\n\n    def __getattribute__(self, name):\n        attrs = object.__getattribute__(self, \"attrs\")\n        obj = object.__getattribute__(self, \"obj\")\n        return attrs.get(name, type(obj).__getattribute__(obj, name))\n\n\n@contextmanager\ndef 
stdin_encoding(encoding=None):\n \"\"\"\n Context manager for running bits of code while emulating an arbitrary\n stdin encoding.\n\n >>> import sys\n >>> _encoding = sys.stdin.encoding\n >>> with stdin_encoding('AES'): sys.stdin.encoding\n 'AES'\n >>> sys.stdin.encoding==_encoding\n True\n\n \"\"\"\n import sys\n\n _stdin = sys.stdin\n sys.stdin = SimpleMock(sys.stdin, \"encoding\", encoding)\n yield\n sys.stdin = _stdin\n\n\ndef assert_raises_regex(_exception, _regexp, _callable=None,\n *args, **kwargs):\n r\"\"\"\n Check that the specified Exception is raised and that the error message\n matches a given regular expression pattern. This may be a regular\n expression object or a string containing a regular expression suitable\n for use by `re.search()`. This is a port of the `assertRaisesRegexp`\n function from unittest in Python 2.7.\n\n Examples\n --------\n >>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ')\n >>> import re\n >>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ')\n\n If an exception of a different type is raised, it bubbles up.\n\n >>> assert_raises_regex(TypeError, 'literal', int, 'XYZ')\n Traceback (most recent call last):\n ...\n ValueError: invalid literal for int() with base 10: 'XYZ'\n >>> dct = dict()\n >>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple')\n Traceback (most recent call last):\n ...\n AssertionError: \"pear\" does not match \"'apple'\"\n\n You can also use this in a with statement.\n >>> with assert_raises_regex(TypeError, 'unsupported operand type\\(s\\)'):\n ... 1 + {}\n >>> with assert_raises_regex(TypeError, 'banana'):\n ... 'apple'[0] = 'b'\n Traceback (most recent call last):\n ...\n AssertionError: \"banana\" does not match \"'str' object does not support \\\nitem assignment\"\n \"\"\"\n manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)\n if _callable is not None:\n with manager:\n _callable(*args, **kwargs)\n else:\n return manager\n\n\nclass _AssertRaisesContextmanager(object):\n \"\"\"\n Context manager behind `assert_raises_regex`.\n \"\"\"\n\n def __init__(self, exception, regexp=None):\n \"\"\"\n Initialize an _AssertRaisesContextManager instance.\n\n Parameters\n ----------\n exception : class\n The expected Exception class.\n regexp : str, default None\n The regex to compare against the Exception message.\n \"\"\"\n\n self.exception = exception\n\n if regexp is not None and not hasattr(regexp, \"search\"):\n regexp = re.compile(regexp, re.DOTALL)\n\n self.regexp = regexp\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, trace_back):\n expected = self.exception\n\n if not exc_type:\n exp_name = getattr(expected, \"__name__\", str(expected))\n raise AssertionError(\"{name} not raised.\".format(name=exp_name))\n\n return self.exception_matches(exc_type, exc_value, trace_back)\n\n def exception_matches(self, exc_type, exc_value, trace_back):\n \"\"\"\n Check that the Exception raised matches the expected Exception\n and expected error message regular expression.\n\n Parameters\n ----------\n exc_type : class\n The type of Exception raised.\n exc_value : Exception\n The instance of `exc_type` raised.\n trace_back : stack trace object\n The traceback object associated with `exc_value`.\n\n Returns\n -------\n is_matched : bool\n Whether or not the Exception raised matches the expected\n Exception class and expected error message regular expression.\n\n Raises\n ------\n AssertionError : The error message provided does not 
match\n            the expected error message regular expression.\n        \"\"\"\n\n        if issubclass(exc_type, self.exception):\n            if self.regexp is not None:\n                val = str(exc_value)\n\n                if not self.regexp.search(val):\n                    msg = '\"{pat}\" does not match \"{val}\"'.format(\n                        pat=self.regexp.pattern, val=val)\n                    e = AssertionError(msg)\n                    raise_with_traceback(e, trace_back)\n\n            return True\n        else:\n            # Failed, so allow Exception to bubble up.\n            return False\n\n\n@contextmanager\ndef assert_produces_warning(expected_warning=Warning, filter_level=\"always\",\n                            clear=None, check_stacklevel=True):\n    \"\"\"\n    Context manager for running code expected to either raise a specific\n    warning, or not raise any warnings. Verifies that the code raises the\n    expected warning, and that it does not raise any other unexpected\n    warnings. It is basically a wrapper around ``warnings.catch_warnings``.\n\n    Parameters\n    ----------\n    expected_warning : {Warning, False, None}, default Warning\n        The type of Exception raised. ``exception.Warning`` is the base\n        class for all warnings. To check that no warning is returned,\n        specify ``False`` or ``None``.\n    filter_level : str, default \"always\"\n        Specifies whether warnings are ignored, displayed, or turned\n        into errors.\n        Valid values are:\n\n        * \"error\" - turns matching warnings into exceptions\n        * \"ignore\" - discard the warning\n        * \"always\" - always emit a warning\n        * \"default\" - print the warning the first time it is generated\n          from each location\n        * \"module\" - print the warning the first time it is generated\n          from each module\n        * \"once\" - print the warning the first time it is generated\n\n    clear : str, default None\n        If not ``None`` then remove any previously raised warnings from\n        the ``__warningregistry__`` to ensure that no warning messages are\n        suppressed by this context manager. If ``None`` is specified,\n        the ``__warningregistry__`` keeps track of which warnings have been\n        shown, and does not show them again.\n    check_stacklevel : bool, default True\n        If True, displays the line that called the function containing\n        the warning to show where the function is called. Otherwise, the\n        line that implements the function is displayed.\n\n    Examples\n    --------\n    >>> import warnings\n    >>> with assert_produces_warning():\n    ...     warnings.warn(UserWarning())\n    ...\n    >>> with assert_produces_warning(False):\n    ...     warnings.warn(RuntimeWarning())\n    ...\n    Traceback (most recent call last):\n        ...\n    AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].\n    >>> with assert_produces_warning(UserWarning):\n    ...     
warnings.warn(RuntimeWarning())\n    Traceback (most recent call last):\n        ...\n    AssertionError: Did not see expected warning of class 'UserWarning'.\n\n    .. warning:: This is *not* thread-safe.\n    \"\"\"\n    with warnings.catch_warnings(record=True) as w:\n\n        if clear is not None:\n            # make sure that we are clearing these warnings\n            # if they have happened before\n            # to guarantee that we will catch them\n            if not is_list_like(clear):\n                clear = [clear]\n            for m in clear:\n                try:\n                    m.__warningregistry__.clear()\n                except Exception:\n                    pass\n\n        saw_warning = False\n        warnings.simplefilter(filter_level)\n        yield w\n        extra_warnings = []\n\n        for actual_warning in w:\n            if (expected_warning and issubclass(actual_warning.category,\n                                                expected_warning)):\n                saw_warning = True\n\n                if check_stacklevel and issubclass(actual_warning.category,\n                                                   (FutureWarning,\n                                                    DeprecationWarning)):\n                    from inspect import getframeinfo, stack\n                    caller = getframeinfo(stack()[2][0])\n                    msg = (\"Warning not set with correct stacklevel. \"\n                           \"File where warning is raised: {actual} != \"\n                           \"{caller}. Warning message: {message}\"\n                           ).format(actual=actual_warning.filename,\n                                    caller=caller.filename,\n                                    message=actual_warning.message)\n                    assert actual_warning.filename == caller.filename, msg\n            else:\n                extra_warnings.append(actual_warning.category.__name__)\n        if expected_warning:\n            msg = \"Did not see expected warning of class {name!r}.\".format(\n                name=expected_warning.__name__)\n            assert saw_warning, msg\n        assert not extra_warnings, (\"Caused unexpected warning(s): {extra!r}.\"\n                                    ).format(extra=extra_warnings)\n\n\nclass RNGContext(object):\n    \"\"\"\n    Context manager to set the numpy random number generator seed. Returns\n    to the original value upon exiting the context manager.\n\n    Parameters\n    ----------\n    seed : int\n        Seed for numpy.random.seed\n\n    Examples\n    --------\n\n    with RNGContext(42):\n        np.random.randn()\n    \"\"\"\n\n    def __init__(self, seed):\n        self.seed = seed\n\n    def __enter__(self):\n\n        self.start_state = np.random.get_state()\n        np.random.seed(self.seed)\n\n    def __exit__(self, exc_type, exc_value, traceback):\n\n        np.random.set_state(self.start_state)\n\n\n@contextmanager\ndef use_numexpr(use, min_elements=None):\n    from pandas.core.computation import expressions as expr\n    if min_elements is None:\n        min_elements = expr._MIN_ELEMENTS\n\n    olduse = expr._USE_NUMEXPR\n    oldmin = expr._MIN_ELEMENTS\n    expr.set_use_numexpr(use)\n    expr._MIN_ELEMENTS = min_elements\n    yield\n    expr._MIN_ELEMENTS = oldmin\n    expr.set_use_numexpr(olduse)\n\n\ndef test_parallel(num_threads=2, kwargs_list=None):\n    \"\"\"Decorator to run the same function multiple times in parallel.\n\n    Parameters\n    ----------\n    num_threads : int, optional\n        The number of times the function is run in parallel.\n    kwargs_list : list of dicts, optional\n        The list of kwargs to update original\n        function kwargs on different threads.\n\n    Notes\n    -----\n    This decorator does not pass the return value of the decorated function.\n\n    Original from scikit-image:\n\n    https://github.com/scikit-image/scikit-image/pull/1519\n\n    \"\"\"\n\n    assert num_threads > 0\n    has_kwargs_list = kwargs_list is not None\n    if has_kwargs_list:\n        assert len(kwargs_list) == num_threads\n    import threading\n\n    def wrapper(func):\n        @wraps(func)\n        def inner(*args, **kwargs):\n            if has_kwargs_list:\n                update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])\n            else:\n                update_kwargs = lambda i: kwargs\n            threads = []\n            for i in range(num_threads):\n                updated_kwargs = update_kwargs(i)\n                thread = threading.Thread(target=func, args=args,\n                                          
kwargs=updated_kwargs)\n                threads.append(thread)\n            for thread in threads:\n                thread.start()\n            for thread in threads:\n                thread.join()\n        return inner\n    return wrapper\n\n\nclass SubclassedSeries(Series):\n    _metadata = ['testattr', 'name']\n\n    @property\n    def _constructor(self):\n        return SubclassedSeries\n\n    @property\n    def _constructor_expanddim(self):\n        return SubclassedDataFrame\n\n\nclass SubclassedDataFrame(DataFrame):\n    _metadata = ['testattr']\n\n    @property\n    def _constructor(self):\n        return SubclassedDataFrame\n\n    @property\n    def _constructor_sliced(self):\n        return SubclassedSeries\n\n\nclass SubclassedSparseSeries(pd.SparseSeries):\n    _metadata = ['testattr']\n\n    @property\n    def _constructor(self):\n        return SubclassedSparseSeries\n\n    @property\n    def _constructor_expanddim(self):\n        return SubclassedSparseDataFrame\n\n\nclass SubclassedSparseDataFrame(pd.SparseDataFrame):\n    _metadata = ['testattr']\n\n    @property\n    def _constructor(self):\n        return SubclassedSparseDataFrame\n\n    @property\n    def _constructor_sliced(self):\n        return SubclassedSparseSeries\n\n\nclass SubclassedCategorical(Categorical):\n\n    @property\n    def _constructor(self):\n        return SubclassedCategorical\n\n\n@contextmanager\ndef patch(ob, attr, value):\n    \"\"\"Temporarily patch an attribute of an object.\n\n    Parameters\n    ----------\n    ob : any\n        The object to patch. This must support attribute assignment for `attr`.\n    attr : str\n        The name of the attribute to patch.\n    value : any\n        The temporary attribute to assign.\n\n    Examples\n    --------\n    >>> class C(object):\n    ...     attribute = 'original'\n    ...\n    >>> C.attribute\n    'original'\n    >>> with patch(C, 'attribute', 'patched'):\n    ...     in_context = C.attribute\n    ...\n    >>> in_context\n    'patched'\n    >>> C.attribute  # the value is reset when the context manager exits\n    'original'\n\n    Correctly replaces attribute when the manager exits with an exception.\n    >>> with patch(C, 'attribute', 'patched'):\n    ...     in_context = C.attribute\n    ...     raise ValueError()\n    Traceback (most recent call last):\n       ...\n    ValueError\n    >>> in_context\n    'patched'\n    >>> C.attribute\n    'original'\n    \"\"\"\n    noattr = object()  # mark that the attribute never existed\n    old = getattr(ob, attr, noattr)\n    setattr(ob, attr, value)\n    try:\n        yield\n    finally:\n        if old is noattr:\n            delattr(ob, attr)\n        else:\n            setattr(ob, attr, old)\n\n\n@contextmanager\ndef set_timezone(tz):\n    \"\"\"Context manager for temporarily setting a timezone.\n\n    Parameters\n    ----------\n    tz : str\n        A string representing a valid timezone.\n\n    Examples\n    --------\n\n    >>> from datetime import datetime\n    >>> from dateutil.tz import tzlocal\n    >>> tzlocal().tzname(datetime.now())\n    'IST'\n\n    >>> with set_timezone('US/Eastern'):\n    ...     
tzlocal().tzname(datetime.now())\n ...\n 'EDT'\n \"\"\"\n\n import os\n import time\n\n def setTZ(tz):\n if tz is None:\n try:\n del os.environ['TZ']\n except KeyError:\n pass\n else:\n os.environ['TZ'] = tz\n time.tzset()\n\n orig_tz = os.environ.get('TZ')\n setTZ(tz)\n try:\n yield\n finally:\n setTZ(orig_tz)\n\n\ndef _make_skipna_wrapper(alternative, skipna_alternative=None):\n \"\"\"Create a function for calling on an array.\n\n Parameters\n ----------\n alternative : function\n The function to be called on the array with no NaNs.\n Only used when 'skipna_alternative' is None.\n skipna_alternative : function\n The function to be called on the original array\n\n Returns\n -------\n skipna_wrapper : function\n \"\"\"\n if skipna_alternative:\n def skipna_wrapper(x):\n return skipna_alternative(x.values)\n else:\n def skipna_wrapper(x):\n nona = x.dropna()\n if len(nona) == 0:\n return np.nan\n return alternative(nona)\n\n return skipna_wrapper\n",
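The testing module above is largely self-documenting, but a few of its helpers (notably test_parallel and the makeMissing* generators) ship without usage examples. The following is a minimal editorial sketch, not part of the original file: it assumes the module is importable as pandas.util.testing (true for pandas of this vintage), and the shapes, seeds, and thread counts are arbitrary illustrations.

import warnings

import numpy as np
import pandas.util.testing as tm

# Seed numpy's global RNG for the duration of the block; RNGContext
# restores the prior RNG state on exit.
with tm.RNGContext(42):
    noise = np.random.randn(5)

# 5x3 frame with a 2-level row MultiIndex whose first-level labels each
# repeat twice (mirrors the makeCustomDataframe docstring example above).
df = tm.makeCustomDataframe(5, 3, r_idx_nlevels=2, r_ndupe_l=[2])

# Same generator, but roughly 20% of the cells are knocked out to NaN;
# random_state makes the missing pattern reproducible.
holey = tm.makeMissingCustomDataframe(5, 3, density=0.8, random_state=42)

# Fails if the block emits no UserWarning, or emits any unexpected warning.
with tm.assert_produces_warning(UserWarning):
    warnings.warn("heads up", UserWarning)

# Run one test body on two threads at once; return values are discarded,
# so the body must do its own asserting.
@tm.test_parallel(num_threads=2)
def hammer_string_index():
    assert len(tm.makeStringIndex(10)) == 10

hammer_string_index()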
"# -*- coding: utf-8 -*-\n\nimport pytest\n\nfrom datetime import datetime, timedelta\n\nfrom collections import defaultdict\n\nimport pandas.util.testing as tm\nfrom pandas.core.dtypes.generic import ABCIndex\nfrom pandas.core.dtypes.common import is_unsigned_integer_dtype\nfrom pandas.core.indexes.api import Index, MultiIndex\nfrom pandas.tests.indexes.common import Base\n\nfrom pandas.compat import (range, lrange, lzip, u,\n text_type, zip, PY3, PY36, PYPY)\nimport operator\nimport numpy as np\n\nfrom pandas import (period_range, date_range, Series,\n DataFrame, Float64Index, Int64Index, UInt64Index,\n CategoricalIndex, DatetimeIndex, TimedeltaIndex,\n PeriodIndex, RangeIndex, isna)\nfrom pandas.core.index import _get_combined_index, _ensure_index_from_sequences\nfrom pandas.util.testing import assert_almost_equal\nfrom pandas.compat.numpy import np_datetime64_compat\n\nimport pandas.core.config as cf\n\nfrom pandas.core.indexes.datetimes import _to_m8\n\nimport pandas as pd\nfrom pandas._libs.tslib import Timestamp\n\n\nclass TestIndex(Base):\n _holder = Index\n\n def setup_method(self, method):\n self.indices = dict(unicodeIndex=tm.makeUnicodeIndex(100),\n strIndex=tm.makeStringIndex(100),\n dateIndex=tm.makeDateIndex(100),\n periodIndex=tm.makePeriodIndex(100),\n tdIndex=tm.makeTimedeltaIndex(100),\n intIndex=tm.makeIntIndex(100),\n uintIndex=tm.makeUIntIndex(100),\n rangeIndex=tm.makeRangeIndex(100),\n floatIndex=tm.makeFloatIndex(100),\n boolIndex=Index([True, False]),\n catIndex=tm.makeCategoricalIndex(100),\n empty=Index([]),\n tuples=MultiIndex.from_tuples(lzip(\n ['foo', 'bar', 'baz'], [1, 2, 3])),\n repeats=Index([0, 0, 1, 1, 2, 2]))\n self.setup_indices()\n\n def create_index(self):\n return Index(list('abcde'))\n\n def generate_index_types(self, skip_index_keys=[]):\n \"\"\"\n Return a generator of the various index types, leaving\n out the ones with a key in skip_index_keys\n \"\"\"\n for key, idx in self.indices.items():\n if key not in skip_index_keys:\n yield key, idx\n\n def test_new_axis(self):\n new_index = self.dateIndex[None, :]\n assert new_index.ndim == 2\n assert isinstance(new_index, np.ndarray)\n\n def test_copy_and_deepcopy(self, indices):\n super(TestIndex, self).test_copy_and_deepcopy(indices)\n\n new_copy2 = self.intIndex.copy(dtype=int)\n assert new_copy2.dtype.kind == 'i'\n\n def test_constructor(self):\n # regular instance creation\n tm.assert_contains_all(self.strIndex, self.strIndex)\n tm.assert_contains_all(self.dateIndex, self.dateIndex)\n\n # casting\n arr = np.array(self.strIndex)\n index = Index(arr)\n tm.assert_contains_all(arr, index)\n tm.assert_index_equal(self.strIndex, index)\n\n # copy\n arr = np.array(self.strIndex)\n index = Index(arr, copy=True, name='name')\n assert isinstance(index, Index)\n assert index.name == 'name'\n tm.assert_numpy_array_equal(arr, index.values)\n arr[0] = \"SOMEBIGLONGSTRING\"\n assert index[0] != \"SOMEBIGLONGSTRING\"\n\n # what to do here?\n # arr = np.array(5.)\n # pytest.raises(Exception, arr.view, Index)\n\n def test_constructor_corner(self):\n # corner case\n pytest.raises(TypeError, Index, 0)\n\n def test_construction_list_mixed_tuples(self):\n # see gh-10697: if we are constructing from a mixed list of tuples,\n # make sure that we are independent of the sorting order.\n idx1 = Index([('A', 1), 'B'])\n assert isinstance(idx1, Index)\n assert not isinstance(idx1, MultiIndex)\n\n idx2 = Index(['B', ('A', 1)])\n assert isinstance(idx2, Index)\n assert not isinstance(idx2, MultiIndex)\n\n 
@pytest.mark.parametrize('na_value', [None, np.nan])\n @pytest.mark.parametrize('vtype', [list, tuple, iter])\n def test_construction_list_tuples_nan(self, na_value, vtype):\n # GH 18505 : valid tuples containing NaN\n values = [(1, 'two'), (3., na_value)]\n result = Index(vtype(values))\n expected = MultiIndex.from_tuples(values)\n tm.assert_index_equal(result, expected)\n\n def test_constructor_from_index_datetimetz(self):\n idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,\n tz='US/Eastern')\n result = pd.Index(idx)\n tm.assert_index_equal(result, idx)\n assert result.tz == idx.tz\n\n result = pd.Index(idx.astype(object))\n tm.assert_index_equal(result, idx)\n assert result.tz == idx.tz\n\n def test_constructor_from_index_timedelta(self):\n idx = pd.timedelta_range('1 days', freq='D', periods=3)\n result = pd.Index(idx)\n tm.assert_index_equal(result, idx)\n\n result = pd.Index(idx.astype(object))\n tm.assert_index_equal(result, idx)\n\n def test_constructor_from_index_period(self):\n idx = pd.period_range('2015-01-01', freq='D', periods=3)\n result = pd.Index(idx)\n tm.assert_index_equal(result, idx)\n\n result = pd.Index(idx.astype(object))\n tm.assert_index_equal(result, idx)\n\n def test_constructor_from_series_datetimetz(self):\n idx = pd.date_range('2015-01-01 10:00', freq='D', periods=3,\n tz='US/Eastern')\n result = pd.Index(pd.Series(idx))\n tm.assert_index_equal(result, idx)\n assert result.tz == idx.tz\n\n def test_constructor_from_series_timedelta(self):\n idx = pd.timedelta_range('1 days', freq='D', periods=3)\n result = pd.Index(pd.Series(idx))\n tm.assert_index_equal(result, idx)\n\n def test_constructor_from_series_period(self):\n idx = pd.period_range('2015-01-01', freq='D', periods=3)\n result = pd.Index(pd.Series(idx))\n tm.assert_index_equal(result, idx)\n\n def test_constructor_from_series(self):\n\n expected = DatetimeIndex([Timestamp('20110101'), Timestamp('20120101'),\n Timestamp('20130101')])\n s = Series([Timestamp('20110101'), Timestamp('20120101'),\n Timestamp('20130101')])\n result = Index(s)\n tm.assert_index_equal(result, expected)\n result = DatetimeIndex(s)\n tm.assert_index_equal(result, expected)\n\n # GH 6273\n # create from a series, passing a freq\n s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990',\n '4-1-1990', '5-1-1990']))\n result = DatetimeIndex(s, freq='MS')\n expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990',\n '4-1-1990', '5-1-1990'], freq='MS')\n tm.assert_index_equal(result, expected)\n\n df = pd.DataFrame(np.random.rand(5, 3))\n df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990',\n '5-1-1990']\n result = DatetimeIndex(df['date'], freq='MS')\n expected.name = 'date'\n tm.assert_index_equal(result, expected)\n assert df['date'].dtype == object\n\n exp = pd.Series(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990',\n '5-1-1990'], name='date')\n tm.assert_series_equal(df['date'], exp)\n\n # GH 6274\n # infer freq of same\n result = pd.infer_freq(df['date'])\n assert result == 'MS'\n\n def test_constructor_ndarray_like(self):\n # GH 5460#issuecomment-44474502\n # it should be possible to convert any object that satisfies the numpy\n # ndarray interface directly into an Index\n class ArrayLike(object):\n def __init__(self, array):\n self.array = array\n\n def __array__(self, dtype=None):\n return self.array\n\n for array in [np.arange(5), np.array(['a', 'b', 'c']),\n date_range('2000-01-01', periods=3).values]:\n expected = pd.Index(array)\n result = pd.Index(ArrayLike(array))\n 
tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize('dtype', [\n int, 'int64', 'int32', 'int16', 'int8', 'uint64', 'uint32',\n 'uint16', 'uint8'])\n def test_constructor_int_dtype_float(self, dtype):\n # GH 18400\n if is_unsigned_integer_dtype(dtype):\n index_type = UInt64Index\n else:\n index_type = Int64Index\n\n expected = index_type([0, 1, 2, 3])\n result = Index([0., 1., 2., 3.], dtype=dtype)\n tm.assert_index_equal(result, expected)\n\n def test_constructor_int_dtype_nan(self):\n # see gh-15187\n data = [np.nan]\n msg = \"cannot convert\"\n\n with tm.assert_raises_regex(ValueError, msg):\n Index(data, dtype='int64')\n\n with tm.assert_raises_regex(ValueError, msg):\n Index(data, dtype='uint64')\n\n # This, however, should not break\n # because NaN is float.\n expected = Float64Index(data)\n result = Index(data, dtype='float')\n tm.assert_index_equal(result, expected)\n\n def test_index_ctor_infer_nan_nat(self):\n # GH 13467\n exp = pd.Float64Index([np.nan, np.nan])\n assert exp.dtype == np.float64\n tm.assert_index_equal(Index([np.nan, np.nan]), exp)\n tm.assert_index_equal(Index(np.array([np.nan, np.nan])), exp)\n\n exp = pd.DatetimeIndex([pd.NaT, pd.NaT])\n assert exp.dtype == 'datetime64[ns]'\n tm.assert_index_equal(Index([pd.NaT, pd.NaT]), exp)\n tm.assert_index_equal(Index(np.array([pd.NaT, pd.NaT])), exp)\n\n exp = pd.DatetimeIndex([pd.NaT, pd.NaT])\n assert exp.dtype == 'datetime64[ns]'\n\n for data in [[pd.NaT, np.nan], [np.nan, pd.NaT],\n [np.nan, np.datetime64('nat')],\n [np.datetime64('nat'), np.nan]]:\n tm.assert_index_equal(Index(data), exp)\n tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)\n\n exp = pd.TimedeltaIndex([pd.NaT, pd.NaT])\n assert exp.dtype == 'timedelta64[ns]'\n\n for data in [[np.nan, np.timedelta64('nat')],\n [np.timedelta64('nat'), np.nan],\n [pd.NaT, np.timedelta64('nat')],\n [np.timedelta64('nat'), pd.NaT]]:\n tm.assert_index_equal(Index(data), exp)\n tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)\n\n # mixed np.datetime64/timedelta64 nat results in object\n data = [np.datetime64('nat'), np.timedelta64('nat')]\n exp = pd.Index(data, dtype=object)\n tm.assert_index_equal(Index(data), exp)\n tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)\n\n data = [np.timedelta64('nat'), np.datetime64('nat')]\n exp = pd.Index(data, dtype=object)\n tm.assert_index_equal(Index(data), exp)\n tm.assert_index_equal(Index(np.array(data, dtype=object)), exp)\n\n def test_index_ctor_infer_periodindex(self):\n xp = period_range('2012-1-1', freq='M', periods=3)\n rs = Index(xp)\n tm.assert_index_equal(rs, xp)\n assert isinstance(rs, PeriodIndex)\n\n def test_constructor_simple_new(self):\n idx = Index([1, 2, 3, 4, 5], name='int')\n result = idx._simple_new(idx, 'int')\n tm.assert_index_equal(result, idx)\n\n idx = Index([1.1, np.nan, 2.2, 3.0], name='float')\n result = idx._simple_new(idx, 'float')\n tm.assert_index_equal(result, idx)\n\n idx = Index(['A', 'B', 'C', np.nan], name='obj')\n result = idx._simple_new(idx, 'obj')\n tm.assert_index_equal(result, idx)\n\n def test_constructor_dtypes(self):\n\n for idx in [Index(np.array([1, 2, 3], dtype=int)),\n Index(np.array([1, 2, 3], dtype=int), dtype=int),\n Index([1, 2, 3], dtype=int)]:\n assert isinstance(idx, Int64Index)\n\n # These should coerce\n for idx in [Index(np.array([1., 2., 3.], dtype=float), dtype=int),\n Index([1., 2., 3.], dtype=int)]:\n assert isinstance(idx, Int64Index)\n\n for idx in [Index(np.array([1., 2., 3.], dtype=float)),\n 
Index(np.array([1, 2, 3], dtype=int), dtype=float),\n Index(np.array([1., 2., 3.], dtype=float), dtype=float),\n Index([1, 2, 3], dtype=float),\n Index([1., 2., 3.], dtype=float)]:\n assert isinstance(idx, Float64Index)\n\n for idx in [Index(np.array([True, False, True], dtype=bool)),\n Index([True, False, True]),\n Index(np.array([True, False, True], dtype=bool),\n dtype=bool),\n Index([True, False, True], dtype=bool)]:\n assert isinstance(idx, Index)\n assert idx.dtype == object\n\n for idx in [Index(np.array([1, 2, 3], dtype=int), dtype='category'),\n Index([1, 2, 3], dtype='category'),\n Index(np.array([np_datetime64_compat('2011-01-01'),\n np_datetime64_compat('2011-01-02')]),\n dtype='category'),\n Index([datetime(2011, 1, 1), datetime(2011, 1, 2)],\n dtype='category')]:\n assert isinstance(idx, CategoricalIndex)\n\n for idx in [Index(np.array([np_datetime64_compat('2011-01-01'),\n np_datetime64_compat('2011-01-02')])),\n Index([datetime(2011, 1, 1), datetime(2011, 1, 2)])]:\n assert isinstance(idx, DatetimeIndex)\n\n for idx in [Index(np.array([np_datetime64_compat('2011-01-01'),\n np_datetime64_compat('2011-01-02')]),\n dtype=object),\n Index([datetime(2011, 1, 1),\n datetime(2011, 1, 2)], dtype=object)]:\n assert not isinstance(idx, DatetimeIndex)\n assert isinstance(idx, Index)\n assert idx.dtype == object\n\n for idx in [Index(np.array([np.timedelta64(1, 'D'), np.timedelta64(\n 1, 'D')])), Index([timedelta(1), timedelta(1)])]:\n assert isinstance(idx, TimedeltaIndex)\n\n for idx in [Index(np.array([np.timedelta64(1, 'D'),\n np.timedelta64(1, 'D')]), dtype=object),\n Index([timedelta(1), timedelta(1)], dtype=object)]:\n assert not isinstance(idx, TimedeltaIndex)\n assert isinstance(idx, Index)\n assert idx.dtype == object\n\n def test_constructor_dtypes_datetime(self):\n\n for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:\n idx = pd.date_range('2011-01-01', periods=5, tz=tz)\n dtype = idx.dtype\n\n # pass values without timezone, as DatetimeIndex localizes it\n for values in [pd.date_range('2011-01-01', periods=5).values,\n pd.date_range('2011-01-01', periods=5).asi8]:\n\n for res in [pd.Index(values, tz=tz),\n pd.Index(values, dtype=dtype),\n pd.Index(list(values), tz=tz),\n pd.Index(list(values), dtype=dtype)]:\n tm.assert_index_equal(res, idx)\n\n # check compat with DatetimeIndex\n for res in [pd.DatetimeIndex(values, tz=tz),\n pd.DatetimeIndex(values, dtype=dtype),\n pd.DatetimeIndex(list(values), tz=tz),\n pd.DatetimeIndex(list(values), dtype=dtype)]:\n tm.assert_index_equal(res, idx)\n\n def test_constructor_dtypes_timedelta(self):\n\n idx = pd.timedelta_range('1 days', periods=5)\n dtype = idx.dtype\n\n for values in [idx.values, idx.asi8]:\n\n for res in [pd.Index(values, dtype=dtype),\n pd.Index(list(values), dtype=dtype)]:\n tm.assert_index_equal(res, idx)\n\n # check compat with TimedeltaIndex\n for res in [pd.TimedeltaIndex(values, dtype=dtype),\n pd.TimedeltaIndex(list(values), dtype=dtype)]:\n tm.assert_index_equal(res, idx)\n\n def test_constructor_empty(self):\n skip_index_keys = [\"repeats\", \"periodIndex\", \"rangeIndex\",\n \"tuples\"]\n for key, idx in self.generate_index_types(skip_index_keys):\n empty = idx.__class__([])\n assert isinstance(empty, idx.__class__)\n assert not len(empty)\n\n empty = PeriodIndex([], freq='B')\n assert isinstance(empty, PeriodIndex)\n assert not len(empty)\n\n empty = RangeIndex(step=1)\n assert isinstance(empty, pd.RangeIndex)\n assert not len(empty)\n\n empty = MultiIndex(levels=[[1, 2], ['blue', 'red']],\n labels=[[], 
[]])\n assert isinstance(empty, MultiIndex)\n assert not len(empty)\n\n def test_view_with_args(self):\n\n restricted = ['unicodeIndex', 'strIndex', 'catIndex', 'boolIndex',\n 'empty']\n\n for i in restricted:\n ind = self.indices[i]\n\n # with arguments\n pytest.raises(TypeError, lambda: ind.view('i8'))\n\n # these are ok\n for i in list(set(self.indices.keys()) - set(restricted)):\n ind = self.indices[i]\n\n # with arguments\n ind.view('i8')\n\n def test_astype(self):\n casted = self.intIndex.astype('i8')\n\n # it works!\n casted.get_loc(5)\n\n # pass on name\n self.intIndex.name = 'foobar'\n casted = self.intIndex.astype('i8')\n assert casted.name == 'foobar'\n\n def test_equals_object(self):\n # same\n assert Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c']))\n\n # different length\n assert not Index(['a', 'b', 'c']).equals(Index(['a', 'b']))\n\n # same length, different values\n assert not Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd']))\n\n # Must also be an Index\n assert not Index(['a', 'b', 'c']).equals(['a', 'b', 'c'])\n\n def test_insert(self):\n\n # GH 7256\n # validate neg/pos inserts\n result = Index(['b', 'c', 'd'])\n\n # test 0th element\n tm.assert_index_equal(Index(['a', 'b', 'c', 'd']),\n result.insert(0, 'a'))\n\n # test Nth element that follows Python list behavior\n tm.assert_index_equal(Index(['b', 'c', 'e', 'd']),\n result.insert(-1, 'e'))\n\n # test loc +/- neq (0, -1)\n tm.assert_index_equal(result.insert(1, 'z'), result.insert(-2, 'z'))\n\n # test empty\n null_index = Index([])\n tm.assert_index_equal(Index(['a']), null_index.insert(0, 'a'))\n\n # GH 18295 (test missing)\n expected = Index(['a', np.nan, 'b', 'c'])\n for na in (np.nan, pd.NaT, None):\n result = Index(list('abc')).insert(1, na)\n tm.assert_index_equal(result, expected)\n\n def test_delete(self):\n idx = Index(['a', 'b', 'c', 'd'], name='idx')\n\n expected = Index(['b', 'c', 'd'], name='idx')\n result = idx.delete(0)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n\n expected = Index(['a', 'b', 'c'], name='idx')\n result = idx.delete(-1)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n\n with pytest.raises((IndexError, ValueError)):\n # either depending on numpy version\n result = idx.delete(5)\n\n def test_identical(self):\n\n # index\n i1 = Index(['a', 'b', 'c'])\n i2 = Index(['a', 'b', 'c'])\n\n assert i1.identical(i2)\n\n i1 = i1.rename('foo')\n assert i1.equals(i2)\n assert not i1.identical(i2)\n\n i2 = i2.rename('foo')\n assert i1.identical(i2)\n\n i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])\n i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)\n assert not i3.identical(i4)\n\n def test_is_(self):\n ind = Index(range(10))\n assert ind.is_(ind)\n assert ind.is_(ind.view().view().view().view())\n assert not ind.is_(Index(range(10)))\n assert not ind.is_(ind.copy())\n assert not ind.is_(ind.copy(deep=False))\n assert not ind.is_(ind[:])\n assert not ind.is_(np.array(range(10)))\n\n # quasi-implementation dependent\n assert ind.is_(ind.view())\n ind2 = ind.view()\n ind2.name = 'bob'\n assert ind.is_(ind2)\n assert ind2.is_(ind)\n # doesn't matter if Indices are *actually* views of underlying data,\n assert not ind.is_(Index(ind.values))\n arr = np.array(range(1, 11))\n ind1 = Index(arr, copy=False)\n ind2 = Index(arr, copy=False)\n assert not ind1.is_(ind2)\n\n def test_asof(self):\n d = self.dateIndex[0]\n assert self.dateIndex.asof(d) == d\n assert isna(self.dateIndex.asof(d - timedelta(1)))\n\n d = 
self.dateIndex[-1]\n assert self.dateIndex.asof(d + timedelta(1)) == d\n\n d = self.dateIndex[0].to_pydatetime()\n assert isinstance(self.dateIndex.asof(d), Timestamp)\n\n def test_asof_datetime_partial(self):\n idx = pd.date_range('2010-01-01', periods=2, freq='m')\n expected = Timestamp('2010-02-28')\n result = idx.asof('2010-02')\n assert result == expected\n assert not isinstance(result, Index)\n\n def test_nanosecond_index_access(self):\n s = Series([Timestamp('20130101')]).values.view('i8')[0]\n r = DatetimeIndex([s + 50 + i for i in range(100)])\n x = Series(np.random.randn(100), index=r)\n\n first_value = x.asof(x.index[0])\n\n # this does not yet work, as parsing strings is done via dateutil\n # assert first_value == x['2013-01-01 00:00:00.000000050+0000']\n\n exp_ts = np_datetime64_compat('2013-01-01 00:00:00.000000050+0000',\n 'ns')\n assert first_value == x[Timestamp(exp_ts)]\n\n def test_comparators(self):\n index = self.dateIndex\n element = index[len(index) // 2]\n element = _to_m8(element)\n\n arr = np.array(index)\n\n def _check(op):\n arr_result = op(arr, element)\n index_result = op(index, element)\n\n assert isinstance(index_result, np.ndarray)\n tm.assert_numpy_array_equal(arr_result, index_result)\n\n _check(operator.eq)\n _check(operator.ne)\n _check(operator.gt)\n _check(operator.lt)\n _check(operator.ge)\n _check(operator.le)\n\n def test_booleanindex(self):\n boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)\n boolIdx[5:30:2] = False\n\n subIndex = self.strIndex[boolIdx]\n\n for i, val in enumerate(subIndex):\n assert subIndex.get_loc(val) == i\n\n subIndex = self.strIndex[list(boolIdx)]\n for i, val in enumerate(subIndex):\n assert subIndex.get_loc(val) == i\n\n def test_fancy(self):\n sl = self.strIndex[[1, 2, 3]]\n for i in sl:\n assert i == sl[sl.get_loc(i)]\n\n def test_empty_fancy(self):\n empty_farr = np.array([], dtype=np.float_)\n empty_iarr = np.array([], dtype=np.int_)\n empty_barr = np.array([], dtype=np.bool_)\n\n # pd.DatetimeIndex is excluded, because it overrides getitem and should\n # be tested separately.\n for idx in [self.strIndex, self.intIndex, self.floatIndex]:\n empty_idx = idx.__class__([])\n\n assert idx[[]].identical(empty_idx)\n assert idx[empty_iarr].identical(empty_idx)\n assert idx[empty_barr].identical(empty_idx)\n\n # np.ndarray only accepts ndarray of int & bool dtypes, so should\n # Index.\n pytest.raises(IndexError, idx.__getitem__, empty_farr)\n\n def test_getitem_error(self, indices):\n\n with pytest.raises(IndexError):\n indices[101]\n\n with pytest.raises(IndexError):\n indices['no_int']\n\n def test_intersection(self):\n first = self.strIndex[:20]\n second = self.strIndex[:10]\n intersect = first.intersection(second)\n assert tm.equalContents(intersect, second)\n\n # Corner cases\n inter = first.intersection(first)\n assert inter is first\n\n idx1 = Index([1, 2, 3, 4, 5], name='idx')\n # if target has the same name, it is preserved\n idx2 = Index([3, 4, 5, 6, 7], name='idx')\n expected2 = Index([3, 4, 5], name='idx')\n result2 = idx1.intersection(idx2)\n tm.assert_index_equal(result2, expected2)\n assert result2.name == expected2.name\n\n # if target name is different, it will be reset\n idx3 = Index([3, 4, 5, 6, 7], name='other')\n expected3 = Index([3, 4, 5], name=None)\n result3 = idx1.intersection(idx3)\n tm.assert_index_equal(result3, expected3)\n assert result3.name == expected3.name\n\n # non monotonic\n idx1 = Index([5, 3, 2, 4, 1], name='idx')\n idx2 = Index([4, 7, 6, 5, 3], name='idx')\n expected = 
Index([5, 3, 4], name='idx')\n result = idx1.intersection(idx2)\n tm.assert_index_equal(result, expected)\n\n idx2 = Index([4, 7, 6, 5, 3], name='other')\n expected = Index([5, 3, 4], name=None)\n result = idx1.intersection(idx2)\n tm.assert_index_equal(result, expected)\n\n # non-monotonic non-unique\n idx1 = Index(['A', 'B', 'A', 'C'])\n idx2 = Index(['B', 'D'])\n expected = Index(['B'], dtype='object')\n result = idx1.intersection(idx2)\n tm.assert_index_equal(result, expected)\n\n idx2 = Index(['B', 'D', 'A'])\n expected = Index(['A', 'B', 'A'], dtype='object')\n result = idx1.intersection(idx2)\n tm.assert_index_equal(result, expected)\n\n # preserve names\n first = self.strIndex[5:20]\n second = self.strIndex[:10]\n first.name = 'A'\n second.name = 'A'\n intersect = first.intersection(second)\n assert intersect.name == 'A'\n\n second.name = 'B'\n intersect = first.intersection(second)\n assert intersect.name is None\n\n first.name = None\n second.name = 'B'\n intersect = first.intersection(second)\n assert intersect.name is None\n\n def test_intersect_str_dates(self):\n dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]\n\n i1 = Index(dt_dates, dtype=object)\n i2 = Index(['aa'], dtype=object)\n res = i2.intersection(i1)\n\n assert len(res) == 0\n\n def test_union(self):\n first = self.strIndex[5:20]\n second = self.strIndex[:10]\n everything = self.strIndex[:20]\n union = first.union(second)\n assert tm.equalContents(union, everything)\n\n # GH 10149\n cases = [klass(second.values) for klass in [np.array, Series, list]]\n for case in cases:\n result = first.union(case)\n assert tm.equalContents(result, everything)\n\n # Corner cases\n union = first.union(first)\n assert union is first\n\n union = first.union([])\n assert union is first\n\n union = Index([]).union(first)\n assert union is first\n\n # preserve names\n first = Index(list('ab'), name='A')\n second = Index(list('ab'), name='B')\n union = first.union(second)\n expected = Index(list('ab'), name=None)\n tm.assert_index_equal(union, expected)\n\n first = Index(list('ab'), name='A')\n second = Index([], name='B')\n union = first.union(second)\n expected = Index(list('ab'), name=None)\n tm.assert_index_equal(union, expected)\n\n first = Index([], name='A')\n second = Index(list('ab'), name='B')\n union = first.union(second)\n expected = Index(list('ab'), name=None)\n tm.assert_index_equal(union, expected)\n\n first = Index(list('ab'))\n second = Index(list('ab'), name='B')\n union = first.union(second)\n expected = Index(list('ab'), name='B')\n tm.assert_index_equal(union, expected)\n\n first = Index([])\n second = Index(list('ab'), name='B')\n union = first.union(second)\n expected = Index(list('ab'), name='B')\n tm.assert_index_equal(union, expected)\n\n first = Index(list('ab'))\n second = Index([], name='B')\n union = first.union(second)\n expected = Index(list('ab'), name='B')\n tm.assert_index_equal(union, expected)\n\n first = Index(list('ab'), name='A')\n second = Index(list('ab'))\n union = first.union(second)\n expected = Index(list('ab'), name='A')\n tm.assert_index_equal(union, expected)\n\n first = Index(list('ab'), name='A')\n second = Index([])\n union = first.union(second)\n expected = Index(list('ab'), name='A')\n tm.assert_index_equal(union, expected)\n\n first = Index([], name='A')\n second = Index(list('ab'))\n union = first.union(second)\n expected = Index(list('ab'), name='A')\n tm.assert_index_equal(union, expected)\n\n with tm.assert_produces_warning(RuntimeWarning):\n firstCat = 
self.strIndex.union(self.dateIndex)\n secondCat = self.strIndex.union(self.strIndex)\n\n if self.dateIndex.dtype == np.object_:\n appended = np.append(self.strIndex, self.dateIndex)\n else:\n appended = np.append(self.strIndex, self.dateIndex.astype('O'))\n\n assert tm.equalContents(firstCat, appended)\n assert tm.equalContents(secondCat, self.strIndex)\n tm.assert_contains_all(self.strIndex, firstCat)\n tm.assert_contains_all(self.strIndex, secondCat)\n tm.assert_contains_all(self.dateIndex, firstCat)\n\n def test_add(self):\n idx = self.strIndex\n expected = Index(self.strIndex.values * 2)\n tm.assert_index_equal(idx + idx, expected)\n tm.assert_index_equal(idx + idx.tolist(), expected)\n tm.assert_index_equal(idx.tolist() + idx, expected)\n\n # test add and radd\n idx = Index(list('abc'))\n expected = Index(['a1', 'b1', 'c1'])\n tm.assert_index_equal(idx + '1', expected)\n expected = Index(['1a', '1b', '1c'])\n tm.assert_index_equal('1' + idx, expected)\n\n def test_sub(self):\n idx = self.strIndex\n pytest.raises(TypeError, lambda: idx - 'a')\n pytest.raises(TypeError, lambda: idx - idx)\n pytest.raises(TypeError, lambda: idx - idx.tolist())\n pytest.raises(TypeError, lambda: idx.tolist() - idx)\n\n def test_map_identity_mapping(self):\n # GH 12766\n for name, cur_index in self.indices.items():\n tm.assert_index_equal(cur_index, cur_index.map(lambda x: x))\n\n def test_map_with_tuples(self):\n # GH 12766\n\n # Test that returning a single tuple from an Index\n # returns an Index.\n idx = tm.makeIntIndex(3)\n result = tm.makeIntIndex(3).map(lambda x: (x,))\n expected = Index([(i,) for i in idx])\n tm.assert_index_equal(result, expected)\n\n # Test that returning a tuple from a map of a single index\n # returns a MultiIndex object.\n result = idx.map(lambda x: (x, x == 1))\n expected = MultiIndex.from_tuples([(i, i == 1) for i in idx])\n tm.assert_index_equal(result, expected)\n\n # Test that returning a single object from a MultiIndex\n # returns an Index.\n first_level = ['foo', 'bar', 'baz']\n multi_index = MultiIndex.from_tuples(lzip(first_level, [1, 2, 3]))\n reduced_index = multi_index.map(lambda x: x[0])\n tm.assert_index_equal(reduced_index, Index(first_level))\n\n def test_map_tseries_indices_return_index(self):\n date_index = tm.makeDateIndex(10)\n exp = Index([1] * 10)\n tm.assert_index_equal(exp, date_index.map(lambda x: 1))\n\n period_index = tm.makePeriodIndex(10)\n tm.assert_index_equal(exp, period_index.map(lambda x: 1))\n\n tdelta_index = tm.makeTimedeltaIndex(10)\n tm.assert_index_equal(exp, tdelta_index.map(lambda x: 1))\n\n date_index = tm.makeDateIndex(24, freq='h', name='hourly')\n exp = Index(range(24), name='hourly')\n tm.assert_index_equal(exp, date_index.map(lambda x: x.hour))\n\n @pytest.mark.parametrize(\n \"mapper\",\n [\n lambda values, index: {i: e for e, i in zip(values, index)},\n lambda values, index: pd.Series(values, index)])\n def test_map_dictlike(self, mapper):\n # GH 12756\n expected = Index(['foo', 'bar', 'baz'])\n idx = tm.makeIntIndex(3)\n result = idx.map(mapper(expected.values, idx))\n tm.assert_index_equal(result, expected)\n\n for name in self.indices.keys():\n if name == 'catIndex':\n # Tested in test_categorical\n continue\n elif name == 'repeats':\n # Cannot map duplicated index\n continue\n\n index = self.indices[name]\n expected = Index(np.arange(len(index), 0, -1))\n\n # to match proper result coercion for uints\n if name == 'empty':\n expected = Index([])\n\n result = index.map(mapper(expected, index))\n tm.assert_index_equal(result, 
expected)\n\n def test_map_with_non_function_missing_values(self):\n # GH 12756\n expected = Index([2., np.nan, 'foo'])\n input = Index([2, 1, 0])\n\n mapper = Series(['foo', 2., 'baz'], index=[0, 2, -1])\n tm.assert_index_equal(expected, input.map(mapper))\n\n mapper = {0: 'foo', 2: 2.0, -1: 'baz'}\n tm.assert_index_equal(expected, input.map(mapper))\n\n def test_map_na_exclusion(self):\n idx = Index([1.5, np.nan, 3, np.nan, 5])\n\n result = idx.map(lambda x: x * 2, na_action='ignore')\n exp = idx * 2\n tm.assert_index_equal(result, exp)\n\n def test_map_defaultdict(self):\n idx = Index([1, 2, 3])\n default_dict = defaultdict(lambda: 'blank')\n default_dict[1] = 'stuff'\n result = idx.map(default_dict)\n expected = Index(['stuff', 'blank', 'blank'])\n tm.assert_index_equal(result, expected)\n\n def test_append_multiple(self):\n index = Index(['a', 'b', 'c', 'd', 'e', 'f'])\n\n foos = [index[:2], index[2:4], index[4:]]\n result = foos[0].append(foos[1:])\n tm.assert_index_equal(result, index)\n\n # empty\n result = index.append([])\n tm.assert_index_equal(result, index)\n\n def test_append_empty_preserve_name(self):\n left = Index([], name='foo')\n right = Index([1, 2, 3], name='foo')\n\n result = left.append(right)\n assert result.name == 'foo'\n\n left = Index([], name='foo')\n right = Index([1, 2, 3], name='bar')\n\n result = left.append(right)\n assert result.name is None\n\n def test_add_string(self):\n # from bug report\n index = Index(['a', 'b', 'c'])\n index2 = index + 'foo'\n\n assert 'a' not in index2\n assert 'afoo' in index2\n\n def test_iadd_string(self):\n index = pd.Index(['a', 'b', 'c'])\n # doesn't fail test unless there is a check before `+=`\n assert 'a' in index\n\n index += '_x'\n assert 'a_x' in index\n\n def test_difference(self):\n\n first = self.strIndex[5:20]\n second = self.strIndex[:10]\n answer = self.strIndex[10:20]\n first.name = 'name'\n # different names\n result = first.difference(second)\n\n assert tm.equalContents(result, answer)\n assert result.name is None\n\n # same names\n second.name = 'name'\n result = first.difference(second)\n assert result.name == 'name'\n\n # with empty\n result = first.difference([])\n assert tm.equalContents(result, first)\n assert result.name == first.name\n\n # with everything\n result = first.difference(first)\n assert len(result) == 0\n assert result.name == first.name\n\n def test_symmetric_difference(self):\n # smoke\n idx1 = Index([1, 2, 3, 4], name='idx1')\n idx2 = Index([2, 3, 4, 5])\n result = idx1.symmetric_difference(idx2)\n expected = Index([1, 5])\n assert tm.equalContents(result, expected)\n assert result.name is None\n\n # __xor__ syntax\n expected = idx1 ^ idx2\n assert tm.equalContents(result, expected)\n assert result.name is None\n\n # multiIndex\n idx1 = MultiIndex.from_tuples(self.tuples)\n idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])\n result = idx1.symmetric_difference(idx2)\n expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])\n assert tm.equalContents(result, expected)\n\n # nans:\n # GH 13514 change: {nan} - {nan} == {}\n # (GH 6444, sorting of nans, is no longer an issue)\n idx1 = Index([1, np.nan, 2, 3])\n idx2 = Index([0, 1, np.nan])\n idx3 = Index([0, 1])\n\n result = idx1.symmetric_difference(idx2)\n expected = Index([0.0, 2.0, 3.0])\n tm.assert_index_equal(result, expected)\n\n result = idx1.symmetric_difference(idx3)\n expected = Index([0.0, 2.0, 3.0, np.nan])\n tm.assert_index_equal(result, expected)\n\n # other not an Index:\n idx1 = Index([1, 2, 3, 4], 
name='idx1')\n idx2 = np.array([2, 3, 4, 5])\n expected = Index([1, 5])\n result = idx1.symmetric_difference(idx2)\n assert tm.equalContents(result, expected)\n assert result.name == 'idx1'\n\n result = idx1.symmetric_difference(idx2, result_name='new_name')\n assert tm.equalContents(result, expected)\n assert result.name == 'new_name'\n\n def test_difference_type(self):\n # GH 20040\n # If taking difference of a set and itself, it\n # needs to preserve the type of the index\n skip_index_keys = ['repeats']\n for key, idx in self.generate_index_types(skip_index_keys):\n result = idx.difference(idx)\n expected = idx.drop(idx)\n tm.assert_index_equal(result, expected)\n\n def test_intersection_difference(self):\n # GH 20040\n # Test that the intersection of an index with an\n # empty index produces the same index as the difference\n # of an index with itself. Test for all types\n skip_index_keys = ['repeats']\n for key, idx in self.generate_index_types(skip_index_keys):\n inter = idx.intersection(idx.drop(idx))\n diff = idx.difference(idx)\n tm.assert_index_equal(inter, diff)\n\n def test_is_numeric(self):\n assert not self.dateIndex.is_numeric()\n assert not self.strIndex.is_numeric()\n assert self.intIndex.is_numeric()\n assert self.floatIndex.is_numeric()\n assert not self.catIndex.is_numeric()\n\n def test_is_object(self):\n assert self.strIndex.is_object()\n assert self.boolIndex.is_object()\n assert not self.catIndex.is_object()\n assert not self.intIndex.is_object()\n assert not self.dateIndex.is_object()\n assert not self.floatIndex.is_object()\n\n def test_is_all_dates(self):\n assert self.dateIndex.is_all_dates\n assert not self.strIndex.is_all_dates\n assert not self.intIndex.is_all_dates\n\n def test_summary(self):\n self._check_method_works(Index._summary)\n # GH3869\n ind = Index(['{other}%s', \"~:{range}:0\"], name='A')\n result = ind._summary()\n # shouldn't be formatted accidentally.\n assert '~:{range}:0' in result\n assert '{other}%s' in result\n\n # GH18217\n def test_summary_deprecated(self):\n ind = Index(['{other}%s', \"~:{range}:0\"], name='A')\n\n with tm.assert_produces_warning(FutureWarning):\n ind.summary()\n\n def test_format(self):\n self._check_method_works(Index.format)\n\n # GH 14626\n # windows has different precision on datetime.datetime.now (it doesn't\n # include us since the default for Timestamp shows these but Index\n # formatting does not we are skipping)\n now = datetime.now()\n if not str(now).endswith(\"000\"):\n index = Index([now])\n formatted = index.format()\n expected = [str(index[0])]\n assert formatted == expected\n\n # 2845\n index = Index([1, 2.0 + 3.0j, np.nan])\n formatted = index.format()\n expected = [str(index[0]), str(index[1]), u('NaN')]\n assert formatted == expected\n\n # is this really allowed?\n index = Index([1, 2.0 + 3.0j, None])\n formatted = index.format()\n expected = [str(index[0]), str(index[1]), u('NaN')]\n assert formatted == expected\n\n self.strIndex[:0].format()\n\n def test_format_with_name_time_info(self):\n # bug I fixed 12/20/2011\n inc = timedelta(hours=4)\n dates = Index([dt + inc for dt in self.dateIndex], name='something')\n\n formatted = dates.format(name=True)\n assert formatted[0] == 'something'\n\n def test_format_datetime_with_time(self):\n t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])\n\n result = t.format()\n expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']\n assert len(result) == 2\n assert result == expected\n\n def test_format_none(self):\n values = ['a', 'b', 'c', None]\n\n 
idx = Index(values)\n idx.format()\n assert idx[3] is None\n\n def test_logical_compat(self):\n idx = self.create_index()\n assert idx.all() == idx.values.all()\n assert idx.any() == idx.values.any()\n\n def _check_method_works(self, method):\n method(self.empty)\n method(self.dateIndex)\n method(self.unicodeIndex)\n method(self.strIndex)\n method(self.intIndex)\n method(self.tuples)\n method(self.catIndex)\n\n def test_get_indexer(self):\n idx1 = Index([1, 2, 3, 4, 5])\n idx2 = Index([2, 4, 6])\n\n r1 = idx1.get_indexer(idx2)\n assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))\n\n r1 = idx2.get_indexer(idx1, method='pad')\n e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)\n assert_almost_equal(r1, e1)\n\n r2 = idx2.get_indexer(idx1[::-1], method='pad')\n assert_almost_equal(r2, e1[::-1])\n\n rffill1 = idx2.get_indexer(idx1, method='ffill')\n assert_almost_equal(r1, rffill1)\n\n r1 = idx2.get_indexer(idx1, method='backfill')\n e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)\n assert_almost_equal(r1, e1)\n\n rbfill1 = idx2.get_indexer(idx1, method='bfill')\n assert_almost_equal(r1, rbfill1)\n\n r2 = idx2.get_indexer(idx1[::-1], method='backfill')\n assert_almost_equal(r2, e1[::-1])\n\n def test_get_indexer_invalid(self):\n # GH10411\n idx = Index(np.arange(10))\n\n with tm.assert_raises_regex(ValueError, 'tolerance argument'):\n idx.get_indexer([1, 0], tolerance=1)\n\n with tm.assert_raises_regex(ValueError, 'limit argument'):\n idx.get_indexer([1, 0], limit=1)\n\n @pytest.mark.parametrize(\n 'method, tolerance, indexer, expected',\n [\n ('pad', None, [0, 5, 9], [0, 5, 9]),\n ('backfill', None, [0, 5, 9], [0, 5, 9]),\n ('nearest', None, [0, 5, 9], [0, 5, 9]),\n ('pad', 0, [0, 5, 9], [0, 5, 9]),\n ('backfill', 0, [0, 5, 9], [0, 5, 9]),\n ('nearest', 0, [0, 5, 9], [0, 5, 9]),\n\n ('pad', None, [0.2, 1.8, 8.5], [0, 1, 8]),\n ('backfill', None, [0.2, 1.8, 8.5], [1, 2, 9]),\n ('nearest', None, [0.2, 1.8, 8.5], [0, 2, 9]),\n ('pad', 1, [0.2, 1.8, 8.5], [0, 1, 8]),\n ('backfill', 1, [0.2, 1.8, 8.5], [1, 2, 9]),\n ('nearest', 1, [0.2, 1.8, 8.5], [0, 2, 9]),\n\n ('pad', 0.2, [0.2, 1.8, 8.5], [0, -1, -1]),\n ('backfill', 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]),\n ('nearest', 0.2, [0.2, 1.8, 8.5], [0, 2, -1])])\n def test_get_indexer_nearest(self, method, tolerance, indexer, expected):\n idx = Index(np.arange(10))\n\n actual = idx.get_indexer(indexer, method=method, tolerance=tolerance)\n tm.assert_numpy_array_equal(actual, np.array(expected,\n dtype=np.intp))\n\n @pytest.mark.parametrize('listtype', [list, tuple, Series, np.array])\n @pytest.mark.parametrize(\n 'tolerance, expected',\n list(zip([[0.3, 0.3, 0.1], [0.2, 0.1, 0.1],\n [0.1, 0.5, 0.5]],\n [[0, 2, -1], [0, -1, -1],\n [-1, 2, 9]])))\n def test_get_indexer_nearest_listlike_tolerance(self, tolerance,\n expected, listtype):\n idx = Index(np.arange(10))\n\n actual = idx.get_indexer([0.2, 1.8, 8.5], method='nearest',\n tolerance=listtype(tolerance))\n tm.assert_numpy_array_equal(actual, np.array(expected,\n dtype=np.intp))\n\n def test_get_indexer_nearest_error(self):\n idx = Index(np.arange(10))\n with tm.assert_raises_regex(ValueError, 'limit argument'):\n idx.get_indexer([1, 0], method='nearest', limit=1)\n\n with pytest.raises(ValueError, match='tolerance size must match'):\n idx.get_indexer([1, 0], method='nearest',\n tolerance=[1, 2, 3])\n\n def test_get_indexer_nearest_decreasing(self):\n idx = Index(np.arange(10))[::-1]\n\n all_methods = ['pad', 'backfill', 'nearest']\n for method in all_methods:\n actual = idx.get_indexer([0, 5, 9], 
method=method)\n tm.assert_numpy_array_equal(actual, np.array([9, 4, 0],\n dtype=np.intp))\n\n for method, expected in zip(all_methods, [[8, 7, 0], [9, 8, 1],\n [9, 7, 0]]):\n actual = idx.get_indexer([0.2, 1.8, 8.5], method=method)\n tm.assert_numpy_array_equal(actual, np.array(expected,\n dtype=np.intp))\n\n def test_get_indexer_strings(self):\n idx = pd.Index(['b', 'c'])\n\n actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='pad')\n expected = np.array([-1, 0, 1, 1], dtype=np.intp)\n tm.assert_numpy_array_equal(actual, expected)\n\n actual = idx.get_indexer(['a', 'b', 'c', 'd'], method='backfill')\n expected = np.array([0, 0, 1, -1], dtype=np.intp)\n tm.assert_numpy_array_equal(actual, expected)\n\n with pytest.raises(TypeError):\n idx.get_indexer(['a', 'b', 'c', 'd'], method='nearest')\n\n with pytest.raises(TypeError):\n idx.get_indexer(['a', 'b', 'c', 'd'], method='pad', tolerance=2)\n\n with pytest.raises(TypeError):\n idx.get_indexer(['a', 'b', 'c', 'd'], method='pad',\n tolerance=[2, 2, 2, 2])\n\n def test_get_indexer_numeric_index_boolean_target(self):\n # GH 16877\n numeric_idx = pd.Index(range(4))\n result = numeric_idx.get_indexer([True, False, True])\n expected = np.array([-1, -1, -1], dtype=np.intp)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_get_loc(self):\n idx = pd.Index([0, 1, 2])\n all_methods = [None, 'pad', 'backfill', 'nearest']\n for method in all_methods:\n assert idx.get_loc(1, method=method) == 1\n if method is not None:\n assert idx.get_loc(1, method=method, tolerance=0) == 1\n with pytest.raises(TypeError):\n idx.get_loc([1, 2], method=method)\n\n for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:\n assert idx.get_loc(1.1, method) == loc\n\n for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:\n assert idx.get_loc(1.1, method, tolerance=1) == loc\n\n for method in ['pad', 'backfill', 'nearest']:\n with pytest.raises(KeyError):\n idx.get_loc(1.1, method, tolerance=0.05)\n\n with tm.assert_raises_regex(ValueError, 'must be numeric'):\n idx.get_loc(1.1, 'nearest', tolerance='invalid')\n with tm.assert_raises_regex(ValueError, 'tolerance .* valid if'):\n idx.get_loc(1.1, tolerance=1)\n with pytest.raises(ValueError, match='tolerance size must match'):\n idx.get_loc(1.1, 'nearest', tolerance=[1, 1])\n\n idx = pd.Index(['a', 'c'])\n with pytest.raises(TypeError):\n idx.get_loc('a', method='nearest')\n with pytest.raises(TypeError):\n idx.get_loc('a', method='pad', tolerance='invalid')\n\n def test_slice_locs(self):\n for dtype in [int, float]:\n idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))\n n = len(idx)\n\n assert idx.slice_locs(start=2) == (2, n)\n assert idx.slice_locs(start=3) == (3, n)\n assert idx.slice_locs(3, 8) == (3, 6)\n assert idx.slice_locs(5, 10) == (3, n)\n assert idx.slice_locs(end=8) == (0, 6)\n assert idx.slice_locs(end=9) == (0, 7)\n\n # reversed\n idx2 = idx[::-1]\n assert idx2.slice_locs(8, 2) == (2, 6)\n assert idx2.slice_locs(7, 3) == (2, 5)\n\n # float slicing\n idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=float))\n n = len(idx)\n assert idx.slice_locs(5.0, 10.0) == (3, n)\n assert idx.slice_locs(4.5, 10.5) == (3, 8)\n idx2 = idx[::-1]\n assert idx2.slice_locs(8.5, 1.5) == (2, 6)\n assert idx2.slice_locs(10.5, -1) == (0, n)\n\n # int slicing with floats\n # GH 4892, these are all TypeErrors\n idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=int))\n pytest.raises(TypeError,\n lambda: idx.slice_locs(5.0, 10.0), (3, n))\n pytest.raises(TypeError,\n lambda: 
idx.slice_locs(4.5, 10.5), (3, 8))\n idx2 = idx[::-1]\n pytest.raises(TypeError,\n lambda: idx2.slice_locs(8.5, 1.5), (2, 6))\n pytest.raises(TypeError,\n lambda: idx2.slice_locs(10.5, -1), (0, n))\n\n def test_slice_locs_dup(self):\n idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])\n assert idx.slice_locs('a', 'd') == (0, 6)\n assert idx.slice_locs(end='d') == (0, 6)\n assert idx.slice_locs('a', 'c') == (0, 4)\n assert idx.slice_locs('b', 'd') == (2, 6)\n\n idx2 = idx[::-1]\n assert idx2.slice_locs('d', 'a') == (0, 6)\n assert idx2.slice_locs(end='a') == (0, 6)\n assert idx2.slice_locs('d', 'b') == (0, 4)\n assert idx2.slice_locs('c', 'a') == (2, 6)\n\n for dtype in [int, float]:\n idx = Index(np.array([10, 12, 12, 14], dtype=dtype))\n assert idx.slice_locs(12, 12) == (1, 3)\n assert idx.slice_locs(11, 13) == (1, 3)\n\n idx2 = idx[::-1]\n assert idx2.slice_locs(12, 12) == (1, 3)\n assert idx2.slice_locs(13, 11) == (1, 3)\n\n def test_slice_locs_na(self):\n idx = Index([np.nan, 1, 2])\n pytest.raises(KeyError, idx.slice_locs, start=1.5)\n pytest.raises(KeyError, idx.slice_locs, end=1.5)\n assert idx.slice_locs(1) == (1, 3)\n assert idx.slice_locs(np.nan) == (0, 3)\n\n idx = Index([0, np.nan, np.nan, 1, 2])\n assert idx.slice_locs(np.nan) == (1, 5)\n\n def test_slice_locs_negative_step(self):\n idx = Index(list('bcdxy'))\n\n SLC = pd.IndexSlice\n\n def check_slice(in_slice, expected):\n s_start, s_stop = idx.slice_locs(in_slice.start, in_slice.stop,\n in_slice.step)\n result = idx[s_start:s_stop:in_slice.step]\n expected = pd.Index(list(expected))\n tm.assert_index_equal(result, expected)\n\n for in_slice, expected in [\n (SLC[::-1], 'yxdcb'), (SLC['b':'y':-1], ''),\n (SLC['b'::-1], 'b'), (SLC[:'b':-1], 'yxdcb'),\n (SLC[:'y':-1], 'y'), (SLC['y'::-1], 'yxdcb'),\n (SLC['y'::-4], 'yb'),\n # absent labels\n (SLC[:'a':-1], 'yxdcb'), (SLC[:'a':-2], 'ydb'),\n (SLC['z'::-1], 'yxdcb'), (SLC['z'::-3], 'yc'),\n (SLC['m'::-1], 'dcb'), (SLC[:'m':-1], 'yx'),\n (SLC['a':'a':-1], ''), (SLC['z':'z':-1], ''),\n (SLC['m':'m':-1], '')\n ]:\n check_slice(in_slice, expected)\n\n def test_drop(self):\n n = len(self.strIndex)\n\n drop = self.strIndex[lrange(5, 10)]\n dropped = self.strIndex.drop(drop)\n expected = self.strIndex[lrange(5) + lrange(10, n)]\n tm.assert_index_equal(dropped, expected)\n\n pytest.raises(KeyError, self.strIndex.drop, ['foo', 'bar'])\n pytest.raises(KeyError, self.strIndex.drop, ['1', 'bar'])\n\n # errors='ignore'\n mixed = drop.tolist() + ['foo']\n dropped = self.strIndex.drop(mixed, errors='ignore')\n expected = self.strIndex[lrange(5) + lrange(10, n)]\n tm.assert_index_equal(dropped, expected)\n\n dropped = self.strIndex.drop(['foo', 'bar'], errors='ignore')\n expected = self.strIndex[lrange(n)]\n tm.assert_index_equal(dropped, expected)\n\n dropped = self.strIndex.drop(self.strIndex[0])\n expected = self.strIndex[1:]\n tm.assert_index_equal(dropped, expected)\n\n ser = Index([1, 2, 3])\n dropped = ser.drop(1)\n expected = Index([2, 3])\n tm.assert_index_equal(dropped, expected)\n\n # errors='ignore'\n pytest.raises(KeyError, ser.drop, [3, 4])\n\n dropped = ser.drop(4, errors='ignore')\n expected = Index([1, 2, 3])\n tm.assert_index_equal(dropped, expected)\n\n dropped = ser.drop([3, 4, 5], errors='ignore')\n expected = Index([1, 2])\n tm.assert_index_equal(dropped, expected)\n\n @pytest.mark.parametrize(\"values\", [['a', 'b', ('c', 'd')],\n ['a', ('c', 'd'), 'b'],\n [('c', 'd'), 'a', 'b']])\n @pytest.mark.parametrize(\"to_drop\", [[('c', 'd'), 'a'], ['a', ('c', 'd')]])\n def 
test_drop_tuple(self, values, to_drop):\n # GH 18304\n index = pd.Index(values)\n expected = pd.Index(['b'])\n\n result = index.drop(to_drop)\n tm.assert_index_equal(result, expected)\n\n removed = index.drop(to_drop[0])\n for drop_me in to_drop[1], [to_drop[1]]:\n result = removed.drop(drop_me)\n tm.assert_index_equal(result, expected)\n\n removed = index.drop(to_drop[1])\n for drop_me in to_drop[1], [to_drop[1]]:\n pytest.raises(KeyError, removed.drop, drop_me)\n\n def test_tuple_union_bug(self):\n import pandas\n import numpy as np\n\n aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],\n dtype=[('num', int), ('let', 'a1')])\n aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'),\n (2, 'B'), (1, 'C'), (2, 'C')],\n dtype=[('num', int), ('let', 'a1')])\n\n idx1 = pandas.Index(aidx1)\n idx2 = pandas.Index(aidx2)\n\n # intersection broken?\n int_idx = idx1.intersection(idx2)\n # needs to be 1d like idx1 and idx2\n expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))\n assert int_idx.ndim == 1\n tm.assert_index_equal(int_idx, expected)\n\n # union broken\n union_idx = idx1.union(idx2)\n expected = idx2\n assert union_idx.ndim == 1\n tm.assert_index_equal(union_idx, expected)\n\n def test_is_monotonic_incomparable(self):\n index = Index([5, datetime.now(), 7])\n assert not index.is_monotonic_increasing\n assert not index.is_monotonic_decreasing\n assert not index._is_strictly_monotonic_increasing\n assert not index._is_strictly_monotonic_decreasing\n\n def test_get_set_value(self):\n values = np.random.randn(100)\n date = self.dateIndex[67]\n\n assert_almost_equal(self.dateIndex.get_value(values, date), values[67])\n\n self.dateIndex.set_value(values, date, 10)\n assert values[67] == 10\n\n def test_isin(self):\n values = ['foo', 'bar', 'quux']\n\n idx = Index(['qux', 'baz', 'foo', 'bar'])\n result = idx.isin(values)\n expected = np.array([False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n # set\n result = idx.isin(set(values))\n tm.assert_numpy_array_equal(result, expected)\n\n # empty, return dtype bool\n idx = Index([])\n result = idx.isin(values)\n assert len(result) == 0\n assert result.dtype == np.bool_\n\n @pytest.mark.skipif(PYPY, reason=\"np.nan is float('nan') on PyPy\")\n def test_isin_nan_not_pypy(self):\n tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]),\n np.array([False, False]))\n\n @pytest.mark.skipif(not PYPY, reason=\"np.nan is float('nan') on PyPy\")\n def test_isin_nan_pypy(self):\n tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([float('nan')]),\n np.array([False, True]))\n\n def test_isin_nan_common(self):\n tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([np.nan]),\n np.array([False, True]))\n tm.assert_numpy_array_equal(Index(['a', pd.NaT]).isin([pd.NaT]),\n np.array([False, True]))\n tm.assert_numpy_array_equal(Index(['a', np.nan]).isin([pd.NaT]),\n np.array([False, False]))\n\n # Float64Index overrides isin, so must be checked separately\n tm.assert_numpy_array_equal(Float64Index([1.0, np.nan]).isin([np.nan]),\n np.array([False, True]))\n tm.assert_numpy_array_equal(\n Float64Index([1.0, np.nan]).isin([float('nan')]),\n np.array([False, True]))\n\n # we cannot compare NaT with NaN\n tm.assert_numpy_array_equal(Float64Index([1.0, np.nan]).isin([pd.NaT]),\n np.array([False, False]))\n\n def test_isin_level_kwarg(self):\n def check_idx(idx):\n values = idx.tolist()[-2:] + ['nonexisting']\n\n expected = np.array([False, False, True, True])\n tm.assert_numpy_array_equal(expected, idx.isin(values, 
level=0))\n tm.assert_numpy_array_equal(expected, idx.isin(values, level=-1))\n\n pytest.raises(IndexError, idx.isin, values, level=1)\n pytest.raises(IndexError, idx.isin, values, level=10)\n pytest.raises(IndexError, idx.isin, values, level=-2)\n\n pytest.raises(KeyError, idx.isin, values, level=1.0)\n pytest.raises(KeyError, idx.isin, values, level='foobar')\n\n idx.name = 'foobar'\n tm.assert_numpy_array_equal(expected,\n idx.isin(values, level='foobar'))\n\n pytest.raises(KeyError, idx.isin, values, level='xyzzy')\n pytest.raises(KeyError, idx.isin, values, level=np.nan)\n\n check_idx(Index(['qux', 'baz', 'foo', 'bar']))\n # Float64Index overrides isin, so must be checked separately\n check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))\n\n @pytest.mark.parametrize(\"empty\", [[], Series(), np.array([])])\n def test_isin_empty(self, empty):\n # see gh-16991\n idx = Index([\"a\", \"b\"])\n expected = np.array([False, False])\n\n result = idx.isin(empty)\n tm.assert_numpy_array_equal(expected, result)\n\n def test_boolean_cmp(self):\n values = [1, 2, 3, 4]\n\n idx = Index(values)\n res = (idx == values)\n\n tm.assert_numpy_array_equal(res, np.array(\n [True, True, True, True], dtype=bool))\n\n def test_get_level_values(self):\n result = self.strIndex.get_level_values(0)\n tm.assert_index_equal(result, self.strIndex)\n\n # test for name (GH 17414)\n index_with_name = self.strIndex.copy()\n index_with_name.name = 'a'\n result = index_with_name.get_level_values('a')\n tm.assert_index_equal(result, index_with_name)\n\n def test_slice_keep_name(self):\n idx = Index(['a', 'b'], name='asdf')\n assert idx.name == idx[1:].name\n\n # instance attributes of the form self.<name>Index\n @pytest.mark.parametrize('index_kind',\n ['unicode', 'str', 'date', 'int', 'float'])\n def test_join_self(self, join_type, index_kind):\n\n res = getattr(self, '{0}Index'.format(index_kind))\n\n joined = res.join(res, how=join_type)\n assert res is joined\n\n def test_str_attribute(self):\n # GH9068\n methods = ['strip', 'rstrip', 'lstrip']\n idx = Index([' jack', 'jill ', ' jesse ', 'frank'])\n for method in methods:\n expected = Index([getattr(str, method)(x) for x in idx.values])\n tm.assert_index_equal(\n getattr(Index.str, method)(idx.str), expected)\n\n # create a few instances that are not able to use .str accessor\n indices = [Index(range(5)), tm.makeDateIndex(10),\n MultiIndex.from_tuples([('foo', '1'), ('bar', '3')]),\n PeriodIndex(start='2000', end='2010', freq='A')]\n for idx in indices:\n with tm.assert_raises_regex(AttributeError,\n 'only use .str accessor'):\n idx.str.repeat(2)\n\n idx = Index(['a b c', 'd e', 'f'])\n expected = Index([['a', 'b', 'c'], ['d', 'e'], ['f']])\n tm.assert_index_equal(idx.str.split(), expected)\n tm.assert_index_equal(idx.str.split(expand=False), expected)\n\n expected = MultiIndex.from_tuples([('a', 'b', 'c'), ('d', 'e', np.nan),\n ('f', np.nan, np.nan)])\n tm.assert_index_equal(idx.str.split(expand=True), expected)\n\n # test boolean case, should return np.array instead of boolean Index\n idx = Index(['a1', 'a2', 'b1', 'b2'])\n expected = np.array([True, True, False, False])\n tm.assert_numpy_array_equal(idx.str.startswith('a'), expected)\n assert isinstance(idx.str.startswith('a'), np.ndarray)\n s = Series(range(4), index=idx)\n expected = Series(range(2), index=['a1', 'a2'])\n tm.assert_series_equal(s[s.index.str.startswith('a')], expected)\n\n def test_tab_completion(self):\n # GH 9910\n idx = Index(list('abcd'))\n assert 'str' in dir(idx)\n\n idx = Index(range(4))\n assert 
'str' not in dir(idx)\n\n def test_indexing_doesnt_change_class(self):\n idx = Index([1, 2, 3, 'a', 'b', 'c'])\n\n assert idx[1:3].identical(pd.Index([2, 3], dtype=np.object_))\n assert idx[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_))\n\n def test_outer_join_sort(self):\n left_idx = Index(np.random.permutation(15))\n right_idx = tm.makeDateIndex(10)\n\n with tm.assert_produces_warning(RuntimeWarning):\n joined = left_idx.join(right_idx, how='outer')\n\n # right_idx in this case because DatetimeIndex has join precedence over\n # Int64Index\n with tm.assert_produces_warning(RuntimeWarning):\n expected = right_idx.astype(object).union(left_idx.astype(object))\n tm.assert_index_equal(joined, expected)\n\n def test_nan_first_take_datetime(self):\n idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])\n res = idx.take([-1, 0, 1])\n exp = Index([idx[-1], idx[0], idx[1]])\n tm.assert_index_equal(res, exp)\n\n def test_take_fill_value(self):\n # GH 12631\n idx = pd.Index(list('ABC'), name='xxx')\n result = idx.take(np.array([1, 0, -1]))\n expected = pd.Index(list('BAC'), name='xxx')\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = pd.Index(['B', 'A', np.nan], name='xxx')\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n expected = pd.Index(['B', 'A', 'C'], name='xxx')\n tm.assert_index_equal(result, expected)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with tm.assert_raises_regex(ValueError, msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with tm.assert_raises_regex(ValueError, msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n with pytest.raises(IndexError):\n idx.take(np.array([1, -5]))\n\n def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):\n # GH6552\n idx = pd.Index([0, 1, 2])\n\n dt_idx = pd.date_range('20130101', periods=3)\n\n idx.name = None\n assert idx.reindex([])[0].name is None\n assert idx.reindex(np.array([]))[0].name is None\n assert idx.reindex(idx.tolist())[0].name is None\n assert idx.reindex(idx.tolist()[:-1])[0].name is None\n assert idx.reindex(idx.values)[0].name is None\n assert idx.reindex(idx.values[:-1])[0].name is None\n\n # Must preserve name even if dtype changes.\n assert idx.reindex(dt_idx.values)[0].name is None\n assert idx.reindex(dt_idx.tolist())[0].name is None\n\n idx.name = 'foobar'\n assert idx.reindex([])[0].name == 'foobar'\n assert idx.reindex(np.array([]))[0].name == 'foobar'\n assert idx.reindex(idx.tolist())[0].name == 'foobar'\n assert idx.reindex(idx.tolist()[:-1])[0].name == 'foobar'\n assert idx.reindex(idx.values)[0].name == 'foobar'\n assert idx.reindex(idx.values[:-1])[0].name == 'foobar'\n\n # Must preserve name even if dtype changes.\n assert idx.reindex(dt_idx.values)[0].name == 'foobar'\n assert idx.reindex(dt_idx.tolist())[0].name == 'foobar'\n\n def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):\n # GH7774\n idx = pd.Index(list('abc'))\n\n def get_reindex_type(target):\n return idx.reindex(target)[0].dtype.type\n\n assert get_reindex_type([]) == np.object_\n assert get_reindex_type(np.array([])) == np.object_\n assert get_reindex_type(np.array([], dtype=np.int64)) == np.object_\n\n def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):\n # GH7774\n idx = pd.Index(list('abc'))\n\n def get_reindex_type(target):\n return 
idx.reindex(target)[0].dtype.type\n\n assert get_reindex_type(pd.Int64Index([])) == np.int64\n assert get_reindex_type(pd.Float64Index([])) == np.float64\n assert get_reindex_type(pd.DatetimeIndex([])) == np.datetime64\n\n reindexed = idx.reindex(pd.MultiIndex(\n [pd.Int64Index([]), pd.Float64Index([])], [[], []]))[0]\n assert reindexed.levels[0].dtype.type == np.int64\n assert reindexed.levels[1].dtype.type == np.float64\n\n def test_groupby(self):\n idx = Index(range(5))\n groups = idx.groupby(np.array([1, 1, 2, 2, 2]))\n exp = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])}\n tm.assert_dict_equal(groups, exp)\n\n def test_equals_op_multiindex(self):\n # GH9785\n # test comparisons of multiindex\n from pandas.compat import StringIO\n df = pd.read_csv(StringIO('a,b,c\\n1,2,3\\n4,5,6'), index_col=[0, 1])\n tm.assert_numpy_array_equal(df.index == df.index,\n np.array([True, True]))\n\n mi1 = MultiIndex.from_tuples([(1, 2), (4, 5)])\n tm.assert_numpy_array_equal(df.index == mi1, np.array([True, True]))\n mi2 = MultiIndex.from_tuples([(1, 2), (4, 6)])\n tm.assert_numpy_array_equal(df.index == mi2, np.array([True, False]))\n mi3 = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n df.index == mi3\n\n index_a = Index(['foo', 'bar', 'baz'])\n with tm.assert_raises_regex(ValueError, \"Lengths must match\"):\n df.index == index_a\n tm.assert_numpy_array_equal(index_a == mi3,\n np.array([False, False, False]))\n\n def test_conversion_preserves_name(self):\n # GH 10875\n i = pd.Index(['01:02:03', '01:02:04'], name='label')\n assert i.name == pd.to_datetime(i).name\n assert i.name == pd.to_timedelta(i).name\n\n def test_string_index_repr(self):\n # py3/py2 repr can differ because of \"u\" prefix\n # which also affects to displayed element size\n\n if PY3:\n coerce = lambda x: x\n else:\n coerce = unicode # noqa\n\n # short\n idx = pd.Index(['a', 'bb', 'ccc'])\n if PY3:\n expected = u\"\"\"Index(['a', 'bb', 'ccc'], dtype='object')\"\"\"\n assert repr(idx) == expected\n else:\n expected = u\"\"\"Index([u'a', u'bb', u'ccc'], dtype='object')\"\"\"\n assert coerce(idx) == expected\n\n # multiple lines\n idx = pd.Index(['a', 'bb', 'ccc'] * 10)\n if PY3:\n expected = u\"\"\"\\\nIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',\n 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',\n 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n dtype='object')\"\"\"\n\n assert repr(idx) == expected\n else:\n expected = u\"\"\"\\\nIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',\n u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',\n u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],\n dtype='object')\"\"\"\n\n assert coerce(idx) == expected\n\n # truncated\n idx = pd.Index(['a', 'bb', 'ccc'] * 100)\n if PY3:\n expected = u\"\"\"\\\nIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n ...\n 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n dtype='object', length=300)\"\"\"\n\n assert repr(idx) == expected\n else:\n expected = u\"\"\"\\\nIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',\n ...\n u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],\n dtype='object', length=300)\"\"\"\n\n assert coerce(idx) == expected\n\n # short\n idx = pd.Index([u'あ', u'いい', u'ううう'])\n if PY3:\n expected = u\"\"\"Index(['あ', 'いい', 'ううう'], dtype='object')\"\"\"\n assert repr(idx) == expected\n else:\n 
expected = u\"\"\"Index([u'あ', u'いい', u'ううう'], dtype='object')\"\"\"\n assert coerce(idx) == expected\n\n # multiple lines\n idx = pd.Index([u'あ', u'いい', u'ううう'] * 10)\n if PY3:\n expected = (u\"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', \"\n u\"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\\n\"\n u\" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', \"\n u\"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\\n\"\n u\" 'あ', 'いい', 'ううう', 'あ', 'いい', \"\n u\"'ううう'],\\n\"\n u\" dtype='object')\")\n assert repr(idx) == expected\n else:\n expected = (u\"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', \"\n u\"u'ううう', u'あ', u'いい', u'ううう', u'あ',\\n\"\n u\" u'いい', u'ううう', u'あ', u'いい', u'ううう', \"\n u\"u'あ', u'いい', u'ううう', u'あ', u'いい',\\n\"\n u\" u'ううう', u'あ', u'いい', u'ううう', u'あ', \"\n u\"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\\n\"\n u\" dtype='object')\")\n assert coerce(idx) == expected\n\n # truncated\n idx = pd.Index([u'あ', u'いい', u'ううう'] * 100)\n if PY3:\n expected = (u\"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', \"\n u\"'あ', 'いい', 'ううう', 'あ',\\n\"\n u\" ...\\n\"\n u\" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', \"\n u\"'ううう', 'あ', 'いい', 'ううう'],\\n\"\n u\" dtype='object', length=300)\")\n assert repr(idx) == expected\n else:\n expected = (u\"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', \"\n u\"u'ううう', u'あ', u'いい', u'ううう', u'あ',\\n\"\n u\" ...\\n\"\n u\" u'ううう', u'あ', u'いい', u'ううう', u'あ', \"\n u\"u'いい', u'ううう', u'あ', u'いい', u'ううう'],\\n\"\n u\" dtype='object', length=300)\")\n\n assert coerce(idx) == expected\n\n # Emable Unicode option -----------------------------------------\n with cf.option_context('display.unicode.east_asian_width', True):\n\n # short\n idx = pd.Index([u'あ', u'いい', u'ううう'])\n if PY3:\n expected = (u\"Index(['あ', 'いい', 'ううう'], \"\n u\"dtype='object')\")\n assert repr(idx) == expected\n else:\n expected = (u\"Index([u'あ', u'いい', u'ううう'], \"\n u\"dtype='object')\")\n assert coerce(idx) == expected\n\n # multiple lines\n idx = pd.Index([u'あ', u'いい', u'ううう'] * 10)\n if PY3:\n expected = (u\"Index(['あ', 'いい', 'ううう', 'あ', 'いい', \"\n u\"'ううう', 'あ', 'いい', 'ううう',\\n\"\n u\" 'あ', 'いい', 'ううう', 'あ', 'いい', \"\n u\"'ううう', 'あ', 'いい', 'ううう',\\n\"\n u\" 'あ', 'いい', 'ううう', 'あ', 'いい', \"\n u\"'ううう', 'あ', 'いい', 'ううう',\\n\"\n u\" 'あ', 'いい', 'ううう'],\\n\"\n u\" dtype='object')\"\"\")\n\n assert repr(idx) == expected\n else:\n expected = (u\"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', \"\n u\"u'ううう', u'あ', u'いい',\\n\"\n u\" u'ううう', u'あ', u'いい', u'ううう', \"\n u\"u'あ', u'いい', u'ううう', u'あ',\\n\"\n u\" u'いい', u'ううう', u'あ', u'いい', \"\n u\"u'ううう', u'あ', u'いい',\\n\"\n u\" u'ううう', u'あ', u'いい', u'ううう', \"\n u\"u'あ', u'いい', u'ううう'],\\n\"\n u\" dtype='object')\")\n\n assert coerce(idx) == expected\n\n # truncated\n idx = pd.Index([u'あ', u'いい', u'ううう'] * 100)\n if PY3:\n expected = (u\"Index(['あ', 'いい', 'ううう', 'あ', 'いい', \"\n u\"'ううう', 'あ', 'いい', 'ううう',\\n\"\n u\" 'あ',\\n\"\n u\" ...\\n\"\n u\" 'ううう', 'あ', 'いい', 'ううう', 'あ', \"\n u\"'いい', 'ううう', 'あ', 'いい',\\n\"\n u\" 'ううう'],\\n\"\n u\" dtype='object', length=300)\")\n\n assert repr(idx) == expected\n else:\n expected = (u\"Index([u'あ', u'いい', u'ううう', u'あ', u'いい', \"\n u\"u'ううう', u'あ', u'いい',\\n\"\n u\" u'ううう', u'あ',\\n\"\n u\" ...\\n\"\n u\" u'ううう', u'あ', u'いい', u'ううう', \"\n u\"u'あ', u'いい', u'ううう', u'あ',\\n\"\n u\" u'いい', u'ううう'],\\n\"\n u\" dtype='object', length=300)\")\n\n assert coerce(idx) == expected\n\n @pytest.mark.parametrize('dtype', [np.int64, np.float64])\n @pytest.mark.parametrize('delta', [1, 0, -1])\n def test_addsub_arithmetic(self, dtype, delta):\n # GH 8142\n delta = dtype(delta)\n idx = pd.Index([10, 11, 12], 
dtype=dtype)\n result = idx + delta\n expected = pd.Index(idx.values + delta, dtype=dtype)\n tm.assert_index_equal(result, expected)\n\n # this subtraction used to fail\n result = idx - delta\n expected = pd.Index(idx.values - delta, dtype=dtype)\n tm.assert_index_equal(result, expected)\n\n tm.assert_index_equal(idx + idx, 2 * idx)\n tm.assert_index_equal(idx - idx, 0 * idx)\n assert not (idx - idx).empty\n\n def test_iadd_preserves_name(self):\n # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name\n ser = pd.Series([1, 2, 3])\n ser.index.name = 'foo'\n\n ser.index += 1\n assert ser.index.name == \"foo\"\n\n ser.index -= 1\n assert ser.index.name == \"foo\"\n\n def test_cached_properties_not_settable(self):\n idx = pd.Index([1, 2, 3])\n with tm.assert_raises_regex(AttributeError, \"Can't set attribute\"):\n idx.is_unique = False\n\n\nclass TestMixedIntIndex(Base):\n # Mostly the tests from common.py for which the results differ\n # in py2 and py3 because ints and strings are uncomparable in py3\n # (GH 13514)\n\n _holder = Index\n\n def setup_method(self, method):\n self.indices = dict(mixedIndex=Index([0, 'a', 1, 'b', 2, 'c']))\n self.setup_indices()\n\n def create_index(self):\n return self.mixedIndex\n\n def test_argsort(self):\n idx = self.create_index()\n if PY36:\n with tm.assert_raises_regex(TypeError, \"'>|<' not supported\"):\n result = idx.argsort()\n elif PY3:\n with tm.assert_raises_regex(TypeError, \"unorderable types\"):\n result = idx.argsort()\n else:\n result = idx.argsort()\n expected = np.array(idx).argsort()\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n def test_numpy_argsort(self):\n idx = self.create_index()\n if PY36:\n with tm.assert_raises_regex(TypeError, \"'>|<' not supported\"):\n result = np.argsort(idx)\n elif PY3:\n with tm.assert_raises_regex(TypeError, \"unorderable types\"):\n result = np.argsort(idx)\n else:\n result = np.argsort(idx)\n expected = idx.argsort()\n tm.assert_numpy_array_equal(result, expected)\n\n def test_copy_name(self):\n # Check that \"name\" argument passed at initialization is honoured\n # GH12309\n idx = self.create_index()\n\n first = idx.__class__(idx, copy=True, name='mario')\n second = first.__class__(first, copy=False)\n\n # Even though \"copy=False\", we want a new object.\n assert first is not second\n # Not using tm.assert_index_equal() since names differ:\n assert idx.equals(first)\n\n assert first.name == 'mario'\n assert second.name == 'mario'\n\n s1 = Series(2, index=first)\n s2 = Series(3, index=second[:-1])\n\n warning_type = RuntimeWarning if PY3 else None\n with tm.assert_produces_warning(warning_type):\n # Python 3: Unorderable types\n s3 = s1 * s2\n\n assert s3.index.name == 'mario'\n\n def test_copy_name2(self):\n # Check that adding a \"name\" parameter to the copy is honored\n # GH14302\n idx = pd.Index([1, 2], name='MyName')\n idx1 = idx.copy()\n\n assert idx.equals(idx1)\n assert idx.name == 'MyName'\n assert idx1.name == 'MyName'\n\n idx2 = idx.copy(name='NewName')\n\n assert idx.equals(idx2)\n assert idx.name == 'MyName'\n assert idx2.name == 'NewName'\n\n idx3 = idx.copy(names=['NewName'])\n\n assert idx.equals(idx3)\n assert idx.name == 'MyName'\n assert idx.names == ['MyName']\n assert idx3.name == 'NewName'\n assert idx3.names == ['NewName']\n\n def test_union_base(self):\n idx = self.create_index()\n first = idx[3:]\n second = idx[:5]\n\n if PY3:\n with tm.assert_produces_warning(RuntimeWarning):\n # unorderable types\n result = first.union(second)\n expected = 
Index(['b', 2, 'c', 0, 'a', 1])\n tm.assert_index_equal(result, expected)\n else:\n result = first.union(second)\n expected = Index(['b', 2, 'c', 0, 'a', 1])\n tm.assert_index_equal(result, expected)\n\n # GH 10149\n cases = [klass(second.values)\n for klass in [np.array, Series, list]]\n for case in cases:\n if PY3:\n with tm.assert_produces_warning(RuntimeWarning):\n # unorderable types\n result = first.union(case)\n assert tm.equalContents(result, idx)\n else:\n result = first.union(case)\n assert tm.equalContents(result, idx)\n\n def test_intersection_base(self):\n # (same results for py2 and py3 but sortedness not tested elsewhere)\n idx = self.create_index()\n first = idx[:5]\n second = idx[:3]\n result = first.intersection(second)\n expected = Index([0, 'a', 1])\n tm.assert_index_equal(result, expected)\n\n # GH 10149\n cases = [klass(second.values)\n for klass in [np.array, Series, list]]\n for case in cases:\n result = first.intersection(case)\n assert tm.equalContents(result, second)\n\n def test_difference_base(self):\n # (same results for py2 and py3 but sortedness not tested elsewhere)\n idx = self.create_index()\n first = idx[:4]\n second = idx[3:]\n\n result = first.difference(second)\n expected = Index([0, 1, 'a'])\n tm.assert_index_equal(result, expected)\n\n def test_symmetric_difference(self):\n # (same results for py2 and py3 but sortedness not tested elsewhere)\n idx = self.create_index()\n first = idx[:4]\n second = idx[3:]\n\n result = first.symmetric_difference(second)\n expected = Index([0, 1, 2, 'a', 'c'])\n tm.assert_index_equal(result, expected)\n\n def test_logical_compat(self):\n idx = self.create_index()\n assert idx.all() == idx.values.all()\n assert idx.any() == idx.values.any()\n\n def test_dropna(self):\n # GH 6194\n for dtype in [None, object, 'category']:\n idx = pd.Index([1, 2, 3], dtype=dtype)\n tm.assert_index_equal(idx.dropna(), idx)\n\n idx = pd.Index([1., 2., 3.], dtype=dtype)\n tm.assert_index_equal(idx.dropna(), idx)\n nanidx = pd.Index([1., 2., np.nan, 3.], dtype=dtype)\n tm.assert_index_equal(nanidx.dropna(), idx)\n\n idx = pd.Index(['A', 'B', 'C'], dtype=dtype)\n tm.assert_index_equal(idx.dropna(), idx)\n nanidx = pd.Index(['A', np.nan, 'B', 'C'], dtype=dtype)\n tm.assert_index_equal(nanidx.dropna(), idx)\n\n tm.assert_index_equal(nanidx.dropna(how='any'), idx)\n tm.assert_index_equal(nanidx.dropna(how='all'), idx)\n\n idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'])\n tm.assert_index_equal(idx.dropna(), idx)\n nanidx = pd.DatetimeIndex(['2011-01-01', '2011-01-02',\n '2011-01-03', pd.NaT])\n tm.assert_index_equal(nanidx.dropna(), idx)\n\n idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days'])\n tm.assert_index_equal(idx.dropna(), idx)\n nanidx = pd.TimedeltaIndex([pd.NaT, '1 days', '2 days',\n '3 days', pd.NaT])\n tm.assert_index_equal(nanidx.dropna(), idx)\n\n idx = pd.PeriodIndex(['2012-02', '2012-04', '2012-05'], freq='M')\n tm.assert_index_equal(idx.dropna(), idx)\n nanidx = pd.PeriodIndex(['2012-02', '2012-04', 'NaT', '2012-05'],\n freq='M')\n tm.assert_index_equal(nanidx.dropna(), idx)\n\n msg = \"invalid how option: xxx\"\n with tm.assert_raises_regex(ValueError, msg):\n pd.Index([1, 2, 3]).dropna(how='xxx')\n\n def test_get_combined_index(self):\n result = _get_combined_index([])\n tm.assert_index_equal(result, Index([]))\n\n def test_repeat(self):\n repeats = 2\n idx = pd.Index([1, 2, 3])\n expected = pd.Index([1, 1, 2, 2, 3, 3])\n\n result = idx.repeat(repeats)\n tm.assert_index_equal(result, expected)\n\n with 
tm.assert_produces_warning(FutureWarning):\n result = idx.repeat(n=repeats)\n tm.assert_index_equal(result, expected)\n\n def test_is_monotonic_na(self):\n examples = [pd.Index([np.nan]),\n pd.Index([np.nan, 1]),\n pd.Index([1, 2, np.nan]),\n pd.Index(['a', 'b', np.nan]),\n pd.to_datetime(['NaT']),\n pd.to_datetime(['NaT', '2000-01-01']),\n pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),\n pd.to_timedelta(['1 day', 'NaT']), ]\n for index in examples:\n assert not index.is_monotonic_increasing\n assert not index.is_monotonic_decreasing\n assert not index._is_strictly_monotonic_increasing\n assert not index._is_strictly_monotonic_decreasing\n\n def test_repr_summary(self):\n with cf.option_context('display.max_seq_items', 10):\n r = repr(pd.Index(np.arange(1000)))\n assert len(r) < 200\n assert \"...\" in r\n\n def test_int_name_format(self):\n index = Index(['a', 'b', 'c'], name=0)\n s = Series(lrange(3), index)\n df = DataFrame(lrange(3), index=index)\n repr(s)\n repr(df)\n\n def test_print_unicode_columns(self):\n df = pd.DataFrame({u(\"\\u05d0\"): [1, 2, 3],\n \"\\u05d1\": [4, 5, 6],\n \"c\": [7, 8, 9]})\n repr(df.columns) # should not raise UnicodeDecodeError\n\n def test_unicode_string_with_unicode(self):\n idx = Index(lrange(1000))\n\n if PY3:\n str(idx)\n else:\n text_type(idx)\n\n def test_bytestring_with_unicode(self):\n idx = Index(lrange(1000))\n if PY3:\n bytes(idx)\n else:\n str(idx)\n\n def test_intersect_str_dates(self):\n dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]\n\n i1 = Index(dt_dates, dtype=object)\n i2 = Index(['aa'], dtype=object)\n res = i2.intersection(i1)\n\n assert len(res) == 0\n\n @pytest.mark.parametrize('op', [operator.eq, operator.ne,\n operator.gt, operator.ge,\n operator.lt, operator.le])\n def test_comparison_tzawareness_compat(self, op):\n # GH#18162\n dr = pd.date_range('2016-01-01', periods=6)\n dz = dr.tz_localize('US/Pacific')\n\n # Check that there isn't a problem aware-aware and naive-naive do not\n # raise\n naive_series = Series(dr)\n aware_series = Series(dz)\n with pytest.raises(TypeError):\n op(dz, naive_series)\n with pytest.raises(TypeError):\n op(dr, aware_series)\n\n # TODO: implement _assert_tzawareness_compat for the reverse\n # comparison with the Series on the left-hand side\n\n\nclass TestIndexUtils(object):\n\n @pytest.mark.parametrize('data, names, expected', [\n ([[1, 2, 3]], None, Index([1, 2, 3])),\n ([[1, 2, 3]], ['name'], Index([1, 2, 3], name='name')),\n ([['a', 'a'], ['c', 'd']], None,\n MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]])),\n ([['a', 'a'], ['c', 'd']], ['L1', 'L2'],\n MultiIndex([['a'], ['c', 'd']], [[0, 0], [0, 1]],\n names=['L1', 'L2'])),\n ])\n def test_ensure_index_from_sequences(self, data, names, expected):\n result = _ensure_index_from_sequences(data, names)\n tm.assert_index_equal(result, expected)\n\n\[email protected]('opname', ['eq', 'ne', 'le', 'lt', 'ge', 'gt',\n 'add', 'radd', 'sub', 'rsub',\n 'mul', 'rmul', 'truediv', 'rtruediv',\n 'floordiv', 'rfloordiv',\n 'pow', 'rpow', 'mod', 'divmod'])\ndef test_generated_op_names(opname, indices):\n index = indices\n if isinstance(index, ABCIndex) and opname == 'rsub':\n # pd.Index.__rsub__ does not exist; though the method does exist\n # for subclasses. 
see GH#19723\n return\n opname = '__{name}__'.format(name=opname)\n method = getattr(index, opname)\n assert method.__name__ == opname\n\n\[email protected]('idx_maker', tm.index_subclass_makers_generator())\ndef test_index_subclass_constructor_wrong_kwargs(idx_maker):\n # GH #19348\n with tm.assert_raises_regex(TypeError, 'unexpected keyword argument'):\n idx_maker(foo='bar')\n"
] | [
[
"pandas.core.computation.expressions.set_use_numexpr",
"pandas.core.common._all_not_none",
"pandas.PeriodIndex",
"numpy.linspace",
"pandas.Series",
"pandas.RangeIndex",
"pandas.compat.wraps",
"pandas.Panel.fromDict",
"numpy.random.random_sample",
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"numpy.round",
"pandas.compat.map",
"numpy.random.randn",
"pandas.compat.iteritems",
"pandas.core.dtypes.common.is_datetimelike_v_object",
"pandas.compat.lzip",
"pandas._libs.testing.assert_dict_equal",
"pandas.reset_option",
"pandas.IntervalIndex",
"pandas.io.common.urlopen",
"numpy.random.randint",
"pandas.compat.import_lzma",
"pandas.core.dtypes.common.is_interval_dtype",
"numpy.arange",
"pandas.compat.StringIO",
"pandas.compat.callable",
"pandas.compat.raise_with_traceback",
"pandas.Index",
"pandas.DatetimeIndex",
"numpy.random.set_state",
"pandas.core.dtypes.common.is_number",
"pandas.compat.lmap",
"matplotlib.pyplot.close",
"pandas.to_pickle",
"pandas.core.dtypes.common.is_datetimelike_v_numeric",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.compat.u",
"pandas.bdate_range",
"numpy.random.choice",
"numpy.isnan",
"pandas.core.dtypes.common.is_sequence",
"pandas.MultiIndex.from_product",
"numpy.random.rand",
"matplotlib.pyplot.get_fignums",
"numpy.floor",
"numpy.random.RandomState",
"numpy.array",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.core.dtypes.common.is_bool",
"numpy.random.get_state",
"pandas.TimedeltaIndex",
"numpy.random.seed",
"pandas.core.algorithms.take_1d",
"pandas._libs.testing.assert_almost_equal",
"pandas.core.dtypes.missing.array_equivalent",
"pandas.IntervalIndex.from_breaks",
"pandas.compat.Counter",
"pandas.compat.zip",
"numpy.prod",
"pandas.read_pickle",
"pandas.io.formats.printing.pprint_thing",
"pandas.compat.lrange",
"pandas.compat.range"
],
[
"pandas.to_datetime",
"pandas.util.testing.index_subclass_makers_generator",
"pandas.Series",
"pandas.PeriodIndex",
"pandas.RangeIndex",
"pandas.util.testing.assert_contains_all",
"pandas.util.testing.assert_produces_warning",
"pandas.compat.range",
"pandas.util.testing.assert_index_equal",
"numpy.random.randn",
"pandas.util.testing.makeUnicodeIndex",
"pandas.compat.lzip",
"pandas.core.dtypes.common.is_unsigned_integer_dtype",
"pandas.util.testing.assert_numpy_array_equal",
"numpy.arange",
"pandas.compat.StringIO",
"pandas.compat.text_type",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas.util.testing.assert_series_equal",
"pandas.core.config.option_context",
"pandas._libs.tslib.Timestamp",
"pandas.util.testing.makeDateIndex",
"pandas.Int64Index",
"pandas.core.indexes.datetimes._to_m8",
"pandas.core.index._ensure_index_from_sequences",
"pandas.util.testing.equalContents",
"pandas.core.index._get_combined_index",
"pandas.compat.u",
"pandas.util.testing.makeRangeIndex",
"pandas.compat.numpy.np_datetime64_compat",
"pandas.util.testing.makeCategoricalIndex",
"pandas.Float64Index",
"pandas.util.testing.assert_almost_equal",
"pandas.util.testing.makeStringIndex",
"numpy.timedelta64",
"pandas.core.indexes.api.Index",
"numpy.append",
"numpy.random.rand",
"pandas.date_range",
"pandas.util.testing.makeTimedeltaIndex",
"numpy.argsort",
"numpy.array",
"pandas.infer_freq",
"pandas.timedelta_range",
"pandas.util.testing.assert_dict_equal",
"pandas.TimedeltaIndex",
"pandas.period_range",
"pandas.util.testing.assert_raises_regex",
"numpy.datetime64",
"pandas.util.testing.makeIntIndex",
"pandas.core.indexes.api.MultiIndex",
"numpy.random.permutation",
"pandas.compat.zip",
"pandas.compat.lrange",
"pandas.to_timedelta",
"pandas.util.testing.makeFloatIndex",
"pandas.util.testing.makeUIntIndex",
"pandas.util.testing.makePeriodIndex",
"pandas.core.indexes.api.MultiIndex.from_tuples"
]
] |
ScottBrian/scottbrian_algo1 | [
"57cd8fc5674507db51b1c887d5f9a68462b0ca9d"
] | [
"tests/test_scottbrian_algo1/test_algo_api.py"
] | [
"\"\"\"test_algo_api.py module.\"\"\"\n\n# from datetime import datetime, timedelta\nimport pytest\n# import sys\n# from pathlib import Path\nimport numpy as np\nimport pandas as pd # type: ignore\nimport string\nimport math\n\nfrom typing import Any, List, NamedTuple\n# from typing_extensions import Final\n\nfrom ibapi.tag_value import TagValue # type: ignore\nfrom ibapi.contract import ComboLeg # type: ignore\nfrom ibapi.contract import DeltaNeutralContract\nfrom ibapi.contract import Contract, ContractDetails\n\nfrom scottbrian_algo1.algo_api import AlgoApp, AlreadyConnected, \\\n DisconnectLockHeld, ConnectTimeout, RequestTimeout, DisconnectDuringRequest\n\nfrom scottbrian_algo1.algo_maps import get_contract_dict, get_contract_obj\nfrom scottbrian_algo1.algo_maps import get_contract_details_obj\n\n# from scottbrian_utils.diag_msg import diag_msg\n# from scottbrian_utils.file_catalog import FileCatalog\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n###############################################################################\n# TestAlgoAppConnect class\n###############################################################################\nclass TestAlgoAppConnect:\n \"\"\"TestAlgoAppConnect class.\"\"\"\n\n def test_mock_connect_to_ib(self,\n algo_app: \"AlgoApp\"\n ) -> None:\n \"\"\"Test connecting to IB.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n # we are testing connect_to_ib and the subsequent code that gets\n # control as a result, such as getting the first requestID and then\n # starting a separate thread for the run loop.\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n def test_mock_connect_to_ib_with_timeout(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test connecting to IB.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n # we are testing connect_to_ib with a simulated timeout\n logger.debug(\"about to connect\")\n with pytest.raises(ConnectTimeout):\n algo_app.connect_to_ib(\"127.0.0.1\",\n mock_ib.PORT_FOR_REQID_TIMEOUT,\n client_id=0)\n\n # verify that algo_app is not connected\n verify_algo_app_disconnected(algo_app)\n assert algo_app.request_id == 0\n\n def test_connect_to_ib_already_connected(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test connecting to IB.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n # first, connect normally to mock_ib\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_PAPER_TRADING,\n client_id=0)\n # verify that algo_app is connected\n verify_algo_app_connected(algo_app)\n\n # try to connect again - should get error\n with pytest.raises(AlreadyConnected):\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_PAPER_TRADING,\n client_id=0)\n\n # verify that algo_app is still connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n def 
test_connect_to_ib_with_lock_held(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test connecting to IB with disconnect lock held.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n # obtain the disconnect lock\n logger.debug(\"about to obtain disconnect lock\")\n algo_app.disconnect_lock.acquire()\n\n # try to connect - should get error\n with pytest.raises(DisconnectLockHeld):\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is still simply initialized\n verify_algo_app_initialized(algo_app)\n\n # def test_real_connect_to_IB(self) -> None:\n # \"\"\"Test connecting to IB.\n #\n # Args:\n # algo_app: instance of AlgoApp from conftest pytest fixture\n # monkeypatch: pytest fixture\n #\n # \"\"\"\n # proj_dir = Path.cwd().resolve().parents[1] # back two directories\n # test_cat = \\\n # FileCatalog({'symbols': Path(proj_dir / 't_datasets/symbols.csv')\n # })\n # algo_app = AlgoApp(test_cat)\n # verify_algo_app_initialized(algo_app)\n #\n # # we are testing connect_to_ib and the subsequent code that gets\n # # control as a result, such as getting the first requestID and then\n # # starting a separate thread for the run loop.\n # logger.debug(\"about to connect\")\n # connect_ans = algo_app.connect_to_ib(\"127.0.0.1\", 7496, client_id=0)\n #\n # # verify that algo_app is connected and alive with a valid reqId\n # assert connect_ans\n # assert algo_app.run_thread.is_alive()\n # assert algo_app.isConnected()\n # assert algo_app.request_id == 1\n #\n # algo_app.disconnect_from_ib()\n # assert not algo_app.run_thread.is_alive()\n # assert not algo_app.isConnected()\n\n\n###############################################################################\n# connect disconnect verification\n###############################################################################\ndef verify_algo_app_initialized(algo_app: \"AlgoApp\") -> None:\n \"\"\"Helper function to verify the algo_app instance is initialized.\n\n Args:\n algo_app: instance of AlgoApp that is to be checked\n\n \"\"\"\n assert len(algo_app.ds_catalog) > 0\n assert algo_app.request_id == 0\n assert algo_app.symbols.empty\n assert algo_app.stock_symbols.empty\n assert algo_app.response_complete_event.is_set() is False\n assert algo_app.nextValidId_event.is_set() is False\n assert algo_app.__repr__() == 'AlgoApp(ds_catalog)'\n # assert algo_app.run_thread is None\n\n\ndef verify_algo_app_connected(algo_app: \"AlgoApp\") -> None:\n \"\"\"Helper function to verify we are connected to ib.\n\n Args:\n algo_app: instance of AlgoApp that is to be checked\n\n \"\"\"\n assert algo_app.run_thread.is_alive()\n assert algo_app.isConnected()\n assert algo_app.request_id == 1\n\n\ndef verify_algo_app_disconnected(algo_app: \"AlgoApp\") -> None:\n \"\"\"Helper function to verify we are disconnected from ib.\n\n Args:\n algo_app: instance of AlgoApp that is to be checked\n\n \"\"\"\n assert not algo_app.run_thread.is_alive()\n assert not algo_app.isConnected()\n\n\n###############################################################################\n###############################################################################\n# matching symbols\n###############################################################################\n###############################################################################\nclass ExpCounts(NamedTuple):\n \"\"\"NamedTuple 
for the expected counts.\"\"\"\n sym_non_recursive: int\n sym_recursive: int\n stock_sym_non_recursive: int\n stock_sym_recursive: int\n\n\nclass SymDfs:\n \"\"\"Saved sym dfs.\"\"\"\n def __init__(self,\n mock_sym_df: Any,\n sym_df: Any,\n mock_stock_sym_df: Any,\n stock_sym_df: Any) -> None:\n \"\"\"Initialize the SymDfs.\n\n Args:\n mock_sym_df: mock sym DataFrame\n sym_df: symbol DataFrame\n mock_stock_sym_df: mock stock symbol DataFrame\n stock_sym_df: stock symbols dataFrame\n\n \"\"\"\n self.mock_sym_df = mock_sym_df\n self.sym_df = sym_df\n self.mock_stock_sym_df = mock_stock_sym_df\n self.stock_sym_df = stock_sym_df\n\n\nclass TestAlgoAppMatchingSymbols:\n \"\"\"TestAlgoAppMatchingSymbols class.\"\"\"\n def test_request_symbols_all_combos(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any) -> None:\n \"\"\"Test request_symbols with all patterns.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n verify_algo_app_connected(algo_app)\n algo_app.request_throttle_secs = 0.01\n\n try:\n for idx, search_pattern in enumerate(\n mock_ib.search_patterns()):\n exp_counts = get_exp_number(search_pattern, mock_ib)\n # verify symbol table has zero entries for the symbol\n logger.info(\"calling verify_match_symbols req_type 1 \"\n \"sym %s num %d\", search_pattern, idx)\n algo_app.symbols = pd.DataFrame()\n algo_app.stock_symbols = pd.DataFrame()\n verify_match_symbols(algo_app,\n mock_ib,\n search_pattern,\n exp_counts=exp_counts,\n req_type=1)\n\n logger.info(\"calling verify_match_symbols req_type 2 \"\n \"sym %s num %d\", search_pattern, idx)\n algo_app.symbols = pd.DataFrame()\n algo_app.stock_symbols = pd.DataFrame()\n verify_match_symbols(algo_app,\n mock_ib,\n search_pattern,\n exp_counts=exp_counts,\n req_type=2)\n finally:\n logger.debug('disconnecting')\n algo_app.disconnect_from_ib()\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n logger.debug('disconnected - test case returning')\n\n def test_request_symbols_zero_result(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test request_symbols with patterns that find no symbols.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n verify_algo_app_connected(algo_app)\n algo_app.request_throttle_secs = 0.01\n\n try:\n exp_counts = ExpCounts(0, 0, 0, 0)\n\n # verify symbol table has zero entries for the symbols\n for idx, search_pattern in enumerate(\n mock_ib.no_find_search_patterns()):\n logger.info(\"calling verify_match_symbols req_type 1 \"\n \"sym %s num %d\", search_pattern, idx)\n verify_match_symbols(algo_app,\n mock_ib,\n search_pattern,\n exp_counts=exp_counts,\n req_type=1)\n\n logger.info(\"calling verify_match_symbols req_type 2 \"\n \"sym %s num %d\", search_pattern, idx)\n verify_match_symbols(algo_app,\n mock_ib,\n search_pattern,\n exp_counts=exp_counts,\n req_type=2)\n\n finally:\n logger.debug('disconnecting')\n algo_app.disconnect_from_ib()\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n logger.debug('disconnected - test case 
returning')\n\n def test_get_symbols_timeout(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any) -> None:\n \"\"\"Test get_symbols gets timeout.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n try:\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n mock_ib.PORT_FOR_SIMULATE_REQUEST_TIMEOUT,\n client_id=0)\n verify_algo_app_connected(algo_app)\n\n with pytest.raises(RequestTimeout):\n algo_app.request_symbols('A')\n\n finally:\n logger.debug('disconnecting')\n algo_app.disconnect_from_ib()\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n logger.debug('disconnected - test case returning')\n\n def test_get_symbols_disconnect(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any) -> None:\n \"\"\"Test get_symbols gets disconnected while waiting.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n try:\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n mock_ib.\n PORT_FOR_SIMULATE_REQUEST_DISCONNECT,\n client_id=0)\n verify_algo_app_connected(algo_app)\n\n with pytest.raises(DisconnectDuringRequest):\n algo_app.request_symbols('A')\n\n finally:\n logger.debug('disconnecting')\n algo_app.disconnect_from_ib()\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n logger.debug('disconnected - test case returning')\n\n def test_get_symbols(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any) -> None:\n \"\"\"Test get_symbols for each letter from A to Z.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n try:\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n verify_algo_app_connected(algo_app)\n algo_app.request_throttle_secs = 0.01\n\n sym_dfs = SymDfs(pd.DataFrame(),\n pd.DataFrame(),\n pd.DataFrame(),\n pd.DataFrame())\n # full_stock_sym_match_descs = pd.DataFrame()\n # stock_symbols_ds = pd.DataFrame()\n # full_sym_match_descs = pd.DataFrame()\n # symbols_ds = pd.DataFrame()\n # we need to loop from A to Z\n for letter in string.ascii_uppercase:\n logger.debug(\"about to verify_get_symbols for letter %s\",\n letter)\n # full_stock_sym_match_descs, stock_symbols_ds,\\\n # full_sym_match_descs, symbols_ds = \\\n sym_dfs = verify_get_symbols(letter,\n algo_app,\n mock_ib,\n sym_dfs)\n\n finally:\n logger.debug('disconnecting')\n algo_app.disconnect_from_ib()\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n logger.debug('disconnected - test case returning')\n\n def test_get_symbols_with_connect_disconnect(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any) -> None:\n \"\"\"Test get_symbols with a connect and disconnect for each letter.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n sym_dfs = SymDfs(pd.DataFrame(),\n pd.DataFrame(),\n pd.DataFrame(),\n pd.DataFrame())\n # full_stock_sym_match_descs = pd.DataFrame()\n # full_sym_match_descs = pd.DataFrame()\n # stock_symbols_ds = pd.DataFrame()\n # symbols_ds = pd.DataFrame()\n # we need to loop from A to Z\n for letter in string.ascii_uppercase:\n try:\n 
logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n verify_algo_app_connected(algo_app)\n algo_app.request_throttle_secs = 0.01\n\n logger.debug(\"about to verify_get_symbols for letter %s\",\n letter)\n # full_stock_sym_match_descs, stock_symbols_ds, \\\n # full_sym_match_descs, symbols_ds = \\\n sym_dfs = verify_get_symbols(letter,\n algo_app,\n mock_ib,\n sym_dfs)\n\n finally:\n logger.debug('disconnecting')\n algo_app.disconnect_from_ib()\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n\n\n###############################################################################\n# matching symbols verification\n###############################################################################\ndef verify_match_symbols(algo_app: \"AlgoApp\",\n mock_ib: Any,\n pattern: str,\n exp_counts: ExpCounts,\n req_type: int = 1) -> None:\n \"\"\"Verify that we find symbols correctly.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n pattern: symbols to use for searching\n exp_counts: recursive and non-recursive matches expected\n req_type: indicates which request to do\n\n \"\"\"\n assert req_type == 1 or req_type == 2\n if req_type == 1:\n logger.debug(\"about to request_symbols for %s\", pattern)\n algo_app.request_symbols(pattern)\n # assert algo_app.request_id == 2\n else: # req_type == 2:\n logger.debug(\"about to get_symbols_recursive for %s\", pattern)\n algo_app.get_symbols_recursive(pattern)\n assert algo_app.request_id >= 2\n # algo_app.stock_symbols.drop_duplicates(inplace=True)\n\n logger.debug(\"getting stock_sym_match_descs\")\n symbol_starts_with_pattern = \\\n mock_ib.contract_descriptions['symbol'].map(\n lambda symbol: symbol.startswith(pattern))\n stock_sym_match_descs = mock_ib.contract_descriptions.loc[\n symbol_starts_with_pattern\n & (mock_ib.contract_descriptions['secType'] == 'STK')\n & (mock_ib.contract_descriptions['currency'] == 'USD')\n & (if_opt_in_derivativeSecTypes(mock_ib.contract_descriptions)),\n ['conId', 'symbol', 'secType', 'primaryExchange', 'currency',\n 'derivativeSecTypes']\n ]\n\n sym_match_descs = mock_ib.contract_descriptions.loc[\n symbol_starts_with_pattern\n & ((mock_ib.contract_descriptions['secType'] != 'STK')\n | (mock_ib.contract_descriptions['currency'] != 'USD')\n | if_opt_not_in_derivativeSecTypes(mock_ib.contract_descriptions)\n ),\n ['conId', 'symbol', 'secType', 'primaryExchange', 'currency',\n 'derivativeSecTypes']\n ]\n\n logger.debug(\"verifying results counts\")\n\n if req_type == 1:\n assert len(algo_app.stock_symbols) \\\n == exp_counts.stock_sym_non_recursive\n assert len(algo_app.symbols) == exp_counts.sym_non_recursive\n assert len(stock_sym_match_descs) == exp_counts.stock_sym_recursive\n assert len(sym_match_descs) == exp_counts.sym_recursive\n else:\n assert len(algo_app.stock_symbols) == exp_counts.stock_sym_recursive\n assert len(algo_app.symbols) == exp_counts.sym_recursive\n assert len(stock_sym_match_descs) == exp_counts.stock_sym_recursive\n assert len(sym_match_descs) == exp_counts.sym_recursive\n\n logger.debug(\"verifying results match DataFrame\")\n if exp_counts.stock_sym_recursive > 0:\n if req_type == 1:\n stock_sym_match_descs = stock_sym_match_descs.iloc[\n 0:exp_counts.stock_sym_non_recursive]\n stock_sym_match_descs = stock_sym_match_descs.set_index(\n ['conId']).sort_index()\n\n algo_app.stock_symbols.sort_index(inplace=True)\n comp_df = 
algo_app.stock_symbols.compare(stock_sym_match_descs)\n assert comp_df.empty\n\n if exp_counts.sym_recursive > 0:\n if req_type == 1:\n sym_match_descs = sym_match_descs.iloc[\n 0:exp_counts.sym_non_recursive]\n sym_match_descs = sym_match_descs.set_index(\n ['conId']).sort_index()\n\n algo_app.symbols.sort_index(inplace=True)\n comp_df = algo_app.symbols.compare(sym_match_descs)\n assert comp_df.empty\n logger.debug(\"all results verified for req_type %d\", req_type)\n\n\ndef if_opt_in_derivativeSecTypes(df: Any) -> Any:\n \"\"\"Find the symbols that have options.\n\n Args:\n df: pandas DataFrame of symbols\n\n Returns:\n array of boolean values used in pandas loc function\n\n \"\"\"\n ret_array = np.full(len(df), False)\n for i in range(len(df)):\n if 'OPT' in df.iloc[i].derivativeSecTypes:\n ret_array[i] = True\n return ret_array\n\n\ndef if_opt_not_in_derivativeSecTypes(df: Any) -> Any:\n \"\"\"Find the symbols that do not have options.\n\n Args:\n df: pandas DataFrame of symbols\n\n Returns:\n array of boolean values used in pandas loc function\n\n \"\"\"\n ret_array = np.full(len(df), True)\n for i in range(len(df)):\n if 'OPT' in df.iloc[i].derivativeSecTypes:\n ret_array[i] = False\n return ret_array\n\n\ndef get_exp_number(search_pattern: str, mock_ib: Any) -> ExpCounts:\n \"\"\"Helper function to get number of expected symbols.\n\n Args:\n search_pattern: search arg as string of one or more chars\n mock_ib: mock of ib\n\n Returns:\n number of expected matches for recursive and non-recursive requests\n \"\"\"\n combo_factor = (1 + 3 + 3**2 + 3**3)\n if len(search_pattern) > 4:\n # 5 or more chars will never match (for our mock setup)\n return ExpCounts(0, 0, 0, 0)\n if search_pattern[0] not in string.ascii_uppercase[0:17]:\n return ExpCounts(0, 0, 0, 0) # not in A-Q, inclusive\n if len(search_pattern) >= 2:\n if search_pattern[1] not in string.ascii_uppercase[1:3] + '.':\n return ExpCounts(0, 0, 0, 0) # not in 'BC.'\n combo_factor = (1 + 3 + 3**2)\n if len(search_pattern) >= 3:\n if search_pattern[2] not in string.ascii_uppercase[2:5]:\n return ExpCounts(0, 0, 0, 0) # not in 'CDE'\n combo_factor = (1 + 3)\n if len(search_pattern) == 4:\n if search_pattern[3] not in string.ascii_uppercase[3:5] + '.':\n return ExpCounts(0, 0, 0, 0) # not in 'DE.'\n combo_factor = 1\n\n num_stock_sym_combos = 0\n num_sym_combos = 0\n combo = mock_ib.get_combos(search_pattern[0])\n\n for item in combo:\n if item[0] == 'STK' and item[2] == 'USD' and 'OPT' in item[3]:\n num_stock_sym_combos += 1\n else:\n num_sym_combos += 1\n exp_stock_sym_recursive = num_stock_sym_combos * combo_factor\n exp_sym_recursive = num_sym_combos * combo_factor\n exp_stock_sym_non_recursive = \\\n math.ceil(min(16, len(combo) * combo_factor)\n * (num_stock_sym_combos / len(combo)))\n exp_sym_non_recursive = \\\n math.floor(min(16, len(combo) * combo_factor)\n * (num_sym_combos / len(combo)))\n\n return ExpCounts(exp_sym_non_recursive,\n exp_sym_recursive,\n exp_stock_sym_non_recursive,\n exp_stock_sym_recursive\n )\n\n\ndef verify_get_symbols(letter: str,\n algo_app: \"AlgoApp\",\n mock_ib: Any,\n sym_dfs: SymDfs) -> SymDfs:\n \"\"\"Verify get_symbols.\n\n Args:\n letter: the single letter we are collecting symbols for\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n sym_dfs: saved DataFrames between calls\n\n Returns:\n updated sym_dfs\n\n \"\"\"\n if letter != 'A':\n # verify the symbol_status ds\n symbols_status_path = \\\n 
algo_app.ds_catalog.get_path('symbols_status')\n logger.info('symbols_status_path: %s', symbols_status_path)\n\n assert symbols_status_path.exists()\n symbols_status = pd.read_csv(symbols_status_path,\n header=0,\n index_col=0)\n test_letter = symbols_status.iloc[0, 0]\n assert test_letter == letter\n\n exp_counts = get_exp_number(letter, mock_ib)\n logger.debug(\"about to get_symbols for %s\", letter)\n algo_app.get_symbols()\n assert algo_app.request_id >= 2\n\n logger.debug(\"getting stock_sym_match_descs for %s\", letter)\n symbol_starts_with_pattern = \\\n mock_ib.contract_descriptions['symbol'].map(\n lambda symbol: symbol.startswith(letter))\n stock_sym_match_descs = mock_ib.contract_descriptions.loc[\n symbol_starts_with_pattern\n & (mock_ib.contract_descriptions['secType'] == 'STK')\n & (mock_ib.contract_descriptions['currency'] == 'USD')\n & (if_opt_in_derivativeSecTypes(\n mock_ib.contract_descriptions)),\n ['conId', 'symbol', 'secType', 'primaryExchange', 'currency',\n 'derivativeSecTypes']\n ]\n\n sym_match_descs = mock_ib.contract_descriptions.loc[\n symbol_starts_with_pattern\n & ((mock_ib.contract_descriptions['secType'] != 'STK')\n | (mock_ib.contract_descriptions['currency'] != 'USD')\n | if_opt_not_in_derivativeSecTypes(mock_ib.contract_descriptions)\n ),\n ['conId', 'symbol', 'secType', 'primaryExchange', 'currency',\n 'derivativeSecTypes']\n ]\n # we expect the stock_symbols to accumulate and grow, so the\n # number should now be what was there from the previous\n # iteration of this loop plus what we just now added\n assert len(stock_sym_match_descs) == exp_counts.stock_sym_recursive\n assert len(algo_app.stock_symbols) == (\n exp_counts.stock_sym_recursive + len(sym_dfs.stock_sym_df))\n\n assert len(sym_match_descs) == exp_counts.sym_recursive\n assert len(algo_app.symbols) == (\n exp_counts.sym_recursive + len(sym_dfs.sym_df))\n\n if exp_counts.stock_sym_recursive > 0:\n stock_sym_match_descs = stock_sym_match_descs.set_index(\n ['conId']).sort_index()\n sym_dfs.mock_stock_sym_df \\\n = sym_dfs.mock_stock_sym_df.append(stock_sym_match_descs)\n sym_dfs.mock_stock_sym_df.sort_index(inplace=True)\n\n # check the data set\n stock_symbols_path = algo_app.ds_catalog.get_path('stock_symbols')\n logger.info('stock_symbols_path: %s', stock_symbols_path)\n\n sym_dfs.stock_sym_df = pd.read_csv(stock_symbols_path,\n header=0,\n index_col=0,\n converters={\n 'derivativeSecTypes':\n lambda x: eval(x)})\n comp_df = algo_app.stock_symbols.compare(sym_dfs.stock_sym_df)\n assert comp_df.empty\n\n comp_df = algo_app.stock_symbols.compare(sym_dfs.mock_stock_sym_df)\n assert comp_df.empty\n\n if exp_counts.sym_recursive > 0:\n sym_match_descs = sym_match_descs.set_index(\n ['conId']).sort_index()\n sym_dfs.mock_sym_df = \\\n sym_dfs.mock_sym_df.append(sym_match_descs)\n sym_dfs.mock_sym_df.sort_index(inplace=True)\n\n # check the data set\n symbols_path = \\\n algo_app.ds_catalog.get_path('symbols')\n logger.info('symbols_path: %s', symbols_path)\n\n sym_dfs.sym_df = pd.read_csv(symbols_path,\n header=0,\n index_col=0,\n converters={\n 'derivativeSecTypes':\n lambda x: eval(x)})\n\n comp_df = algo_app.symbols.compare(sym_dfs.sym_df)\n assert comp_df.empty\n\n comp_df = algo_app.symbols.compare(sym_dfs.mock_sym_df)\n assert comp_df.empty\n\n return sym_dfs\n\n\n###############################################################################\n###############################################################################\n# error 
path\n###############################################################################\n###############################################################################\nclass TestErrorPath:\n \"\"\"Class to test error path.\"\"\"\n def test_error_path_by_request_when_not_connected(self,\n algo_app: \"AlgoApp\",\n capsys: Any) -> None:\n \"\"\"Test the error callback by any request while not connected.\n\n Args:\n algo_app: instance of AlgoApp from conftest pytest fixture\n capsys: pytest fixture to capture print output\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n logger.debug('verifying disconnected')\n verify_algo_app_disconnected(algo_app)\n\n logger.debug(\"about to request time\")\n algo_app.reqCurrentTime()\n captured = capsys.readouterr().out\n assert captured == 'Error: -1 504 Not connected' + '\\n'\n\n\n###############################################################################\n###############################################################################\n# contract details\n###############################################################################\n###############################################################################\nclass TestAlgoAppContractDetails:\n \"\"\"TestAlgoAppContractDetails class.\"\"\"\n\n def test_get_contract_details_0_entries(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test contract details for non-existent conId.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n contract = Contract() # create an empty contract with conId of 0\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [])\n\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n def test_get_contract_details_1_entry(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test contract details for 1 entry.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n contract = Contract() # create an empty contract with conId of 0\n contract.conId = 7001\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [7001])\n\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n def test_get_contract_details_2_entries(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test contract details for 2 entries.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n contract = Contract() # create an empty contract with conId of 0\n contract.conId = 
7001\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [7001])\n\n contract.conId = 7002\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [7001, 7002])\n\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n def test_get_contract_details_duplicates(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test contract details for 3 entries plus a duplicate.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n contract = Contract() # create an empty contract with conId of 0\n contract.conId = 7001\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [7001])\n\n contract.conId = 7002\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [7001, 7002])\n\n contract.conId = 7001 # try to add 7001 again\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib, [7001, 7002])\n\n contract.conId = 7003\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib,\n [7001, 7002, 7003])\n\n contract.conId = 7002 # another duplicate\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract, algo_app, mock_ib,\n [7001, 7002, 7003])\n\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n def test_get_contract_details_many_entries(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test contract details for many entries.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n verify_algo_app_initialized(algo_app)\n\n logger.debug(\"about to connect\")\n algo_app.connect_to_ib(\"127.0.0.1\",\n algo_app.PORT_FOR_LIVE_TRADING,\n client_id=0)\n\n # verify that algo_app is connected and alive with a valid reqId\n verify_algo_app_connected(algo_app)\n\n try:\n conId_list = []\n for conId in range(7001, 7033):\n contract = Contract() # create an empty contract\n contract.conId = conId\n conId_list.append(conId)\n algo_app.get_contract_details(contract)\n\n verify_contract_details(contract,\n algo_app,\n mock_ib,\n conId_list)\n finally:\n algo_app.disconnect_from_ib()\n verify_algo_app_disconnected(algo_app)\n\n\n###############################################################################\n# contract details verification\n###############################################################################\ndef verify_contract_details(contract: \"Contract\",\n algo_app: \"AlgoApp\",\n mock_ib: Any,\n conId_list: List[int]) -> None:\n \"\"\"Verify contract details.\n\n Args:\n contract: the contract used to get details\n algo_app: instance of AlgoApp from conftest pytest fixture\n mock_ib: pytest fixture of contract_descriptions\n conId_list: list of con ids\n\n \"\"\"\n assert len(algo_app.contract_details) == len(conId_list)\n\n if len(conId_list) > 0:\n # first, save the algo_app contracts and contract_details\n contracts_ds = algo_app.contracts\n contract_details_ds = algo_app.contract_details\n\n # next, reload algo_app 
contracts and contract_details from csv\n # so we can test that they were saved and restored\n # correctly (i.e., we will compare them against\n # what we just loaded)\n contracts_path = algo_app.ds_catalog.get_path('contracts')\n logger.info('contracts_path: %s', contracts_path)\n algo_app.contracts = algo_app.load_contracts(contracts_path)\n algo_app.load_contract_details()\n\n # print('contract_details_ds:\\n', contract_details_ds)\n # print('contract_details_ds.__dict__:\\n',\n # contract_details_ds.__dict__)\n\n for conId in conId_list:\n # match_desc = mock_ib.contract_descriptions.loc[\n # mock_ib.contract_descriptions['conId'] == conId]\n\n # match_desc = match_desc.iloc[0]\n\n contract1 = get_contract_obj(\n algo_app.contracts.loc[conId].to_dict())\n\n contract2 = get_contract_obj(contracts_ds.loc[conId].to_dict())\n\n compare_contracts(contract1,\n contract2)\n\n contract3 = get_contract_from_mock_desc(conId, mock_ib)\n\n compare_contracts(contract1,\n contract3)\n\n contract_details1 = get_contract_details_obj(\n algo_app.contract_details.loc[conId].to_dict())\n\n contract_details2 = get_contract_details_obj(\n contract_details_ds.loc[conId].to_dict())\n\n compare_contract_details(contract_details1,\n contract_details2)\n\n contract_details3 = \\\n get_contract_details_from_mock_desc(conId, mock_ib)\n\n compare_contract_details(contract_details1,\n contract_details3)\n\n\n###############################################################################\n###############################################################################\n# TestExtraContractFields\n###############################################################################\n###############################################################################\nclass TestExtraContractFields:\n \"\"\"TestExtraContractFields class.\"\"\"\n\n ###########################################################################\n # test_contract_combo_legs\n ###########################################################################\n def test_contract_extra_fields(self,\n algo_app: \"AlgoApp\",\n mock_ib: Any\n ) -> None:\n \"\"\"Test combo legs in contract.\n\n Args:\n algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n mock_ib: pytest fixture of contract_descriptions\n\n \"\"\"\n num_contracts = 50\n contract_list = []\n contract_df = pd.DataFrame()\n # get the path for saving/loading the combo legs contract df\n extra_contract_path = \\\n algo_app.ds_catalog.get_path('extra_contract')\n logger.info('extra_contract_path: %s', extra_contract_path)\n\n for i in range(num_contracts):\n conId = 7001 + i\n contract = get_contract_from_mock_desc(conId,\n mock_ib,\n include_extra_details=True)\n\n # add combo legs\n combo_leg_list = build_combo_legs(i, mock_ib)\n if combo_leg_list:\n contract.comboLegs = combo_leg_list\n elif i % 2 == 1: # empty list\n # empty list for odd, None for even\n contract.comboLegs = []\n\n contract_list.append(contract)\n contract_dict = get_contract_dict(contract)\n contract_df = \\\n contract_df.append(pd.DataFrame(contract_dict,\n index=[contract.conId]))\n # Save dataframe to csv\n contract_df.to_csv(extra_contract_path)\n\n # read dataframe from csv\n contract_df2 = algo_app.load_contracts(extra_contract_path)\n\n for i in range(num_contracts):\n contract1 = contract_list[i]\n contract_dict2 = contract_df2.iloc[i].to_dict()\n contract2 = get_contract_obj(contract_dict2)\n\n compare_contracts(contract1, 
contract2)\n\n\n###############################################################################\n# build_combo_legs\n###############################################################################\ndef build_combo_legs(idx: int,\n mock_ib: Any) -> List[ComboLeg]:\n \"\"\"Build the combo leg list for a contract.\n\n Args:\n idx: the index of the entry being built\n mock_ib: pytest fixture of contract_descriptions\n\n Returns:\n list with zero or more ComboLeg items\n\n \"\"\"\n num_combo_legs = idx % 4 # vary the number built from 0 to 3\n combo_leg_list = []\n for j in range(num_combo_legs):\n combo_leg = ComboLeg()\n combo_leg.conId = \\\n mock_ib.combo_legs.cl_conId.iloc[idx + j]\n combo_leg.ratio = \\\n mock_ib.combo_legs.cl_ratio.iloc[idx + j]\n combo_leg.action = \\\n mock_ib.combo_legs.cl_action.iloc[idx + j]\n combo_leg.exchange = \\\n mock_ib.combo_legs.cl_exchange.iloc[idx + j]\n combo_leg.openClose = \\\n mock_ib.combo_legs.cl_openClose.iloc[idx + j]\n combo_leg.shortSaleSlot = \\\n mock_ib.combo_legs.cl_shortSaleSlot.iloc[idx + j]\n combo_leg.designatedLocation = \\\n mock_ib.combo_legs.cl_designatedLocation.iloc[idx + j]\n combo_leg.exemptCode = \\\n mock_ib.combo_legs.cl_exemptCode.iloc[idx + j]\n\n combo_leg_list.append(combo_leg)\n\n return combo_leg_list\n\n\n###############################################################################\n# get_contract_from_mock_desc\n###############################################################################\ndef get_contract_from_mock_desc(conId: int,\n mock_ib: Any,\n include_extra_details: bool = False\n ) -> Contract:\n \"\"\"Build and return a contract from the mock description.\n\n Args:\n conId: index of mock_desc and mock_dnc to use\n mock_ib: contains contract data frames\n include_extra_details: include more details beyond what is\n returned for reqContractDetails\n\n Returns:\n Contract with fields from input mock_desc and mock_dnc\n\n \"\"\"\n ret_con = Contract()\n ret_con.conId = mock_ib.contract_descriptions.at[conId, 'conId'] # cd\n ret_con.symbol = mock_ib.contract_descriptions.at[conId, 'symbol'] # cd\n ret_con.secType = mock_ib.contract_descriptions.at[conId, 'secType'] # cd\n\n if mock_ib.contract_descriptions.at[conId, 'lastTradeDateOrContractMonth']:\n split_date = \\\n mock_ib.contract_descriptions.at[\n conId, 'lastTradeDateOrContractMonth'].split()\n if len(split_date) > 0: # very well better be!\n ret_con.lastTradeDateOrContractMonth = split_date[0]\n\n ret_con.strike = mock_ib.contract_descriptions.at[conId, 'strike'] # cd\n ret_con.right = mock_ib.contract_descriptions.at[conId, 'right'] # cd\n ret_con.multiplier = \\\n mock_ib.contract_descriptions.at[conId, 'multiplier'] # cd\n ret_con.exchange = \\\n mock_ib.contract_descriptions.at[conId, 'exchange'] # cd\n ret_con.primaryExchange = \\\n mock_ib.contract_descriptions.at[conId, 'primaryExchange'] # cd\n ret_con.currency = \\\n mock_ib.contract_descriptions.at[conId, 'currency'] # cd\n ret_con.localSymbol = \\\n mock_ib.contract_descriptions.at[conId, 'localSymbol'] # cd\n ret_con.tradingClass = \\\n mock_ib.contract_descriptions.at[conId, 'tradingClass'] # cd\n\n ###########################################################################\n # following fields are not included with reqContractDetails\n ###########################################################################\n if include_extra_details:\n ret_con.includeExpired = \\\n mock_ib.contract_descriptions.at[conId, 'includeExpired']\n ret_con.secIdType = mock_ib.contract_descriptions.at[conId,\n 
'secIdType']\n ret_con.secId = mock_ib.contract_descriptions.at[conId, 'secId']\n\n # combos\n ret_con.comboLegsDescrip = \\\n mock_ib.contract_descriptions.at[conId, 'comboLegsDescrip']\n # ret_con.comboLegs = mock_ib.contract_descriptions.comboLegs\n\n # build a delta_neutral_contract every third time\n if (conId % 3) == 0:\n delta_neutral_contract = DeltaNeutralContract()\n # item() is used to convert numpy.int64 to python int\n delta_neutral_contract.conId = \\\n mock_ib.delta_neutral_contract.at[conId, 'conId']\n delta_neutral_contract.delta = \\\n mock_ib.delta_neutral_contract.at[conId, 'delta']\n delta_neutral_contract.price = \\\n mock_ib.delta_neutral_contract.at[conId, 'price']\n\n ret_con.deltaNeutralContract = delta_neutral_contract\n\n return ret_con\n\n\n###############################################################################\n# get_contract_details_from_mock_desc\n###############################################################################\ndef get_contract_details_from_mock_desc(conId: int,\n mock_ib: Any\n ) -> ContractDetails:\n \"\"\"Build and return a contract_details from the mock description.\n\n Args:\n conId: index of entry to use\n mock_ib: DataFrame with values for contract_details\n\n Returns:\n ContractDetails with fields from input mock_desc\n\n \"\"\"\n ret_con = ContractDetails()\n ret_con.contract = get_contract_from_mock_desc(conId, mock_ib)\n ret_con.marketName = \\\n mock_ib.contract_descriptions.at[conId, 'marketName'] # cd\n ret_con.minTick = mock_ib.contract_descriptions.at[conId, 'minTick'] # cd\n ret_con.orderTypes = \\\n mock_ib.contract_descriptions.at[conId, 'orderTypes'] # cd\n ret_con.validExchanges = \\\n mock_ib.contract_descriptions.at[conId, 'validExchanges'] # cd\n ret_con.priceMagnifier = \\\n mock_ib.contract_descriptions.at[conId, 'priceMagnifier'] # cd\n ret_con.underConId = \\\n mock_ib.contract_descriptions.at[conId, 'underConId'] # cd\n ret_con.longName = mock_ib.contract_descriptions.at[conId,\n 'longName'] # cd\n ret_con.contractMonth = \\\n mock_ib.contract_descriptions.at[conId, 'contractMonth'] # cd\n ret_con.industry = mock_ib.contract_descriptions.at[conId,\n 'industry'] # cd\n ret_con.category = mock_ib.contract_descriptions.at[conId,\n 'category'] # cd\n ret_con.subcategory = \\\n mock_ib.contract_descriptions.at[conId, 'subcategory'] # cd\n ret_con.timeZoneId = \\\n mock_ib.contract_descriptions.at[conId, 'timeZoneId'] # cd\n ret_con.tradingHours = \\\n mock_ib.contract_descriptions.at[conId, 'tradingHours'] # cd\n ret_con.liquidHours = \\\n mock_ib.contract_descriptions.at[conId, 'liquidHours'] # cd\n ret_con.evRule = mock_ib.contract_descriptions.at[conId, 'evRule'] # cd\n ret_con.evMultiplier = \\\n mock_ib.contract_descriptions.at[conId, 'evMultiplier'] # cd\n ret_con.mdSizeMultiplier = \\\n mock_ib.contract_descriptions.at[conId, 'mdSizeMultiplier'] # cd\n ret_con.aggGroup = mock_ib.contract_descriptions.at[conId,\n 'aggGroup'] # cd\n ret_con.underSymbol = \\\n mock_ib.contract_descriptions.at[conId, 'underSymbol'] # cd\n ret_con.underSecType = \\\n mock_ib.contract_descriptions.at[conId, 'underSecType'] # cd\n ret_con.marketRuleIds = \\\n mock_ib.contract_descriptions.at[conId, 'marketRuleIds'] # cd\n\n secIdList = mock_ib.contract_descriptions.at[conId, 'secIdList']\n new_secIdList = []\n for j in range(0,\n 2 * mock_ib.contract_descriptions.at[conId,\n 'secIdListCount'],\n 2):\n tag = secIdList[j]\n value = secIdList[j+1]\n tag_value = TagValue(tag, value)\n new_secIdList.append(tag_value)\n 
ret_con.secIdList = new_secIdList # cd\n\n ret_con.realExpirationDate = \\\n mock_ib.contract_descriptions.at[conId, 'realExpirationDate'] # cd\n\n # last trade time comes from lastTradeDate as 'date time' (i.e., 2 items)\n if mock_ib.contract_descriptions.at[conId, 'lastTradeDateOrContractMonth']:\n split_date = \\\n mock_ib.contract_descriptions.at[\n conId, 'lastTradeDateOrContractMonth'].split()\n if len(split_date) > 1:\n ret_con.lastTradeTime = split_date[1]\n\n ret_con.stockType = mock_ib.contract_descriptions.at[conId,\n 'stockType'] # cd\n\n return ret_con\n\n\n###############################################################################\n# compare_tag_value\n###############################################################################\ndef compare_tag_value(tag_value1: TagValue,\n tag_value2: TagValue\n ) -> None:\n \"\"\"Compare two tag_value objects for equality.\n\n Args:\n tag_value1: tag_value 1\n tag_value2: tag_value 2\n\n \"\"\"\n assert tag_value1.tag == tag_value2.tag\n\n assert isinstance(tag_value1.tag, str)\n\n assert isinstance(tag_value2.tag, str)\n\n assert tag_value1.value == tag_value2.value\n\n assert isinstance(tag_value1.value, str)\n\n assert isinstance(tag_value2.value, str)\n\n\n###############################################################################\n# compare_combo_legs\n###############################################################################\ndef compare_combo_legs(cl1: ComboLeg,\n cl2: ComboLeg\n ) -> None:\n \"\"\"Compare two combo leg objects for equality.\n\n Args:\n cl1: combo leg 1\n cl2: combo leg 2\n\n \"\"\"\n assert cl1.conId == cl2.conId\n\n assert cl1.ratio == cl2.ratio\n\n assert cl1.action == cl2.action\n\n assert cl1.exchange == cl2.exchange\n\n assert cl1.openClose == cl2.openClose\n\n assert cl1.shortSaleSlot == cl2.shortSaleSlot\n\n assert cl1.designatedLocation == cl2.designatedLocation\n\n assert cl1.exemptCode == cl2.exemptCode\n\n verify_combo_leg_types(cl1)\n verify_combo_leg_types(cl2)\n\n\n###############################################################################\n# verify_combo_leg_types\n###############################################################################\ndef verify_combo_leg_types(combo_leg: ComboLeg) -> None:\n \"\"\"Verify that combo_leg fields are correct type.\n\n Args:\n combo_leg: combo_leg to verify\n\n \"\"\"\n assert isinstance(combo_leg.conId, (int, np.int64))\n\n assert isinstance(combo_leg.ratio, (int, np.int64))\n\n assert isinstance(combo_leg.action, str)\n\n assert isinstance(combo_leg.exchange, str)\n\n assert isinstance(combo_leg.openClose, (int, np.int64))\n\n assert isinstance(combo_leg.shortSaleSlot, (int, np.int64))\n\n assert isinstance(combo_leg.designatedLocation, str)\n\n assert isinstance(combo_leg.exemptCode, (int, np.int64))\n\n\n###############################################################################\n# compare_delta_neutral_contracts\n###############################################################################\ndef compare_delta_neutral_contracts(con1: DeltaNeutralContract,\n con2: DeltaNeutralContract\n ) -> None:\n \"\"\"Compare two delta neutral contracts for equality.\n\n Args:\n con1: contract 1\n con2: contract 2\n\n \"\"\"\n assert con1.conId == con2.conId\n\n assert isinstance(con1.conId, (int, np.int64))\n\n assert isinstance(con2.conId, int)\n\n assert con1.delta == con2.delta\n\n assert isinstance(con1.delta, float)\n\n assert isinstance(con2.delta, float)\n\n assert con1.price == con2.price\n\n assert isinstance(con1.price, float)\n\n assert 
isinstance(con2.price, float)\n\n\n###############################################################################\n# compare_contracts\n###############################################################################\ndef compare_contracts(con1: Contract, con2: Contract) -> None:\n \"\"\"Compare two contracts for equality.\n\n Args:\n con1: contract 1\n con2: contract 2\n\n \"\"\"\n assert con1.conId == con2.conId\n\n assert con1.symbol == con2.symbol\n\n assert con1.secType == con2.secType\n\n assert (con1.lastTradeDateOrContractMonth\n == con2.lastTradeDateOrContractMonth)\n\n assert con1.strike == con2.strike\n\n assert con1.right == con2.right\n\n assert con1.multiplier == con2.multiplier\n\n assert con1.exchange == con2.exchange\n\n assert con1.primaryExchange == con2.primaryExchange\n\n assert con1.currency == con2.currency\n\n assert con1.localSymbol == con2.localSymbol\n\n assert con1.tradingClass == con2.tradingClass\n\n assert con1.includeExpired == con2.includeExpired\n\n assert con1.secIdType == con2.secIdType\n\n assert con1.secId == con2.secId\n\n # combos\n assert con1.comboLegsDescrip == con2.comboLegsDescrip\n\n if con1.comboLegs and con2.comboLegs:\n assert len(con1.comboLegs) == len(con2.comboLegs)\n\n for i in range(len(con1.comboLegs)):\n compare_combo_legs(con1.comboLegs[i],\n con2.comboLegs[i])\n else: # check whether one contract has it and the other does not\n assert not (con1.comboLegs or con2.comboLegs)\n\n if con1.deltaNeutralContract and con2.deltaNeutralContract:\n compare_delta_neutral_contracts(con1.deltaNeutralContract,\n con2.deltaNeutralContract)\n else: # check whether one contract has it and one does not\n assert not (con1.deltaNeutralContract or con2.deltaNeutralContract)\n\n verify_contract_types(con1)\n verify_contract_types(con2)\n\n\n###############################################################################\n# verify_contract_types\n###############################################################################\ndef verify_contract_types(contract: Contract) -> None:\n \"\"\"Verify that contract fields are correct type.\n\n Args:\n contract: contract to verify\n\n \"\"\"\n assert isinstance(contract.conId, (int, np.int64))\n\n assert isinstance(contract.symbol, str)\n\n assert isinstance(contract.secType, str)\n\n assert isinstance(contract.lastTradeDateOrContractMonth, str)\n\n assert isinstance(contract.strike, float)\n\n assert isinstance(contract.right, str)\n\n assert isinstance(contract.multiplier, str)\n\n assert isinstance(contract.exchange, str)\n\n assert isinstance(contract.primaryExchange, str)\n\n assert isinstance(contract.currency, str)\n\n assert isinstance(contract.localSymbol, str)\n\n assert isinstance(contract.tradingClass, str)\n\n assert isinstance(contract.includeExpired, (bool, np.bool_))\n\n assert isinstance(contract.secIdType, str)\n\n assert isinstance(contract.secId, str)\n\n # combos\n assert isinstance(contract.comboLegsDescrip, str)\n\n assert isinstance(contract.comboLegs, (list, type(None)))\n\n if contract.comboLegs:\n for combo_leg in contract.comboLegs:\n assert isinstance(combo_leg, ComboLeg)\n\n assert isinstance(contract.deltaNeutralContract,\n (DeltaNeutralContract, type(None)))\n\n\n###############################################################################\n# compare_contract_details\n###############################################################################\ndef compare_contract_details(con1: ContractDetails,\n con2: ContractDetails\n ) -> None:\n \"\"\"Compare two contract_details for 
equality.\n\n Args:\n con1: contract_details 1\n con2: contract_details 2\n\n \"\"\"\n if con1.contract and con2.contract:\n compare_contracts(con1.contract, con2.contract)\n\n else: # check whether one contract_details has it, one does not\n assert not (con1.contract or con2.contract)\n\n assert con1.marketName == con2.marketName\n\n assert con1.minTick == con2.minTick\n\n assert con1.orderTypes == con2.orderTypes\n\n assert con1.validExchanges == con2.validExchanges\n\n assert con1.priceMagnifier == con2.priceMagnifier\n\n assert con1.underConId == con2.underConId\n\n assert con1.longName == con2.longName\n\n assert con1.contractMonth == con2.contractMonth\n\n assert con1.industry == con2.industry\n\n assert con1.category == con2.category\n\n assert con1.subcategory == con2.subcategory\n\n assert con1.timeZoneId == con2.timeZoneId\n\n assert con1.tradingHours == con2.tradingHours\n\n assert con1.liquidHours == con2.liquidHours\n\n assert con1.evRule == con2.evRule\n\n assert con1.evMultiplier == con2.evMultiplier\n\n assert con1.mdSizeMultiplier == con2.mdSizeMultiplier\n\n assert con1.aggGroup == con2.aggGroup\n\n assert con1.underSymbol == con2.underSymbol\n\n assert con1.underSecType == con2.underSecType\n\n assert con1.marketRuleIds == con2.marketRuleIds\n\n if con1.secIdList and con2.secIdList:\n assert len(con1.secIdList) == len(con2.secIdList)\n for i in range(len(con1.secIdList)):\n compare_tag_value(con1.secIdList[i], con2.secIdList[i])\n else: # check whether one contract_details has it, one does not\n assert not (con1.secIdList or con2.secIdList)\n\n assert con1.realExpirationDate == con2.realExpirationDate\n\n assert con1.lastTradeTime == con2.lastTradeTime\n\n assert con1.stockType == con2.stockType\n\n # BOND values\n assert con1.cusip == con2.cusip\n\n assert con1.ratings == con2.ratings\n\n assert con1.descAppend == con2.descAppend\n\n assert con1.bondType == con2.bondType\n\n assert con1.couponType == con2.couponType\n\n assert con1.callable == con2.callable\n\n assert con1.putable == con2.putable\n\n assert con1.coupon == con2.coupon\n\n assert con1.convertible == con2.convertible\n\n assert con1.maturity == con2.maturity\n\n assert con1.issueDate == con2.issueDate\n\n assert con1.nextOptionDate == con2.nextOptionDate\n\n assert con1.nextOptionType == con2.nextOptionType\n\n assert con1.nextOptionPartial == con2.nextOptionPartial\n\n assert con1.notes == con2.notes\n\n\n###############################################################################\n# fundamental data\n###############################################################################\n# class TestAlgoAppFundamentalData:\n# \"\"\"TestAlgoAppContractDetails class.\"\"\"\n#\n# def test_get_contract_details_0_entries(self,\n# algo_app: \"AlgoApp\",\n# mock_ib: Any\n# ) -> None:\n# \"\"\"Test contract details for non-existent conId.\n#\n# Args:\n# algo_app: pytest fixture instance of AlgoApp (see conftest.py)\n# mock_ib: pytest fixture of contract_descriptions\n#\n# \"\"\"\n# verify_algo_app_initialized(algo_app)\n#\n# logger.debug(\"about to connect\")\n# algo_app.connect_to_ib(\"127.0.0.1\",\n# algo_app.PORT_FOR_LIVE_TRADING,\n# client_id=0)\n#\n# # verify that algo_app is connected and alive with a valid reqId\n# verify_algo_app_connected(algo_app)\n#\n# contract = Contract() # create an empty contract with conId of 0\n# algo_app.get_contract_details(contract)\n#\n# verify_contract_details(contract, algo_app, mock_ib, [0])\n#\n# algo_app.disconnect_from_ib()\n# verify_algo_app_disconnected(algo_app)\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
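The test file in the record above verifies its results by asserting that pandas DataFrame.compare() between the collected symbols and the expected mock frames yields an empty frame. A minimal sketch of that pattern, assuming only public pandas behavior (compare() requires pandas >= 1.1 and identically labeled frames); the sample values are illustrative, not taken from the record:

import pandas as pd

# two identically labeled frames, mirroring the stock_symbols /
# stock_sym_match_descs comparison in the tests above
expected = pd.DataFrame({'symbol': ['AB', 'AC'], 'secType': ['STK', 'STK']},
                        index=[7001, 7002]).sort_index()
actual = expected.copy()

# compare() returns a frame of differing cells; empty means equal
comp_df = actual.compare(expected)
assert comp_df.empty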
bsierieb1/SCDCdm_public | [
"db610c1bda904f79a8142da767cf8e62d1cd8d32"
] | [
"paper_simulation_scripts/run_one_job.py"
] | [
"\"\"\"\nThis script is executed in each job on the server to run simulation studies on all the parameters that are passed to it\n\"\"\"\nimport sys\nimport ast\nimport numpy as np\n\nfrom scdcdm.util import multi_parameter_sampling as mult\n\n# Convert string parameters to lists\ncases = ast.literal_eval(sys.argv[1])\nprint(\"cases:\", cases)\nK = ast.literal_eval(sys.argv[2])\nprint(\"K:\", K)\nn_total = ast.literal_eval(sys.argv[3])\nprint(\"n_total:\", n_total)\nn_samples = ast.literal_eval(sys.argv[4])\nprint(\"n_samples:\", n_samples)\nprint(sys.argv[5])\nb_true = ast.literal_eval(sys.argv[5])\nprint(\"b_true:\", b_true)\nw_true = ast.literal_eval(sys.argv[6])\nprint(\"w_true:\", w_true)\nnum_results = ast.literal_eval(sys.argv[7])\nprint(\"num_results:\", num_results)\nn = ast.literal_eval(sys.argv[8])\nprint(\"n:\", n)\n\n# Run simulation study\n\np = mult.MultiParamSimulation(cases, K, n_total, n_samples, b_true, w_true, num_results,\n baseline_index=4, formula=\"x_0\")\n\np.simulate()\n\np.save(path=\"/home/icb/johannes.ostner/compositional_diff/compositionalDiff-johannes_tests_2/benchmark_results/overall_benchmark/\",\n filename=\"result_b_\" + str(np.round(b_true, 3)).replace(\" \", \" \") + \"_w_\" + str(w_true) + \"_round_\" + str(n))\n"
] | [
[
"numpy.round"
]
] |
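The run_one_job.py record above parses every positional command-line argument with ast.literal_eval, so each argument must be a valid Python literal. A small sketch of that round-trip; the argument strings here are hypothetical, not taken from the source:

import ast

# a shell caller would pass these as quoted positional arguments, e.g.
#   python run_one_job.py "[['case_1']]" "5" "[1000]" ...
argv_tail = ["[['case_1']]", '5', '[1000]']

cases = ast.literal_eval(argv_tail[0])    # nested list survives intact
K = ast.literal_eval(argv_tail[1])        # plain int
n_total = ast.literal_eval(argv_tail[2])  # list of ints

assert cases == [['case_1']] and K == 5 and n_total == [1000]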
yupeijei1997/unif | [
"16685a89446e6ce14080439162a9bfd0c75f0521"
] | [
"uf/application/uda.py"
] | [
"# coding:=utf-8\n# Copyright 2021 Tencent. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n''' Applications based on UDA. '''\n\nimport numpy as np\n\nfrom uf.tools import tf\nfrom .base import ClassifierModule\nfrom .bert import BERTClassifier, get_bert_config, get_key_to_depths\nfrom uf.modeling.bert import BERTEncoder\nfrom uf.modeling.uda import UDADecoder\nfrom uf.tokenization.word_piece import get_word_piece_tokenizer\nimport uf.utils as utils\nimport uf.modeling.util as util\n\n\n\nclass UDAClassifier(BERTClassifier, ClassifierModule):\n ''' Single-label classifier on UDA. '''\n _INFER_ATTRIBUTES = BERTClassifier._INFER_ATTRIBUTES\n\n def __init__(self,\n config_file,\n vocab_file,\n max_seq_length=128,\n label_size=None,\n init_checkpoint=None,\n output_dir=None,\n gpu_ids=None,\n drop_pooler=False,\n uda_softmax_temp=-1,\n uda_confidence_thresh=-1,\n tsa_schedule='linear',\n do_lower_case=True,\n truncate_method='LIFO'):\n super(ClassifierModule, self).__init__(\n init_checkpoint, output_dir, gpu_ids)\n\n self.batch_size = 0\n self.max_seq_length = max_seq_length\n self.label_size = label_size\n self.truncate_method = truncate_method\n self._drop_pooler = drop_pooler\n self._uda_softmax_temp = uda_softmax_temp\n self._uda_confidence_thresh = uda_confidence_thresh\n self._tsa_schedule = tsa_schedule\n self._id_to_label = None\n self.__init_args__ = locals()\n\n self.bert_config = get_bert_config(config_file)\n self.tokenizer = get_word_piece_tokenizer(vocab_file, do_lower_case)\n self._key_to_depths = get_key_to_depths(\n self.bert_config.num_hidden_layers)\n\n if '[CLS]' not in self.tokenizer.vocab:\n self.tokenizer.add('[CLS]')\n self.bert_config.vocab_size += 1\n tf.logging.info('Add necessary token `[CLS]` into vocabulary.')\n if '[SEP]' not in self.tokenizer.vocab:\n self.tokenizer.add('[SEP]')\n self.bert_config.vocab_size += 1\n tf.logging.info('Add necessary token `[SEP]` into vocabulary.')\n\n def convert(self, X=None, y=None, sample_weight=None, X_tokenized=None,\n is_training=False):\n self._assert_legal(X, y, sample_weight, X_tokenized)\n\n # simplified when not training\n if not is_training:\n return super().convert(\n X, y, sample_weight, X_tokenized, is_training)\n\n if is_training:\n assert y is not None, '`y` can\\'t be None.'\n\n n_inputs = None\n data = {}\n\n # convert X\n if X or X_tokenized:\n tokenized = False if X else X_tokenized\n (input_ids, input_mask, segment_ids,\n aug_input_ids, aug_input_mask, aug_segment_ids,\n is_supervised) = self._convert_X_reimp(\n X_tokenized if tokenized else X, y, tokenized=tokenized)\n data['input_ids'] = np.array(input_ids, dtype=np.int32)\n data['input_mask'] = np.array(input_mask, dtype=np.int32)\n data['segment_ids'] = np.array(segment_ids, dtype=np.int32)\n data['aug_input_ids'] = np.array(aug_input_ids, dtype=np.int32)\n data['aug_input_mask'] = np.array(aug_input_mask, dtype=np.int32)\n data['aug_segment_ids'] = np.array(aug_segment_ids, dtype=np.int32)\n data['is_supervised'] = 
np.array(is_supervised, dtype=np.int32)\n n_inputs = len(input_ids)\n\n if n_inputs < self.batch_size:\n self.batch_size = max(n_inputs, len(self._gpu_ids))\n\n # convert y\n if y:\n label_ids = self._convert_y(y)\n data['label_ids'] = np.array(label_ids, dtype=np.int32)\n\n # convert sample_weight\n if is_training or y:\n sample_weight = self._convert_sample_weight(\n sample_weight, n_inputs)\n data['sample_weight'] = np.array(sample_weight, dtype=np.float32)\n\n return data\n\n def _convert_X_reimp(self, X_target, y, tokenized):\n\n # tokenize input texts\n sup_ori_input_tokens = []\n aug_input_tokens = []\n is_supervised = []\n for ex_id, example in enumerate(X_target):\n try:\n label = y[ex_id]\n\n if label is None:\n assert len(example) == 2\n sup_ori_input_tokens.append(\n self._convert_x(example[0], tokenized))\n aug_input_tokens.append(\n self._convert_x(example[1], tokenized))\n is_supervised.append(0)\n else:\n sup_ori_input_tokens.append(\n self._convert_x(example, tokenized))\n aug_input_tokens.append([])\n is_supervised.append(1)\n except AssertionError:\n raise AssertionError (\n 'Must have exactly two inputs for an '\n 'unsupervised example, respectively original '\n 'and augmented.')\n except Exception:\n raise ValueError(\n 'Wrong input format (line %d): \\'%s\\'. '\n % (ex_id, example))\n\n input_ids = []\n input_mask = []\n segment_ids = []\n for ex_id, segments in enumerate(sup_ori_input_tokens):\n _input_tokens = ['[CLS]']\n _input_ids = []\n _input_mask = [1]\n _segment_ids = [0]\n\n utils.truncate_segments(\n segments, self.max_seq_length - len(segments) - 1,\n truncate_method=self.truncate_method)\n for s_id, segment in enumerate(segments):\n _segment_id = min(s_id, 1)\n _input_tokens.extend(segment + ['[SEP]'])\n _input_mask.extend([1] * (len(segment) + 1))\n _segment_ids.extend([_segment_id] * (len(segment) + 1))\n\n _input_ids = self.tokenizer.convert_tokens_to_ids(_input_tokens)\n\n # padding\n for _ in range(self.max_seq_length - len(_input_ids)):\n _input_ids.append(0)\n _input_mask.append(0)\n _segment_ids.append(0)\n\n input_ids.append(_input_ids)\n input_mask.append(_input_mask)\n segment_ids.append(_segment_ids)\n\n aug_input_ids = []\n aug_input_mask = []\n aug_segment_ids = []\n for ex_id, segments in enumerate(aug_input_tokens):\n _input_tokens = ['[CLS]']\n _input_ids = []\n _input_mask = [1]\n _segment_ids = [0]\n\n utils.truncate_segments(\n segments, self.max_seq_length - len(segments) - 1,\n truncate_method=self.truncate_method)\n for s_id, segment in enumerate(segments):\n _segment_id = min(s_id, 1)\n _input_tokens.extend(segment + ['[SEP]'])\n _input_mask.extend([1] * (len(segment) + 1))\n _segment_ids.extend([_segment_id] * (len(segment) + 1))\n\n _input_ids = self.tokenizer.convert_tokens_to_ids(_input_tokens)\n\n # padding\n for _ in range(self.max_seq_length - len(_input_ids)):\n _input_ids.append(0)\n _input_mask.append(0)\n _segment_ids.append(0)\n\n aug_input_ids.append(_input_ids)\n aug_input_mask.append(_input_mask)\n aug_segment_ids.append(_segment_ids)\n\n return (input_ids, input_mask, segment_ids,\n aug_input_ids, aug_input_mask, aug_segment_ids,\n is_supervised)\n\n def _convert_y(self, y):\n label_set = set(y)\n if None in label_set:\n label_set -= {None}\n\n # automatically set `label_size`\n if self.label_size:\n assert len(label_set) <= self.label_size, (\n 'Number of unique `y`s exceeds `label_size`.')\n else:\n self.label_size = len(label_set)\n\n # automatically set `id_to_label`\n if not self._id_to_label:\n 
self._id_to_label = list(label_set)\n try:\n # Allign if user inputs continual integers.\n # e.g. [2, 0, 1]\n self._id_to_label = list(sorted(self._id_to_label))\n except Exception:\n pass\n if len(self._id_to_label) < self.label_size:\n for i in range(len(self._id_to_label), self.label_size):\n self._id_to_label.append(i)\n\n # automatically set `label_to_id` for prediction\n self._label_to_id = {\n label: index for index, label in enumerate(self._id_to_label)}\n\n label_ids = [self._label_to_id[label]\n if label is not None else -1 for label in y]\n return label_ids\n\n def _set_placeholders(self, target, on_export=False, **kwargs):\n self.placeholders = {\n 'input_ids': utils.get_placeholder(\n target, 'input_ids',\n [None, self.max_seq_length], tf.int32),\n 'input_mask': utils.get_placeholder(\n target, 'input_mask',\n [None, self.max_seq_length], tf.int32),\n 'segment_ids': utils.get_placeholder(\n target, 'segment_ids',\n [None, self.max_seq_length], tf.int32),\n 'label_ids': utils.get_placeholder(\n target, 'label_ids', [None], tf.int32),\n }\n if kwargs.get('is_training'):\n self.placeholders['aug_input_ids'] = utils.get_placeholder(\n target, 'aug_input_ids',\n [None, self.max_seq_length], tf.int32)\n self.placeholders['aug_input_mask'] = utils.get_placeholder(\n target, 'aug_input_mask',\n [None, self.max_seq_length], tf.int32)\n self.placeholders['aug_segment_ids'] = utils.get_placeholder(\n target, 'aug_segment_ids',\n [None, self.max_seq_length], tf.int32)\n self.placeholders['is_supervised'] = utils.get_placeholder(\n target, 'is_supervised',\n [None], tf.float32)\n if not on_export:\n self.placeholders['sample_weight'] = \\\n utils.get_placeholder(\n target, 'sample_weight',\n [None], tf.float32)\n\n def _forward(self, is_training, split_placeholders, **kwargs):\n\n if not is_training:\n return super()._forward(is_training, split_placeholders, **kwargs)\n\n aug_input_ids = tf.boolean_mask(\n split_placeholders['aug_input_ids'],\n mask=(1.0 - split_placeholders['is_supervised']),\n axis=0)\n aug_input_mask = tf.boolean_mask(\n split_placeholders['aug_input_mask'],\n mask=(1.0 - split_placeholders['is_supervised']),\n axis=0)\n aug_segment_ids = tf.boolean_mask(\n split_placeholders['aug_segment_ids'],\n mask=(1.0 - split_placeholders['is_supervised']),\n axis=0)\n input_ids = tf.concat(\n [split_placeholders['input_ids'],\n aug_input_ids], axis=0)\n input_mask = tf.concat(\n [split_placeholders['input_mask'],\n aug_input_mask], axis=0)\n segment_ids = tf.concat(\n [split_placeholders['segment_ids'],\n aug_segment_ids], axis=0)\n encoder = BERTEncoder(\n bert_config=self.bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n scope='bert',\n drop_pooler=self._drop_pooler,\n **kwargs)\n encoder_output = encoder.get_pooled_output()\n\n label_ids = split_placeholders['label_ids']\n is_expanded = tf.zeros_like(label_ids, dtype=tf.float32)\n batch_size = util.get_shape_list(aug_input_ids)[0]\n aug_is_expanded = tf.ones((batch_size), dtype=tf.float32)\n is_expanded = tf.concat([is_expanded, aug_is_expanded], axis=0)\n decoder = UDADecoder(\n is_training=is_training,\n input_tensor=encoder_output,\n is_supervised=split_placeholders['is_supervised'],\n is_expanded=is_expanded,\n label_ids=label_ids,\n label_size=self.label_size,\n sample_weight=split_placeholders.get('sample_weight'),\n scope='cls/seq_relationship',\n global_step=self._global_step,\n num_train_steps=self.total_steps,\n 
uda_softmax_temp=self._uda_softmax_temp,\n uda_confidence_thresh=self._uda_confidence_thresh,\n tsa_schedule=self._tsa_schedule,\n **kwargs)\n (total_loss, losses, probs, preds) = decoder.get_forward_outputs()\n return (total_loss, losses, probs, preds)\n\n def _get_fit_ops(self, as_feature=False):\n ops = [self._train_op,\n self._preds['preds'],\n self._losses['supervised'],\n self._losses['unsupervised'],\n ]\n if as_feature:\n ops.extend([self.placeholders['is_supervised'],\n self.placeholders['label_ids']])\n return ops\n\n def _get_fit_info(self, output_arrays, feed_dict, as_feature=False):\n\n if as_feature:\n batch_is_sup = output_arrays[-2]\n batch_labels = output_arrays[-1]\n else:\n batch_is_sup = feed_dict[self.placeholders['is_supervised']]\n batch_labels = feed_dict[self.placeholders['label_ids']]\n\n # accuracy\n batch_preds = output_arrays[1]\n accuracy = np.sum((batch_preds == batch_labels) * batch_is_sup) / \\\n np.sum(batch_is_sup)\n\n # supervised loss\n batch_sup_losses = output_arrays[2]\n sup_loss = np.mean(batch_sup_losses)\n\n # unsupervised loss\n batch_unsup_losses = output_arrays[3]\n unsup_loss = np.mean(batch_unsup_losses)\n\n info = ''\n info += ', accuracy %.4f' % accuracy\n info += ', supervised loss %.6f' % sup_loss\n info += ', unsupervised loss %.6f' % unsup_loss\n\n return info\n"

] | [
[
"numpy.array",
"numpy.mean",
"numpy.sum"
]
] |
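In the UDA record above, _convert_X_reimp pads input_ids, input_mask and segment_ids with zeros until they reach max_seq_length. A self-contained sketch of that padding step; pad_to_length is an assumed helper name for illustration, not from the source:

def pad_to_length(input_ids, input_mask, segment_ids, max_seq_length):
    # extend all three parallel lists with zeros, as in the loops above
    for _ in range(max_seq_length - len(input_ids)):
        input_ids.append(0)      # 0 is the pad token id
        input_mask.append(0)     # mask 0 marks padding positions
        segment_ids.append(0)    # padding is assigned to segment 0
    return input_ids, input_mask, segment_ids

ids, mask, segs = pad_to_length([101, 2023, 102], [1, 1, 1], [0, 0, 0], 8)
assert len(ids) == len(mask) == len(segs) == 8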
ege-erdil/logistic-fit | [
"7c6cc9ed35877ed8d142dd75b7b98658e19cf7cb"
] | [
"logistic_fit.py"
] | [
"from autograd import grad\r\nimport autograd.numpy as np\r\nfrom scipy.stats import logistic, norm\r\nfrom scipy.optimize import minimize\r\n\r\ndef logistic_pdf(x, loc, scale):\r\n y = (x - loc)/scale\r\n return np.exp(-y)/(scale * (1 + np.exp(-y))**2)\r\n\r\ndef logistic_cdf(x, loc, scale):\r\n y = (x-loc)/scale\r\n if y < -100:\r\n return 0\r\n elif y > 100:\r\n return 1\r\n else:\r\n return 1/(1 + np.exp(-y))\r\n\r\ndef logistic_logpdf(x, loc, scale):\r\n y = (x - loc)/scale\r\n if y < -250:\r\n return y - np.log(scale)\r\n elif y > 250:\r\n return -y - np.log(scale)\r\n else:\r\n return -y - np.log(scale) - 2 * np.log(1 + np.exp(-y))\r\n\r\ndef square_dist(a1, a2):\r\n s = 0\r\n for k in range(len(a1)):\r\n s += (a1[k] - a2[k])**2\r\n return s\r\n\r\ndef log_likelihood_logistic(data, params):\r\n n = len(data)\r\n c = (len(params) + 1)//3\r\n r = 0\r\n\r\n if (len(params) + 1) % 3 != 0:\r\n print(\"Parameters specified incorrectly!\")\r\n return None\r\n\r\n else:\r\n weights = [1]\r\n for k in range(c-1):\r\n weights.append(np.exp(params[2*c + k]))\r\n s = np.sum(weights)\r\n for x in data:\r\n pdf_list = [logistic_logpdf(x, params[2*j], np.exp(params[2*j+1])) for j in range(c)]\r\n pdf_list_avg = np.sum(pdf_list)/c\r\n pdf_list_n = [weights[j] * np.exp(pdf_list[j] - pdf_list_avg) for j in range(c)]\r\n \r\n r += (pdf_list_avg + np.log(np.sum(pdf_list_n)/s))/n\r\n return r\r\n\r\ndef cdf_loss(percentiles, params):\r\n n = len(percentiles)\r\n c = (len(params) + 1)//3\r\n r = 0\r\n\r\n if (len(params) + 1) % 3 != 0:\r\n print(\"Parameters specified incorrectly!\")\r\n return None\r\n\r\n else:\r\n weights = [1]\r\n for k in range(c-1):\r\n weights.append(np.exp(params[2*c + k]))\r\n s = np.sum(weights)\r\n for q in range(1, n):\r\n cdf_list = [logistic_cdf(percentiles[q-1], params[2*j], np.exp(params[2*j+1])) for j in range(c)]\r\n cdf_list_n = [weights[j] * cdf_list[j] for j in range(c)]\r\n \r\n r += (np.sum(cdf_list_n)/s - q/n)**2/n\r\n return r\r\n\r\n\r\ndef estimate(data, bins=20, num = 1, tol = 0.01, maxiter = 100):\r\n fit_params = np.zeros(3*num - 1)\r\n a = np.average(data)\r\n s = np.log(np.std(data))\r\n percentiles = [np.percentile(data, k) for k in range(100//bins, 100, 100//bins)]\r\n for i in range(num):\r\n fit_params[2*i] = np.random.normal(loc=a, scale=np.exp(s), size=1)\r\n fit_params[2*i+1] = np.random.normal(loc=s - np.log(num), scale=1, size=1)\r\n\r\n def training_loss(params):\r\n return cdf_loss(percentiles, params) + 0.0001 * np.dot(params[2*num:], params[2*num:])\r\n \r\n training_loss_jac = grad(training_loss)\r\n\r\n res = minimize(training_loss, jac=training_loss_jac, x0=fit_params, method=\"BFGS\", options = {\"maxiter\": maxiter, \"gtol\": tol})\r\n print(res)\r\n final_params = res.x\r\n for i in range(num):\r\n final_params[2*i+1] = np.exp(final_params[2*i+1])\r\n results = []\r\n for i in range(num):\r\n results.append(final_params[2*i])\r\n results.append(logistic.isf(0.25, loc=final_params[2*i], scale=final_params[2*i+1]) - final_params[2*i])\r\n\r\n for i in range(num-1):\r\n results.append(final_params[2*num + i])\r\n\r\n return results\r\n\r\ndef estimate_log(data, num = 1, tol = 0.01, maxiter = 100):\r\n fit_params = np.zeros(3*num - 1)\r\n a = np.average(data)\r\n s = np.log(np.std(data))\r\n for i in range(num):\r\n fit_params[2*i] = np.random.normal(loc=a, scale=np.exp(s), size=1)\r\n fit_params[2*i+1] = np.random.normal(loc=s - np.log(num), scale=1, size=1)\r\n \r\n def training_likelihood(params):\r\n return log_likelihood_logistic(data, 
params)\r\n\r\n def training_loss(params):\r\n return -log_likelihood_logistic(data, params)\r\n \r\n training_likelihood_jac = grad(training_likelihood)\r\n training_loss_jac = grad(training_loss)\r\n\r\n res = minimize(training_loss, jac=training_loss_jac, x0=fit_params, method=\"BFGS\", options = {\"maxiter\": maxiter, \"gtol\": tol})\r\n print(res)\r\n final_params = res.x\r\n for i in range(num):\r\n final_params[2*i+1] = np.exp(final_params[2*i+1])\r\n results = []\r\n for i in range(num):\r\n results.append(final_params[2*i])\r\n results.append(logistic.isf(0.25, loc=final_params[2*i], scale=final_params[2*i+1]) - final_params[2*i])\r\n\r\n for i in range(num-1):\r\n results.append(final_params[2*num + i])\r\n\r\n return results\r\n\r\ndef estimate_powell(data, num = 1, tol = 0.01, maxiter = 100):\r\n fit_params = np.zeros(3*num - 1)\r\n a = np.average(data)\r\n s = np.log(np.std(data))\r\n for i in range(num):\r\n fit_params[2*i] = np.random.normal(loc=a, scale=np.exp(s), size=1)\r\n fit_params[2*i+1] = np.random.normal(loc=s - np.log(num), scale=1, size=1)\r\n \r\n def training_likelihood(params):\r\n return log_likelihood_logistic(data, params)\r\n\r\n def training_loss(params):\r\n return -log_likelihood_logistic(data, params)\r\n \r\n training_likelihood_jac = grad(training_likelihood)\r\n training_loss_jac = grad(training_loss)\r\n\r\n res = minimize(training_loss, x0=fit_params, method=\"Powell\", tol=tol, options = {\"maxiter\": maxiter})\r\n print(res)\r\n final_params = res.x\r\n for i in range(num):\r\n final_params[2*i+1] = np.exp(final_params[2*i+1])\r\n results = []\r\n for i in range(num):\r\n results.append(final_params[2*i])\r\n results.append(logistic.isf(0.25, loc=final_params[2*i], scale=final_params[2*i+1]) - final_params[2*i])\r\n\r\n for i in range(num-1):\r\n results.append(final_params[2*num + i])\r\n\r\n return results\r\n"
] | [
[
"scipy.optimize.minimize",
"scipy.stats.logistic.isf"
]
] |
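log_likelihood_logistic in the record above stabilizes the mixture likelihood by subtracting the average log-pdf before exponentiating, which is a shift-based log-sum-exp. A sketch of the same identity checked against scipy.special.logsumexp; the numeric values are illustrative:

import numpy as np
from scipy.special import logsumexp

log_pdfs = np.array([-1200.0, -1201.0, -1199.5])  # exp() alone underflows
weights = np.array([1.0, 1.0, 2.0])

# shift by any constant c: log(sum w*exp(l)) = c + log(sum w*exp(l - c))
c = log_pdfs.mean()                                 # the code shifts by the mean
manual = c + np.log(np.sum(weights * np.exp(log_pdfs - c)))
stable = logsumexp(log_pdfs, b=weights)             # library equivalent
assert np.isclose(manual, stable)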
Dangaran/home_station_project | [
"890b342e79e3dd493a8f418ed9283f0d444e5073"
] | [
"info_summary/get_summary_pdf.py"
] | [
"import requests\nimport pandas as pd\nfrom plotnine import *\nimport json\nimport time\nfrom fpdf import FPDF\nfrom datetime import datetime\n\n\n# change pandas display options\npd.options.display.max_columns = 101\npd.options.display.max_rows = 200\npd.options.display.precision = 7\n\n# get aemet and home information\nlast_day = {\n 'date_start': int(time.time()) - 86400,\n 'date_end': int(time.time())\n}\nresponse_aemet = requests.post('url_to_aws_lambda/get-aemet-data', json=last_day)\naemet_info = json.loads(response_aemet.text)\n\nresponse_home = requests.post('url_to_aws_lambda/get-home-data', json=last_day)\nhome_info = json.loads(response_home.text)\n\n\n# merge dataframes\naemet_info_df = pd.DataFrame(aemet_info)\naemet_info_df.sort_values(by=\"timestamp\", inplace=True)\n\nhome_info_df = pd.DataFrame(home_info)\nhome_info_df.sort_values(by=\"timestamp\", inplace=True)\n\nlast_day_info = pd.merge(aemet_info_df, home_info_df, on='timestamp', suffixes=(\"_aemet\", \"_home\"))\n\nlast_day_info = last_day_info.iloc[100:124, :]\n\n\n\n# -----------------------------------------------------------\n# \n# TEMPERATURE ANALYSIS\n#\n# -----------------------------------------------------------\n# prepare data for plotting\nhome_temp_threshold = 20\n# transform hour column to string and sort them \nlast_day_info['hour'] = last_day_info['hour'].astype(str) \nlast_day_info['hour'] = pd.Categorical(last_day_info['hour'], categories=last_day_info['hour'])\n\n# melt data to plot temperatures\ntemp_data_to_plot = last_day_info.melt(id_vars=['hour'], value_vars=['thermal_sensation', 'temperature_aemet', 'temperature_home'], var_name='temp_loc', value_name='temp_value')\n\n# change temp_loc to more readable strings for plotting\ntemp_data_to_plot['temp_loc'].replace({'thermal_sensation': 'Thermal sensation (outside)', \n 'temperature_aemet': 'Temperature (outside)',\n 'temperature_home': 'Temperature (home)',}, inplace=True)\n\n# get home data\nhome_temp_plot = temp_data_to_plot.loc[temp_data_to_plot.temp_loc == 'Temperature (home)', :]\n\n# make the plot\ntemp_plot = ggplot(temp_data_to_plot, aes(x = 'hour', y = 'temp_value', color = 'temp_loc', group = 'temp_loc')) +\\\n geom_line() +\\\n geom_point(size = .5) +\\\n geom_point(aes(x='hour', y='temp_value'), size = .5, color = ['#FF6633' if value <= home_temp_threshold else '#64f564' for value in list(home_temp_plot['temp_value'])], data = home_temp_plot) +\\\n geom_hline(aes(yintercept= home_temp_threshold), size = 1, linetype = 'dotted', alpha = .2) +\\\n labs(title = 'Differences in temperature between outside and inside your house', x = 'Hour', y = 'Temperature (ºC)', color='') +\\\n scale_color_manual(values = ['#64f564', '#e6454a', '#6bb8ff']) +\\\n theme_classic() +\\\n theme(plot_title=element_text(face='bold', ha= 'center', size = 10))\n\nggsave(plot=temp_plot, filename='./today_plots/temp_plot.png', dpi=100)\n\n\n\n\n# -----------------------------------------------------------\n# \n# HUMIDITY ANALYSIS\n#\n# -----------------------------------------------------------\n# prepare plot\nhum_data_to_plot = last_day_info.melt(id_vars=['hour'], value_vars=['humidity_home', 'humidity_aemet'], var_name='hum_loc', value_name='hum_value')\nhum_data_to_plot.hum_value = pd.to_numeric(hum_data_to_plot.hum_value, errors = 'raise')\nhum_data_to_plot['hum_loc'].replace({'humidity_aemet': 'Humidity (outside)',\n 'humidity_home': 'Humidity (home)',}, inplace=True)\n\n\n# create the plot\nhum_plot = ggplot(hum_data_to_plot, aes(x = 'hour', y = 'hum_value', 
fill = 'hum_loc')) +\\\n geom_bar(stat = 'identity', position='dodge', color = 'grey') +\\\n labs(title = 'Differences in humidity between outside and inside your house', x = 'Hour', y = 'Relative humidity (%)', fill='') +\\\n scale_fill_manual(values = ['#9da6d4', '#4f66e0']) +\\\n theme_classic() +\\\n theme(plot_title=element_text(face='bold', ha= 'center', size = 10))\n\nggsave(plot=hum_plot, filename='./today_plots/hum_plot.png', dpi=100)\n\n\n\n# -----------------------------------------------------------\n# \n# WIND ANALYSIS\n#\n# -----------------------------------------------------------\n# Wind information\n# avg and max speed\navg_wind_speed = round(last_day_info.avg_wind_speed.apply(lambda x: int(x)).mean(), 2)\nmax_wind_speed = round(last_day_info.max_wind_speed.apply(lambda x: int(x)).max(), 2)\n\n# prepare plot\n# count number of cardinal directions \ncardinal_dir_list = ['N', 'NE', 'E', 'SE', 'S', 'SO', 'O', 'NO']\nwind_dir_df = last_day_info.wind_direction.value_counts().to_frame()\nwind_dir_df.reset_index(inplace =True)\nwind_dir_df.rename(columns = {'index': 'cardinal_direction'}, inplace = True)\nwind_dir_df\n\n# complete cardinal column\nmissing_dir = list(set(cardinal_dir_list) - set(wind_dir_df.cardinal_direction.to_list()))\nfor direction in missing_dir:\n wind_dir_df = wind_dir_df.append({'cardinal_direction': direction,\n 'wind_direction': 0}, ignore_index=True)\n\nwind_dir_df\n# create column with correct order to plot\nwind_dir_df = wind_dir_df.sort_values(by = 'cardinal_direction').reset_index(drop = True)\nwind_dir_df['cardinal_order'] = [2, 0, 1, 7, 6, 4, 3, 5]\nwind_dir_df = wind_dir_df.sort_values(by = 'cardinal_order')\nwind_dir_df.index = wind_dir_df.cardinal_order\n\n\n# create x and y axis\nwind_dir_df['x_axis'] = [0,\n int(wind_dir_df.loc[wind_dir_df.cardinal_direction == 'NE', 'wind_direction']),\n int(wind_dir_df.loc[wind_dir_df.cardinal_direction == 'E', 'wind_direction']), \n int(wind_dir_df.loc[wind_dir_df.cardinal_direction == 'SE', 'wind_direction']),\n 0,\n int(-wind_dir_df.loc[wind_dir_df.cardinal_direction == 'SO', 'wind_direction']),\n int(-wind_dir_df.loc[wind_dir_df.cardinal_direction == 'O', 'wind_direction']),\n int(-wind_dir_df.loc[wind_dir_df.cardinal_direction == 'NO', 'wind_direction'])] \n\nwind_dir_df['y_axis'] = [int(wind_dir_df.loc[wind_dir_df.cardinal_direction == 'N', 'wind_direction']),\n int(wind_dir_df.loc[wind_dir_df.cardinal_direction == 'NE', 'wind_direction']),\n 0,\n int(-wind_dir_df.loc[wind_dir_df.cardinal_direction == 'SE', 'wind_direction']),\n int(-wind_dir_df.loc[wind_dir_df.cardinal_direction == 'S', 'wind_direction']),\n int(-wind_dir_df.loc[wind_dir_df.cardinal_direction == 'SO', 'wind_direction']),\n 0,\n int(wind_dir_df.loc[wind_dir_df.cardinal_direction == 'NO', 'wind_direction'])] \n\n# remove 0 columns to plot\nwind_dir_df = wind_dir_df.loc[wind_dir_df.wind_direction != 0, :]\n\n# create the plot\nwind_plot = ggplot(aes(x = 'x_axis', y = 'y_axis'), wind_dir_df) +\\\n geom_point(size = .3, color = 'darkgreen') +\\\n geom_polygon(alpha = .2) +\\\n xlim(-24, 24) +\\\n ylim(-24, 24) +\\\n geom_segment(aes(x=0, xend=22, y=0, yend=0), alpha = 0.1, linetype = 'dotted', arrow = arrow()) +\\\n geom_segment(aes(x=0, xend=-22, y=0, yend=0), alpha = 0.1, linetype = 'dotted', arrow = arrow()) +\\\n geom_segment(aes(x=0, xend=0, y=0, yend=22), alpha = 0.1, linetype = 'dotted', arrow = arrow()) +\\\n geom_segment(aes(x=0, xend=0, y=0, yend=-22), alpha = 0.1, linetype = 'dotted', arrow = arrow()) +\\\n annotate('text', 
x=23, y= 0, label = 'E', color = 'darkgreen') +\\\n annotate('text', x=-23.3, y= 0, label = 'O', color = 'darkgreen') +\\\n annotate('text', x=0, y= 24, label = 'N', color = 'darkgreen') +\\\n annotate('text', x=0, y= -24, label = 'S', color = 'darkgreen') +\\\n labs(title = 'Wind direction over the last 24 hours', x = '', y = '') +\\\n theme_classic() +\\\n theme(plot_title=element_text(face='bold', ha= 'center', size = 15),\n panel_grid_major = element_blank(), \n panel_grid_minor = element_blank(), \n panel_background = element_blank(),\n axis_line = element_blank(),\n axis_ticks_major = element_blank(),\n axis_text = element_blank())\n \nggsave(plot=wind_plot, filename='./today_plots/wind_plot.png', dpi=100)\n\n\n\n\n# -----------------------------------------------------------\n# \n# SKY ANALYSIS\n#\n# -----------------------------------------------------------\nmost_common_sky = last_day_info.sky_condition.value_counts().idxmax()\nsnow_probability = round(last_day_info.snow_probability.apply(lambda x: int(x)).mean(), 2)\nprecipitation_probability = round(last_day_info.precipitation_probability.apply(lambda x: int(x)).mean(), 2)\nmost_common_warning_lvl = last_day_info.warning_level.value_counts().idxmax()\ntotal_precipitation = round(last_day_info.precipitation.apply(lambda x: int(x)).sum(), 2)\n\n\n\n\n# -----------------------------------------------------------\n# \n# PEOPLE ANALYSIS\n#\n# -----------------------------------------------------------\n# Check number of people\npeople_df = last_day_info.loc[:, ['hour', 'pic_name']]\npeople_df.pic_name = people_df.pic_name.fillna('No_0_data')\npeople_df['people_count'] = people_df.pic_name.apply(lambda x: int(x.split('_')[1]))\n\nhours_with_people_at_home = people_df.loc[people_df.people_count > 0].shape[0]\nmost_people_in_room = people_df.people_count.value_counts(ascending = True).index[0]\n\nrows_with_most_people = people_df.loc[people_df.people_count == most_people_in_room]\nhours_with_most_people = rows_with_most_people.hour.to_list()\npics_names = rows_with_most_people.pic_name.to_list()\n\n\n\n\n# -----------------------------------------------------------\n# \n# PDF CREATION\n#\n# -----------------------------------------------------------\n# export information in pdf\n# extract date\ntoday_timestamp = int(last_day_info.timestamp.reset_index(drop =True)[5])\ntoday_date = datetime.utcfromtimestamp(today_timestamp).strftime('%d/%m/%Y')\n\n\n# create pdf to export\npdf = FPDF()\npdf.add_page()\npdf.set_xy(0, 5)\npdf.set_font('arial', 'B', 12)\npdf.cell(0, 10, 'Home report from {}'.format(today_date), 0, 2, 'C') # title\npdf.cell(5)\n# subtitle\npdf.set_font('arial', '', 10)\npdf.cell(0, 10, 'This report was extracted from the information gathered by the sensors from your Raspberry and Aemet.', 0, 2, 'C')\npdf.set_font('arial', 'B', 12)\n\n# First analysis - Temperature and Humidity\npdf.cell(60, 10, 'Temperature Analysis:', 0, 0, 'R')\npdf.cell(85, 10, 'Humidity Analysis:', 0, 2, 'R')\n\npdf.image('./today_plots/temp_plot.png', x = 3, y = 35, w = 110, h = 70, type = '', link = '')\npdf.image('./today_plots/hum_plot.png', x = 110, y = 35, w = 100, h = 70, type = '', link = '')\n\n# second analysis - Sky and wind\npdf.set_x(60)\npdf.set_y(110)\n\npdf.cell(0, 10, 'Sky Analysis:', 0, 2, 'L')\n\npdf.set_font('arial', '', 10)\npdf.cell(0, 7, 'Most common sky in 24 hours: {}'.format(most_common_sky), 0, 2, 'L')\npdf.cell(0, 7, 'Most common warning level in 24 hours: {}'.format(most_common_warning_lvl), 0, 2, 'L')\npdf.cell(0, 7, 
'Probability of Precipitation in 24 hours: {} %'.format(precipitation_probability), 0, 2, 'L')\npdf.cell(0, 7, 'Probability of Snow in 24 hours: {} %'.format(snow_probability), 0, 2, 'L')\npdf.cell(0, 7, 'Total Precipitation in 24 hours: {} mm'.format(total_precipitation), 0, 2, 'L')\n\npdf.image('./today_plots/wind_plot.png', x = 110, y = 112, w = 70, h = 60, type = '', link = '')\n\n# third analysis - Pictures from people\npdf.set_y(170)\n\npdf.set_font('arial', 'B', 12)\npdf.cell(0, 10, 'Camera Analysis:', 0, 2, 'L')\n\npdf.set_font('arial', '', 10)\npdf.cell(0, 7, 'Number of hours with people at home: {}'.format(hours_with_people_at_home), 0, 2, 'L')\npdf.cell(0, 7, 'How many people were in the room at the time of maximum capacity?: {}'.format(most_people_in_room), 0, 2, 'L')\npdf.cell(0, 7, 'How many hours was the house with the maximum number of people?: {}'.format(rows_with_most_people.shape[0]), 0, 2, 'L')\npdf.cell(0, 7, 'What were the hours when the house had the maximum number of people?: {}'.format(', '.join(hours_with_most_people)), 0, 2, 'L')\npdf.cell(0, 7, 'What are the picture names that correspond to those hours?: {}'.format(', '.join(pics_names)), 0, 2, 'L')\n\npdf.image('../rapsberry/camera/images/{}'.format(pics_names[0]), x = 15, y = 200, w = 70, h = 60, type = '', link = '')\n\n# save output\npdf.output('test.pdf', 'F')\n\n\n"
] | [
[
"pandas.Categorical",
"pandas.merge",
"pandas.to_numeric",
"pandas.DataFrame"
]
] |
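The weather-report script in the record above leans on one reshape idiom before every plotnine call: pandas melt() turns per-location columns into a long (variable, value) table so a single aes(color=...) can map all series. A minimal sketch, with the record's own column names but invented readings:

# Wide-to-long reshape as used before the temperature plot (values are made up).
import pandas as pd

wide = pd.DataFrame({
    'hour': ['00', '01', '02'],
    'temperature_home': [21.0, 20.5, 20.1],
    'temperature_aemet': [12.3, 11.8, 11.5],
})

long_df = wide.melt(id_vars=['hour'],
                    value_vars=['temperature_home', 'temperature_aemet'],
                    var_name='temp_loc', value_name='temp_value')
print(long_df)  # one row per (hour, series) pair, ready for aes(color='temp_loc')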
Tbarkin121/Tensegrity_IsaacGym | [
"0b6b5227e76b18396862c242a4e8e743248844b3",
"0b6b5227e76b18396862c242a4e8e743248844b3"
] | [
"training/utils/utils.py",
"training/tasks/tensebot.py"
] | [
"# Copyright (c) 2018-2021, NVIDIA Corporation\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n# python\n\nimport numpy as np\nimport torch\nimport random\nimport os\n\ndef set_np_formatting():\n \"\"\" formats numpy print \"\"\"\n np.set_printoptions(edgeitems=30, infstr='inf',\n linewidth=4000, nanstr='nan', precision=2,\n suppress=False, threshold=10000, formatter=None)\n\n\ndef set_seed(seed, torch_deterministic=False):\n \"\"\" set seed across modules \"\"\"\n if seed == -1 and torch_deterministic:\n seed = 42\n elif seed == -1:\n seed = np.random.randint(0, 10000)\n print(\"Setting seed: {}\".format(seed))\n\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n if torch_deterministic:\n # refer to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility\n os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n torch.use_deterministic_algorithms(True)\n else:\n torch.backends.cudnn.benchmark = True\n torch.backends.cudnn.deterministic = False\n\n return seed\n\n# EOF\n",
"import numpy as np\nimport os\nimport torch\nimport time\nimport matplotlib.pyplot as plt\n\nfrom isaacgym import gymutil, gymtorch, gymapi\nfrom isaacgym.torch_utils import *\nfrom isaacgym.gymtorch import *\n\nfrom isaacgymenvs.utils.torch_jit_utils import *\nfrom tasks.base.vec_task import VecTask\n\n\nclass TenseBot(VecTask):\n def __init__(self, cfg, sim_device, graphics_device_id, headless):\n self.cfg = cfg\n self.dt = self.cfg[\"sim\"][\"dt\"]\n\n self.max_episode_length = self.cfg[\"env\"][\"maxEpisodeLength\"]\n # self.randomization_params = self.cfg[\"task\"][\"randomization_params\"]\n # self.randomize = self.cfg[\"task\"][\"randomize\"]\n self.dof_vel_scale = self.cfg[\"env\"][\"dofVelocityScale\"]\n # self.contact_force_scale = self.cfg[\"env\"][\"contactForceScale\"]\n # self.power_scale = self.cfg[\"env\"][\"powerScale\"]\n self.heading_weight = self.cfg[\"env\"][\"headingWeight\"]\n self.up_weight = self.cfg[\"env\"][\"upWeight\"]\n # self.actions_cost_scale = self.cfg[\"env\"][\"actionsCost\"]\n # self.energy_cost_scale = self.cfg[\"env\"][\"energyCost\"]\n # self.joints_at_limit_cost_scale = self.cfg[\"env\"][\"jointsAtLimitCost\"]\n self.death_cost = self.cfg[\"env\"][\"deathCost\"]\n self.termination_height = self.cfg[\"env\"][\"terminationHeight\"]\n\n # self.debug_viz = self.cfg[\"env\"][\"enableDebugVis\"]\n self.plane_static_friction = self.cfg[\"env\"][\"plane\"][\"staticFriction\"]\n self.plane_dynamic_friction = self.cfg[\"env\"][\"plane\"][\"dynamicFriction\"]\n self.plane_restitution = self.cfg[\"env\"][\"plane\"][\"restitution\"]\n\n self.drive_mode = self.cfg[\"env\"][\"actuatorParams\"][\"driveMode\"]\n self.stiffness = self.cfg[\"env\"][\"actuatorParams\"][\"stiffness\"] * self.drive_mode\n self.damping = self.cfg[\"env\"][\"actuatorParams\"][\"damping\"] * self.drive_mode\n self.maxPosition = self.cfg[\"env\"][\"actuatorParams\"][\"maxPosition\"]\n self.maxSpeed = self.cfg[\"env\"][\"actuatorParams\"][\"maxSpeed\"]\n self.maxTorque = self.cfg[\"env\"][\"actuatorParams\"][\"maxTorque\"]\n self.friction = self.cfg[\"env\"][\"actuatorParams\"][\"friction\"]\n self.torqueDecay = self.cfg[\"env\"][\"actuatorParams\"][\"torqueDecay\"]\n\n self.angularDamping = self.cfg[\"env\"][\"assetParams\"][\"angularDamping\"]\n self.angularVelocity = self.cfg[\"env\"][\"assetParams\"][\"angularVelocity\"]\n\n self.goal_dist = self.cfg[\"env\"][\"goalDist\"]\n self.goal_threshold = self.cfg[\"env\"][\"goalThreshold\"]\n\n # obs_buf shapes: (53)\n # obs_buf[0:39] = Rod State x 3 : Pos(3), Ori(4), LinVel(3), AngVel(3)\n # obs_buf[39:42] = Goal Pos : Pos(3)\n # obs_buf[42:45] = vector to goal (3) \n # obs_buf[45:53] = actions : Spring Length Multipliers (9)\n self.cfg[\"env\"][\"numObservations\"] = 54\n # Spring Length Mulitpliers (9)\n self.cfg[\"env\"][\"numActions\"] = 9\n\n super().__init__(config=self.cfg, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless)\n\n # set init state\n pos = self.cfg[\"env\"][\"baseInitState\"][\"pos\"]\n rot = self.cfg[\"env\"][\"baseInitState\"][\"rot\"]\n v_lin = self.cfg[\"env\"][\"baseInitState\"][\"vLinear\"]\n v_ang = self.cfg[\"env\"][\"baseInitState\"][\"vAngular\"]\n state = pos + rot + v_lin + v_ang\n self.base_init_state = torch.tensor(state, device=self.device)\n self.start_rotation = torch.tensor(rot, device=self.device)\n \n # get gym GPU root state tensor\n actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)\n self.root_states = gymtorch.wrap_tensor(actor_root_state)\n 
print('root_state')\n print(self.root_states.cpu().detach().numpy())\n print(self.root_states.shape)\n print('num_envs {}, num_actors {}'.format(self.num_envs, self.num_actors))\n\n self.tensebot_pos = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, 0:3] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)\n self.tensebot_ori = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, 3:7] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)\n self.tensebot_linvel = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, 7:10] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)\n self.tensebot_angvel = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, 10:13] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)\n self.goal_pos = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 3, 0:3] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)\n\n self.gym.refresh_actor_root_state_tensor(self.sim)\n self.tensebot_root_state = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0:3, :]\n self.tensebot_initial_root_states = self.tensebot_root_state.clone()\n # self.tensebot_initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False)\n\n rb_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)\n self.rb_state = gymtorch.wrap_tensor(rb_state_tensor)\n print('rigid_body_state')\n print(self.rb_state.cpu().detach().numpy())\n print(self.rb_state.shape)\n print('num_envs {}, num_bodies {}'.format(self.num_envs, self.num_bodies))\n\n self.rb_pos = self.rb_state.view(self.num_envs, self.num_bodies, 13)[:, :, 0:3] #num_envs, num_rigid_bodies, 13 (pos,ori,Lvel,Avel)\n self.rb_ori = self.rb_state.view(self.num_envs, self.num_bodies, 13)[:, :, 3:7] #num_envs, num_rigid_bodies, 13 (pos,ori,Lvel,Avel)\n self.rb_linvel = self.rb_state.view(self.num_envs, self.num_bodies, 13)[:, :, 7:10] #num_envs, num_rigid_bodies, 13 (pos,ori,Lvel,Avel)\n self.rb_angvel = self.rb_state.view(self.num_envs, self.num_bodies, 13)[:, :, 10:13] #num_envs, num_rigid_bodies, 13 (pos,ori,Lvel,Avel)\n # Used for rewarding moving towards a target\n \n\n # tensebot_avg_pos = torch.mean(self.tensebot_pos, dim=1)\n tensebot_avg_pos = self.tensebot_pos[:,0,:]\n to_target = self.goal_pos - tensebot_avg_pos\n to_target[:, 2] = 0.0\n self.potentials = -torch.norm(to_target, p=2, dim=-1) / self.dt\n self.prev_potentials = self.potentials.clone()\n \n self.goal_reset = torch.ones(self.num_envs, device=self.device, dtype=torch.long)\n goal_ids = self.goal_reset.nonzero(as_tuple=False).squeeze(-1)\n if len(goal_ids) > 0:\n self.reset_goal(goal_ids)\n\n # Measurements for rewards\n self.up_vec = to_torch(get_axis_params(1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))\n self.heading_vec = to_torch([1, 0, 0], device=self.device).repeat((self.num_envs, 1))\n self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))\n\n self.basis_vec0 = self.heading_vec.clone()\n self.basis_vec1 = self.up_vec.clone()\n \n self.frame_count = 0\n self.plot_buffer = []\n self.accumulated_reward = torch.zeros_like(self.rew_buf)\n\n camOffset = gymapi.Vec3(0, -1.5, 0.25)\n camTarget = gymapi.Vec3(self.tensebot_pos[0, 0, 0],self.tensebot_pos[0, 0, 1],self.tensebot_pos[0, 0, 2])\n self.gym.viewer_camera_look_at(self.viewer, None, camOffset+camTarget, camTarget)\n\n def create_sim(self):\n # set the up axis to be z-up given that assets are y-up by default\n self.up_axis_idx = self.set_sim_params_up_axis(self.sim_params, 'z')\n # 
self.sim_params.gravity = gymapi.Vec3(0.0, 0.0, 0.0)\n self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)\n self._create_ground_plane()\n print(f'num envs {self.num_envs} env spacing {self.cfg[\"env\"][\"envSpacing\"]}')\n self._create_envs(self.num_envs, self.cfg[\"env\"]['envSpacing'], int(np.sqrt(self.num_envs)))\n \n\n def _create_ground_plane(self):\n plane_params = gymapi.PlaneParams()\n # set the normal force to be z dimension\n plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)\n plane_params.static_friction = self.plane_static_friction\n plane_params.dynamic_friction = self.plane_dynamic_friction\n self.gym.add_ground(self.sim, plane_params)\n \n def _create_envs(self, num_envs, spacing, num_per_row):\n # define plane on which environments are initialized\n lower = gymapi.Vec3(0.5 * -spacing, -spacing, 0.0)\n upper = gymapi.Vec3(0.5 * spacing, spacing, spacing)\n\n asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../../assets\")\n asset_file = \"urdf/RodAssembly/urdf/RodAssembly.urdf\"\n\n if \"asset\" in self.cfg[\"env\"]:\n asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg[\"env\"][\"asset\"].get(\"assetRoot\", asset_root))\n asset_file = self.cfg[\"env\"][\"asset\"].get(\"assetFileName\", asset_file)\n\n asset_path = os.path.join(asset_root, asset_file)\n asset_root = os.path.dirname(asset_path)\n asset_file = os.path.basename(asset_path)\n\n asset_options = gymapi.AssetOptions()\n asset_options.fix_base_link = False\n asset_options.angular_damping = self.angularDamping\n asset_options.max_angular_velocity = self.angularVelocity\n\n rod_assembly_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)\n self.num_dof = self.gym.get_asset_dof_count(rod_assembly_asset)\n\n goal_asset = self.gym.create_sphere(self.sim, 0.025)\n self.num_bodies = self.gym.get_asset_rigid_body_count(rod_assembly_asset)*3 + self.gym.get_asset_rigid_body_count(goal_asset) #3 rod assemblies per tensebot\n # self.num_actor = get_sim_actor_count\n \n pose = gymapi.Transform()\n\n self.rod_handles = []\n self.tensebot_handles = []\n self.goal_handles = []\n self.envs = []\n self.dof_limits_lower = []\n self.dof_limits_upper = []\n\n for i in range(self.num_envs):\n # create env instance\n tensebot_handle = []\n env_ptr = self.gym.create_env(\n self.sim, lower, upper, num_per_row\n )\n radius = 0.05\n thetas = [0, 3.1415*2/3, 3.1415*4/3] #0 deg, 120 deg, 240 deg\n for t, j in zip(thetas, range(len(thetas))):\n pose.p = gymapi.Vec3(radius*torch.cos(torch.tensor(t)), radius*torch.sin(torch.tensor(t)), 0.1)\n pose.r = gymapi.Quat.from_euler_zyx(-3.1415/4, 0, t) \n rod_handle = self.gym.create_actor(env_ptr, rod_assembly_asset, pose, \"rodassembly{}\".format(j), i, 0, 0)\n\n rand_color = torch.rand((3), device=self.device)\n for j in range(self.num_bodies):\n # self.gym.set_rigid_body_color(\n # env_ptr, tensebot_handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(0.27, 0.1, 0.66))\n self.gym.set_rigid_body_color(\n env_ptr, rod_handle, j, gymapi.MESH_VISUAL, gymapi.Vec3(rand_color[0],rand_color[1],rand_color[2]))\n \n self.rod_handles.append(rod_handle)\n tensebot_handle.append(rod_handle) \n\n\n self.tensebot_handles.append(tensebot_handle)\n self.envs.append(env_ptr)\n\n # Set Up the Goal Actor\n goal_pose = gymapi.Transform()\n goal_pose.p.y = self.goal_dist\n goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_pose, \"goal\", i, 1, 1)\n self.gym.set_rigid_body_color(env_ptr, 
goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.2, 0.8, 0.2))\n self.goal_handles.append(goal_handle)\n\n \n self.num_actors = self.gym.get_actor_count(self.envs[0])\n self.body_dict = self.gym.get_actor_rigid_body_dict(env_ptr, tensebot_handle[0])\n self.joint_dict = self.gym.get_actor_joint_dict(env_ptr, tensebot_handle[0])\n\n print('body_dict:')\n print(self.body_dict)\n for b in self.body_dict:\n print(b)\n print('joint_dict:')\n for j in self.joint_dict:\n print(j) \n\n\n def compute_reward(self):\n self.rew_buf[:], self.reset_buf[:], self.goal_reset = compute_tensebot_reward(\n self.tensebot_pos,\n self.goal_pos,\n self.reset_buf,\n self.progress_buf,\n self.potentials,\n self.prev_potentials,\n self.max_episode_length,\n self.goal_threshold)\n \n def compute_observations(self, env_ids=None):\n if env_ids is None:\n env_ids = np.arange(self.num_envs)\n\n self.gym.refresh_dof_state_tensor(self.sim)\n self.gym.refresh_actor_root_state_tensor(self.sim)\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n # print('self.root_state')\n # print(self.root_states[0,:])\n # print(self.root_states.shape)\n # time.sleep(1)\n\n self.obs_buf[:], self.potentials[:], self.prev_potentials[:] = compute_tensebot_observations(\n self.tensebot_pos,\n self.tensebot_ori,\n self.tensebot_linvel,\n self.tensebot_angvel,\n self.goal_pos, \n self.potentials,\n self.actions, \n self.dt)\n return self.obs_buf\n\n def reset_idx(self, env_ids):\n print('Resetting IDX! Env_IDs = {}'.format(env_ids))\n \n env_ids_int32 = env_ids.to(dtype=torch.int32)*self.num_actors\n env_ids_int32 = torch.cat((env_ids_int32, env_ids_int32+1, env_ids_int32+2))\n\n self.tensebot_root_state[env_ids, :, :] = self.tensebot_initial_root_states[env_ids, :, :]\n\n self.gym.set_actor_root_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.root_states),\n gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))\n self.reset_buf[env_ids] = 0\n self.progress_buf[env_ids] = 0\n self.goal_reset[env_ids] = 1\n \n # actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)\n # self.root_states = gymtorch.wrap_tensor(actor_root_state)\n # self.initial_root_states = self.root_states.clone()\n # self.initial_root_states[:] = to_torch(self.base_init_state, device=self.device, requires_grad=False)\n\n\n # plt.plot([0,0,0])\n # plt.show()\n # if(self.plot_buffer):\n # plot_data = np.array(self.plot_buffer)\n # print(plot_data.shape)\n # plt.plot(plot_data[:,0,0] + plot_data[:,1,0] + plot_data[:,2,0], label=\"Total Reward\")\n # plt.plot(plot_data[:,0,0], label=\"Progress Reward\")\n # plt.plot(plot_data[:,1,0], label=\"Height Reward\")\n # plt.plot(plot_data[:,2,0], label=\"Heading Reward\")\n # plt.ylabel('Reward')\n # plt.xlabel('Steps')\n # plt.grid()\n # plt.legend(loc=\"lower right\")\n # plt.xlim([0, 500])\n # plt.ylim([-0.1, 2.1])\n # plt.show()\n # self.plot_buffer = []\n\n def reset_goal(self, env_ids):\n print('reset_goal')\n self.gym.refresh_actor_root_state_tensor(self.sim)\n # print('Resetting Goals! 
Env_IDs = {}'.format(env_ids))\n # print('Old Goal Position = {}'.format(self.goal_pos))\n\n env_ids_int32 = env_ids.to(dtype=torch.int32)*self.num_actors\n goal_pos_update = torch_rand_float(-self.goal_dist, self.goal_dist, (len(env_ids), 3), device=self.device)\n # goal_pos_update[:,0] = 1000.0\n # goal_pos_update[:,1] = 0.0\n goal_pos_update[:,2] = 0.1\n self.goal_pos[env_ids, :] = goal_pos_update\n self.gym.set_actor_root_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.root_states),\n gymtorch.unwrap_tensor(env_ids_int32+3), len(env_ids_int32))\n\n # self.gym.refresh_actor_root_state_tensor(self.sim)\n # tensebot_avg_pos = torch.mean(self.tensebot_pos, dim=1)\n tensebot_avg_pos = self.tensebot_pos[:,0,:]\n to_target = self.goal_pos[env_ids, :] - tensebot_avg_pos[env_ids, :]\n to_target[:, 2] = 0.0 \n self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt\n self.potentials[env_ids] = self.prev_potentials[env_ids].clone()\n\n self.goal_reset[env_ids] = 0\n # print('New Goal Position = {}'.format(self.goal_pos))\n\n def pre_physics_step(self, actions):\n # print('actions')\n # print(actions)\n # print(actions.shape)\n # print(actions.to(self.device).squeeze().shape())\n self.actions = actions.clone().detach().to(self.device)\n self.calculate_tensegrity_forces(self.actions)\n\n def calculate_tensegrity_forces(self, actions):\n # # print('actions : {}'.format(actions))\n connection_list = []\n # (1,2),(4,5),(7,8) end point indicies (bottom, top) \n # 0, 3, 6 are the body indicies\n # 9 is the goal index\n\n # This might need a low pass filter\n spring_length_multiplier = actions/4 + 1 # Multiplier range from 0.5 to 1.5\n # spring_length_multiplier = torch.rand((self.num_envs, 9), device=self.device)/4 + 1\n # spring_length_multiplier = torch.ones((self.num_envs, 9), device=self.device)*0.1\n \n # Connect All Bottoms\n connection_list.append((1, 4, 0.1))\n connection_list.append((1, 7, 0.1)) \n connection_list.append((4, 7, 0.1)) \n #Connect All Tops\n connection_list.append((2, 5, 0.1))\n connection_list.append((2, 8, 0.1)) \n connection_list.append((5, 8, 0.1)) \n\n #Top1 to Bottom2\n connection_list.append((2, 4, 0.1)) #Body0 top is connected to Body1 bottom\n #Top2 to Bottom3\n connection_list.append((5, 7, 0.1)) #Body0 top is connected to Body1 bottom\n #Top3 to Bottom1 \n connection_list.append((8, 1, 0.1)) #Body0 top is connected to Body1 bottom\n\n #Connect All The Things... 
\n\n \n forces = torch.zeros_like(self.rb_pos, device=self.device, dtype=torch.float)\n force_positions = self.rb_pos.clone()\n lin_vel_mat = torch.zeros((self.num_envs, 2, 3), device=self.device, dtype=torch.float) # Used in calculating damping force\n diff_matrix= torch.tensor([[-1, 1]], device=self.device, dtype=torch.float)\n\n num_lines = len(connection_list)\n line_vertices = torch.zeros((num_lines*2,3), device=self.device, dtype=torch.float)\n line_colors = torch.zeros((num_lines,3), device=self.device, dtype=torch.float)\n\n for connection, i in zip(connection_list, range(len(connection_list))):\n # print(connection)\n # Spring Force\n P1 = self.rb_pos[:, connection[0], :]\n P2 = self.rb_pos[:, connection[1], :]\n endpoint_vector = P1-P2\n # print('endpoint_vector.shape')\n # print(endpoint_vector.shape)\n spring_constant = 25\n damping_coff = 0.99\n spring_length = connection[2] * spring_length_multiplier[:, i]\n # print('spring_length.shape')\n # print(spring_length.shape)\n # print('P1.shape')\n # print(P1.shape)\n # print('P2.shape')\n # print(P2.shape)\n endpoint_distance = torch.norm(endpoint_vector, dim=1)\n # print('endpoint_distance.shape')\n # print(endpoint_distance.shape)\n\n endpoint_vector_normalized = torch.div(endpoint_vector, torch.unsqueeze(endpoint_distance,1).repeat(1,3))\n # print('endpoint_vector_normalized.shape')\n # print(endpoint_vector_normalized.shape)\n spring_force = spring_constant*(endpoint_distance-spring_length)\n # print('spring_force.shape')\n # print(spring_force.shape)\n # Set springs to only work for tension and not compression\n spring_force = torch.max(torch.tensor(spring_force), torch.zeros_like(spring_force))\n applied_force = torch.mul(endpoint_vector_normalized, torch.unsqueeze(spring_force,1).repeat(1,3))\n applied_force = torch.nan_to_num(applied_force, nan=0.0)\n # print('applied force')\n # print(appled_force.shape)\n \n # print('Spring {} Tension = {}'.format(i, spring_force))\n # print('forces.shape')\n # print(forces.shape)\n # print(connection[0])\n # print(connection[1])\n forces[:, connection[0], :] -= applied_force\n forces[:, connection[1], :] += applied_force\n # print('forces[0,:,:]')\n # print(forces[0,:,:])\n # print('applied_force[0,:]')\n # print(applied_force[0,:])\n # print('endpoint_vector_normalized')\n # print(endpoint_vector_normalized)\n # print(endpoint_distance)\n # Damping\n lin_vel_mat[:, 0, :] = self.rb_linvel[:, connection[0], :]\n lin_vel_mat[:, 1, :] = self.rb_linvel[:, connection[1], :]\n EVN_mat = torch.unsqueeze(endpoint_vector_normalized, 2)\n # print(lin_vel_mat.shape)\n # print(EVN_mat.shape)\n damping_force = torch.matmul(diff_matrix, torch.matmul(lin_vel_mat, EVN_mat))*damping_coff\n # print('damping_force.shape')\n # print(torch.squeeze(damping_force, dim=2).shape)\n # print('endpoint_vector_normalized.shape')\n # print(endpoint_vector_normalized.shape)\n damping_force_vector = endpoint_vector_normalized *torch.squeeze(damping_force, dim=2)\n # print('damping_force_vector.shape')\n # print(damping_force_vector.shape)\n damping_force_vector = torch.nan_to_num(damping_force_vector, nan=0.0)\n forces[:, connection[0], :] += damping_force_vector\n forces[:, connection[1], :] -= damping_force_vector\n \n # Draw Spring Connections? 
\n line_vertices[i*2,:] = self.rb_pos[0, connection[0], :]\n line_vertices[i*2+1,:] = self.rb_pos[0, connection[1], :]\n line_colors[i,:] = torch.tensor([1.0, 1.0, 1.0])\n \n self.gym.apply_rigid_body_force_at_pos_tensors(self.sim, gymtorch.unwrap_tensor(forces), gymtorch.unwrap_tensor(force_positions), gymapi.ENV_SPACE)\n self.gym.clear_lines(self.viewer)\n self.gym.add_lines(self.viewer, self.envs[0], num_lines, line_vertices.cpu().detach(), line_colors.cpu().detach())\n \n\n def post_physics_step(self):\n self.progress_buf += 1\n\n env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)\n if len(env_ids) > 0:\n self.reset_idx(env_ids)\n\n goal_ids = self.goal_reset.nonzero(as_tuple=False).squeeze(-1)\n if len(goal_ids) > 0:\n self.reset_goal(goal_ids)\n\n self.compute_observations()\n self.compute_reward()\n\n # Look at the first actor\n env_idx = 0\n camOffset = gymapi.Vec3(0, -1.5, 0.25)\n camTarget = gymapi.Vec3(self.tensebot_pos[env_idx, 0, 0],self.tensebot_pos[env_idx, 0, 1],self.tensebot_pos[env_idx, 0, 2])\n camEnvOffset = gymapi.Vec3(0, 0, 0)\n # print(camOffset)\n # print(camTarget)\n # self.gym.viewer_camera_look_at(self.viewer, None, camOffset+camTarget+camEnvOffset, camTarget+camEnvOffset)\n # time.sleep(0.1) \n # self.debug_printout(env_ids)\n\n def debug_printout(self, env_ids):\n self.accumulated_reward += self.rew_buf\n # print('potentials and previous potentials')\n # print(self.potentials)\n # print(self.prev_potentials)\n print('reward buf')\n print(self.rew_buf)\n if len(env_ids) > 0:\n self.accumulated_reward[env_ids] = 0\n print('self.accumulated_reward')\n print(self.accumulated_reward)\n # # print('DEBUG PRINTOUTS')\n # # body_height = self.obs_buf[:,2]\n # # up_projection = self.obs_buf[:,29]\n # # heading_projection = self.obs_buf[:, 30] \n # # heading_reward = self.heading_weight * heading_projection \n # # # aligning up axis and environment\n # # up_reward = torch.zeros_like(heading_reward)\n # # up_reward = torch.where(up_projection > 0.93, up_reward + self.up_weight, up_reward)\n # # # reward for duration of staying alive\n # # progress_reward = self.potentials - self.prev_potentials\n # # total_reward = progress_reward + up_reward + heading_reward]\n # xtream_rewards = torch.abs(self.rew_buf) > 5\n # # print('ProgressReward[3] : {} = {} - {}'.format(progress_reward[3], self.potentials[3], self.prev_potentials[3]))\n # # print('EnvReset[3], GoalReset[3] : {}, {}'.format(self.reset_buf[3], self.goal_reset[3]))\n # # print('Bot Pos, Goal Pos = {}, {}'.format(self.tensebot_pos[3,:], self.goal_pos[3,:]))\n # if(torch.any(xtream_rewards)):\n # print('XTREAM REWARD DETECTED')\n # xtream_idx = xtream_rewards.nonzero().cpu().detach().numpy()\n # print(\"xtream index = {}\".format(xtream_idx))\n # print(self.rew_buf[xtream_idx])\n # print('Progress Reward : {} = {} - {}'.format(progress_reward[xtream_idx], self.potentials[xtream_idx], self.prev_potentials[xtream_idx]))\n # print('EnvReset, GoalReset : {},{}'.format(self.reset_buf[xtream_idx], self.goal_reset[xtream_idx]))\n # time.sleep(10)\n # print()\n # # print('{:.2f} = {:.2f} + {:.2f} + {:.2f}'.format(total_reward[0], heading_reward[0], up_reward[0], progress_reward[0]))\n\n # # print(' self.reset_buf')\n # # print( self.reset_buf)\n # # tmp_progress_reward = self.potentials - self.prev_potentials\n # # if( np.abs(tmp_progress_reward[0].cpu().detach().numpy()) > 1):\n # # print('{} : {} : {}'.format(tmp_progress_reward[0], self.potentials[0], self.prev_potentials[0]))\n # # time.sleep(1)\n # # 
tmp_height_reward = self.obs_buf[:,0]\n # # tmp_heading_reward = self.rew_buf - tmp_progress_reward\n # # self.plot_buffer.append((tmp_progress_reward.cpu().detach().numpy(),\n # # tmp_height_reward.cpu().detach().numpy(),\n # # tmp_heading_reward.cpu().detach().numpy()))\n \n\n#####################################################################\n###=========================jit functions=========================###\n#####################################################################\[email protected]\ndef compute_tensebot_reward(\n tensebot_pos,\n goal_pos,\n reset_buf,\n progress_buf,\n potentials,\n prev_potentials,\n max_episode_length,\n goal_threshold):\n # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, float) -> Tuple[Tensor, Tensor, Tensor]\n\n # reward for duration of staying alive\n progress_reward = potentials - prev_potentials\n\n total_reward = progress_reward\n\n # reset agents\n reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf)\n\n # tensebot_avg_pos = torch.mean(tensebot_pos, dim=1)\n tensebot_avg_pos = tensebot_pos[:,0,:]\n distance_to_goal = torch.norm(tensebot_avg_pos - goal_pos, dim=-1)\n goal_reached = torch.where(distance_to_goal < goal_threshold, 1, 0)\n goal_reset = torch.where(goal_reached==1, 1, 0)\n\n return total_reward, reset, goal_reset\n\n\[email protected]\ndef compute_tensebot_observations(tensebot_pos, #Tensor\n tensebot_ori, #Tensor\n tensebot_linvel, #Tensor\n tensebot_angvel, #Tensor\n goal_pos, #Tensor\n potentials, #Tensor\n actions, #Tensor\n dt #float\n ):\n # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor, Tensor]\n # tensebot_avg_pos = torch.mean(tensebot_pos, dim=1)\n tensebot_avg_pos = tensebot_pos[:,0,:]\n to_target = goal_pos - tensebot_avg_pos\n to_target[:, 2] = 0.0\n to_target_norm = torch.div(to_target, torch.unsqueeze(torch.norm(to_target, p=2, dim=-1),1).repeat(1,3))\n\n prev_potentials_new = potentials.clone()\n potentials = -torch.norm(to_target, p=2, dim=-1) / dt\n\n # torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up(\n # tensebot_ori, inv_start_rot, to_target, basis_vec0, basis_vec1, 2)\n\n # vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot(\n # torso_quat, tensebot_linvel, tensebot_angvel, goal_pos, tensebot_pos)\n \n # obs_buf shapes: (53)\n # obs_buf[0:39] = Rod State x 3 : Pos(3), Ori(4), LinVel(3), AngVel(3)\n # obs_buf[39:42] = Goal Pos : Pos(3)\n # obs_buf[42:45] = vector to goal (3) \n # obs_buf[45:53] = actions : Spring Length Multipliers (9)\n obs = torch.cat((tensebot_pos[:,0,:], tensebot_ori[:,0,:], tensebot_linvel[:,0,:], tensebot_angvel[:,0,:], \n tensebot_pos[:,1,:], tensebot_ori[:,1,:], tensebot_linvel[:,1,:], tensebot_angvel[:,1,:], \n tensebot_pos[:,2,:], tensebot_ori[:,2,:], tensebot_linvel[:,2,:], tensebot_angvel[:,2,:], \n goal_pos, to_target_norm, actions), dim=-1)\n\n return obs, potentials, prev_potentials_new"
] | [
[
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.manual_seed",
"numpy.set_printoptions",
"torch.use_deterministic_algorithms",
"torch.cuda.manual_seed_all",
"numpy.random.randint"
],
[
"torch.norm",
"torch.ones",
"numpy.sqrt",
"torch.cat",
"torch.zeros",
"numpy.arange",
"torch.zeros_like",
"torch.nan_to_num",
"torch.unsqueeze",
"torch.tensor",
"torch.matmul",
"torch.rand",
"torch.where",
"torch.ones_like",
"torch.squeeze"
]
] |
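The core of calculate_tensegrity_forces() in the record above is a tension-only Hooke spring: a force along the normalized endpoint vector, clamped so cables can pull but never push. A standalone sketch under that reading, with the damping term omitted and toy endpoints:

# Distilled cable-force rule; k=25 matches the spring_constant in the record.
import torch

def cable_force(p1, p2, rest_len, k=25.0):
    """p1, p2: (N, 3) endpoint positions; rest_len: (N,) rest lengths."""
    vec = p1 - p2                                   # endpoint vector P2 -> P1
    dist = torch.norm(vec, dim=1)                   # current cable length, (N,)
    direction = vec / dist.unsqueeze(1)             # unit vectors, (N, 3)
    tension = torch.clamp(k * (dist - rest_len), min=0.0)  # pull only, never push
    return direction * tension.unsqueeze(1)         # added at P2, subtracted at P1

p1 = torch.tensor([[0.0, 0.0, 0.2]])
p2 = torch.tensor([[0.0, 0.0, 0.0]])
print(cable_force(p1, p2, torch.tensor([0.1])))     # tensor([[0., 0., 2.5]])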
monperrus/iFixR | [
"5548f3ba91341dc9e73057269f8c01a0b1b6fc68"
] | [
"code/common/preprocessing.py"
] | [
"from nltk.tokenize import RegexpTokenizer\n# from stop_words import get_stop_words\nfrom nltk.stem.porter import PorterStemmer\nfrom string import punctuation\nimport re\nfrom nltk.corpus import stopwords\nen_stop = stopwords.words('english')\nfrom nltk.corpus import wordnet\nimport html\n\nfrom common.commons import *\nCODE_PATH = os.environ[\"CODE_PATH\"]\n\nimport spacy\nnlp = spacy.load('en_core_web_lg', disable=['parser', 'tagger', 'ner'])\nnlp.max_length =100000000\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nimport sys\n\ndef preprocessingCodeElementsList(res):\n printDetail = False\n if isinstance(res, list):\n merged = str()\n for r in res:\n if isinstance(r, list):\n merged = merged + ' ' + ' '.join(r)\n else:\n merged = merged +' ' + r\n else:\n merged=res\n\n res = html.unescape(merged)\n\n tokens = getTokens(res,printDetail)\n\n stripped = []\n for t in tokens:\n splits = re.split('\\.|\\(|\\)|:|>|<|:|=|/|\\\\\\\\|\\'|-',t)\n for s in splits:\n stripped.append(s)\n punc = removeEndingPunct(stripped,printDetail)\n\n non_empty = [i for i in punc if i != '']\n\n stripped = removeEndingPunct(non_empty,printDetail)\n\n camelCase = handleCamelCase(stripped,printDetail,True)\n\n underScore = handleUnderScore(camelCase,printDetail,True)\n\n lower = [i.lower() for i in underScore]\n\n stopped_tokens = [i for i in lower if not i in en_stop]\n\n stem2 = stem(stopped_tokens,printDetail)\n if printDetail:\n print('=====CLEANED=========')\n print(stem2)\n\n return stem2\n\ndef preprocessingNL(res):\n\n printDetail = False\n\n if isinstance(res, list):\n merged = str()\n for r in res:\n if isinstance(r, list):\n merged = merged + ' ' + ' '.join(r)\n else:\n merged = merged +' ' + r\n else:\n merged=res\n\n res = html.unescape(merged)\n html_decoded_string = res.replace(\"&\", \"&\").replace(\""\", '\"').replace(\"'\", \"'\").replace(\">\",\n \">\").replace(\n \"<\", \"<\")\n html_decoded_string = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '',html_decoded_string)\n\n tokens = getTokens(html_decoded_string,printDetail)\n\n stripped = []\n for t in tokens:\n splits = re.split('\\.|\\(|\\)|:|>|<|:|=|/|\\\\\\\\|\\'|-',t)\n for s in splits:\n stripped.append(s)\n punc = removeEndingPunct(stripped,printDetail)\n\n non_empty = [i for i in punc if i != '']\n\n stripped = removeEndingPunct(non_empty,printDetail)\n\n camelCase = handleCamelCase(stripped,printDetail,True)\n\n underScore = handleUnderScore(camelCase,printDetail,True)\n\n lower = [i.lower() for i in underScore]\n\n stopped_tokens = [i for i in lower if not i in en_stop]\n\n nonDigit = [i for i in stopped_tokens if (not i.isdigit())]\n\n doc = nlp(' '.join(nonDigit))\n newWord = []\n for token in doc:\n if(token.text in nlp.vocab):\n newWord.append(token.text)\n\n stem2 = stem(newWord,printDetail)\n\n if printDetail:\n print('=====CLEANED=========')\n print(stem2)\n\n return stem2\n\ndef getTokens(re,printDetail=False):\n tokenizer = RegexpTokenizer(r'\\S+')\n tokens = tokenizer.tokenize(re)\n if printDetail:\n print('=====TOKENS=========')\n print(tokens)\n\n return tokens\n\ndef charLength(x, l=3):\n if x.isalpha() and len(x) >= l:\n return True\n else:\n return False\n\n\ndef removeEndingPunct(re,printDetail):\n stripped = [i.strip(punctuation) for i in re]\n if printDetail:\n print('=====removeEndingPunct=========')\n print(stripped)\n return stripped\n\ndef handleCamelCase(re,printDetail=False,keepOriginal = 
False):\n camelCased = list()\n\n for i in re:\n listOfCC = camel_case_split(i)\n camelCased.extend(listOfCC)\n if i not in listOfCC and keepOriginal:\n camelCased.append(i)\n\n if printDetail:\n print('=====CAMEL CASE=========')\n print(camelCased)\n return camelCased\n\ndef handleUnderScore(re,printDetail=False,keepOriginal = False):\n underScored = list()\n for i in re:\n listOfCC = i.split('_')\n underScored.extend(listOfCC)\n if i not in listOfCC and keepOriginal:\n underScored.append(i)\n\n if printDetail:\n print('=====UNDER SCORE=========')\n print(underScored)\n\n return underScored\n\ndef camel_case_split(identifier):\n matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)\n res = [m.group(0) for m in matches]\n\n return res\n\ndef stem(res,printDetail):\n p_stemmer = PorterStemmer()\n stemmed_tokens = [p_stemmer.stem(i.strip()) for i in res if i]\n if printDetail:\n print('=====STEMMED=========')\n print(stemmed_tokens)\n return stemmed_tokens\n\ndef isEnglish(word_to_test):\n if not wordnet.synsets(word_to_test):\n #Not an English Word\n #TODO\n word_to_test\n #print word_to_test\n else:\n return word_to_test\n\n\ndef dummy_fun(doc):\n return doc\n\ndef calculateTfIdfCodeElementsList(aCorpus):\n global progress\n progress = 0\n v = TfidfVectorizer(tokenizer=dummy_fun,stop_words=None,lowercase=False,sublinear_tf=True)#,max_df=0.7,min_df=3)\n m = v.fit(aCorpus)\n return v\n\ndef calculateTfIdfNLList(aCorpus):\n global progress\n progress = 0\n v = TfidfVectorizer(tokenizer=dummy_fun,stop_words=None,lowercase=False,sublinear_tf=True)#,max_df=0.7,min_df=3)\n m = v.fit(aCorpus)\n return v\n\ndef getDTMNL(x,v,corpus):\n ind =x.name\n v.tokenizer = dummy_fun\n return v.transform([corpus[ind]])\ndef getDTMCE(x,v,corpus):\n ind =x.name\n v.tokenizer = dummy_fun\n return v.transform([corpus[ind]])\n\ndef getBRDTM(x,v,corpus):\n ind =x.name\n v.tokenizer = dummy_fun\n return v.transform([corpus[ind]])\n\n\ndef getBRDTMCEs(x,v,corpus):\n ind =x.name\n v.tokenizer = dummy_fun\n return v.transform([corpus[ind]])\n"
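The vectorizer setup in preprocessing.py above uses the identity-tokenizer trick so TfidfVectorizer accepts corpora that are already token lists. A self-contained sketch; the two toy documents are invented, and preprocessor=dummy_fun is an extra belt-and-braces setting that the record leaves implicit via lowercase=False:

# TF-IDF over pre-tokenized documents, mirroring calculateTfIdfNLList().
from sklearn.feature_extraction.text import TfidfVectorizer

def dummy_fun(doc):
    return doc  # identity: documents arrive as token lists already

docs = [['null', 'pointer', 'except'], ['index', 'out', 'bound', 'except']]
v = TfidfVectorizer(tokenizer=dummy_fun, preprocessor=dummy_fun,
                    lowercase=False, sublinear_tf=True)
m = v.fit_transform(docs)
print(sorted(v.vocabulary_))  # vocabulary built straight from the tokens
print(m.shape)                # (2 documents, 6 distinct tokens)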
] | [
[
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
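The camel_case_split() regex in the record above is worth seeing in isolation: it cuts before each lowercase-to-uppercase boundary and before an uppercase run that starts a new capitalized word, which keeps acronyms intact. Copied verbatim, with one illustrative call:

# Same regex as in preprocessing.py; only the example identifier is new.
import re

def camel_case_split(identifier):
    matches = re.finditer(
        '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]

print(camel_case_split('getXMLParserInstance'))  # ['get', 'XML', 'Parser', 'Instance']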
egilbertson-ucsf/algHW2 | [
"eec0f4e42e27d4c7633cc907d6f523285fadd79c"
] | [
"hw2skeleton/k_means.py"
] | [
"from hw2skeleton import cluster as cl\nfrom hw2skeleton import io\nimport sklearn.metrics as sk\nimport os\nimport pandas as pd\nimport numpy as np\nimport math\naa3 = \"ALA CYS ASP GLU PHE GLY HIS ILE LYS LEU MET ASN PRO GLN ARG SER THR VAL TRP TYR\".split()\naa_df = pd.DataFrame(0, index=list(aa3), columns=['Count'])\n\n\ndef calc_avg_site_length(sites):\n '''\n calculate the average size of an active site\n for use in generating random sites\n '''\n ss = []\n for site in sites:\n ss.append(len(site.residues))\n\n return [sum(ss) / len(sites), max(ss), min(ss)]\n\n\ndef generate_random_site(sites):\n '''\n generate a random site by filling in a 1x20 vector repr of amino acids with counts\n '''\n lens = calc_avg_site_length(sites)\n num_res = np.random.randint(lens[2],lens[1])\n site = aa_df.copy()\n\n for pos in range(num_res):\n aa = np.random.randint(0,19)\n site.iloc[aa] += 1\n\n return site\n\ndef generate_k_random_centroids(k, sites):\n '''\n generate k random sites using above function\n '''\n centroids = {}\n for i in range(k):\n centroids[i] = generate_random_site(sites)\n return centroids\n\ndef assign_single_site_to_cluster(site, centroids):\n '''\n check which cluster centroid is closest to the given site and assign the\n site to that cluster\n '''\n loc = site.counts\n dists = {}\n for c in centroids.keys():\n dist = cl.compute_similarity(loc, centroids[c])\n dists[dist] = c\n closest = dists[min(dists.keys())]\n return closest\n\ndef assign_all_sites_to_cluster(sites, centroids, clusters):\n '''\n loop through all sites and assign them to the appropriate clusters\n '''\n for site in sites:\n close = assign_single_site_to_cluster(site, centroids)\n if close not in clusters:\n clusters[close] = [site]\n else:\n clusters[close].append(site)\n for cent in centroids:\n if cent not in clusters:\n clusters[cent] = []\n return clusters\n\ndef compute_cluster_center(cluster_list, sites_dict):\n '''\n compute the center of a cluster by taking the average of the vector representations\n of all sites in the cluster\n '''\n sites = aa_df.copy()\n for j in cluster_list:\n if isinstance(j, str):\n sites += sites_dict[j].counts\n else:\n sites += j.counts\n return sites / len(sites)\n\ndef get_new_centroids(clusters, sites_dict=None):\n '''\n use the compute_cluster_center function to get the new centroids after updating\n assignments\n '''\n centroids = {}\n for cluster in clusters.keys():\n centroids[cluster] = compute_cluster_center(clusters[cluster], sites_dict)\n return centroids\n\ndef check_change_in_centroids(old_centroids, new_centroids):\n ''' check how far the centroids have moved '''\n diff = 0\n for c in old_centroids.keys():\n diff += cl.compute_similarity(old_centroids[c], new_centroids[c])\n return diff\n\ndef one_full_k_means(sites, k):\n ''' using all above functions, one full iteration of k means'''\n centroids = generate_k_random_centroids(k, sites)\n clusters = {}\n clusters = assign_all_sites_to_cluster(sites, centroids, clusters)\n new_centroids = get_new_centroids(clusters)\n old_diff = check_change_in_centroids(centroids, new_centroids)\n new_diff = 0\n while old_diff - new_diff > 0.00001:\n old_diff = check_change_in_centroids(centroids, new_centroids)\n centroids = new_centroids.copy()\n clusters = {}\n clusters = assign_all_sites_to_cluster(sites, centroids, clusters)\n new_centroids = get_new_centroids(clusters)\n new_diff = check_change_in_centroids(centroids, new_centroids)\n return clusters, centroids\n\ndef compute_similarity_matrix(sites):\n ''' copy of 
compute similarity matrix from utils '''\n\n    simMat = []\n    names = []\n    for i in range(len(sites)):\n        names.append(sites[i].name)\n        row = []\n        for j in range(len(sites)):\n            row.append(cl.compute_similarity(sites[i].counts,sites[j].counts))\n        simMat.append(row)\n    simMat = pd.DataFrame(simMat, columns = names, index = names)\n\n    return simMat\n\ndef make_cluster_assign_df(clusters, simMat):\n    ''' make a nice df repr of the cluster assignments'''\n    assgn = pd.DataFrame(index = simMat.index, columns = ['Cluster Assignment'])\n    for cluster in clusters.keys():\n        for site in clusters[cluster]:\n            assgn.loc[site.name] = cluster\n    return assgn\n\ndef avg_sl(sites, k, simMat):\n    ''' average silhouette_score for i random starts of k means for k clusters'''\n\n    scores = []\n    c_list = []\n    for i in range(1):\n        clusters, centroids = one_full_k_means(sites, k)\n        assgn = make_cluster_assign_df(clusters, simMat)\n        c_list.append(clusters)\n        scores.append(sk.silhouette_score(simMat, assgn['Cluster Assignment'], metric='precomputed'))\n    return scores, clusters\n\n\n\ndef k_means(sites=None):\n    ''' run k means '''\n    sites = io.read_active_sites('data')\n    simMat = compute_similarity_matrix(sites)\n    points = [[],[]]\n    clusters = []\n    for i in range(2,5):\n        points[0].append(i)\n        temp = avg_sl(sites, i , simMat)\n        points[1].append(temp[0])\n        clusters.append(temp[1])\n\n    return clusters[points[1].index(max(points[1]))], max(points[1])\n"
] | [
[
"sklearn.metrics.silhouette_score",
"pandas.DataFrame",
"numpy.random.randint"
]
] |
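One detail of avg_sl() in the record above deserves a note: scikit-learn's silhouette_score with metric='precomputed' is documented to expect a distance matrix, so if compute_similarity() really returns a similarity it should be converted first. A toy 4x4 case with invented values:

# Similarity -> distance conversion before a precomputed silhouette score.
import numpy as np
from sklearn.metrics import silhouette_score

sim = np.array([[1.0, 0.9, 0.1, 0.2],
                [0.9, 1.0, 0.2, 0.1],
                [0.1, 0.2, 1.0, 0.8],
                [0.2, 0.1, 0.8, 1.0]])
dist = 1.0 - sim                 # valid when similarities live in [0, 1]
labels = [0, 0, 1, 1]
print(silhouette_score(dist, labels, metric='precomputed'))  # approx. 0.82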
rhong3/CPTAC-UCEC | [
"ec83fbee234b5ad3df6524cdd960b5f0f3da9ea9",
"ec83fbee234b5ad3df6524cdd960b5f0f3da9ea9",
"ec83fbee234b5ad3df6524cdd960b5f0f3da9ea9",
"ec83fbee234b5ad3df6524cdd960b5f0f3da9ea9",
"ec83fbee234b5ad3df6524cdd960b5f0f3da9ea9"
] | [
"Scripts/Legacy/line1prep.py",
"Scripts/data_input3.py",
"Scripts/RGB_profiler.py",
"Scripts/X3.py",
"Scripts/Legacy/gather_split.py"
] | [
"import pandas as pd\n\nlabels = pd.read_csv('../Fusion_dummy_His_MUT_joined.csv', header=0)\n# line = pd.read_csv('../../Line1.csv', header=0)\nline = pd.read_csv('../EC_cyclin_expression.csv', header=0)\n\n# line['name'] = line['Proteomics_Participant_ID']\n# line = line.drop(['Proteomics_Participant_ID', 'Histologic_type', 'Genomics_subtype', 'TP53_TP53'], axis=1)\n# labels = labels.join(line.set_index('name'), on='name')\n# labels['LINE1_ORF1p'] = (labels['LINE1_ORF1p'].dropna() > 0).astype(int)\n# labels['RAD50-S635'] = (labels['RAD50-S635'].dropna() > 0).astype(int)\n# labels['NBN-S343'] = (labels['NBN-S343'].dropna() > 0).astype(int)\n# labels['ATR-T1989'] = (labels['ATR-T1989'].dropna() > 0).astype(int)\n# labels['ATM-S1981'] = (labels['ATM-S1981'].dropna() > 0).astype(int)\n\nline['name'] = line['Sample_ID'].str.slice(start=0, stop=9)\n\nline = line.drop(['Sample_ID', 'Genomic_subtype'], axis=1)\nlabels = labels.join(line.set_index('name'), on='name')\nlabels['CCND1'] = (labels['CCND1'].dropna() > 0).astype(int)\nlabels['CCNE1'] = (labels['CCNE1'].dropna() > 0).astype(int)\nlabels['CCNA2'] = (labels['CCNA2'].dropna() > 0).astype(int)\nlabels['CCNB1'] = (labels['CCNB1'].dropna() > 0).astype(int)\n\nlabels.to_csv('../Fusion_dummy_His_MUT_joined.csv', index=False)\n",
"\"\"\"\nData input preparation from decoding TFrecords, onehot encoding, augmentation, and batching 3.0\n\nCreated on 04/26/2019\n\n@author: RH\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\nclass DataSet(object):\n # bs is batch size; ep is epoch; images are images; mode is test/train; filename is tfrecords\n def __init__(self, bs, count, ep=1, cls=2, images=None, mode=None, filename=None):\n self._batchsize = bs\n self._index_in_epoch = 0\n self._num_examples = count\n self._images = images\n self._mode = mode\n self._filename = filename\n self._epochs = ep\n self._classes = cls\n\n # decoding tfrecords; return images and labels\n def decode(self, serialized_example):\n features = tf.parse_example(\n serialized_example,\n # Defaults are not specified since both keys are required.\n features={self._mode + '/imageL0': tf.FixedLenFeature([], tf.string),\n self._mode + '/imageL1': tf.FixedLenFeature([], tf.string),\n self._mode + '/imageL2': tf.FixedLenFeature([], tf.string),\n self._mode + '/label': tf.FixedLenFeature([], tf.int64), })\n\n imagea = tf.decode_raw(features[self._mode + '/imageL0'], tf.float32)\n imagea = tf.reshape(imagea, [-1, 299, 299, 3])\n imageb = tf.decode_raw(features[self._mode + '/imageL1'], tf.float32)\n imageb = tf.reshape(imageb, [-1, 299, 299, 3])\n imagec = tf.decode_raw(features[self._mode + '/imageL2'], tf.float32)\n imagec = tf.reshape(imagec, [-1, 299, 299, 3])\n\n # Convert label from a scalar uint8 tensor to an int32 scalar.\n label = tf.cast(features[self._mode + '/label'], tf.int32)\n return imagea, imageb, imagec, label\n\n # decoding tfrecords for real test\n def Real_decode(self, serialized_example):\n features = tf.parse_example(\n serialized_example,\n # Defaults are not specified since both keys are required.\n features={self._mode + '/imageL0': tf.FixedLenFeature([], tf.string),\n self._mode + '/imageL1': tf.FixedLenFeature([], tf.string),\n self._mode + '/imageL2': tf.FixedLenFeature([], tf.string), })\n\n imagea = tf.decode_raw(features[self._mode + '/imageL0'], tf.float32)\n imagea = tf.reshape(imagea, [-1, 299, 299, 3])\n imageb = tf.decode_raw(features[self._mode + '/imageL1'], tf.float32)\n imageb = tf.reshape(imageb, [-1, 299, 299, 3])\n imagec = tf.decode_raw(features[self._mode + '/imageL2'], tf.float32)\n imagec = tf.reshape(imagec, [-1, 299, 299, 3])\n\n return imagea, imageb, imagec\n\n # augmentation including onehot encoding\n def augment(self, imagea, imageb, imagec, labels):\n\n angles = tf.cast(tf.random_uniform([], 0, 4), tf.int32)\n imagea = tf.image.rot90(imagea, k=angles)\n imagea = tf.image.random_flip_left_right(imagea)\n imagea = tf.image.random_flip_up_down(imagea)\n imagea = tf.image.random_hue(imagea, 0.02)\n imagea = tf.image.random_brightness(imagea, 0.02)\n imagea = tf.image.random_contrast(imagea, 0.9, 1.1)\n imagea = tf.image.random_saturation(imagea, 0.9, 1.1)\n\n imageb = tf.image.rot90(imageb, k=angles)\n imageb = tf.image.random_flip_left_right(imageb)\n imageb = tf.image.random_flip_up_down(imageb)\n imageb = tf.image.random_hue(imageb, 0.02)\n imageb = tf.image.random_brightness(imageb, 0.02)\n imageb = tf.image.random_contrast(imageb, 0.9, 1.1)\n imageb = tf.image.random_saturation(imageb, 0.9, 1.1)\n\n imagec = tf.image.rot90(imagec, k=angles)\n imagec = tf.image.random_flip_left_right(imagec)\n imagec = tf.image.random_flip_up_down(imagec)\n imagec = tf.image.random_hue(imagec, 0.02)\n imagec = 
tf.image.random_brightness(imagec, 0.02)\n imagec = tf.image.random_contrast(imagec, 0.9, 1.1)\n imagec = tf.image.random_saturation(imagec, 0.9, 1.1)\n\n labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=self._classes)\n\n return imagea, imageb, imagec, labels\n\n # onehot encoding only; for test set\n def onehot_only(self, imagea, imageb, imagec, labels):\n with tf.name_scope('onehot_only'):\n labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=self._classes)\n return imagea, imageb, imagec, labels\n\n # dataset preparation; batching; Real test or not; train or test\n def data(self, Not_Realtest=True, train=True):\n batch_size = self._batchsize\n ep = self._epochs\n filenames = tf.placeholder(tf.string, shape=None)\n dataset = tf.data.TFRecordDataset(filenames)\n dataset = dataset.repeat(ep)\n if Not_Realtest:\n if train:\n batched_dataset = dataset.batch(batch_size, drop_remainder=True)\n batched_dataset = batched_dataset.map(self.decode)\n batched_dataset = batched_dataset.map(self.augment)\n else:\n batched_dataset = dataset.batch(batch_size, drop_remainder=False)\n batched_dataset = batched_dataset.map(self.decode)\n batched_dataset = batched_dataset.map(self.onehot_only)\n else:\n batched_dataset = dataset.batch(batch_size, drop_remainder=False)\n batched_dataset = batched_dataset.map(self.Real_decode)\n iterator = batched_dataset.make_initializable_iterator()\n return iterator, self._filename, filenames\n\n @property\n def images(self):\n return self._images\n\n @property\n def num_examples(self):\n return self._num_examples\n",
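A single-level reduction of the decode step above, for readers who want the TFRecord schema in isolation; this sketch targets the same TF 1.x API the record uses and keeps only imageL0 and the label:

# One image level plus label, mirroring DataSet.decode() (illustrative only).
import tensorflow as tf

def decode_single(serialized_example, mode='train'):
    features = tf.parse_example(
        serialized_example,
        features={mode + '/imageL0': tf.FixedLenFeature([], tf.string),
                  mode + '/label': tf.FixedLenFeature([], tf.int64)})
    image = tf.decode_raw(features[mode + '/imageL0'], tf.float32)
    image = tf.reshape(image, [-1, 299, 299, 3])  # NHWC, 299x299 RGB tiles
    label = tf.cast(features[mode + '/label'], tf.int32)
    return image, label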
"\"\"\"\nCheck RGB info for tiles\n\nCreated on 03/26/2019\n\n@author: RH\n\"\"\"\n\nimport cv2\nimport pandas as pd\nimport numpy as np\nimport os\n\n\ndef tile_ids_in(slide, level, root_dir, label, ignore=['.DS_Store','dict.csv', 'all.csv']):\n ids = []\n try:\n for id in os.listdir(root_dir):\n if id in ignore:\n print('Skipping ID:', id)\n else:\n ids.append([slide, level, root_dir+'/'+id, label])\n except FileNotFoundError:\n print('Ignore:', root_dir)\n\n return ids\n\n\n# read images\ndef load_image(addr):\n img = cv2.imread(addr)\n img = img.astype(np.float32)\n return img\n\n\n# loading images\ndef loader(totlist_dir):\n RGB=[]\n all = pd.read_csv(totlist_dir+'/All_images.csv', header=0)\n tiles_list = []\n for idx, row in all.iterrows():\n tile_ids = tile_ids_in(row['slide'], row['level'], row['path'], row['label'])\n tiles_list.extend(tile_ids)\n tiles = pd.DataFrame(tiles_list, columns=['slide', 'level', 'path', 'label'])\n\n imlist = tiles['path'].values.tolist()\n\n for i in range(len(imlist)):\n try:\n # Load the image\n img = load_image(imlist[i])\n the_imagea = np.array(img)[:, :, :3]\n the_imagea = np.nan_to_num(the_imagea)\n mask = (the_imagea[:, :, :3] > 200).astype(np.uint8)\n maskb = (the_imagea[:, :, :3] < 50).astype(np.uint8)\n mask = (~(mask[:, :, 0] * mask[:, :, 1] * mask[:, :, 2]).astype(bool)).astype(np.uint8)\n maskb = (~(maskb[:, :, 0] * maskb[:, :, 1] * maskb[:, :, 2]).astype(bool)).astype(np.uint8)\n mask = mask*maskb\n masksum = np.sum(mask)\n BB = np.sum(the_imagea[:, :, 0]*mask)/masksum\n B = (the_imagea[:, :, 0]*mask).ravel()[np.flatnonzero(the_imagea[:, :, 0]*mask)]\n GG = np.sum(the_imagea[:, :, 1]*mask)/masksum\n G = (the_imagea[:, :, 1]*mask).ravel()[np.flatnonzero(the_imagea[:, :, 1]*mask)]\n RR = np.sum(the_imagea[:, :, 2]*mask)/masksum\n R = (the_imagea[:, :, 2]*mask).ravel()[np.flatnonzero(the_imagea[:, :, 2]*mask)]\n RGB.append([imlist[i], RR, np.percentile(R, 25), np.percentile(R, 75), np.std(R),\n GG, np.percentile(G, 25), np.percentile(G, 75), np.std(G),\n BB, np.percentile(B, 25), np.percentile(B, 75), np.std(B)])\n except (AttributeError, IndexError) as e:\n print('Error image:'+imlist[i])\n pass\n\n RGBdf = pd.DataFrame(RGB, columns=['Img', 'Redmean', 'Red25', 'Red75', 'Redstd',\n 'Greenmean', 'Green25', 'Green75', 'Greenstd',\n 'Bluemean', 'Blue25', 'Blue75', 'Bluestd'])\n\n RGBdf.to_csv('../Results/RGB.csv', index=False, header=True)\n\n Rmean = RGBdf['Redmean'].mean()\n Gmean = RGBdf['Greenmean'].mean()\n Bmean = RGBdf['Bluemean'].mean()\n Rstd = RGBdf['Redstd'].mean()\n Gstd = RGBdf['Greenstd'].mean()\n Bstd = RGBdf['Bluestd'].mean()\n Raqt = RGBdf['Red25'].mean()\n Gaqt = RGBdf['Green25'].mean()\n Baqt = RGBdf['Blue25'].mean()\n Rbqt = RGBdf['Red75'].mean()\n Gbqt = RGBdf['Green75'].mean()\n Bbqt = RGBdf['Blue75'].mean()\n\n print(\"Red Mean of: mean={}; std={}; 25pct={}; 75pct={}\".format(str(Rmean), str(Rstd), str(Raqt), str(Rbqt)))\n print(\"Green Mean of: mean={}; std={}; 25pct={}; 75pct={}\".format(str(Gmean), str(Gstd), str(Gaqt), str(Gbqt)))\n print(\"Blue Mean of: mean={}; std={}; 25pct={}; 75pct={}\".format(str(Bmean), str(Bstd), str(Baqt), str(Bbqt)))\n\n\nif __name__ == \"__main__\":\n loader('../tiles')",
"\"\"\"\nXeptionV3 for TF2.0\n\nCreated on 04/17/2019\n\n@author: RH\n\"\"\"\nimport tensorflow as tf\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.pooling import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D\nfrom keras.layers.core import Dense, Dropout, Flatten, Activation, Lambda\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.merge import concatenate, add\nfrom keras.regularizers import l2\n\n\ndef resnet_v2_stem(input, train=True):\n '''The stem of the pure Inception-v4 and Inception-ResNet-v2 networks. This is input part of those networks.'''\n\n # Input shape is 299 * 299 * 3 (Tensorflow dimension ordering)\n x = Conv2D(32, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", strides=(2, 2))(input) # 149 * 149 * 32\n x = Conv2D(32, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\")(x) # 147 * 147 * 32\n x = Conv2D(64, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(x) # 147 * 147 * 64\n\n x1 = MaxPooling2D((3, 3), strides=(2, 2))(x)\n x2 = Conv2D(96, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", strides=(2, 2))(x)\n\n x = concatenate([x1, x2], axis=3) # 73 * 73 * 160\n\n x1 = Conv2D(64, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(x)\n x1 = Conv2D(96, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\")(x1)\n\n x2 = Conv2D(64, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(x)\n x2 = Conv2D(64, (7, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(x2)\n x2 = Conv2D(64, (1, 7), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(x2)\n x2 = Conv2D(96, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"valid\")(x2)\n\n x = concatenate([x1, x2], axis=3) # 71 * 71 * 192\n\n x1 = Conv2D(192, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", strides=(2, 2))(x)\n\n x2 = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n x = concatenate([x1, x2], axis=3) # 35 * 35 * 384\n\n x = BatchNormalization(axis=3)(x)\n x = Activation(\"relu\")(x)\n\n return x\n\n\ndef inception_resnet_v2_A(input, scale_residual=True, train=True):\n '''Architecture of Inception_ResNet_A block which is a 35 * 35 grid module.'''\n\n ar1 = Conv2D(32, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(input)\n\n ar2 = Conv2D(32, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(input)\n ar2 = Conv2D(32, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(ar2)\n\n ar3 = Conv2D(32, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(input)\n ar3 = Conv2D(48, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(ar3)\n ar3 = Conv2D(64, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(ar3)\n\n merged = concatenate([ar1, ar2, ar3], axis=3)\n\n ar = Conv2D(384, (1, 1), kernel_regularizer=l2(0.0002), activation=\"linear\", padding=\"same\")(merged)\n if scale_residual: ar = Lambda(lambda a: a * 0.1)(ar)\n\n output = add([input, ar])\n output = BatchNormalization(axis=3)(output)\n output = Activation(\"relu\")(output)\n\n return output\n\n\ndef inception_resnet_v2_B(input, scale_residual=True, train=True):\n '''Architecture of Inception_ResNet_B block which is a 17 * 17 grid module.'''\n\n br1 = Conv2D(192, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(input)\n\n br2 = Conv2D(128, (1, 1), 
kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(input)\n br2 = Conv2D(160, (1, 7), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(br2)\n br2 = Conv2D(192, (7, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(br2)\n\n merged = concatenate([br1, br2], axis=3)\n\n br = Conv2D(1152, (1, 1), kernel_regularizer=l2(0.0002), activation=\"linear\", padding=\"same\")(merged)\n if scale_residual: br = Lambda(lambda b: b * 0.1)(br)\n\n output = add([input, br])\n output = BatchNormalization(axis=3)(output)\n output = Activation(\"relu\")(output)\n\n return output\n\n\ndef inception_resnet_v2_C(input, scale_residual=True, train=True):\n '''Architecture of Inception_ResNet_C block which is a 8 * 8 grid module.'''\n\n cr1 = Conv2D(192, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(input)\n\n cr2 = Conv2D(192, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(input)\n cr2 = Conv2D(224, (1, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(cr2)\n cr2 = Conv2D(256, (3, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(cr2)\n\n merged = concatenate([cr1, cr2], axis=3)\n\n cr = Conv2D(2144, (1, 1), kernel_regularizer=l2(0.0002), activation=\"linear\", padding=\"same\")(merged)\n if scale_residual: cr = Lambda(lambda c: c * 0.1)(cr)\n\n output = add([input, cr])\n output = BatchNormalization(axis=3)(output)\n output = Activation(\"relu\")(output)\n\n return output\n\n\ndef reduction_resnet_A(input, k=192, l=224, m=256, n=384, train=True):\n '''Architecture of a 35 * 35 to 17 * 17 Reduction_ResNet_A block. It is used by both v1 and v2 Inception-ResNets.'''\n\n rar1 = MaxPooling2D((3, 3), strides=(2, 2))(input)\n\n rar2 = Conv2D(n, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", strides=(2, 2))(input)\n\n rar3 = Conv2D(k, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(input)\n rar3 = Conv2D(l, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(rar3)\n rar3 = Conv2D(m, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", strides=(2, 2))(rar3)\n\n merged = concatenate([rar1, rar2, rar3], axis=3)\n rar = BatchNormalization(axis=3)(merged)\n rar = Activation(\"relu\")(rar)\n\n return rar\n\n\ndef reduction_resnet_v2_B(input, train=True):\n '''Architecture of a 17 * 17 to 8 * 8 Reduction_ResNet_B block.'''\n\n rbr1 = MaxPooling2D((3, 3), strides=(2, 2), padding=\"valid\")(input)\n\n rbr2 = Conv2D(256, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(input)\n rbr2 = Conv2D(384, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", strides=(2, 2))(rbr2)\n\n rbr3 = Conv2D(256, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(input)\n rbr3 = Conv2D(288, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", strides=(2, 2))(rbr3)\n\n rbr4 = Conv2D(256, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(input)\n rbr4 = Conv2D(288, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(rbr4)\n rbr4 = Conv2D(320, (3, 3), kernel_regularizer=l2(0.0002), activation=\"relu\", strides=(2, 2))(rbr4)\n\n merged = concatenate([rbr1, rbr2, rbr3, rbr4], axis=3)\n rbr = BatchNormalization(axis=3)(merged)\n rbr = Activation(\"relu\")(rbr)\n\n return rbr\n\n\ndef Branch(input, dropout_keep_prob=0.8, num_classes=1000, is_training=True):\n # Input shape is 299 * 299 * 3\n 
x = resnet_v2_stem(input, train=is_training) # Output: 35 * 35 * 256\n\n # 5 x Inception A\n for i in range(5):\n x = inception_resnet_v2_A(x, train=is_training)\n # Output: 35 * 35 * 256\n\n # Reduction A\n x = reduction_resnet_A(x, k=256, l=256, m=384, n=384, train=is_training) # Output: 17 * 17 * 896\n\n # 10 x Inception B\n for i in range(10):\n x = inception_resnet_v2_B(x, train=is_training)\n # Output: 17 * 17 * 896\n\n # auxiliary\n loss2_ave_pool = AveragePooling2D(pool_size=(5, 5), strides=(3, 3), name='loss2/ave_pool')(x)\n\n loss2_conv_a = Conv2D(128, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(\n loss2_ave_pool)\n loss2_conv_b = Conv2D(768, (5, 5), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(\n loss2_conv_a)\n\n loss2_conv_b = BatchNormalization(axis=3)(loss2_conv_b)\n\n loss2_conv_b = Activation('relu')(loss2_conv_b)\n\n loss2_flat = Flatten()(loss2_conv_b)\n\n loss2_fc = Dense(1024, activation='relu', name='loss2/fc', kernel_regularizer=l2(0.0002))(loss2_flat)\n\n loss2_drop_fc = Dropout(dropout_keep_prob)(loss2_fc, training=is_training)\n\n loss2_classifier = Dense(num_classes, name='loss2/classifier', kernel_regularizer=l2(0.0002))(loss2_drop_fc)\n\n # Reduction B\n x = reduction_resnet_v2_B(x, train=is_training) # Output: 8 * 8 * 1792\n\n # 5 x Inception C\n for i in range(5):\n x = inception_resnet_v2_C(x, train=is_training)\n # Output: 8 * 8 * 1792\n\n x = Conv2D(896, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(x)\n\n return x, loss2_classifier\n\n\ndef X3(inputa, inputb, inputc, demographics=None,\n dropout=0.8, num_cls=1000, is_train=True, scope='X3', supermd=False):\n with tf.variable_scope(scope, 'X3', [inputa, inputb, inputc]):\n\n xa, auxa = Branch(inputa, dropout_keep_prob=dropout, num_classes=num_cls, is_training=is_train)\n xb, auxb = Branch(inputb, dropout_keep_prob=dropout, num_classes=num_cls, is_training=is_train)\n xc, auxc = Branch(inputc, dropout_keep_prob=dropout, num_classes=num_cls, is_training=is_train)\n\n x = concatenate([xa, xb, xc], axis=3) # Output: 8 * 8 * 2688\n\n x = Conv2D(2688, (1, 1), kernel_regularizer=l2(0.0002), activation=\"relu\", padding=\"same\")(x)\n\n net = x\n\n loss2_classifier = tf.add(auxa, tf.add(auxb, auxc))\n\n # Average Pooling\n x = GlobalAveragePooling2D(name='avg_pool')(x) # Output: 2688\n\n pool5_drop_10x10_s1 = Dropout(dropout)(x, training=is_train)\n\n if supermd:\n demographics = Dense(2, name='demographic_fc1', activation=\"relu\", kernel_regularizer=l2(0.0002))(\n demographics)\n merged = concatenate([pool5_drop_10x10_s1, demographics])\n else:\n merged = pool5_drop_10x10_s1\n\n loss3_classifier_w = Dense(num_cls, name='loss3/classifier', kernel_regularizer=l2(0.0002))\n\n loss3_classifier = loss3_classifier_w(merged)\n\n w_variables = loss3_classifier_w.get_weights()\n w_variables = w_variables[0]\n\n logits = tf.cond(tf.equal(is_train, tf.constant(True)),\n lambda: tf.add(loss3_classifier, tf.scalar_mul(tf.constant(0.1), loss2_classifier)),\n lambda: loss3_classifier)\n\n return logits, net, tf.convert_to_tensor(w_variables)\n\n",
"\"\"\"\nGet baseline models data split\n\nCreated on 11/15/2019\n\n@author: RH\n\"\"\"\nimport pandas as pd\n\nflist = ['CNVL']\nfor i in flist:\n tr = pd.read_csv('../Results/X1{}/data/tr_sample.csv'.format(i), header=0)\n te = pd.read_csv('../Results/X1{}/data/te_sample.csv'.format(i), header=0)\n va = pd.read_csv('../Results/X1{}/data/va_sample.csv'.format(i), header=0)\n trunq = list(tr.slide.unique())\n teunq = list(te.slide.unique())\n vaunq = list(va.slide.unique())\n tepd = pd.DataFrame(columns=['slide', 'set'])\n tepd['slide'] = teunq\n tepd['set'] = 'test'\n trpd = pd.DataFrame(columns=['slide', 'set'])\n trpd['slide'] = trunq\n trpd['set'] = 'train'\n vapd = pd.DataFrame(columns=['slide', 'set'])\n vapd['slide'] = vaunq\n vapd['set'] = 'validation'\n\n pdpd = pd.concat([trpd, vapd, tepd], ignore_index=True)\n pdpd.columns = ['slide', 'set']\n pdpd.to_csv('../split/{}.csv'.format(i), index=False)\n\n# # For TP53-244 split\n# tr = pd.read_csv('../Results/NL5/X1TP53-244/data/tr_sample.csv', header=0)\n# te = pd.read_csv('../Results/NL5/X1TP53-244/data/te_sample.csv', header=0)\n# va = pd.read_csv('../Results/NL5/X1TP53-244/data/va_sample.csv', header=0)\n# trunq = list(tr.slide.unique())\n# teunq = list(te.slide.unique())\n# vaunq = list(va.slide.unique())\n# tepd = pd.DataFrame(columns=['slide', 'set'])\n# tepd['slide'] = teunq\n# tepd['set'] = 'test'\n# trpd = pd.DataFrame(columns=['slide', 'set'])\n# trpd['slide'] = trunq\n# trpd['set'] = 'train'\n# vapd = pd.DataFrame(columns=['slide', 'set'])\n# vapd['slide'] = vaunq\n# vapd['set'] = 'validation'\n# pdpd = pd.concat([trpd, vapd, tepd], ignore_index=True)\n# pdpd.columns = ['slide', 'set']\n#\n# ref = pd.read_csv('../dummy_His_MUT_joined.csv', header=0)\n# ref = ref[~ref['TP53'].isna()]\n#\n# pdpd = pdpd[pdpd['slide'].isin(ref['name'].tolist())]\n#\n# ref = ref[~ref['name'].isin(pdpd['slide'].tolist())]['name'].tolist()\n#\n# pdlst = []\n# for m in ref:\n# ppp = np.random.random()\n# print(ppp)\n# if ppp < 0.1:\n# pdlst.append([m, 'test'])\n# elif ppp > 0.9:\n# pdlst.append([m, 'validation'])\n# else:\n# pdlst.append([m, 'train'])\n#\n# pdpdd = pd.DataFrame(pdlst, columns=['slide', 'set'])\n# pdpd = pdpd.append(pdpdd)\n# pdpd.to_csv('../split/TP53.csv', index=False)\n"
] | [
[
"pandas.read_csv"
],
[
"tensorflow.image.random_hue",
"tensorflow.image.random_brightness",
"tensorflow.image.random_flip_left_right",
"tensorflow.image.random_contrast",
"tensorflow.FixedLenFeature",
"tensorflow.data.TFRecordDataset",
"tensorflow.decode_raw",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.image.random_saturation",
"tensorflow.image.rot90",
"tensorflow.name_scope",
"tensorflow.image.random_flip_up_down",
"tensorflow.random_uniform"
],
[
"pandas.read_csv",
"numpy.nan_to_num",
"pandas.DataFrame",
"numpy.flatnonzero",
"numpy.percentile",
"numpy.std",
"numpy.array",
"numpy.sum"
],
[
"tensorflow.variable_scope",
"tensorflow.convert_to_tensor",
"tensorflow.add",
"tensorflow.constant"
],
[
"pandas.concat",
"pandas.DataFrame"
]
] |
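Aside: the input class in the record above hides its TFRecord paths behind a tf.placeholder and returns an initializable iterator, so one graph can be re-pointed at different file lists. A minimal TF1-style sketch of how such a pipeline is typically driven; the .tfrecords file names are hypothetical and only the placeholder/initializer pattern mirrors the record.

import tensorflow as tf

filenames = tf.placeholder(tf.string, shape=None)
dataset = tf.data.TFRecordDataset(filenames).batch(8)
iterator = dataset.make_initializable_iterator()
next_batch = iterator.get_next()

with tf.Session() as sess:
    # Point the same graph at a concrete list of files at run time.
    sess.run(iterator.initializer,
             feed_dict={filenames: ['train_0.tfrecords', 'train_1.tfrecords']})
    while True:
        try:
            sess.run(next_batch)  # consume one batch per step
        except tf.errors.OutOfRangeError:
            break  # the pass over the supplied files is exhausted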
dd-dos/sentence-transformers | [
"8f9c36b788e15141f723d80fea67ed16785cd18e",
"8f9c36b788e15141f723d80fea67ed16785cd18e",
"6992f4c9b7e600ce89f69d6bc0b495ec177b0312"
] | [
"sentence_transformers/datasets/SentenceLabelDataset.py",
"sentence_transformers/datasets/SentencesDataset.py",
"examples/applications/clustering/agglomerative.py"
] | [
"from torch.utils.data import Dataset\nfrom typing import List\nimport bisect\nimport torch\nimport logging\nimport numpy as np\nfrom tqdm import tqdm\nfrom .. import SentenceTransformer\nfrom ..readers.InputExample import InputExample\nfrom multiprocessing import Pool, cpu_count\nimport multiprocessing\n\nclass SentenceLabelDataset(Dataset):\n \"\"\"\n Dataset for training with triplet loss.\n This dataset takes a list of sentences grouped by their label and uses this grouping to dynamically select a\n positive example from the same group and a negative example from the other sentences for a selected anchor sentence.\n\n This dataset should be used in combination with dataset_reader.LabelSentenceReader\n\n One iteration over this dataset selects every sentence as anchor once.\n\n This also uses smart batching like SentenceDataset.\n \"\"\"\n\n def __init__(self, examples: List[InputExample], model: SentenceTransformer, provide_positive: bool = True,\n provide_negative: bool = True,\n parallel_tokenization: bool = True,\n max_processes: int = 4,\n chunk_size: int = 5000):\n \"\"\"\n Converts input examples to a SentenceLabelDataset usable to train the model with\n SentenceTransformer.smart_batching_collate as the collate_fn for the DataLoader\n\n Assumes only one sentence per InputExample and labels as integers from 0 to max_num_labels\n and should be used in combination with dataset_reader.LabelSentenceReader.\n\n Labels with only one example are ignored.\n\n smart_batching_collate as collate_fn is required because it transforms the tokenized texts to the tensors.\n\n :param examples:\n the input examples for the training\n :param model\n the Sentence BERT model for the conversion\n :param provide_positive:\n set this to False, if you don't need a positive example (e.g. for BATCH_HARD_TRIPLET_LOSS).\n :param provide_negative:\n set this to False, if you don't need a negative example (e.g. for BATCH_HARD_TRIPLET_LOSS\n or MULTIPLE_NEGATIVES_RANKING_LOSS).\n :param parallel_tokenization\n If true, multiple processes will be started for the tokenization\n :param max_processes\n Maximum number of processes started for tokenization. Cannot be larger can cpu_count()\n :param chunk_size\n #chunk_size number of examples are send to each process. Larger values increase overall tokenization speed\n \"\"\"\n self.model = model\n self.groups_right_border = []\n self.grouped_inputs = []\n self.grouped_labels = []\n self.num_labels = 0\n self.max_processes = min(max_processes, cpu_count())\n self.chunk_size = chunk_size\n self.parallel_tokenization = parallel_tokenization\n\n if self.parallel_tokenization:\n if multiprocessing.get_start_method() != 'fork':\n logging.info(\"Parallel tokenization is only available on Unix systems which allow to fork processes. 
Fall back to sequential tokenization\")\n self.parallel_tokenization = False\n\n self.convert_input_examples(examples, model)\n\n self.idxs = np.arange(len(self.grouped_inputs))\n\n self.provide_positive = provide_positive\n self.provide_negative = provide_negative\n\n\n def convert_input_examples(self, examples: List[InputExample], model: SentenceTransformer):\n \"\"\"\n Converts input examples to a SentenceLabelDataset.\n\n Assumes only one sentence per InputExample and labels as integers from 0 to max_num_labels\n and should be used in combination with dataset_reader.LabelSentenceReader.\n\n Labels with only one example are ignored.\n\n :param examples:\n the input examples for the training\n :param model\n the Sentence Transformer model for the conversion\n :param is_pretokenized\n If set to true, no tokenization will be applied. It is expected that the input is tokenized via model.tokenize\n \"\"\"\n\n inputs = []\n labels = []\n\n label_sent_mapping = {}\n too_long = 0\n label_type = None\n\n logging.info(\"Start tokenization\")\n if not self.parallel_tokenization or self.max_processes == 1 or len(examples) <= self.chunk_size:\n tokenized_texts = [self.tokenize_example(example) for example in examples]\n else:\n logging.info(\"Use multi-process tokenization with {} processes\".format(self.max_processes))\n self.model.to('cpu')\n with Pool(self.max_processes) as p:\n tokenized_texts = list(p.imap(self.tokenize_example, examples, chunksize=self.chunk_size))\n\n # Group examples and labels\n # Add examples with the same label to the same dict\n for ex_index, example in enumerate(tqdm(examples, desc=\"Convert dataset\")):\n if label_type is None:\n if isinstance(example.label, int):\n label_type = torch.long\n elif isinstance(example.label, float):\n label_type = torch.float\n tokenized_text = tokenized_texts[ex_index][0]\n\n if hasattr(model, 'max_seq_length') and model.max_seq_length is not None and model.max_seq_length > 0 and len(tokenized_text) > model.max_seq_length:\n too_long += 1\n\n if example.label in label_sent_mapping:\n label_sent_mapping[example.label].append(ex_index)\n else:\n label_sent_mapping[example.label] = [ex_index]\n\n inputs.append(tokenized_text)\n labels.append(example.label)\n\n # Group sentences, such that sentences with the same label\n # are besides each other. 
Only take labels with at least 2 examples\n distinct_labels = list(label_sent_mapping.keys())\n for i in range(len(distinct_labels)):\n label = distinct_labels[i]\n if len(label_sent_mapping[label]) >= 2:\n self.grouped_inputs.extend([inputs[j] for j in label_sent_mapping[label]])\n self.grouped_labels.extend([labels[j] for j in label_sent_mapping[label]])\n self.groups_right_border.append(len(self.grouped_inputs)) #At which position does this label group / bucket end?\n self.num_labels += 1\n\n self.grouped_labels = torch.tensor(self.grouped_labels, dtype=label_type)\n logging.info(\"Num sentences: %d\" % (len(self.grouped_inputs)))\n logging.info(\"Sentences longer than max_seqence_length: {}\".format(too_long))\n logging.info(\"Number of labels with >1 examples: {}\".format(len(distinct_labels)))\n\n\n def tokenize_example(self, example):\n if example.texts_tokenized is not None:\n return example.texts_tokenized\n\n return [self.model.tokenize(text) for text in example.texts]\n\n def __getitem__(self, item):\n if not self.provide_positive and not self.provide_negative:\n return [self.grouped_inputs[item]], self.grouped_labels[item]\n\n # Anchor element\n anchor = self.grouped_inputs[item]\n\n # Check start and end position for this label in our list of grouped sentences\n group_idx = bisect.bisect_right(self.groups_right_border, item)\n left_border = 0 if group_idx == 0 else self.groups_right_border[group_idx - 1]\n right_border = self.groups_right_border[group_idx]\n\n if self.provide_positive:\n positive_item_idx = np.random.choice(np.concatenate([self.idxs[left_border:item], self.idxs[item + 1:right_border]]))\n positive = self.grouped_inputs[positive_item_idx]\n else:\n positive = []\n\n if self.provide_negative:\n negative_item_idx = np.random.choice(np.concatenate([self.idxs[0:left_border], self.idxs[right_border:]]))\n negative = self.grouped_inputs[negative_item_idx]\n else:\n negative = []\n\n return [anchor, positive, negative], self.grouped_labels[item]\n\n\n def __len__(self):\n return len(self.grouped_inputs)",
"from torch.utils.data import Dataset\nfrom typing import List\nimport torch\nfrom .. import SentenceTransformer\nfrom ..readers.InputExample import InputExample\n\nclass SentencesDataset(Dataset):\n \"\"\"\n Dataset for smart batching, that is each batch is only padded to its longest sequence instead of padding all\n sequences to the max length.\n The SentenceBertEncoder.smart_batching_collate is required for this to work.\n SmartBatchingDataset does *not* work without it.\n \"\"\"\n def __init__(self,\n examples: List[InputExample],\n model: SentenceTransformer\n ):\n \"\"\"\n Create a new SentencesDataset with the tokenized texts and the labels as Tensor\n\n :param examples\n A list of sentence.transformers.readers.InputExample\n :param model:\n SentenceTransformerModel\n \"\"\"\n self.model = model\n self.examples = examples\n self.label_type = torch.long if isinstance(self.examples[0].label, int) else torch.float\n\n\n def __getitem__(self, item):\n label = torch.tensor(self.examples[item].label, dtype=self.label_type)\n if self.examples[item].texts_tokenized is None:\n self.examples[item].texts_tokenized = [self.model.tokenize(text) for text in self.examples[item].texts]\n\n return self.examples[item].texts_tokenized, label\n\n\n def __len__(self):\n return len(self.examples)\n",
"\"\"\"\nThis is a simple application for sentence embeddings: clustering\n\nSentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.\n\"\"\"\nfrom sentence_transformers import SentenceTransformer\nfrom sklearn.cluster import AgglomerativeClustering\nimport numpy as np\n\nembedder = SentenceTransformer('paraphrase-distilroberta-base-v1')\n\n# Corpus with example sentences\ncorpus = ['A man is eating food.',\n 'A man is eating a piece of bread.',\n 'A man is eating pasta.',\n 'The girl is carrying a baby.',\n 'The baby is carried by the woman',\n 'A man is riding a horse.',\n 'A man is riding a white horse on an enclosed ground.',\n 'A monkey is playing drums.',\n 'Someone in a gorilla costume is playing a set of drums.',\n 'A cheetah is running behind its prey.',\n 'A cheetah chases prey on across a field.'\n ]\ncorpus_embeddings = embedder.encode(corpus)\n\n# Normalize the embeddings to unit length\ncorpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)\n\n# Perform kmean clustering\nclustering_model = AgglomerativeClustering(n_clusters=None, distance_threshold=1.5) #, affinity='cosine', linkage='average', distance_threshold=0.4)\nclustering_model.fit(corpus_embeddings)\ncluster_assignment = clustering_model.labels_\n\nclustered_sentences = {}\nfor sentence_id, cluster_id in enumerate(cluster_assignment):\n if cluster_id not in clustered_sentences:\n clustered_sentences[cluster_id] = []\n\n clustered_sentences[cluster_id].append(corpus[sentence_id])\n\nfor i, cluster in clustered_sentences.items():\n print(\"Cluster \", i+1)\n print(cluster)\n print(\"\")\n"
] | [
[
"numpy.concatenate",
"torch.tensor"
],
[
"torch.tensor"
],
[
"sklearn.cluster.AgglomerativeClustering",
"numpy.linalg.norm"
]
] |
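Aside: SentenceLabelDataset in the record above keeps same-label sentences contiguous and stores each bucket's right border, so __getitem__ can locate an anchor's bucket with a single bisect and then draw a positive from inside the bucket and a negative from outside it. A self-contained sketch of that index arithmetic, with made-up bucket borders:

import bisect
import numpy as np

groups_right_border = [3, 7, 10]  # hypothetical: three label buckets ending at 3, 7, 10
idxs = np.arange(10)

item = 5  # anchor index; falls in the second bucket [3, 7)
group_idx = bisect.bisect_right(groups_right_border, item)
left = 0 if group_idx == 0 else groups_right_border[group_idx - 1]
right = groups_right_border[group_idx]

# positive comes from the anchor's bucket (excluding the anchor itself),
# negative from everything outside the bucket
positive = np.random.choice(np.concatenate([idxs[left:item], idxs[item + 1:right]]))
negative = np.random.choice(np.concatenate([idxs[:left], idxs[right:]]))
print(positive, negative)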
erfanMhi/Cooperative-Coevolution-Transfer-Optimization | [
"e75b7930bd8b55a160668b1039ac154a0d0270d7"
] | [
"main_multi.py"
] | [
"\nimport argparse\nimport os\nimport queue\n\nimport multiprocessing as mp\n# import SharedArray as sa\nimport numpy as np\n\n\nfrom copy import deepcopy\nfrom time import time\nfrom pprint import pprint\nfrom utils.data_manipulators import *\nfrom evolution.operators import *\nfrom to.probabilistic_model import ProbabilisticModel\nfrom to.mixture_model import MixtureModel\nfrom evolution.chromosome import *\n\n\nclass EAProcess(mp.Process):\n def __init__(self, dims, psize, gen, problem, shared_queue, \n shared_array, t_lock, list_lock, return_list, transfer_interval=2):\n super(EAProcess, self).__init__()\n self.dims = dims\n self.psize = psize\n print('hi')\n self.gen = gen\n self.problem = problem\n self.shared_queue = shared_queue\n self.shared_array = shared_array\n # self.shared_lock = shared_lock\n self.t_lock = t_lock\n self.list_lock = list_lock\n self.transfer_interval = transfer_interval\n self.reinitialize()\n self.return_list = return_list\n\n def reinitialize(self):\n\n self.fitness_hist = np.zeros((self.gen, self.psize))\n self.fitness_time = np.zeros((self.gen))\n\n init_func = lambda n: np.round(np.random.rand(n))\n self.pop = get_pop_init(self.psize, self.dims, init_func)\n\n def _ea(self):\n \n start = time()\n\n for i in range(self.psize): self.pop[i].fitness_calc(self.problem)\n\n self.bestfitness = np.max(self.pop).fitness\n self.fitness = Chromosome.fitness_to_numpy(self.pop)\n self.fitness_hist[0, :] = self.fitness\n\n self.fitness_time[0] = start - time()\n\n\n \n for i in range(1, self.gen):\n start = time()\n\n if i%self.transfer_interval == 0 and i//self.transfer_interval == 1:\n print('transfer start')\n self.t_lock.release()\n\n \n if i%self.transfer_interval == 0:\n recieved_pops = None\n try:\n while True:\n if recieved_pops is None:\n recieved_pops = list(self.shared_queue.get(block=True))\n else:\n recieved_pops += list(self.shared_queue.get(block=False))\n \n except queue.Empty:\n print('Queue is empty now')\n print('recieved_pops: ', len(recieved_pops))\n self.pop = total_selection_pop(np.concatenate((self.pop, recieved_pops)), self.psize)\n\n offsprings = total_crossover(self.pop)\n\n for j in range(self.psize): offsprings[j].mutation(1/self.dims)\n\n # Fitness Calculation\n cfitness = np.zeros(self.psize)\n for j in range(self.psize): \n cfitness[j] = offsprings[j].fitness_calc(self.problem)\n\n\n self.pop, self.fitness = total_selection(np.concatenate((self.pop, offsprings)),\n np.concatenate((self.fitness, cfitness)), self.psize)\n\n self.fitness_hist[i, :] = self.fitness\n\n if self.fitness[0] > self.bestfitness:\n self.bestfitness = self.fitness[0]\n\n print('Generation %d best fitness = %f' % (i, self.bestfitness))\n\n self.list_lock.acquire()\n self.shared_array[:] = Chromosome.genes_to_list(self.pop)\n self.list_lock.release()\n\n self.fitness_time[i] = time() - start\n\n print('Shared Array is now available')\n\n self.return_list.append([self.fitness_time, self.fitness_hist]) \n \n\n\n def run(self):\n\n # When target array is prepared it will be unlocked\n print ('called run method in process: %s' %self.name)\n self._ea()\n return\n\n\nclass TransferProcess(mp.Process):\n def __init__(self, dims, problem, mutation_strength,\n sample_size, sub_sample_size, src_models,\n shared_queue, shared_array, t_lock,\n list_lock, transfer_interval=2):\n super(TransferProcess, self).__init__()\n self.dims = dims\n self.problem = problem\n self.src_models = src_models\n self.mutation_strength = mutation_strength\n self.sample_size = sample_size\n 
self.sub_sample_size = sub_sample_size\n        self.shared_queue = shared_queue\n        self.shared_array = shared_array\n        # self.shared_lock = shared_lock\n        self.t_lock = t_lock\n        self.list_lock = list_lock\n        self.transfer_interval = transfer_interval\n        self.reinitialize()\n        \n    def reinitialize(self):\n\n        # self.fitness_hist = np.zeros((self.gen, self.psize))\n        # self.fitness_time = np.zeros((self.gen))\n\n        dims_s2 = len(self.src_models)+1\n        self.second_specie = StrategyChromosome(dims_s2)\n\n    def _transfer_ea(self):\n        prev_samples = None\n        genes_differ = None\n\n        target_model = ProbabilisticModel(modelType='umd')\n\n        self.list_lock.acquire()\n        target_array = np.array(self.shared_array[:])\n        self.list_lock.release()\n\n        target_model.buildModel(target_array)\n\n        _, sampled_offsprings, prev_samples = \\\n            self.second_specie.fitness_calc(self.problem, self.src_models, target_model, self.sample_size,\n                                            self.sub_sample_size, mutation_vec=genes_differ, prev_samples=deepcopy(prev_samples),\n                                            efficient_version=True)\n\n        self.shared_queue.put(sampled_offsprings)\n\n        while True:\n            offspring = deepcopy(self.second_specie)\n\n            genes_differ = offspring.mutation(self.mutation_strength, 0, 1)\n\n            target_model = ProbabilisticModel(modelType='umd')\n\n            self.list_lock.acquire()\n            target_array = np.array(self.shared_array[:])\n            self.list_lock.release()\n\n            target_model.buildModel(target_array)\n\n            _, sampled_offsprings, prev_samples_tmp = \\\n                offspring.fitness_calc(self.problem, self.src_models, target_model, self.sample_size,\n                                       self.sub_sample_size, mutation_vec=genes_differ, prev_samples=deepcopy(prev_samples),\n                                       efficient_version=True)\n\n            self.shared_queue.put(sampled_offsprings)\n            \n            self.second_specie, self.mutation_strength, is_off_selected = selection_adoption(self.second_specie, offspring, self.mutation_strength)\n\n            if is_off_selected:\n                prev_samples = prev_samples_tmp\n            # second_species_gen_num += 1\n        # while True:\n\n\n\n    def run(self):\n\n        self.t_lock.acquire()\n        print ('called run method in process: %s' %self.name)\n        self._transfer_ea()\n        return\n\ndef get_args():\n    parser = argparse.ArgumentParser(description='CoOperative CoEvolution Transfer Optimization Algorithm for Solving Multi-location Inventory Planning with Lateral Transshipments')\n\n\n    parser.add_argument('--stop_condition', default=True, \n                        type=bool, nargs='?',\n                        help=\"Stop after i iterations if the fitness hasn't changed\")\n\n    parser.add_argument('--reps', default=1,\n                        type=int, nargs='?',\n                        help='Number of repetitions')\n\n    parser.add_argument('--delta', default=2,\n                        type=int, nargs='?',\n                        help='Step for switching between transfer optimization and evolutionary operations')\n    \n    # parser.add_argument('--buildmodel', default=True,\n    #                     type=bool, nargs='?',\n    #                     help='Should we build source models?')\n\n    parser.add_argument('--src_version', default='v1',\n                        type=str, nargs='?',\n                        help='What version of source models should be used?')\n\n    parser.add_argument('--s1_psize', default=50,\n                        type=int, nargs='?',\n                        help='Population size for the first species?')\n    \n    # parser.add_argument('--s2_psize', default=20,\n    #                     type=int, nargs='?',\n    #                     help='Population size for the second species?')\n\n    parser.add_argument('--sample_size', default=50,\n                        type=int, nargs='?',\n                        help='Number of samples generated from each AlphaChromosome?')\n\n    parser.add_argument('--sub_sample_size', default=50,\n                        type=int, nargs='?',\n                        help='How many samples should we take from sample_size number of samples generated?') \n    \n    # parser.add_argument('-v', dest='version', 
default='v1',\n    #                     type=str, nargs='?',\n    #                     help='What version should be executed?')\n\n    parser.add_argument('--mutation_strength', default=1,\n                        type=int, nargs='?',\n                        help='The same step-size which we use in evolution strategy')\n    \n    parser.add_argument('--injection_type', default='elite',\n                        type=str, nargs='?',\n                        help='What method do you want to use for injection of species 2 to species 1?')\n\n    parser.add_argument('--to_repititon_num', default=1,\n                        type=int, nargs='?',\n                        help='How many times should we repeat the transferring step in evolution strategy?')\n    \n    parser.add_argument('--selection_version', default='v1',\n                        type=str, nargs='?',\n                        help='What selection version should we use in evolution strategy E(1 + 1)?')\n\n    parser.add_argument('-c', default=2,\n                        type=int, nargs='?',\n                        help='Parameter of E(1 + 1) algorithm selection')\n\n    parser.add_argument('--efficient_version', default=False,\n                        type=bool, nargs='?',\n                        help='Use the efficient version of the evolution strategy evaluation?')\n\n    parser.add_argument('--transfer_repeat_num', default=None,\n                        type=int, nargs='?',\n                        help=''' Number of times transfer optimization should be run.\n                        if it is None, it will be repeated in every delta iteration''')\n\n    parser.add_argument('--save_path', default='outputs/multi_results',\n                        type=str, nargs='?',\n                        help='Where the collected fitness histories should be saved')\n\n\n    # parser.add_argument('-q', dest='matrix_num', default='a',\n    #                     type=str, nargs='?',\n    #                     help='T^0_H matrix selector for section b')\n\n    return parser.parse_args()\n\ndef main_multi(args):\n\n    # constants\n    models_path = 'models'\n    source_models_path = os.path.join(models_path, 'knapsack_source_models')\n    knapsack_problem_path = 'problems/knapsack'\n\n    dims = 1000\n    psize = args.s1_psize\n    mutation_strength = args.mutation_strength\n    reps = args.reps\n    transfer_interval = args.delta\n    sub_sample_size = args.sub_sample_size\n    sample_size = args.sample_size\n    gen = 100\n\n    # Loading Target Problem\n    target_problem = Tools.load_from_file(os.path.join(knapsack_problem_path, 'KP_uc_ak'))\n\n    # Loading Source Models\n    src_models = Tools.load_from_file(source_models_path + '_{}'.format(args.src_version))\n\n    main_m = mp.Manager()\n    return_list = main_m.list()\n    for i in range(reps):\n        # Shared Variables\n        m = mp.Manager()\n        shared_queue = m.Queue()\n        shared_array = m.list([[0 for j in range(dims)] for i in range(psize)])\n        # prep_lock = m.Lock() # This lock is used for starting transfer learning\n        # prep_lock.acquire()\n        list_lock = m.Lock() # \\\\ for synchronizing read & write of the list\n        # q_lock = m.Lock() # \\\\ for synchronizing put & get of the queue\n        transfer_lock = m.Lock() # \\\\ will synchronize the transfer_interval for EAProcess\n        transfer_lock.acquire()\n\n\n        ea_process = EAProcess(dims, psize, gen, target_problem, shared_queue,\n                               shared_array, transfer_lock, list_lock, return_list,\n                               transfer_interval=transfer_interval)\n        \n        \n        tr_process = TransferProcess(dims, target_problem, mutation_strength,\n                                     sample_size, sub_sample_size, src_models,\n                                     shared_queue, shared_array, transfer_lock,\n                                     list_lock, transfer_interval=transfer_interval) \n\n        ea_process.start()\n        tr_process.start()\n\n        ea_process.join()\n        tr_process.terminate()\n        tr_process.join()\n    \n    Tools.save_to_file(args.save_path, return_list[:])\n\n\nif __name__ == '__main__':\n    args = get_args()\n    main_multi(args)\n    \n"
] | [
[
"numpy.concatenate",
"numpy.max",
"numpy.random.rand",
"numpy.array",
"numpy.zeros"
]
] |
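Aside: main_multi.py above wires its two processes together with multiprocessing.Manager proxies: a Queue for migrating individuals, a managed list guarded by one Lock for the shared population, and a second Lock used purely as a start signal (acquired by the parent, released by the EA process, awaited by the transfer process). A stripped-down sketch of that handshake with toy payloads:

import multiprocessing as mp

def producer(shared_list, list_lock, start_lock):
    with list_lock:                      # publish a new "population" under the lock
        shared_list[:] = [[1, 0, 1], [0, 1, 1]]
    start_lock.release()                 # signal that the shared array is ready

def consumer(shared_list, list_lock, start_lock):
    start_lock.acquire()                 # block until the producer signals
    with list_lock:
        print('consumer saw:', list(shared_list))

if __name__ == '__main__':
    m = mp.Manager()
    shared = m.list([[0] * 3 for _ in range(2)])
    list_lock = m.Lock()
    start_lock = m.Lock()
    start_lock.acquire()                 # held by the parent until the producer releases it
    procs = [mp.Process(target=producer, args=(shared, list_lock, start_lock)),
             mp.Process(target=consumer, args=(shared, list_lock, start_lock))]
    for p in procs: p.start()
    for p in procs: p.join()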
Juan0001/yellowbrick-docs-zh | [
"36275d9704fc2a946c5bec5f802106bb5281efd1",
"36275d9704fc2a946c5bec5f802106bb5281efd1"
] | [
"tests/dataset.py",
"yellowbrick/utils/helpers.py"
] | [
"# tests.dataset\n# Helper functions for tests that utilize downloadable datasets.\n#\n# Author: Benjamin Bengfort <[email protected]>\n# Created: Thu Oct 13 19:55:53 2016 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: dataset.py [8f4de77] [email protected] $\n\n\"\"\"\nHelper functions for tests that utilize downloadable datasets.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport os\nimport shutil\nimport hashlib\nimport zipfile\nimport numpy as np\n\nfrom sklearn.datasets.base import Bunch\n\ntry:\n import requests\nexcept ImportError:\n requests = None\n\n\n##########################################################################\n## Fixtures\n##########################################################################\n\nDATASETS = {\n 'concrete': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/concrete.zip',\n 'signature': 'b9ea5f26a7bb272a040e2f1a993b26babbf8dc4a04ab8198bb315ca66d71f10d',\n 'type': 'numpy',\n },\n 'energy': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/energy.zip',\n 'signature': '19fb86f3bcdde208eed46944172cb643ef6a7d58da103fb568fae43205ed89d3',\n 'type': 'numpy',\n },\n 'credit': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/credit.zip',\n 'signature': '4a91339c69f55e18f3f48004328fbcb7868070b618208fed099920427b084e5e',\n 'type': 'numpy',\n },\n 'occupancy': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/occupancy.zip',\n 'signature': '429cfe376dc9929a1fa528da89f0e1626e34e19695f3f555d8954025bbc522b8',\n 'type': 'numpy',\n },\n 'mushroom': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/mushroom.zip',\n 'signature': '884c43cb70db35d211c67b1cf6a3683b2b4569393d2789d5c07840da4dc85ba8',\n 'type': 'numpy',\n },\n 'hobbies': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/hobbies.zip',\n 'signature': '415c8f68df1486d5d84a1d1757a5aa3035aef5ad63ede5013c261d622fbd29d8',\n 'type': 'corpus',\n },\n 'game': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/game.zip',\n 'signature': 'b1bd85789a014a898daa34cb5f89ceab6d2cd6488a2e572187e34aa4ec21a43b',\n 'type': 'numpy',\n },\n 'bikeshare': {\n 'url': 'https://s3.amazonaws.com/ddl-data-lake/yellowbrick/bikeshare.zip',\n 'signature': 'a9b440f65549746dff680c92ff8bdca3c7265f09db1cf09e708e6e26fc8aba44',\n 'type': 'numpy',\n },\n}\n\nFIXTURES = os.path.join(os.path.dirname(__file__), \"fixtures\")\n\n\n##########################################################################\n## Test Cases that Require Download\n##########################################################################\n\nclass DatasetMixin(object):\n \"\"\"\n Mixin for unittest.TestCase class to download datasets from S3 for\n testing real world machine learning visual diagnostics.\n \"\"\"\n\n @staticmethod\n def sha256sum(path, blocksize=65536):\n \"\"\"\n Computes the SHA256 signature of a file to verify that the file has not\n been modified in transit and that it is the correct version of the data.\n \"\"\"\n sig = hashlib.sha256()\n with open(path, 'rb') as f:\n buf = f.read(blocksize)\n while len(buf) > 0:\n sig.update(buf)\n buf = f.read(blocksize)\n return sig.hexdigest()\n\n\n @staticmethod\n def download_data(url, path=FIXTURES, signature=None, extract=True):\n \"\"\"\n Downloads the zipped data set specified at the given URL, saving it to\n the output path 
specified. This function verifies the download with the\n given signature (if supplied) and extracts the zip file if requested.\n \"\"\"\n if requests is None:\n raise ImportError(\n \"The requests module is required to download data --\\n\"\n \"please install it with pip install requests.\"\n )\n\n # Create the output directory if it does not exist\n if not os.path.exists(path):\n os.mkdir(path)\n\n # Get the name of the file from the URL\n name = os.path.basename(url)\n dlpath = os.path.join(path, name)\n\n # Fetch the response in a streaming fashion and write it to disk.\n response = requests.get(url, stream=True)\n with open(dlpath, 'wb') as f:\n for chunk in response.iter_content(65536):\n f.write(chunk)\n\n # If verify, compare the signature\n if signature is not None:\n dlsignature = DatasetMixin.sha256sum(dlpath)\n if signature != dlsignature:\n raise ValueError(\n \"Download signature does not match hardcoded signature!\"\n )\n\n # If extract, extract the zipfile.\n if extract:\n zf = zipfile.ZipFile(dlpath)\n zf.extractall(path)\n\n\n @staticmethod\n def download_all(path=FIXTURES, verify=True, extract=True):\n \"\"\"\n Downloads all the example datasets. If verify is True then compare the\n download signature with the hardcoded signature. If extract is True then\n extract the contents of the zipfile to the given path.\n \"\"\"\n for name, meta in DATASETS.items():\n url = meta['url']\n signature = meta['signature'] if verify else None\n\n DatasetMixin.download_data(\n url, path=path, signature=signature, extract=extract\n )\n\n @staticmethod\n def remove_all(fixtures=FIXTURES):\n \"\"\"\n Removes all the downloaded datasets as clean up\n \"\"\"\n shutil.rmtree(fixtures)\n\n @staticmethod\n def load_data(name, fixtures=FIXTURES):\n \"\"\"\n Loads the numpy matrix from the specified data set, downloads it if\n it hasn't already been downloaded.\n \"\"\"\n # Just in case this is a corpus data set, then do that instead.\n if DATASETS[name]['type'] == 'corpus':\n return DatasetMixin.load_corpus(name, fixtures)\n\n path = os.path.join(fixtures, name, \"{}.csv\".format(name))\n if not os.path.exists(path):\n DatasetMixin.download_all(path=fixtures)\n\n return np.genfromtxt(path, dtype=float, delimiter=',', names=True)\n\n @staticmethod\n def load_corpus(name, fixtures=FIXTURES):\n \"\"\"\n Loads a sklearn Bunch with the corpus and downloads it if it hasn't\n already been downloaded. Used to test text visualizers.\n \"\"\"\n path = os.path.join(fixtures, name)\n if not os.path.exists(path):\n DatasetMixin.download_all(path=fixtures)\n\n # Read the directories in the directory as the categories.\n categories = [\n cat for cat in os.listdir(path)\n if os.path.isdir(os.path.join(path, cat))\n ]\n\n files = [] # holds the file names relative to the root\n data = [] # holds the text read from the file\n target = [] # holds the string of the category\n\n # Load the data from the files in the corpus\n for cat in categories:\n for name in os.listdir(os.path.join(path, cat)):\n files.append(os.path.join(path, cat, name))\n target.append(cat)\n\n with open(os.path.join(path, cat, name), 'r') as f:\n data.append(f.read())\n\n # Return the data bunch for use similar to the newsgroups example\n return Bunch(\n categories=categories,\n files=files,\n data=data,\n target=target,\n )\n",
"# yellowbrick.utils.helpers\n# Helper functions and generic utilities for use in Yellowbrick code.\n#\n# Author: Benjamin Bengfort <[email protected]>\n# Created: Fri May 19 10:39:30 2017 -0700\n#\n# Copyright (C) 2017 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: helpers.py [79cd8cf] [email protected] $\n\n\"\"\"\nHelper functions and generic utilities for use in Yellowbrick code.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport re\nimport numpy as np\n\nfrom sklearn.pipeline import Pipeline\n\nfrom .types import is_estimator\nfrom yellowbrick.exceptions import YellowbrickTypeError\n\n\n##########################################################################\n## Model and Feature Information\n##########################################################################\n\ndef get_model_name(model):\n \"\"\"\n Detects the model name for a Scikit-Learn model or pipeline.\n\n Parameters\n ----------\n model: class or instance\n The object to determine the name for. If the model is an estimator it\n returns the class name; if it is a Pipeline it returns the class name\n of the final transformer or estimator in the Pipeline.\n\n Returns\n -------\n name : string\n The name of the model or pipeline.\n \"\"\"\n if not is_estimator(model):\n raise YellowbrickTypeError(\n \"Cannot detect the model name for non estimator: '{}'\".format(\n type(model)\n )\n )\n\n else:\n if isinstance(model, Pipeline):\n return get_model_name(model.steps[-1][-1])\n else:\n return model.__class__.__name__\n\n\ndef has_ndarray_int_columns(features, X):\n \"\"\" Checks if numeric feature columns exist in ndarray \"\"\"\n _, ncols = X.shape\n if not all(d.isdigit() for d in features if isinstance(d, str)) or not isinstance(X, np.ndarray):\n return False\n ndarray_columns = np.arange(0, ncols)\n feature_cols = np.unique([int(d) for d in features])\n return all(np.in1d(feature_cols, ndarray_columns))\n\n# Alias for closer name to isinstance and issubclass\nhasndarrayintcolumns = has_ndarray_int_columns\n\n\n##########################################################################\n## Numeric Computations\n##########################################################################\n\n#From here: http://stackoverflow.com/questions/26248654/numpy-return-0-with-divide-by-zero\ndef div_safe( numerator, denominator ):\n \"\"\"\n Ufunc-extension that returns 0 instead of nan when dividing numpy arrays\n\n Parameters\n ----------\n numerator: array-like\n\n denominator: scalar or array-like that can be validly divided by the numerator\n\n returns a numpy array\n\n example: div_safe( [-1, 0, 1], 0 ) == [0, 0, 0]\n \"\"\"\n #First handle scalars\n if np.isscalar(numerator):\n raise ValueError(\"div_safe should only be used with an array-like numerator\")\n\n #Then numpy arrays\n try:\n with np.errstate(divide='ignore', invalid='ignore'):\n result = np.true_divide( numerator, denominator )\n result[ ~ np.isfinite( result )] = 0 # -inf inf NaN\n return result\n except ValueError as e:\n raise e\n\n\n##########################################################################\n## String Computations\n##########################################################################\n\ndef slugify(text):\n \"\"\"\n Returns a slug of given text, normalizing unicode data for file-safe\n strings. 
Used for deciding where to write images to disk.\n\n Parameters\n ----------\n text : string\n The string to slugify\n\n Returns\n -------\n slug : string\n A normalized slug representation of the text\n\n .. seealso:: http://yashchandra.com/2014/05/08/how-to-generate-clean-url-or-a-slug-in-python/\n \"\"\"\n slug = re.sub(r'[^\\w]+', ' ', text)\n slug = \"-\".join(slug.lower().strip().split())\n return slug\n"
] | [
[
"sklearn.datasets.base.Bunch",
"numpy.genfromtxt"
],
[
"numpy.true_divide",
"numpy.isfinite",
"numpy.arange",
"numpy.in1d",
"numpy.isscalar",
"numpy.errstate"
]
] |
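Aside: div_safe in the helpers module above relies on numpy's errstate context to suppress divide-by-zero warnings and then zeroes out whatever came back non-finite. The same pattern in isolation:

import numpy as np

numerator = np.array([-1.0, 0.0, 1.0])
with np.errstate(divide='ignore', invalid='ignore'):
    result = np.true_divide(numerator, 0.0)  # yields -inf, nan, inf without warnings
    result[~np.isfinite(result)] = 0         # map -inf, inf and NaN to 0
print(result)  # [0. 0. 0.]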
takluyver/xray | [
"80c30ae343a2171c541da0387fed3926004030a7",
"80c30ae343a2171c541da0387fed3926004030a7"
] | [
"test/test_conventions.py",
"xray/variable.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport warnings\n\nfrom xray import conventions\nfrom . import TestCase, requires_netCDF4\n\n\nclass TestMaskedAndScaledArray(TestCase):\n def test(self):\n x = conventions.MaskedAndScaledArray(np.arange(3), fill_value=0)\n self.assertEqual(x.dtype, np.dtype('float'))\n self.assertEqual(x.shape, (3,))\n self.assertEqual(x.size, 3)\n self.assertEqual(x.ndim, 1)\n self.assertEqual(len(x), 3)\n self.assertArrayEqual([np.nan, 1, 2], x)\n\n x = conventions.MaskedAndScaledArray(np.arange(3), add_offset=1)\n self.assertArrayEqual(np.arange(3) + 1, x)\n\n x = conventions.MaskedAndScaledArray(np.arange(3), scale_factor=2)\n self.assertArrayEqual(2 * np.arange(3), x)\n\n x = conventions.MaskedAndScaledArray(np.array([-99, -1, 0, 1, 2]), -99, 0.01, 1)\n expected = np.array([np.nan, 0.99, 1, 1.01, 1.02])\n self.assertArrayEqual(expected, x)\n\n def test_0d(self):\n x = conventions.MaskedAndScaledArray(np.array(0), fill_value=0)\n self.assertTrue(np.isnan(x))\n self.assertTrue(np.isnan(x[...]))\n\n x = conventions.MaskedAndScaledArray(np.array(0), fill_value=10)\n self.assertEqual(0, x[...])\n\n\nclass TestCharToStringArray(TestCase):\n def test(self):\n array = np.array(list('abc'))\n actual = conventions.CharToStringArray(array)\n expected = np.array('abc')\n self.assertEqual(actual.dtype, expected.dtype)\n self.assertEqual(actual.shape, expected.shape)\n self.assertEqual(actual.size, expected.size)\n self.assertEqual(actual.ndim, expected.ndim)\n with self.assertRaises(TypeError):\n len(actual)\n self.assertArrayEqual(expected, actual)\n with self.assertRaises(IndexError):\n actual[:2]\n self.assertEqual(str(actual), 'abc')\n\n array = np.array([list('abc'), list('cdf')])\n actual = conventions.CharToStringArray(array)\n expected = np.array(['abc', 'cdf'])\n self.assertEqual(actual.dtype, expected.dtype)\n self.assertEqual(actual.shape, expected.shape)\n self.assertEqual(actual.size, expected.size)\n self.assertEqual(actual.ndim, expected.ndim)\n self.assertEqual(len(actual), len(expected))\n self.assertArrayEqual(expected, actual)\n self.assertArrayEqual(expected[:1], actual[:1])\n with self.assertRaises(IndexError):\n actual[:, :2]\n\n\nclass TestDatetime(TestCase):\n @requires_netCDF4\n def test_cf_datetime(self):\n import netCDF4 as nc4\n for num_dates, units in [\n (np.arange(100), 'days since 2000-01-01'),\n (np.arange(100).reshape(10, 10), 'days since 2000-01-01'),\n (12300 + np.arange(50), 'hours since 1680-01-01 00:00:00'),\n (10, 'days since 2000-01-01'),\n ([10], 'days since 2000-01-01'),\n ([[10]], 'days since 2000-01-01'),\n ([10, 10], 'days since 2000-01-01'),\n (0, 'days since 1000-01-01'),\n ([0], 'days since 1000-01-01'),\n ([[0]], 'days since 1000-01-01'),\n (np.arange(20), 'days since 1000-01-01'),\n (np.arange(0, 100000, 10000), 'days since 1900-01-01')\n ]:\n for calendar in ['standard', 'gregorian', 'proleptic_gregorian']:\n expected = nc4.num2date(num_dates, units, calendar)\n actual = conventions.decode_cf_datetime(num_dates, units, calendar)\n if (isinstance(actual, np.ndarray)\n and np.issubdtype(actual.dtype, np.datetime64)):\n self.assertEqual(actual.dtype, np.dtype('M8[ns]'))\n # For some reason, numpy 1.8 does not compare ns precision\n # datetime64 arrays as equal to arrays of datetime objects,\n # but it works for us precision. 
Thus, convert to us\n # precision for the actual array equal comparison...\n actual_cmp = actual.astype('M8[us]')\n else:\n actual_cmp = actual\n self.assertArrayEqual(expected, actual_cmp)\n encoded, _, _ = conventions.encode_cf_datetime(actual, units, calendar)\n self.assertArrayEqual(num_dates, np.around(encoded))\n if (hasattr(num_dates, 'ndim') and num_dates.ndim == 1\n and '1000' not in units):\n # verify that wrapping with a pandas.Index works\n # note that it *does not* currently work to even put\n # non-datetime64 compatible dates into a pandas.Index :(\n encoded, _, _ = conventions.encode_cf_datetime(\n pd.Index(actual), units, calendar)\n self.assertArrayEqual(num_dates, np.around(encoded))\n\n @requires_netCDF4\n def test_cf_datetime_nan(self):\n for num_dates, units, expected_list in [\n ([np.nan], 'days since 2000-01-01', ['NaT']),\n ([np.nan, 0], 'days since 2000-01-01',\n ['NaT', '2000-01-01T00:00:00Z']),\n ([np.nan, 0, 1], 'days since 2000-01-01',\n ['NaT', '2000-01-01T00:00:00Z', '2000-01-02T00:00:00Z']),\n ]:\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', 'All-NaN')\n actual = conventions.decode_cf_datetime(num_dates, units)\n expected = np.array(expected_list, dtype='datetime64[ns]')\n self.assertArrayEqual(expected, actual)\n\n def test_guess_time_units(self):\n for dates, expected in [(pd.date_range('1900-01-01', periods=5),\n 'days since 1900-01-01 00:00:00'),\n (pd.date_range('1900-01-01 12:00:00', freq='H',\n periods=2),\n 'hours since 1900-01-01 12:00:00'),\n (['1900-01-01', '1900-01-02',\n '1900-01-02 00:00:01'],\n 'seconds since 1900-01-01 00:00:00')]:\n self.assertEquals(expected, conventions.guess_time_units(dates))\n",
"import functools\nimport numpy as np\nimport pandas as pd\n\nfrom itertools import izip\nfrom collections import OrderedDict\n\nimport data_array\nimport dataset\nimport groupby\nimport indexing\nimport ops\nimport utils\n\nfrom common import AbstractArray\n\n\ndef as_variable(obj, strict=True):\n \"\"\"Convert an object into an Variable\n\n - If the object is already an `Variable`, return it.\n - If the object is a `DataArray`, return it if `strict=False` or return\n its variable if `strict=True`.\n - Otherwise, if the object has 'dimensions' and 'data' attributes, convert\n it into a new `Variable`.\n - If all else fails, attempt to convert the object into an `Variable` by\n unpacking it into the arguments for `Variable.__init__`.\n \"\"\"\n # TODO: consider extending this method to automatically handle Iris and\n # pandas objects.\n if strict and hasattr(obj, 'variable'):\n # extract the primary Variable from DataArrays\n obj = obj.variable\n if not isinstance(obj, (Variable, data_array.DataArray)):\n if hasattr(obj, 'dimensions') and hasattr(obj, 'values'):\n obj = Variable(obj.dimensions, obj.values,\n getattr(obj, 'attributes', None),\n getattr(obj, 'encoding', None))\n else:\n if isinstance(obj, np.ndarray):\n raise TypeError('cannot convert numpy.ndarray objects into '\n 'Variable objects without supplying '\n 'dimensions')\n try:\n obj = Variable(*obj)\n except TypeError:\n raise TypeError('cannot convert argument into an Variable')\n return obj\n\n\ndef _as_compatible_data(data):\n \"\"\"If data does not have the necessary attributes to be the private _data\n attribute, convert it to a np.ndarray and raise an warning\n \"\"\"\n # don't check for __len__ or __iter__ so as not to warn if data is a numpy\n # numeric type like np.float32\n required = ['dtype', 'shape', 'size', 'ndim']\n if (any(not hasattr(data, attr) for attr in required)\n or isinstance(data, np.string_)):\n data = utils.as_safe_array(data)\n elif not isinstance(data, (pd.Index, indexing.LazilyIndexedArray)):\n try:\n # we don't want nested self-described arrays\n # use try/except instead of hasattr to only calculate values once\n data = data.values\n except AttributeError:\n pass\n\n if isinstance(data, pd.Index):\n # check pd.Index first since it's (currently) an ndarray subclass\n data = PandasIndexAdapter(data)\n elif isinstance(data, np.ndarray):\n data = NumpyArrayAdapter(utils.as_safe_array(data))\n return data\n\n\nclass NumpyArrayAdapter(utils.NDArrayMixin):\n \"\"\"Wrap a NumPy array to use orthogonal indexing (array indexing\n accesses different dimensions independently, like netCDF4-python variables)\n \"\"\"\n # note: this object is somewhat similar to biggus.NumpyArrayAdapter in that\n # it implements orthogonal indexing, except it casts to a numpy array,\n # isn't lazy and supports writing values.\n def __init__(self, array):\n self.array = np.asarray(array)\n\n def __array__(self, dtype=None):\n return np.asarray(self.array, dtype=dtype)\n\n def _convert_key(self, key):\n key = indexing.expanded_indexer(key, self.ndim)\n if any(not isinstance(k, (int, slice)) for k in key):\n # key would trigger fancy indexing\n key = indexing.orthogonal_indexer(key, self.shape)\n return key\n\n def __getitem__(self, key):\n key = self._convert_key(key)\n return self.array[key]\n\n def __setitem__(self, key, value):\n key = self._convert_key(key)\n self.array[key] = value\n\n\nclass PandasIndexAdapter(utils.NDArrayMixin):\n \"\"\"Wrap a pandas.Index to be better about preserving dtypes and to handle\n indexing by 
length 1 tuples like numpy\n \"\"\"\n def __init__(self, array, dtype=None):\n self.array = utils.safe_cast_to_index(array)\n if dtype is None:\n dtype = array.dtype\n self._dtype = dtype\n\n @property\n def dtype(self):\n return self._dtype\n\n def __array__(self, dtype=None):\n if dtype is None:\n dtype = self.dtype\n return self.array.values.astype(dtype)\n\n def __getitem__(self, key):\n if isinstance(key, tuple) and len(key) == 1:\n # unpack key so it can index a pandas.Index object (pandas.Index\n # objects don't like tuples)\n key, = key\n if isinstance(key, int):\n return utils.as_array_or_item(self.array[key], dtype=self.dtype)\n else:\n if isinstance(key, slice) and key == slice(None):\n # pandas<0.14 does dtype inference when slicing; we would like\n # to avoid this if possible\n # https://github.com/pydata/pandas/issues/6370\n arr = self.array\n else:\n arr = self.array[key]\n return PandasIndexAdapter(arr, dtype=self.dtype)\n\n def __repr__(self):\n return ('%s(array=%r, dtype=%r)'\n % (type(self).__name__, self.array, self.dtype))\n\n\nclass Variable(AbstractArray):\n \"\"\"A netcdf-like variable consisting of dimensions, data and attributes\n which describe a single Array. A single Variable object is not fully\n described outside the context of its parent Dataset (if you want such a\n fully described object, use a DataArray instead).\n \"\"\"\n def __init__(self, dims, data, attributes=None, encoding=None):\n \"\"\"\n Parameters\n ----------\n dims : str or sequence of str\n Name(s) of the the data dimension(s). Must be either a string (only\n for 1D data) or a sequence of strings with length equal to the\n number of dimensions.\n data : array_like\n Data array which supports numpy-like data access.\n attributes : dict_like or None, optional\n Attributes to assign to the new variable. If None (default), an\n empty attribute dictionary is initialized.\n encoding : dict_like or None, optional\n Dictionary specifying how to encode this array's data into a\n serialized format like netCDF4. 
Currently used keys (for netCDF)\n include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.\n Well behaviored code to serialize a Variable should ignore\n unrecognized encoding items.\n \"\"\"\n self._data = _as_compatible_data(data)\n self._dimensions = self._parse_dimensions(dims)\n if attributes is None:\n attributes = {}\n self._attributes = OrderedDict(attributes)\n self.encoding = dict({} if encoding is None else encoding)\n\n @property\n def dtype(self):\n return self._data.dtype\n\n @property\n def shape(self):\n return self._data.shape\n\n @property\n def size(self):\n return self._data.size\n\n @property\n def ndim(self):\n return self._data.ndim\n\n def __len__(self):\n return len(self._data)\n\n def _in_memory(self):\n return isinstance(self._data, (NumpyArrayAdapter, PandasIndexAdapter))\n\n _cache_data_class = NumpyArrayAdapter\n\n def _data_cached(self):\n if not isinstance(self._data, self._cache_data_class):\n self._data = self._cache_data_class(self._data)\n return self._data\n\n def __getstate__(self):\n \"\"\"Always cache data as an in-memory array before pickling\"\"\"\n self._data_cached()\n # self.__dict__ is the default pickle object, we don't need to\n # implement our own __setstate__ method to make pickle work\n return self.__dict__\n\n @property\n def values(self):\n \"\"\"The variable's data as a numpy.ndarray\"\"\"\n return utils.as_array_or_item(self._data_cached())\n\n @values.setter\n def values(self, values):\n values = _as_compatible_data(values)\n if values.shape != self.shape:\n raise ValueError(\n \"replacement values must match the Variable's shape\")\n self._data = values\n\n def to_coord(self):\n \"\"\"Return this variable as a Coordinate\"\"\"\n return Coordinate(self.dimensions, self._data, self.attrs,\n encoding=self.encoding)\n\n @property\n def dimensions(self):\n \"\"\"Tuple of dimension names with which this variable is associated.\n \"\"\"\n return self._dimensions\n\n def _parse_dimensions(self, dims):\n if isinstance(dims, basestring):\n dims = (dims,)\n dims = tuple(dims)\n if len(dims) != self.ndim:\n raise ValueError('dimensions %s must have the same length as the '\n 'number of data dimensions, ndim=%s'\n % (dims, self.ndim))\n return dims\n\n @dimensions.setter\n def dimensions(self, value):\n self._dimensions = self._parse_dimensions(value)\n\n def __getitem__(self, key):\n \"\"\"Return a new Array object whose contents are consistent with\n getting the provided key from the underlying data.\n\n NB. 
__getitem__ and __setitem__ implement \"orthogonal indexing\" like\n netCDF4-python, where the key can only include integers, slices\n (including `Ellipsis`) and 1d arrays, each of which are applied\n orthogonally along their respective dimensions.\n\n The difference does not matter in most cases unless you are using numpy's\n \"fancy indexing,\" which can otherwise result in data arrays\n with shapes that are inconsistent with (or just uninterpretable in terms\n of) the variable's dimensions.\n\n If you really want to do indexing like `x[x > 0]`, manipulate the numpy\n array `x.values` directly.\n \"\"\"\n key = indexing.expanded_indexer(key, self.ndim)\n dimensions = [dim for k, dim in zip(key, self.dimensions)\n if not isinstance(k, int)]\n values = self._data[key]\n # orthogonal indexing should ensure the dimensionality is consistent\n if hasattr(values, 'ndim'):\n assert values.ndim == len(dimensions)\n else:\n assert len(dimensions) == 0\n return type(self)(dimensions, values, self.attrs, self.encoding)\n\n def __setitem__(self, key, value):\n \"\"\"__setitem__ is overloaded to access the underlying numpy values with\n orthogonal indexing.\n\n See __getitem__ for more details.\n \"\"\"\n self._data_cached()[key] = value\n\n @property\n def attributes(self):\n utils.alias_warning('attributes', 'attrs', 3)\n return self._attributes\n\n @attributes.setter\n def attributes(self, value):\n utils.alias_warning('attributes', 'attrs', 3)\n self._attributes = OrderedDict(value)\n\n @property\n def attrs(self):\n \"\"\"Dictionary of local attributes on this variable.\n \"\"\"\n return self._attributes\n\n @attrs.setter\n def attrs(self, value):\n self._attributes = OrderedDict(value)\n\n def copy(self, deep=True):\n \"\"\"Returns a copy of this object.\n\n If `deep=True`, the data array is loaded into memory and copied onto\n the new object. Dimensions, attributes and encodings are always copied.\n \"\"\"\n data = self.values.copy() if deep else self._data\n # note:\n # dimensions is already an immutable tuple\n # attributes and encoding will be copied when the new Array is created\n return type(self)(self.dimensions, data, self.attrs, self.encoding)\n\n def __copy__(self):\n return self.copy(deep=False)\n\n def __deepcopy__(self, memo=None):\n # memo does nothing but is required for compatibility with\n # copy.deepcopy\n return self.copy(deep=True)\n\n # mutable objects should not be hashable\n __hash__ = None\n\n def indexed(self, **indexers):\n \"\"\"Return a new array indexed along the specified dimension(s).\n\n Parameters\n ----------\n **indexers : {dim: indexer, ...}\n Keyword arguments with names matching dimensions and values given\n by integers, slice objects or arrays.\n\n Returns\n -------\n obj : Array object\n A new Array with the selected data and dimensions. In general,\n the new variable's data will be a view of this variable's data,\n unless numpy fancy indexing was triggered by using an array\n indexer, in which case the data will be a copy.\n \"\"\"\n invalid = [k for k in indexers if k not in self.dimensions]\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n key = [slice(None)] * self.ndim\n for i, dim in enumerate(self.dimensions):\n if dim in indexers:\n key[i] = indexers[dim]\n return self[tuple(key)]\n\n def transpose(self, *dimensions):\n \"\"\"Return a new Variable object with transposed dimensions.\n\n Parameters\n ----------\n *dimensions : str, optional\n By default, reverse the dimensions. 
Otherwise, reorder the\n dimensions to this order.\n\n Returns\n -------\n transposed : Variable\n The returned object has transposed data and dimensions with the\n same attributes as the original.\n\n Notes\n -----\n Although this operation returns a view of this variable's data, it is\n not lazy -- the data will be fully loaded.\n\n See Also\n --------\n numpy.transpose\n \"\"\"\n if len(dimensions) == 0:\n dimensions = self.dimensions[::-1]\n axes = self.get_axis_num(dimensions)\n data = self.values.transpose(*axes)\n return type(self)(dimensions, data, self.attrs, self.encoding)\n\n def squeeze(self, dimension=None):\n \"\"\"Return a new Variable object with squeezed data.\n\n Parameters\n ----------\n dimension : None or str or tuple of str, optional\n Selects a subset of the length one dimensions. If a dimension is\n selected with length greater than one, an error is raised. If\n None, all length one dimensions are squeezed.\n\n Returns\n -------\n squeezed : Variable\n This array, but with all or a subset of the dimensions of\n length 1 removed.\n\n Notes\n -----\n Although this operation returns a view of this variable's data, it is\n not lazy -- the data will be fully loaded.\n\n See Also\n --------\n numpy.squeeze\n \"\"\"\n dimensions = dict(zip(self.dimensions, self.shape))\n return utils.squeeze(self, dimensions, dimension)\n\n def reduce(self, func, dimension=None, axis=None, **kwargs):\n \"\"\"Reduce this array by applying `func` along some dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of reducing an\n np.ndarray over an integer valued axis.\n dimension : str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `func`. Only one of the 'dimension'\n and 'axis' arguments can be supplied. If neither are supplied, then\n the reduction is calculated over the flattened array (by calling\n `func(x)` without an axis argument).\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n if dimension is not None and axis is not None:\n raise ValueError(\"cannot supply both 'axis' and 'dimension' \"\n \"arguments\")\n\n if dimension is not None:\n axis = self.get_axis_num(dimension)\n data = func(self.values, axis=axis, **kwargs)\n\n removed_axes = (range(self.ndim) if axis is None\n else np.atleast_1d(axis) % self.ndim)\n dims = [dim for n, dim in enumerate(self.dimensions)\n if n not in removed_axes]\n\n return Variable(dims, data)\n\n @classmethod\n def concat(cls, variables, dimension='stacked_dimension',\n indexers=None, length=None, shortcut=False):\n \"\"\"Concatenate variables along a new or existing dimension.\n\n Parameters\n ----------\n variables : iterable of Array\n Arrays to stack together. Each variable is expected to have\n matching dimensions and shape except for along the stacked\n dimension.\n dimension : str or DataArray, optional\n Name of the dimension to stack along. This can either be a new\n dimension name, in which case it is added along axis=0, or an\n existing dimension name, in which case the location of the\n dimension is unchanged. 
Where to insert the new dimension is\n determined by the first variable.\n indexers : iterable of indexers, optional\n Iterable of indexers of the same length as variables which\n specifies how to assign variables along the given dimension. If\n not supplied, indexers is inferred from the length of each\n variable along the dimension, and the variables are stacked in the\n given order.\n length : int, optional\n Length of the new dimension. This is used to allocate the new data\n array for the stacked variable data before iterating over all\n items, which is thus more memory efficient and a bit faster. If\n dimension is provided as a DataArray, length is calculated\n automatically.\n shortcut : bool, optional\n This option is used internally to speed up groupby operations.\n If `shortcut` is True, some checks of internal consistency between\n arrays to concatenate are skipped.\n\n Returns\n -------\n stacked : Variable\n Concatenated Variable formed by stacking all the supplied variables\n along the given dimension.\n \"\"\"\n if not isinstance(dimension, basestring):\n length = dimension.size\n dimension, = dimension.dimensions\n\n if length is None or indexers is None:\n # so much for lazy evaluation! we need to look at all the variables\n # to figure out the indexers and/or dimensions of the stacked\n # variable\n variables = list(variables)\n steps = [var.shape[var.get_axis_num(dimension)]\n if dimension in var.dimensions else 1\n for var in variables]\n if length is None:\n length = sum(steps)\n if indexers is None:\n indexers = []\n i = 0\n for step in steps:\n indexers.append(slice(i, i + step))\n i += step\n if i != length:\n raise ValueError('actual length of stacked variables '\n 'along %s is %r but expected length was '\n '%s' % (dimension, i, length))\n\n # initialize the stacked variable with empty data\n first_var, variables = groupby.peek_at(variables)\n if dimension in first_var.dimensions:\n axis = first_var.get_axis_num(dimension)\n shape = tuple(length if n == axis else s\n for n, s in enumerate(first_var.shape))\n dims = first_var.dimensions\n else:\n axis = 0\n shape = (length,) + first_var.shape\n dims = (dimension,) + first_var.dimensions\n\n concatenated = cls(dims, np.empty(shape, dtype=first_var.dtype))\n concatenated.attrs.update(first_var.attrs)\n\n alt_dims = tuple(d for d in dims if d != dimension)\n\n # copy in the data from the variables\n for var, indexer in izip(variables, indexers):\n if not shortcut:\n # do sanity checks & attributes clean-up\n if dimension in var.dimensions:\n # transpose verifies that the dimensions are equivalent\n if var.dimensions != concatenated.dimensions:\n var = var.transpose(*concatenated.dimensions)\n elif var.dimensions != alt_dims:\n raise ValueError('inconsistent dimensions')\n utils.remove_incompatible_items(concatenated.attrs, var.attrs)\n\n key = tuple(indexer if n == axis else slice(None)\n for n in range(concatenated.ndim))\n concatenated.values[key] = var.values\n\n return concatenated\n\n def _data_equals(self, other):\n return (self._data is other._data\n or ((not isinstance(self.values, np.ndarray)\n or not isinstance(other.values, np.ndarray))\n and self.values == other.values)\n or utils.array_equiv(self.values, other.values))\n\n def equals(self, other):\n \"\"\"True if two Variables have the same dimensions and values;\n otherwise False.\n\n Variables can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n\n This method is necessary because `v1 == v2` for Variables\n does 
element-wise comparisons (like numpy.ndarrays).\n \"\"\"\n other = getattr(other, 'variable', other)\n try:\n return (self.dimensions == other.dimensions\n and self._data_equals(other))\n except (TypeError, AttributeError):\n return False\n\n def identical(self, other):\n \"\"\"Like equals, but also checks attributes.\n \"\"\"\n try:\n return (utils.dict_equal(self.attrs, other.attrs)\n and self.equals(other))\n except (TypeError, AttributeError):\n return False\n\n def __array_wrap__(self, obj, context=None):\n return Variable(self.dimensions, obj)\n\n @staticmethod\n def _unary_op(f):\n @functools.wraps(f)\n def func(self, *args, **kwargs):\n return Variable(self.dimensions, f(self.values, *args, **kwargs))\n return func\n\n @staticmethod\n def _binary_op(f, reflexive=False):\n @functools.wraps(f)\n def func(self, other):\n if isinstance(other, data_array.DataArray):\n return NotImplemented\n self_data, other_data, dims = _broadcast_variable_data(self, other)\n new_data = (f(self_data, other_data)\n if not reflexive\n else f(other_data, self_data))\n return Variable(dims, new_data)\n return func\n\n @staticmethod\n def _inplace_binary_op(f):\n @functools.wraps(f)\n def func(self, other):\n self_data, other_data, dims = _broadcast_variable_data(self, other)\n if dims != self.dimensions:\n raise ValueError('dimensions cannot change for in-place '\n 'operations')\n self.values = f(self_data, other_data)\n return self\n return func\n\nops.inject_special_operations(Variable)\n\n\nclass Coordinate(Variable):\n \"\"\"Subclass of Variable which caches its data as a pandas.Index instead of\n a numpy.ndarray.\n\n Coordinates must always be 1-dimensional. In addition to Variable methods,\n they support some pandas.Index methods directly (e.g., get_indexer).\n \"\"\"\n _cache_data_class = PandasIndexAdapter\n\n def __init__(self, *args, **kwargs):\n super(Coordinate, self).__init__(*args, **kwargs)\n if self.ndim != 1:\n raise ValueError('%s objects must be 1-dimensional' %\n type(self).__name__)\n\n def __getitem__(self, key):\n values = self._data[key]\n if not hasattr(values, 'ndim') or values.ndim == 0:\n return Variable((), values, self.attrs, self.encoding)\n else:\n return type(self)(self.dimensions, values, self.attrs,\n self.encoding)\n\n def __setitem__(self, key, value):\n raise TypeError('%s values cannot be modified' % type(self).__name__)\n\n def copy(self, deep=True):\n \"\"\"Returns a copy of this object.\n\n If `deep=True`, the values array is loaded into memory and copied onto\n the new object. 
Dimensions, attributes and encodings are always copied.\n \"\"\"\n # there is no need to copy the index values here even if deep=True\n # since pandas.Index objects are immutable\n data = PandasIndexAdapter(self) if deep else self._data\n return type(self)(self.dimensions, data, self.attrs, self.encoding)\n\n @property\n def as_index(self):\n \"\"\"The variable's data as a pandas.Index\"\"\"\n return self._data_cached().array\n\n def _data_equals(self, other):\n return self.as_index.equals(other.to_coord().as_index)\n\n def to_coord(self):\n \"\"\"Return this variable as a Coordinate\"\"\"\n return self\n\n def get_indexer(self, label):\n return self.as_index.get_indexer(label)\n\n def slice_indexer(self, start=None, stop=None, step=None):\n return self.as_index.slice_indexer(start, stop, step)\n\n def slice_locs(self, start=None, stop=None):\n return self.as_index.slice_locs(start, stop)\n\n def get_loc(self, label):\n return self.as_index.get_loc(label)\n\n @property\n def is_monotonic(self):\n return self.as_index.is_monotonic\n\n def is_numeric(self):\n return self.as_index.is_numeric()\n\n\ndef broadcast_variables(first, second):\n \"\"\"Given two Variables, return two Variables with matching dimensions and\n numpy broadcast compatible data.\n\n Parameters\n ----------\n first, second : Variable\n Variable objects to broadcast.\n\n Returns\n -------\n first_broadcast, second_broadcast : Variable\n Broadcast arrays. The data on each variable will be a view of the\n data on the corresponding original arrays, but dimensions will be\n reordered and inserted so that both broadcast arrays have the same\n dimensions. The new dimensions are sorted in order of appearance in the\n first variable's dimensions followed by the second variable's\n dimensions.\n \"\"\"\n # TODO: add unit tests specifically for this function\n # validate dimensions\n dim_lengths = dict(zip(first.dimensions, first.shape))\n for k, v in zip(second.dimensions, second.shape):\n if k in dim_lengths and dim_lengths[k] != v:\n raise ValueError('operands could not be broadcast together '\n 'with mismatched lengths for dimension %r: %s'\n % (k, (dim_lengths[k], v)))\n for dimensions in [first.dimensions, second.dimensions]:\n if len(set(dimensions)) < len(dimensions):\n raise ValueError('broadcasting requires that neither operand '\n 'has duplicate dimensions: %r'\n % list(dimensions))\n\n # build dimensions for new Array\n second_only_dims = [d for d in second.dimensions\n if d not in first.dimensions]\n dimensions = list(first.dimensions) + second_only_dims\n\n # expand first_data's dimensions so it's broadcast compatible after\n # adding second's dimensions at the end\n first_data = first.values[(Ellipsis,) + (None,) * len(second_only_dims)]\n new_first = Variable(dimensions, first_data, first.attrs, first.encoding)\n # expand and reorder second_data so the dimensions line up\n first_only_dims = [d for d in dimensions if d not in second.dimensions]\n second_dims = list(second.dimensions) + first_only_dims\n second_data = second.values[(Ellipsis,) + (None,) * len(first_only_dims)]\n new_second = Variable(second_dims, second_data, second.attrs,\n second.encoding).transpose(*dimensions)\n return new_first, new_second\n\n\ndef _broadcast_variable_data(self, other):\n if isinstance(other, dataset.Dataset):\n raise TypeError('datasets do not support mathematical operations')\n elif all(hasattr(other, attr) for attr\n in ['dimensions', 'values', 'shape', 'encoding']):\n # `other` satisfies the necessary Variable API for 
broadcast_variables\n new_self, new_other = broadcast_variables(self, other)\n self_data = new_self.values\n other_data = new_other.values\n dimensions = new_self.dimensions\n else:\n # rely on numpy broadcasting rules\n self_data = self.values\n other_data = other\n dimensions = self.dimensions\n return self_data, other_data, dimensions\n"
] | [
[
"numpy.isnan",
"numpy.arange",
"numpy.issubdtype",
"numpy.around",
"pandas.Index",
"numpy.dtype",
"pandas.date_range",
"numpy.array"
],
[
"numpy.asarray",
"numpy.atleast_1d",
"numpy.empty"
]
] |
Gabvaztor/tensorflowCode | [
"e206ea4544552b87c2d43274cea3182f6b385a87"
] | [
"src/examples/animations/AnimationGif.py"
] | [
"#IMPORTAMOS LIBRERIAS.\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport animatplot as amp\n\n#INTRODUCIMOS DATOS.\nx = np.linspace(0, 1, 50)\nt = np.linspace(0, 1, 20)\n\n\nX, T = np.meshgrid(x, t)\nY = np.zeros(int(51*(X+T)))\n\n#CREAMOS OBJETO \"timeline\".\ntimeline = amp.Timeline(t, units='s', fps=60)\n\n#GENERAMOS ANIMACIÓN.\nblock = amp.blocks.Line(X, Y, marker=\".\", linestyle=\"-\", color=\"r\")\nanim = amp.Animation([block],timeline)\n\n#DEFINICIÓN DE ETIQUETAS PARA TITULO Y EJES.\nplt.title(\"Sine Wave\")\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\n\n#GUARDAMOS ANIMACIÓN.\n#anim.save_gif('graph_anim.gif')\n\n#INTRODUCIMOS LÍNEA DE TIEMPO\n#Y BOTÓN PAUSE/PLAY\nanim.controls()\n\n#REPRESENTAMOS GRÁFICA.\nplt.show()"
] | [
[
"matplotlib.pyplot.title",
"numpy.linspace",
"matplotlib.pyplot.xlabel",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
edgargmartinez/OpenPNM | [
"c68745993b3e9895f53938164a9cf6305500748e",
"c68745993b3e9895f53938164a9cf6305500748e",
"b3873d35270b0acaad019264368d0055c677d159",
"b3873d35270b0acaad019264368d0055c677d159"
] | [
"tests/unit/models/physics/MeniscusTest.py",
"openpnm/algorithms/InvasionPercolation.py",
"tests/unit/utils/WorkspaceTest.py",
"tests/unit/phases/mixtures/GenericMixtureTest.py"
] | [
"import openpnm as op\nimport openpnm.models.physics as pm\nimport scipy as sp\n\n\nclass MeniscusTest:\n\n def setup_class(self):\n sp.random.seed(1)\n self.net = op.network.Cubic(shape=[5, 1, 5], spacing=5e-5)\n self.geo = op.geometry.StickAndBall(network=self.net,\n pores=self.net.pores(),\n throats=self.net.throats())\n self.phase = op.phases.Water(network=self.net)\n self.phys = op.physics.Standard(network=self.net,\n phase=self.phase,\n geometry=self.geo)\n\n def test_toroidal_touch(self):\n phys = self.phys\n r_tor = 1e-6\n self.geo['throat.touch_length'] = 2e-6\n phys.add_model(propname='throat.tor_max',\n model=pm.meniscus.purcell,\n mode='max',\n r_toroid=r_tor)\n phys.add_model(propname='throat.tor_touch',\n model=pm.meniscus.purcell,\n mode='touch',\n r_toroid=r_tor)\n assert sp.any(phys['throat.tor_touch'] < phys['throat.tor_max'])\n\n def test_sinusoidal_touch(self):\n phys = self.phys\n self.geo['throat.amplitude'] = 5e-6\n self.geo['throat.touch_length'] = 1e-6\n phys.add_model(propname='throat.sin_pressure_max',\n model=pm.meniscus.sinusoidal,\n mode='max')\n phys.add_model(propname='throat.sin_pressure_touch',\n model=pm.meniscus.sinusoidal,\n mode='touch')\n h = phys.check_data_health()\n for check in h.values():\n if len(check) > 0:\n assert 1 == 2\n assert sp.any((phys['throat.sin_pressure_touch'] <\n phys['throat.sin_pressure_max']))\n\n def test_sinusoidal(self):\n phys = self.phys\n self.geo['throat.amplitude'] = 5e-6\n phys.add_model(propname='throat.sin_pressure',\n model=pm.meniscus.sinusoidal,\n mode='max')\n phys.add_model(propname='throat.sin_meniscus',\n model=pm.meniscus.sinusoidal,\n mode='men',\n target_Pc=5000)\n h = phys.check_data_health()\n for check in h.values():\n if len(check) > 0:\n assert 1 == 2\n\n def test_toroidal(self):\n phys = self.phys\n r_tor = 1e-6\n phys.add_model(propname='throat.purcell_pressure',\n model=pm.capillary_pressure.purcell,\n r_toroid=r_tor)\n phys.add_model(propname='throat.tor_pressure',\n model=pm.meniscus.purcell,\n mode='max',\n r_toroid=r_tor,\n num_points=1000)\n phys.add_model(propname='throat.tor_meniscus',\n model=pm.meniscus.purcell,\n mode='men',\n r_toroid=r_tor,\n target_Pc=5000)\n a = sp.around(phys['throat.purcell_pressure'], 10)\n b = sp.around(phys['throat.tor_pressure'], 10)\n assert sp.allclose(a, b)\n h = phys.check_data_health()\n for check in h.values():\n if len(check) > 0:\n assert 1 == 2\n\n def test_general_toroidal(self):\n phys = self.phys\n r_tor = 1e-6\n phys.add_model(propname='throat.purcell_pressure',\n model=pm.capillary_pressure.purcell,\n r_toroid=r_tor)\n phys['throat.scale_a'] = r_tor\n phys['throat.scale_b'] = r_tor\n phys.add_model(propname='throat.general_pressure',\n model=pm.meniscus.general_toroidal,\n mode='max',\n num_points=1000)\n a = sp.around(phys['throat.purcell_pressure'], 10)\n b = sp.around(phys['throat.general_pressure'], 10)\n assert sp.allclose(a, b)\n h = phys.check_data_health()\n for check in h.values():\n if len(check) > 0:\n assert 1 == 2\n\n\nif __name__ == '__main__':\n\n t = MeniscusTest()\n self = t\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print('running test: '+item)\n t.__getattribute__(item)()\n",
"import heapq as hq\nimport scipy as sp\nimport numpy as np\nfrom openpnm.algorithms import GenericAlgorithm\nfrom openpnm.utils import logging\nlogger = logging.getLogger(__name__)\n\n\nclass InvasionPercolation(GenericAlgorithm):\n r\"\"\"\n A classic/basic invasion percolation algorithm optimized for speed.\n\n Parameters\n ----------\n network : OpenPNM Network object\n The Network upon which the invasion will occur.\n\n Notes\n ----\n This algorithm uses a binary heap to store all a list of all accessible\n throats, sorted according to entry pressure. This means that item [0] in\n the heap is the most easily invaded throat, so looking up which throat\n to invade next is computationally trivial. In order to keep the list\n sorted new throats to the list takes more time, however, the heap data\n structure is very efficient at this. Interested users can consult the\n wikipedia page on `binary heaps\n <https://en.wikipedia.org/wiki/Binary_heap>`_ for more information.\n\n\n Examples\n --------\n Start by importing the usual packages:\n\n >>> import openpnm as op\n >>> import scipy as sp\n >>> import matplotlib.pyplot as plt\n\n Create 2D cubic network for easier visualizaiton:\n\n >>> S = sp.array([100, 100, 1])\n >>> pn = op.network.Cubic(shape=S, spacing=0.0001, name='pn11')\n\n Add a basic geometry:\n\n >>> geom = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)\n\n Create an invading phase, and attach the capillary pressure model:\n\n >>> water = op.phases.Water(network=pn)\n >>> water.add_model(propname='throat.entry_pressure',\n ... model=op.models.physics.capillary_pressure.washburn)\n\n Initialize an invasion percolation object and define inlets:\n\n >>> ip = op.algorithms.InvasionPercolation(network=pn)\n >>> ip.setup(phase=water)\n >>> ip.set_inlets(pores=0)\n >>> ip.run()\n\n After running the algorithm the invading phase configuration at a given\n saturation can be obtained and assigned to the phase object:\n\n >>> water.update(ip.results(Snwp=0.5))\n\n Because it was a 2D network it's easy to quickly visualize the invasion\n pattern as an image for verification:\n\n .. note::\n\n Because the network is 2D and cubic, an image can be generated with\n color corresponding to a value. The following plots the entire\n invasion sequence, and the water configuraiton at Snwp = 0.5.\n\n ``plt.subplot(1, 2, 1)``\n\n ``plt.imshow(sp.reshape(ip['pore.invasion_sequence'], newshape=S[S > 1]))``\n\n ``plt.subplot(1, 2, 2)``\n\n ``plt.imshow(sp.reshape(water['pore.occupancy'], newshape=S[S > 1]))``\n\n \"\"\"\n def __init__(self, settings={}, phase=None, **kwargs):\n def_set = {'phase': None,\n 'pore_volume': 'pore.volume',\n 'throat_volume': 'throat.volume',\n 'entry_pressure': 'throat.entry_pressure',\n 'gui': {'setup': {'phase': None,\n 'entry_pressure': '',\n 'pore_volume': '',\n 'throat_volume': ''},\n 'set_inlets': {'pores': None,\n 'overwrite': False},\n 'apply_trapping': {'outlets': None}\n }\n }\n super().__init__(**kwargs)\n self.settings.update(def_set)\n self.settings.update(settings)\n if phase is not None:\n self.setup(phase=phase)\n\n def setup(self, phase, entry_pressure='', pore_volume='', throat_volume=''):\n r\"\"\"\n Set up the required parameters for the algorithm\n\n Parameters\n ----------\n phase : OpenPNM Phase object\n The phase to be injected into the Network. The Phase must have the\n capillary entry pressure values for the system.\n\n entry_pressure : string\n The dictionary key to the capillary entry pressure. 
If none is\n supplied then the current value is retained. The default is\n 'throat.capillary_pressure'.\n\n pore_volume : string\n The dictionary key to the pore volume. If none is supplied then\n the current value is retained. The default is 'pore.volume'.\n\n throat_volume : string\n The dictionary key to the throat volume. If none is supplied then\n the current value is retained. The default is 'throat.volume'.\n\n \"\"\"\n self.settings['phase'] = phase.name\n if pore_volume:\n self.settings['pore_volume'] = pore_volume\n if throat_volume:\n self.settings['throat_volume'] = throat_volume\n if entry_pressure:\n self.settings['entry_pressure'] = entry_pressure\n\n # Setup arrays and info\n self['throat.entry_pressure'] = phase[self.settings['entry_pressure']]\n # Indices into t_entry giving a sorted list\n self['throat.sorted'] = sp.argsort(self['throat.entry_pressure'], axis=0)\n self['throat.order'] = 0\n self['throat.order'][self['throat.sorted']] = sp.arange(0, self.Nt)\n self['throat.invasion_sequence'] = -1\n self['pore.invasion_sequence'] = -1\n self._tcount = 0\n\n def set_inlets(self, pores=[], overwrite=False):\n r\"\"\"\n\n Parameters\n ----------\n pores : array_like\n The list of inlet pores from which the Phase can enter the Network\n \"\"\"\n if overwrite:\n self['pore.invasion_sequence'] = -1\n self['pore.invasion_sequence'][pores] = 0\n\n # Perform initial analysis on input pores\n Ts = self.project.network.find_neighbor_throats(pores=pores)\n self.queue = []\n [hq.heappush(self.queue, T) for T in self['throat.order'][Ts]]\n\n def run(self, n_steps=None):\n r\"\"\"\n Perform the algorithm\n\n Parameters\n ----------\n n_steps : int\n The number of throats to invade during this step\n\n \"\"\"\n if n_steps is None:\n n_steps = sp.inf\n\n queue = self.queue\n if len(queue) == 0:\n logger.warn('queue is empty, this network is fully invaded')\n return\n t_sorted = self['throat.sorted']\n t_order = self['throat.order']\n t_inv = self['throat.invasion_sequence']\n p_inv = self['pore.invasion_sequence']\n\n count = 0\n while (len(queue) > 0) and (count < n_steps):\n # Find throat at the top of the queue\n t = hq.heappop(queue)\n # Extract actual throat number\n t_next = t_sorted[t]\n t_inv[t_next] = self._tcount\n # If throat is duplicated\n while len(queue) > 0 and queue[0] == t:\n # Note: Preventing duplicate entries below might save some time\n t = hq.heappop(queue)\n # Find pores connected to newly invaded throat\n Ps = self.project.network['throat.conns'][t_next]\n # Remove already invaded pores from Ps\n Ps = Ps[p_inv[Ps] < 0]\n if len(Ps) > 0:\n p_inv[Ps] = self._tcount\n Ts = self.project.network.find_neighbor_throats(pores=Ps)\n Ts = Ts[t_inv[Ts] < 0]  # Remove invaded throats from Ts\n [hq.heappush(queue, T) for T in t_order[Ts]]\n count += 1\n self._tcount += 1\n self['throat.invasion_sequence'] = t_inv\n self['pore.invasion_sequence'] = p_inv\n\n def results(self, Snwp=None):\n r\"\"\"\n Returns the phase configuration at the specified non-wetting phase\n (invading phase) saturation.\n\n Parameters\n ----------\n Snwp : scalar, between 0 and 1\n The network saturation for which the phase configuration is\n desired.\n\n Returns\n -------\n A dictionary containing arrays that describe the pore and throat\n distribution at the given saturation. 
Specifically, these are:\n\n **'pore.occupancy'** : 1 indicates the pore is invaded and 0\n otherwise.\n\n **'throat.occupancy'** : Same as described above but for throats.\n\n \"\"\"\n if Snwp is None:\n Np = self['pore.invasion_sequence']\n Nt = self['throat.invasion_sequence']\n data = {'pore.invasion_sequence': Np,\n 'throat.invasion_sequence': Nt}\n else:\n net = self.project.network\n P12 = net['throat.conns']\n # Fetch void volume for pores and throats\n Vp = net[self.settings['pore_volume']]\n Vt = net[self.settings['throat_volume']]\n # Fetch the order of filling\n Np = self['pore.invasion_sequence']\n Nt = self['throat.invasion_sequence']\n # Create Nt-long mask of which pores were filled when throat was filled\n Pinv = (Np[P12].T == Nt).T\n # If a pore and throat filled together, find combined volume\n Vinv = sp.vstack(((Pinv*Vp[P12]).T, Vt)).T\n Vinv = sp.sum(Vinv, axis=1)\n # Convert to cumulative volume filled as each throat is invaded\n x = sp.argsort(Nt)  # Find order throats were invaded\n Vinv_cum = np.cumsum(Vinv[x])\n # Normalized cumulative volume filled into saturation\n S = Vinv_cum/(Vp.sum() + Vt.sum())\n # Find throat invasion step where Snwp was reached\n try:\n N = sp.where(S < Snwp)[0][-1]\n except:\n N = -np.inf\n data = {'pore.occupancy': Np <= N, 'throat.occupancy': Nt <= N}\n return data\n\n def apply_trapping(self, outlets):\n \"\"\"\n Apply trapping based on algorithm described by Y. Masson [1].\n It is applied as a post-process and runs the percolation algorithm in\n reverse assessing the occupancy of pore neighbors. Consider the\n following scenario when running standard IP without trapping;\n 3 situations can happen after each invasion step:\n\n * The number of defending clusters stays the same and clusters can\n shrink\n * A cluster of size one is suppressed\n * A cluster is split into multiple clusters\n\n In reverse the following opposite situations can happen:\n\n * The number of defending clusters stays the same and clusters can\n grow\n * A cluster of size one is created\n * Multiple clusters merge into one cluster\n\n With trapping the reversed rules are adjusted so that only clusters\n that do not connect to a sink can grow and merge. At the point that a\n neighbor connected to a sink is touched the trapped cluster stops\n growing as this is the point of trapping in forward invasion time.\n\n Logger info displays the invasion sequence and pore index and a message\n with condition number based on the modified trapping rules and the\n assignment of the pore to a given cluster.\n\n Initially all invaded pores are given cluster label -1.\n Outlets / Sinks are given -2.\n New clusters that grow into fully trapped clusters are either\n identified at the point of breakthrough or grow from nothing if the\n full invasion sequence is run; they are assigned numbers from 0 up.\n\n Ref:\n [1] Masson, Y., 2016. A fast two-step algorithm for invasion\n percolation with trapping. Computers & Geosciences, 90, pp.41-48\n\n Parameters\n ----------\n outlets : list or array of pore indices for defending fluid to escape\n through\n\n Returns\n -------\n Creates a pore array called 'pore.clusters' in the Algorithm\n dictionary. 
Any positive number is a trapped cluster.\n\n Also creates 2 boolean arrays Np and Nt long called '<element>.trapped'\n \"\"\"\n # First see if network is fully invaded\n net = self.project.network\n invaded_ps = self['pore.invasion_sequence'] > -1\n if ~np.all(invaded_ps):\n # Put defending phase into clusters\n clusters = net.find_clusters2(~invaded_ps)\n # Identify clusters that are connected to an outlet and set to -2\n # -1 is the invaded fluid\n # -2 is the defender fluid able to escape\n # All others are now trapped clusters which grow as invasion is reversed\n out_clusters = sp.unique(clusters[outlets])\n for c in out_clusters:\n if c >= 0:\n clusters[clusters == c] = -2\n else:\n # Go from end\n clusters = np.ones(net.Np, dtype=int)*-1\n clusters[outlets] = -2\n\n # Turn into a list for indexing\n inv_seq = np.vstack((self['pore.invasion_sequence'].astype(int),\n np.arange(0, net.Np, dtype=int))).T\n # Reverse sort list\n inv_seq = inv_seq[inv_seq[:, 0].argsort()][::-1]\n next_cluster_num = np.max(clusters)+1\n # For all the steps after the inlets are set up to break-through\n # Reverse the sequence and assess the neighbors' cluster state\n stopped_clusters = np.zeros(net.Np, dtype=bool)\n all_neighbors = net.find_neighbor_pores(net.pores(), flatten=False,\n include_input=True)\n for un_seq, pore in inv_seq:\n if pore not in outlets and un_seq > 0:  # Skip inlets and outlets\n nc = clusters[all_neighbors[pore]]  # Neighboring clusters\n unique_ns = np.unique(nc[nc != -1])  # Unique Neighbors\n seq_pore = \"S:\"+str(un_seq)+\" P:\"+str(pore)\n if np.all(nc == -1):\n # This is the start of a new trapped cluster\n clusters[pore] = next_cluster_num\n next_cluster_num += 1\n msg = (seq_pore+\" C:1 new cluster number: \" +\n str(clusters[pore]))\n logger.info(msg)\n elif len(unique_ns) == 1:\n # Grow the only connected neighboring cluster\n if not stopped_clusters[unique_ns[0]]:\n clusters[pore] = unique_ns[0]\n msg = (seq_pore+\" C:2 joins cluster number: \" +\n str(clusters[pore]))\n logger.info(msg)\n else:\n clusters[pore] = -2\n elif -2 in unique_ns:\n # We have reached a sink neighbor, stop growing cluster\n msg = (seq_pore+\" C:3 joins sink cluster\")\n logger.info(msg)\n clusters[pore] = -2\n # Stop growth and merging\n stopped_clusters[unique_ns[unique_ns > -1]] = True\n else:\n # We might be able to do some merging\n # Check if any stopped clusters are neighbors\n if np.any(stopped_clusters[unique_ns]):\n msg = (seq_pore+\" C:4 joins sink cluster\")\n logger.info(msg)\n clusters[pore] = -2\n # Stop growing all neighboring clusters\n stopped_clusters[unique_ns] = True\n else:\n # Merge multiple un-stopped trapped clusters\n new_num = unique_ns[0]\n clusters[pore] = new_num\n for c in unique_ns:\n clusters[clusters == c] = new_num\n msg = (seq_pore + \" C:5 merge clusters: \" +\n str(c) + \" into \"+str(new_num))\n logger.info(msg)\n\n # And now return clusters\n self['pore.clusters'] = clusters\n logger.info(\"Number of trapped clusters: \" +\n str(np.sum(np.unique(clusters) >= 0)))\n self['pore.trapped'] = self['pore.clusters'] > -1\n trapped_ts = net.find_neighbor_throats(self['pore.trapped'])\n self['throat.trapped'] = np.zeros([net.Nt], dtype=bool)\n self['throat.trapped'][trapped_ts] = True\n self['pore.invasion_sequence'][self['pore.trapped']] = -1\n self['throat.invasion_sequence'][self['throat.trapped']] = -1\n",
"import openpnm as op\nimport scipy as sp\nimport pytest\nimport os\nimport pickle\n\n\nclass WorkspaceTest:\n\n def setup_class(self):\n self.ws = op.Workspace()\n self.ws.clear()\n self.net = op.network.Cubic(shape=[5, 5, 5])\n self.phase = op.phases.Air(network=self.net)\n\n def test_new_project_no_name(self):\n proj = self.ws.new_project()\n assert proj.name in self.ws.keys()\n self.ws.clear()\n\n def test_new_project_w_name(self):\n proj = self.ws.new_project(name='bob')\n assert proj.name in self.ws.keys()\n self.ws.clear()\n\n def test_new_project_duplicate_name(self):\n proj = self.ws.new_project('foo')\n with pytest.raises(Exception):\n proj = self.ws.new_project('foo')\n assert proj.name in self.ws.keys()\n self.ws.clear()\n\n def test_assign_project(self):\n proj = self.ws.new_project()\n with pytest.raises(Exception):\n self.ws[proj.name] = proj\n old_name = proj.name\n new_name = self.ws._gen_name()\n self.ws[new_name] = proj\n assert proj.name == new_name\n assert proj.name in self.ws.keys()\n assert old_name not in self.ws.keys()\n self.ws.clear()\n\n def test_str(self):\n proj = self.ws.new_project()\n op.network.Cubic(shape=[3, 3, 3], project=proj)\n s = self.ws.__str__().split('\\n')\n assert 'OpenPNM Version' in s[1]\n self.ws.clear()\n\n def test_save_and_load_project(self):\n proj = self.ws.new_project('test_proj')\n net = op.network.Cubic(shape=[3, 3, 3], project=proj)\n op.phases.Air(network=net)\n self.ws.save_project(proj)\n assert proj.name in self.ws.keys()\n self.ws.close_project(proj)\n assert 'test_proj' not in self.ws.keys()\n assert proj.workspace == {}\n self.ws.load_project(filename='test_proj.pnm')\n assert 'test_proj' in self.ws.keys()\n self.ws.clear()\n os.remove('test_proj.pnm')\n\n def test_save_and_load_project_from_pickled_list(self):\n proj = self.ws.new_project()\n pn = op.network.Cubic(shape=[3, 3, 3], project=proj)\n air = op.phases.Air(network=pn)\n pickle.dump([pn, air], open('test.pnm', 'wb'))\n self.ws.clear()\n self.ws.load_project('test.pnm')\n self.ws.clear()\n os.remove('test.pnm')\n\n def test_save_and_load_project_from_pickled_object(self):\n a = sp.ones((10, ))\n pickle.dump(a, open('single_object.pnm', 'wb'))\n self.ws.clear()\n with pytest.raises(Exception):\n self.ws.load_project('single_object.pnm')\n b = {'test': a}\n pickle.dump(b, open('single_object.pnm', 'wb'))\n self.ws.clear()\n with pytest.raises(Exception):\n self.ws.load_project('single_object.pnm')\n os.remove('single_object.pnm')\n\n def test_load_project_with_name_conflict(self):\n self.ws.clear()\n proj = self.ws.new_project(name='test')\n pn = op.network.Cubic(shape=[3, 3, 3], project=proj)\n op.phases.Air(network=pn)\n self.ws.save_project(proj, filename='test.pnm')\n self.ws.load_project('test.pnm')\n assert set(self.ws.keys()) == set(['test', 'sim_01'])\n os.remove('test.pnm')\n\n def test_save_and_load_workspace(self):\n self.ws.clear()\n proj1 = self.ws.new_project('test_proj_1')\n proj2 = self.ws.new_project('test_proj_2')\n op.network.Cubic(shape=[3, 3, 3], project=proj1, name='net1')\n op.network.Cubic(shape=[3, 3, 3], project=proj2, name='net2')\n self.ws.save_workspace(filename='workspace_test')\n self.ws.clear()\n assert 'test_proj_1' not in self.ws.keys()\n assert 'test_proj_2' not in self.ws.keys()\n self.ws.load_workspace('workspace_test', overwrite=True)\n assert 'test_proj_1' in self.ws.keys()\n assert 'test_proj_2' in self.ws.keys()\n self.ws.clear()\n os.remove('workspace_test.pnm')\n\n\nif __name__ == '__main__':\n\n t = WorkspaceTest()\n self = 
t\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print('running test: '+item)\n t.__getattribute__(item)()\n",
"import openpnm as op\nfrom openpnm.phases import mixtures\nimport scipy as sp\nimport pytest\n\n\nclass MixtureTest:\n def setup_class(self):\n ws = op.Workspace()\n ws.clear()\n self.net = op.network.Cubic(shape=[10, 10, 10])\n self.net = op.network.Cubic(shape=[10, 10, 10])\n self.N2 = mixtures.species.gases.N2(network=self.net, name='pure_N2')\n self.O2 = mixtures.species.gases.O2(network=self.net, name='pure_O2')\n self.CO2 = mixtures.species.gases.CO2(network=self.net, name='pure_CO2')\n self.H2 = mixtures.species.gases.H2(network=self.net, name='pure_H2')\n self.air = mixtures.GenericMixture(network=self.net,\n components=[self.N2, self.O2,\n self.H2, self.CO2],\n name='air_mixture')\n\n def test_set_mole_fraction(self):\n self.air.set_mole_fraction(self.N2, 0.790)\n self.air.set_mole_fraction(self.O2, 0.209)\n self.air.set_mole_fraction(self.CO2, 0.001)\n assert sp.all(self.air['pore.mole_fraction.all'] == 1.0)\n\n def test_props(self):\n a = self.air.props(deep=False)\n b = self.air.props(deep=True)\n assert len(b) > len(a)\n\n def test_update_mole_fraction_with_molar_density(self):\n self.air.pop('pore.concentration.'+self.N2.name, None)\n self.air['pore.concentration.'+self.O2.name] = 0.5\n self.air['pore.concentration.'+self.CO2.name] = 0.0\n self.air['pore.concentration.'+self.H2.name] = 0.0\n self.air['pore.molar_density'] = 2.0\n self.air.update_mole_fractions(molar_density='pore.molar_density')\n assert sp.all(self.air['pore.mole_fraction.all'] == 1.0)\n\n def test_update_mole_fraction_with_all_concentrations(self):\n self.air['pore.concentration.'+self.O2.name] = 1.5\n self.air['pore.concentration.'+self.N2.name] = 0.5\n self.air['pore.concentration.'+self.CO2.name] = 0.0\n self.air['pore.concentration.'+self.H2.name] = 0.0\n self.air.update_mole_fractions()\n assert sp.all(self.air['pore.mole_fraction.all'] == 1.0)\n\n def test_interleave_data(self):\n r\"\"\"\n \"\"\"\n self.air['pore.concentration.'+self.O2.name] = 1.0\n self.air['pore.concentration.'+self.N2.name] = 0.0\n self.air['pore.concentration.'+self.CO2.name] = 0.0\n self.air['pore.concentration.'+self.H2.name] = 0.0\n self.air.update_mole_fractions()\n MW = self.air['pore.molecular_weight'][0]\n assert MW == 0.032\n self.air['pore.concentration.'+self.O2.name] = 1.0\n self.air['pore.concentration.'+self.N2.name] = 0.5\n self.air['pore.concentration.'+self.CO2.name] = 0.3\n self.air['pore.concentration.'+self.H2.name] = 0.2\n self.air.update_mole_fractions()\n MW = self.air['pore.molecular_weight'][0]\n assert MW == 0.0298031\n\n def test_check_health(self):\n self.air.set_mole_fraction(self.N2, 0.790)\n self.air.set_mole_fraction(self.O2, 0.209)\n self.air.set_mole_fraction(self.CO2, 0.001)\n self.air.set_mole_fraction(self.H2, 0.000)\n h = self.air.check_mixture_health()\n assert h.health is True\n self.air.set_mole_fraction(self.CO2, 0.002)\n h = self.air.check_mixture_health()\n assert h.health is False\n self.air.set_mole_fraction(self.CO2, 0.000)\n h = self.air.check_mixture_health()\n assert h.health is False\n assert len(h['mole_fraction_too_low']) == self.air.Np\n self.air.set_mole_fraction(self.CO2, 0.001)\n self.air['pore.mole_fraction.'+self.CO2.name][0] = 0.0\n h = self.air.check_mixture_health()\n assert h.health is False\n assert len(h['mole_fraction_too_low']) == 1\n\n def test_getitem(self):\n d = self.air['pore.mole_fraction']\n set_a = set(['pore.mole_fraction.pure_N2',\n 'pore.mole_fraction.pure_O2',\n 'pore.mole_fraction.pure_H2',\n 'pore.mole_fraction.pure_CO2',\n 
'pore.mole_fraction.all'])\n assert set_a.difference(set(d.keys())) == set()\n\n\nif __name__ == '__main__':\n\n t = MixtureTest()\n self = t\n t.setup_class()\n for item in t.__dir__():\n if item.startswith('test'):\n print('running test: '+item)\n t.__getattribute__(item)()\n"
] | [
[
"scipy.any",
"scipy.random.seed",
"scipy.allclose",
"scipy.around"
],
[
"scipy.vstack",
"scipy.sum",
"numpy.unique",
"numpy.arange",
"scipy.where",
"numpy.cumsum",
"scipy.argsort",
"numpy.all",
"numpy.max",
"numpy.ones",
"numpy.any",
"scipy.arange",
"scipy.unique",
"numpy.zeros"
],
[
"scipy.ones"
],
[
"scipy.all"
]
] |
naver/cog | [
"5b34ca90757116b9cfae11d8838927ba73e1ede8",
"5b34ca90757116b9cfae11d8838927ba73e1ede8"
] | [
"logreg.py",
"model_loaders_deit.py"
] | [
"# ImageNet-CoG Benchmark\n# Copyright 2021-present NAVER Corp.\n# 3-Clause BSD License\n\nimport argparse\nimport copy\nimport logging\nimport math\nimport os\nimport shutil\nimport time\n\nimport optuna\nimport torch as th\n\nimport feature_ops\nimport metrics\nimport utils\nfrom iterators import TorchIterator\nfrom meters import AverageMeter, ProgressMeter\n\nlogger = logging.getLogger()\n\n\nclass LogReg:\n \"\"\"\n Logistic regression classifier with mini-batch SGD.\n \"\"\"\n\n def __init__(self, args, cfg):\n self.args = args\n self.cfg = cfg\n\n # load the training set features\n trainset = feature_ops.load_feature_set(\n args.train_features_path, \"train\", cfg.CLF.NORM_FTS\n )\n\n if args.val:\n # randomly split the training set into train + val\n logger.info(\"Splitting the training set into train and val\")\n trainset, testset = feature_ops.split_trainset(trainset, cfg.CLF.VAL_PERC)\n else:\n # load the test set\n testset = feature_ops.load_feature_set(args.test_features_path, \"test\", cfg.CLF.NORM_FTS)\n\n if cfg.CLF.N_SHOT > 0:\n logger.info(\n \"Simulating few-shot learning setting, {} images per class.\".format(\n cfg.CLF.N_SHOT\n )\n )\n trainset = feature_ops.make_fewshot_dataset(trainset, cfg.CLF.N_SHOT)\n\n self.trainset = trainset\n self.testset = testset\n self.trainset.print_info()\n self.testset.print_info()\n\n # determine number of cases\n if len(list(self.trainset.y.shape)) == 1:\n classes = th.unique(self.trainset.y)\n assert th.all(classes == th.unique(self.testset.y))\n args.n_classes = classes.size(0)\n\n # move all features to the device\n if args.device == \"cuda\":\n feature_ops.move_data_to_cuda([self.trainset, self.testset])\n\n def __call__(self, trial=None):\n \"\"\"\n The function called by Optuna.\n \"\"\"\n # empty the cache allocated in the previous call\n th.cuda.empty_cache()\n\n args = copy.deepcopy(self.args)\n cfg = self.cfg\n\n x_train = self.trainset.x\n y_train = self.trainset.y\n x_test = self.testset.x\n y_test = self.testset.y\n\n # create training and test set iterators\n train_iter = TorchIterator((x_train, y_train), cfg.CLF.BATCH_SIZE, shuffle=True)\n test_iter = TorchIterator((x_test, y_test), cfg.CLF.BATCH_SIZE, shuffle=False)\n\n # define logistic classifier\n model = th.nn.Linear(x_train.size(1), args.n_classes).to(args.device)\n crit = th.nn.CrossEntropyLoss().to(args.device)\n\n # sample a learning rate and weight decay\n if trial is not None:\n lr_intv = cfg.CLF.LR_INTV\n wd_intv = cfg.CLF.WD_INTV\n args.lr = trial.suggest_loguniform(\"lr\", lr_intv[0], lr_intv[1])\n args.wd = trial.suggest_loguniform(\"wd\", wd_intv[0], wd_intv[1])\n optim = th.optim.SGD(\n model.parameters(), lr=args.lr, momentum=args.mom, weight_decay=args.wd\n )\n\n args.exp_dir = os.path.join(\n args.output_dir,\n \"{}-lr-{}_wd-{}\".format(\"val\" if args.val else \"final\", args.lr, args.wd),\n )\n os.makedirs(args.exp_dir, exist_ok=True)\n\n # write the model definition into exp_dir\n utils.write_to_file(str(model), os.path.join(args.exp_dir, \"model.txt\"))\n\n # logs computed during training / evaluation\n args.logs = {\n \"train/loss\": [],\n \"train/top1\": [],\n \"train/top5\": [],\n \"test/loss\": [],\n \"test/top1\": [],\n \"test/top5\": [],\n \"lr\": [],\n }\n\n # predictions over the evaluation sets\n args.preds = []\n\n for epoch in range(cfg.CLF.N_EPOCHS):\n if not args.val:\n logger.info(f\"**Epoch:{epoch}**\")\n args.epoch = epoch\n train_stat = train(train_iter, model, crit, optim, epoch, args)\n validate(test_iter, model, crit, args)\n 
adjust_learning_rate(optim, args, cfg)\n\n # if something went wrong during training\n # e.g. SGD diverged\n if train_stat == -1:\n break\n\n # save the logs\n utils.save_pickle(args.logs, f\"{args.exp_dir}/logs.pkl\")\n\n # save the predictions\n utils.save_pickle(args.preds, f\"{args.exp_dir}/preds.pkl\")\n\n # save the whole args, for ease of access\n utils.save_pickle(vars(args), f\"{args.exp_dir}/args.pkl\")\n\n # save also the final model\n th.save(\n {\n \"model\": model.state_dict(),\n },\n f\"{args.exp_dir}/model.pth\",\n )\n\n # return the last test accuracy\n return args.logs[\"test/top1\"][-1]\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n \"\"\"\n Train the classifier for one epoch.\n \"\"\"\n batch_time = AverageMeter(\"Time\", \":6.3f\")\n losses = AverageMeter(\"Loss\", \":.4e\")\n top1 = AverageMeter(\"Acc@1\", \":6.2f\")\n top5 = AverageMeter(\"Acc@5\", \":6.2f\")\n progress = ProgressMeter(\n len(train_loader),\n [batch_time, losses, top1, top5],\n prefix=\"Epoch: [{}]\".format(epoch),\n )\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (fts, lbls) in enumerate(train_loader):\n fts = fts.to(args.device)\n lbls = lbls.to(args.device)\n\n # compute output\n output = model(fts)\n loss = criterion(output, lbls)\n\n if not th.isfinite(loss):\n logger.info(\"Loss ({}) is not finite, terminating\".format(loss.item()))\n optimizer.zero_grad()\n return -1\n\n # measure accuracy and record loss\n acc1, acc5 = metrics.accuracy(output, lbls, topk=(1, 5))\n losses.update(loss.item(), fts.size(0))\n top1.update(acc1.item(), fts.size(0))\n top5.update(acc5.item(), fts.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if (not args.val) and (i % args.print_freq == 0):\n progress.display(i)\n\n args.logs[\"train/loss\"].append(losses.avg)\n args.logs[\"train/top1\"].append(top1.avg)\n args.logs[\"train/top5\"].append(top5.avg)\n return 0\n\n\ndef validate(val_loader, model, criterion, args):\n losses = AverageMeter(\"Loss\", \":.4e\")\n top1 = AverageMeter(\"Acc@1\", \":6.2f\")\n top5 = AverageMeter(\"Acc@5\", \":6.2f\")\n\n # switch to evaluate mode\n model.eval()\n\n # keep the predicted class for each evaluation sample\n preds = th.ones(len(val_loader.tensors[0]), dtype=th.int32, device=args.device) * -1.\n six = 0\n\n with th.no_grad():\n for i, (fts, lbls) in enumerate(val_loader):\n fts = fts.to(args.device)\n lbls = lbls.to(args.device)\n bs = fts.size(0)\n\n # compute output\n output = model(fts)\n loss = criterion(output, lbls)\n\n # store the predicted classes\n preds[six:six + bs] = th.argmax(output, dim=1)\n six += bs\n\n # measure accuracy and record loss\n acc1, acc5 = metrics.accuracy(output, lbls, topk=(1, 5))\n losses.update(loss.item(), bs)\n top1.update(acc1[0].item(), bs)\n top5.update(acc5[0].item(), bs)\n\n # make sure that there is no invalid prediction\n assert th.all(preds >= 0).item()\n args.preds.append(preds.detach().cpu())\n\n args.logs[\"test/loss\"].append(losses.avg)\n args.logs[\"test/top1\"].append(top1.avg)\n args.logs[\"test/top5\"].append(top5.avg)\n\n if not args.val:\n logger.info(\n \" * Acc@1:{top1.avg:.3f} - Acc@5:{top5.avg:.3f}\".format(\n top1=top1, top5=top5\n )\n )\n\n\ndef adjust_learning_rate(optimizer, args, cfg):\n \"\"\"Decay the learning rate based on a cosine schedule\"\"\"\n lr = args.lr\n lr *= 0.5 * (1.0 + math.cos(math.pi * args.epoch / cfg.CLF.N_EPOCHS))\n for 
param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n args.logs[\"lr\"].append(lr)\n\n\ndef save_checkpoint(state, is_best, filename=\"checkpoint.pth.tar\"):\n th.save(state, filename)\n if is_best:\n shutil.copyfile(filename, \"model_best.pth.tar\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', type=utils.none_or_string_flag,\n help='Name of the model in the <model_title>_<architecture_name> form.'\n 'See the table of models in ./prepare_models/README.md for all the model names we support.'\n 'This is an optional argument that needs to be set along with --models_root_dir and --dataset.'\n 'When these three arguments are set, the script will load features from:'\n '<models_root_dir>/<model_title>/<architecture_name>/<dataset>/features_*/X_Y.pth.'\n 'If you would like to load pre-extracted features from somewhere else'\n 'then ignore this argument and provide the --train_features_dir and --test_features_dir arguments accordingly')\n parser.add_argument('--models_root_dir', type=utils.none_or_string_flag,\n help='Root directory for all models, see prepare_models/README.md for a detailed explanation.'\n 'This is an optional argument that needs to be set along with --model and --dataset.'\n 'Please see the help message for the --model argument as well.')\n parser.add_argument(\"--dataset\", type=utils.none_or_string_flag,\n help=\"On which dataset to learn classifiers\"\n 'Possible values are (\"in1k\", \"cog_l1\", \"cog_l2\", \"cog_l3\", \"cog_l4\", \"cog_l5\")'\n 'This is an optional argument that needs to be set along with --models_root_dir and --model.'\n 'Please see the help message for the --model argument as well.')\n parser.add_argument('--train_features_dir', type=utils.none_or_string_flag,\n help='Path to the directory containing pre-extracted training set features.'\n 'We expect a features file \"X_Y.pth\" under <train_features_dir>.'\n 'This is an optional argument that needs to be set if --models_root_dir, --model and --dataset are not set.')\n parser.add_argument('--test_features_dir', type=utils.none_or_string_flag,\n help='Path to the directory containing pre-extracted test set features.'\n 'We expect a features file \"X_Y.pth\" under <test_features_dir>.'\n 'This is an optional argument that needs to be set if --models_root_dir, --model and --dataset are not set.')\n parser.add_argument('--output_dir', type=utils.none_or_string_flag,\n help='Where to log program logs.'\n 'This is an optional argument that needs to be set if --models_root_dir is not set.'\n 'If not provided, we try to save the logs under'\n '<models_root_dir>/<model_title>/<architecture_name>/<dataset>/eval_logreg/seed*')\n # learning rate and momentum are tuned in this program, do not manually set.\n parser.add_argument(\"--lr\", type=float, default=0.0, help=\"initial learning rate\")\n parser.add_argument(\"--wd\", type=float, default=0.0, help=\"weight decay\")\n parser.add_argument(\"--mom\", type=float, default=0.9, help=\"momentum\")\n # program-related options\n parser.add_argument(\"--print_freq\", default=100, type=int, help=\"print frequency (default: 10)\")\n parser.add_argument(\"--device\", type=str, default=\"cuda\")\n # optionally to overwrite the default config\n parser.add_argument(\"opts\", default=None,\n help=\"see configs/default.py for all options\",\n nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n if args.device == \"cuda\" and not th.cuda.is_available():\n print(\"CUDA is not available, I will run on 
CPU.\")\n args.device = \"cpu\"\n\n # load the config file\n # create output directory,\n # locate pre-extracted features,\n # initialize program logger,\n # save args and cfg\n # this function sets the following arg variables:\n # - train_features_path, type=str\n # - test_features_path, type=str\n # - output_dir, type=str\n args, cfg = utils.init_program(args, _for=\"logreg\")\n\n # tune hyper-parameters with optuna\n logger.info(\"Running Optuna...\")\n hps_sampler = optuna.samplers.TPESampler(multivariate=True, seed=cfg.EVAL.SEED)\n study = optuna.create_study(sampler=hps_sampler, direction=\"maximize\")\n\n args.val = True\n logreg = LogReg(args, cfg)\n study.optimize(logreg, n_trials=cfg.CLF.N_TRIALS, n_jobs=1, show_progress_bar=False)\n utils.save_pickle(study, os.path.join(args.output_dir, \"study.pkl\"))\n\n logger.info(\"\")\n logger.info(\"*\" * 50)\n logger.info(\"Hyper-parameter search ended\")\n logger.info(\"best_trial:\")\n logger.info(str(study.best_trial))\n logger.info(\"best_params:\")\n logger.info(str(study.best_params))\n logger.info(\"*\" * 50)\n logger.info(\"\")\n\n # train the final classifier with the tuned hyper-parameters\n del logreg\n th.cuda.empty_cache()\n args.lr = study.best_params[\"lr\"]\n args.wd = study.best_params[\"wd\"]\n args.val = False\n logreg = LogReg(args, cfg)\n logreg()\n",
"# ImageNet-CoG Benchmark\n# Copyright 2021-present NAVER Corp.\n# 3-Clause BSD License\n\nimport sys\nimport torch\nfrom pathlib import Path\n\n\ndef load_deit(init_args):\n \"\"\"\n Load pretrained Deit models.\n \"\"\"\n model_name = init_args[\"model_name\"]\n model_dir = init_args[\"model_dir\"]\n ckpt_file = init_args[\"ckpt_file\"]\n\n # the config files for the deit models are not defined in timm\n # therefore we also need models.py file provided in the deit repository\n # we must have downloaded it under the model directory\n sys.path.insert(\n 0,\n str(Path(model_dir).parent),\n )\n from src import models as deit_models\n\n from timm.models import create_model\n backbone = create_model(\n {\n \"deit_small\": \"deit_small_patch16_224\",\n \"deit_small_distilled\": \"deit_small_distilled_patch16_224\",\n \"deit_base_distilled_384\": \"deit_base_distilled_patch16_384\",\n }[model_name],\n pretrained=False,\n num_classes=1000,\n drop_rate=0.0,\n drop_path_rate=0.0,\n drop_block_rate=None,\n )\n state_dict = backbone.state_dict()\n\n # load model state dict\n ckpt = torch.load(ckpt_file, \"cpu\")\n checkpoint_model = ckpt['model']\n\n # remove from the checkpoint\n # the classification layer\n for key_to_remove in ['head.weight', 'head.bias', 'head_dist.weight', 'head_dist.bias']:\n if (key_to_remove in checkpoint_model) and \\\n (checkpoint_model[key_to_remove].shape != state_dict[key_to_remove].shape):\n del checkpoint_model[key_to_remove]\n\n ##################################################\n # To properly setup deit models, we use code from the official deit repo https://github.com/facebookresearch/deit.\n # Lines between 59 and 77 in this file are from https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/main.py\n # Copyright (c) 2015-present, Facebook, Inc.\n # All rights reserved.\n\n # interpolate position embedding\n pos_embed_checkpoint = checkpoint_model['pos_embed']\n embedding_size = pos_embed_checkpoint.shape[-1]\n num_patches = backbone.patch_embed.num_patches\n num_extra_tokens = backbone.pos_embed.shape[-2] - num_patches\n # height (== width) for the checkpoint position embedding\n orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n # height (== width) for the new position embedding\n new_size = int(num_patches ** 0.5)\n # class_token and dist_token are kept unchanged\n extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n # only the position tokens are interpolated\n pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n pos_tokens = torch.nn.functional.interpolate(\n pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n checkpoint_model['pos_embed'] = new_pos_embed\n ##################################################\n\n backbone.load_state_dict(checkpoint_model, strict=False)\n\n # note that the backbone is a VisionTransformer\n # defined in timm.models\n # this backbone provides a function to extract features\n # from the 0-th token, which corresponds to the class token\n forward = backbone.forward_features\n # but the distilled version actually outputs two tensors (including distillation token)\n if \"distilled\" in model_name:\n def forward(x):\n return backbone.forward_features(x)[0]\n\n return backbone, forward\n"
] | [
[
"torch.all",
"torch.nn.CrossEntropyLoss",
"torch.argmax",
"torch.cuda.empty_cache",
"torch.isfinite",
"torch.unique",
"torch.no_grad",
"torch.cuda.is_available",
"torch.save"
],
[
"torch.cat",
"torch.nn.functional.interpolate",
"torch.load"
]
] |
RomainClaret/msc.ml.labs | [
"4e6b8e1c1ab841ab8ebbaee13f6ae43e9a1c44a5"
] | [
"lab4/predict_income_romain_claret_and_sylvain_robert-nicoud_lab4.py"
] | [
"#!/usr/bin/env python3\n# 12.04.21\n# Assignment lab 04\n\n# Master Class: Machine Learning (5MI2018)\n# Faculty of Economic Science\n# University of Neuchatel (Switzerland)\n# Lab 4, see ML21_Exercise_4.pdf for more information\n\n# https://github.com/RomainClaret/msc.ml.labs\n\n# Authors: \n# - Romain Claret @RomainClaret\n# - Sylvain Robert-Nicoud @Nic0uds\n\nimport warnings\nimport pickle\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score\n\nwarnings.filterwarnings(\"ignore\")\n\n\n# SPLITING ADULT.TEST FILE IN SUBFILES\n#spliting the adult.test file into several files to simulate weeks\n\nfilename = 'adult.test'\nfile_handler = open(filename, 'r').readlines()[1:]\nprefix_file = \"adult_2021_cw_\"\nweek_number = 1\nsplit_into = 10\nline_count = 0\nfile_length = len(file_handler)\n\nfor i in range(0,file_length):\n if i % ((file_length)//split_into) == 0 and i+((file_length//split_into)//2) < file_length:\n open(str(prefix_file)+str(week_number) + \".csv\", \"w+\").writelines(file_handler[i:i+(file_length//split_into)])\n week_number += 1\n\n\n# RUN PIPELINE MODEL FROM OTHER FILE\n#input file, and save the predictions into a different file.\n#Example:\n#Let's say you have the input data weekly in the file adult_2021_cw_12.csv.\n#This second script should read the input from this file and use the classifier to make predictions and write those predictions in the file adult_2021_cw_12_pred.csv .\n\n# load pipeline model\npipeline_model = pickle.load( open(\"grid_search_model.pickle\", \"rb\" ))\n\nweeks_count = 10\nfilename = 'adult.test'\nprefix_file = \"adult_2021_cw_\"\n\n# get the features names and the values of the categories from adult.names (build a dictionary)\ndata_dict = {}\nwith open('adult.names') as f:\n for l in f:\n if l[0] == '|' or ':' not in l: continue\n c = l.split(':')\n if c[1].startswith(' continuous'): data_dict[c[0]] = \"\"\n else: data_dict[c[0]] = c[1].replace(\"\\n\",\"\").replace(\".\",\"\").replace(\" \",\"\").split(\",\")\n \nheader = list(data_dict.keys())+['income']\n\n# for each week based on a count and a naming convention\nfor i in range (weeks_count):\n filename = str(prefix_file)+str(i+1)+\".csv\"\n df_weekly = pd.read_table(filename, sep=r',\\s', na_values='?', skiprows=[0], header=None, names=header).dropna()\n \n drop_list = [\"education\", \"occupation\", \"relationship\"]\n df_weekly = df_weekly.drop(columns=drop_list)\n \n dict_replace = {\n 'marital-status' : {\n 'Never-married': 'Not-Married',\n 'Married-civ-spouse': 'Married',\n 'Divorced': 'Not-Married',\n 'Married-spouse-absent': 'Married',\n 'Separated': 'Married',\n 'Married-AF-spouse': 'Married',\n 'Widowed': 'Not-Married'\n },\n 'workclass': {\n 'State-gov': 'Government',\n 'Self-emp-not-inc': 'Self-Employment',\n 'Federal-gov': 'Government',\n 'Local-gov': 'Government',\n 'Self-emp-inc': 'Self-Employment'\n }\n }\n\n df_weekly.replace(dict_replace, inplace=True)\n \n df_weekly[\"income\"].replace({\"<=50K.\": \"<=50K\", \">50K.\": \">50K\"}, inplace=True)\n \n for l in [\"marital-status\", \"sex\", \"income\"]:\n l_enc = LabelEncoder()\n encoder_weekly = l_enc.fit(df_weekly[l])\n df_weekly[\"encoded_\"+l] = encoder_weekly.transform(df_weekly[l])\n \n y_hat_dtree_weekly = pipeline_model.predict(df_weekly)\n \n pref_filename = str(prefix_file)+str(i+1)+\"_pred.csv\"\n print(pref_filename, \"accuracy_score:\",accuracy_score(df_weekly[\"encoded_income\"],y_hat_dtree_weekly),\"\\n\")\n \n # save the prediction into file\n 
pd.DataFrame(y_hat_dtree_weekly).to_csv(str(pref_filename),header=[\"pred_income\"], index=None)\n \n # lab 03 results:\n # adult_2021_cw_1.csv accuracy_score: 0.8293736501079914 \n # adult_2021_cw_2.csv accuracy_score: 0.8503253796095445 \n # adult_2021_cw_3.csv accuracy_score: 0.8427807486631016 \n # adult_2021_cw_4.csv accuracy_score: 0.8307860262008734 \n # adult_2021_cw_5.csv accuracy_score: 0.8507462686567164 \n # adult_2021_cw_6.csv accuracy_score: 0.854978354978355 \n # adult_2021_cw_7.csv accuracy_score: 0.8545454545454545 \n # adult_2021_cw_8.csv accuracy_score: 0.8514531754574811 \n # adult_2021_cw_9.csv accuracy_score: 0.8296943231441049 \n # adult_2021_cw_10.csv accuracy_score: 0.8574537540805223 "
] | [
[
"pandas.read_table",
"sklearn.preprocessing.LabelEncoder",
"pandas.DataFrame",
"sklearn.metrics.accuracy_score"
]
] |
YunYang1994/CodeFun | [
"36fcdbfb4ed55fbb8f8dbc6f900842cc7bb9f068"
] | [
"detect_image.py"
] | [
"#! /usr/bin/env python\n# coding=utf-8\n#================================================================\n# Copyright (C) 2020 * Ltd. All rights reserved.\n#\n# Editor : VIM\n# File name : detect_image.py\n# Author : YunYang1994\n# Created date: 2020-03-19 14:05:53\n# Description :\n#\n#================================================================\n\n\nimport os\nimport cv2\nimport time\nimport numpy as np\nimport tensorflow as tf\n\nfrom PIL import Image, ImageFont, ImageDraw\nfrom mtcnn import pnet, rnet, onet\nfrom models import IResnet\nfrom utils import detect_face, align_face, recognize_face\n\nmodel = IResnet(tflite_model=\"IResnet.tflite\")\nfont = ImageFont.truetype('weghts/HuaWenXinWei-1.ttf', 30)\nimage = cv2.imread(\"/Users/yangyun/多人照片/5.jpg\")\n\nimage_h, image_w, _ = image.shape\n\norg_image = image.copy()\nimage = cv2.cvtColor(image ,cv2.COLOR_BGR2RGB)\ntotal_boxes, points = detect_face(image, 20, pnet, rnet, onet, [0.6, 0.7, 0.9], 0.709)\n\nfor idx, (bounding_box, keypoints) in enumerate(zip(total_boxes, points.T)):\n bounding_boxes = {\n 'box': [int(bounding_box[0]), int(bounding_box[1]),\n int(bounding_box[2]-bounding_box[0]), int(bounding_box[3]-bounding_box[1])],\n 'confidence': bounding_box[-1],\n 'keypoints': {\n 'left_eye': (int(keypoints[0]), int(keypoints[5])),\n 'right_eye': (int(keypoints[1]), int(keypoints[6])),\n 'nose': (int(keypoints[2]), int(keypoints[7])),\n 'mouth_left': (int(keypoints[3]), int(keypoints[8])),\n 'mouth_right': (int(keypoints[4]), int(keypoints[9])),\n }\n }\n\n bounding_box = bounding_boxes['box']\n keypoints = bounding_boxes['keypoints']\n\n cv2.circle(org_image,(keypoints['left_eye']), 2, (255,0,0), 3)\n cv2.circle(org_image,(keypoints['right_eye']), 2, (255,0,0), 3)\n cv2.circle(org_image,(keypoints['nose']), 2, (255,0,0), 3)\n cv2.circle(org_image,(keypoints['mouth_left']), 2, (255,0,0), 3)\n cv2.circle(org_image,(keypoints['mouth_right']),2, (255,0,0), 3)\n cv2.rectangle(org_image,\n (bounding_box[0], bounding_box[1]),\n (bounding_box[0]+bounding_box[2], bounding_box[1] + bounding_box[3]),\n (0,255,0), 2)\n # align face and extract it out\n align_image = align_face(image, keypoints)\n\n marigin = 16\n xmin = max(bounding_box[0] - marigin, 0)\n ymin = max(bounding_box[1] - marigin, 0)\n xmax = min(bounding_box[0] + bounding_box[2] + marigin, image_w)\n ymax = min(bounding_box[1] + bounding_box[3] + marigin, image_h)\n\n crop_image = align_image[ymin:ymax, xmin:xmax, :]\n if crop_image is not None:\n t1 = time.time()\n embedding = model(crop_image)\n person = recognize_face(embedding)\n\n org_image_pil = Image.fromarray(org_image)\n draw = ImageDraw.Draw(org_image_pil)\n text_size = draw.textsize(person, font)\n draw.text((bounding_box[0], bounding_box[1]-16), person, fill=(0, 0, 255), font=font)\n org_image = np.array(org_image_pil)\n\n t2 = time.time()\n print(\"time: %.2fms\" %((t2-t1)*1000))\n\norg_image = cv2.cvtColor(org_image, cv2.COLOR_BGR2RGB)\nimage = Image.fromarray(org_image)\nimage.show()\n# image.save(\"test.png\")\n"
] | [
[
"numpy.array"
]
] |
lRomul/argus-bengali-ai | [
"e64374230f5390a17305769126ff4bfc9a2a8644"
] | [
"src/draw.py"
] | [
"import time\nimport random\nimport numpy as np\nfrom pathlib import Path\nfrom PIL import Image, ImageDraw, ImageFont, ImageFilter\n\nimport torch\nfrom torch.utils.data import Dataset\n\nfrom src import config\n\n\ndef draw_grapheme(grapheme, font_path, size=(137, 236)):\n height, width = size\n image = Image.new('RGB', (width, height))\n draw = ImageDraw.Draw(image)\n font_size = np.random.randint(70, 110)\n font = ImageFont.truetype(str(font_path), font_size)\n w, h = draw.textsize(grapheme, font=font)\n width_ratio = np.random.uniform(1.5, 2.5)\n height_ratio = np.random.uniform(2.5, 3.5)\n fill = np.random.randint(200, 255)\n draw.text(((width - w) / width_ratio, (height - h) / height_ratio),\n grapheme, font=font, fill=fill)\n image = image.filter(ImageFilter.BLUR)\n return np.array(image)[:, :, 0]\n\n\ndef get_draw_data():\n graphemes = []\n for grapheme_root_idx, grapheme_root in config.class_map['grapheme_root'].items():\n for vowel_diacritic_idx, vowel_diacritic in config.class_map['vowel_diacritic'].items():\n for consonant_diacritic_idx, consonant_diacritic in config.class_map['consonant_diacritic'].items():\n consonant_diacritic, grapheme_root, vowel_diacritic = [c if c != '0' else '' for c in\n [consonant_diacritic, grapheme_root,\n vowel_diacritic]]\n\n grapheme = consonant_diacritic + grapheme_root + vowel_diacritic\n graphemes.append({\n 'grapheme': grapheme,\n 'grapheme_root': grapheme_root_idx,\n 'vowel_diacritic': vowel_diacritic_idx,\n 'consonant_diacritic': consonant_diacritic_idx\n })\n return graphemes\n\n\nclass BengaliDrawDataset(Dataset):\n def __init__(self,\n fonts_dir,\n transform=None,\n mixer=None):\n self.fonts_dir = fonts_dir\n self.transform = transform\n self.mixer = mixer\n self.data = get_draw_data()\n self.font_paths = sorted(Path(fonts_dir).glob('*.ttf'))\n\n def __len__(self):\n return len(self.data)\n\n def get_sample(self, idx):\n sample = self.data[idx]\n\n font_path = np.random.choice(self.font_paths)\n image = draw_grapheme(sample['grapheme'], font_path,\n size=config.raw_image_shape)\n\n grapheme = torch.tensor(sample['grapheme_root'], dtype=torch.int64)\n vowel = torch.tensor(sample['vowel_diacritic'], dtype=torch.int64)\n consonant = torch.tensor(sample['consonant_diacritic'], dtype=torch.int64)\n target = grapheme, vowel, consonant\n\n return image, target\n\n def _set_random_seed(self, idx):\n seed = int(time.time() * 1000.0) + idx\n random.seed(seed)\n np.random.seed(seed % (2**32 - 1))\n\n @torch.no_grad()\n def __getitem__(self, idx):\n self._set_random_seed(idx)\n\n image, target = self.get_sample(idx)\n if self.mixer is not None:\n image, target = self.mixer(self, image, target)\n if self.transform is not None:\n image = self.transform(image)\n return image, target\n"
] | [
[
"numpy.random.seed",
"numpy.random.choice",
"torch.tensor",
"torch.no_grad",
"numpy.random.uniform",
"numpy.array",
"numpy.random.randint"
]
] |
dd-dos/Emotion-detection | [
"23eb94cbceb70890cf6b0f63e84d80eae7336c85"
] | [
"src/dataset_prepare.py"
] | [
"import numpy as np\nimport pandas as pd \nfrom PIL import Image\nfrom tqdm import tqdm\nimport os\n\n# convert string to integer\ndef atoi(s):\n n = 0\n for i in s:\n n = n*10 + ord(i) - ord(\"0\")\n return n\n\n# making folders\nouter_names = ['test','train']\ninner_names = ['angry', 'disgusted', 'fearful', 'happy', 'sad', 'surprised', 'neutral']\nos.makedirs('data', exist_ok=True)\nfor outer_name in outer_names:\n os.makedirs(os.path.join('data',outer_name), exist_ok=True)\n for inner_name in inner_names:\n os.makedirs(os.path.join('data',outer_name,inner_name), exist_ok=True)\n\n# to keep count of each category\nangry = 0\ndisgusted = 0\nfearful = 0\nhappy = 0\nsad = 0\nsurprised = 0\nneutral = 0\nangry_test = 0\ndisgusted_test = 0\nfearful_test = 0\nhappy_test = 0\nsad_test = 0\nsurprised_test = 0\nneutral_test = 0\n\ndf = pd.read_csv('./fer2013.csv')\nmat = np.zeros((48,48),dtype=np.uint8)\nprint(\"Saving images...\")\n\n# read the csv file line by line\nfor i in tqdm(range(len(df))):\n txt = df['pixels'][i]\n words = txt.split()\n \n # the image size is 48x48\n for j in range(2304):\n xind = j // 48\n yind = j % 48\n mat[xind][yind] = atoi(words[j])\n\n img = Image.fromarray(mat)\n # train\n if i < 28709:\n if df['emotion'][i] == 0:\n img.save('./data/train/angry/im'+str(angry)+'.png')\n angry += 1\n elif df['emotion'][i] == 1:\n img.save('./data/train/disgusted/im'+str(disgusted)+'.png')\n disgusted += 1\n elif df['emotion'][i] == 2:\n img.save('./data/train/fearful/im'+str(fearful)+'.png')\n fearful += 1\n elif df['emotion'][i] == 3:\n img.save('./data/train/happy/im'+str(happy)+'.png')\n happy += 1\n elif df['emotion'][i] == 4:\n img.save('./data/train/sad/im'+str(sad)+'.png')\n sad += 1\n elif df['emotion'][i] == 5:\n img.save('./data/train/surprised/im'+str(surprised)+'.png')\n surprised += 1\n elif df['emotion'][i] == 6:\n img.save('./data/train/neutral/im'+str(neutral)+'.png')\n neutral += 1\n\n # test\n else:\n if df['emotion'][i] == 0:\n img.save('./data/test/angry/im'+str(angry_test)+'.png')\n angry_test += 1\n elif df['emotion'][i] == 1:\n img.save('./data/test/disgusted/im'+str(disgusted_test)+'.png')\n disgusted_test += 1\n elif df['emotion'][i] == 2:\n img.save('./data/test/fearful/im'+str(fearful_test)+'.png')\n fearful_test += 1\n elif df['emotion'][i] == 3:\n img.save('./data/test/happy/im'+str(happy_test)+'.png')\n happy_test += 1\n elif df['emotion'][i] == 4:\n img.save('./data/test/sad/im'+str(sad_test)+'.png')\n sad_test += 1\n elif df['emotion'][i] == 5:\n img.save('./data/test/surprised/im'+str(surprised_test)+'.png')\n surprised_test += 1\n elif df['emotion'][i] == 6:\n img.save('./data/test/neutral/im'+str(neutral_test)+'.png')\n neutral_test += 1\n\nprint(\"Done!\")"
] | [
[
"pandas.read_csv",
"numpy.zeros"
]
] |
tzachar/addons | [
"e352207da32e4670a36a295ea477c476118cb0d9"
] | [
"tensorflow_addons/layers/normalizations.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Orginal implementation from keras_contrib/layer/normalization\n# =============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='Addons')\nclass GroupNormalization(tf.keras.layers.Layer):\n \"\"\"Group normalization layer.\n\n Group Normalization divides the channels into groups and computes\n within each group the mean and variance for normalization.\n Empirically, its accuracy is more stable than batch norm in a wide\n range of small batch sizes, if learning rate is adjusted linearly\n with batch sizes.\n\n Relation to Layer Normalization:\n If the number of groups is set to 1, then this operation becomes identical\n to Layer Normalization.\n\n Relation to Instance Normalization:\n If the number of groups is set to the\n input dimension (number of groups is equal\n to number of channels), then this operation becomes\n identical to Instance Normalization.\n\n Arguments\n groups: Integer, the number of groups for Group Normalization.\n Can be in the range [1, N] where N is the input dimension.\n The input dimension must be divisible by the number of groups.\n axis: Integer, the axis that should be normalized.\n epsilon: Small float added to variance to avoid dividing by zero.\n center: If True, add offset of `beta` to normalized tensor.\n If False, `beta` is ignored.\n scale: If True, multiply by `gamma`.\n If False, `gamma` is not used.\n beta_initializer: Initializer for the beta weight.\n gamma_initializer: Initializer for the gamma weight.\n beta_regularizer: Optional regularizer for the beta weight.\n gamma_regularizer: Optional regularizer for the gamma weight.\n beta_constraint: Optional constraint for the beta weight.\n gamma_constraint: Optional constraint for the gamma weight.\n\n Input shape\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape\n Same shape as input.\n References\n - [Group Normalization](https://arxiv.org/abs/1803.08494)\n \"\"\"\n\n def __init__(self,\n groups=2,\n axis=-1,\n epsilon=1e-3,\n center=True,\n scale=True,\n beta_initializer='zeros',\n gamma_initializer='ones',\n beta_regularizer=None,\n gamma_regularizer=None,\n beta_constraint=None,\n gamma_constraint=None,\n **kwargs):\n super(GroupNormalization, self).__init__(**kwargs)\n self.supports_masking = True\n self.groups = groups\n self.axis = axis\n self.epsilon = epsilon\n self.center = center\n self.scale = scale\n self.beta_initializer = tf.keras.initializers.get(beta_initializer)\n self.gamma_initializer = tf.keras.initializers.get(gamma_initializer)\n self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer)\n self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer)\n self.beta_constraint = tf.keras.constraints.get(beta_constraint)\n self.gamma_constraint = tf.keras.constraints.get(gamma_constraint)\n self._check_axis()\n\n def build(self, input_shape):\n\n self._check_if_input_shape_is_none(input_shape)\n self._set_number_of_groups_for_instance_norm(input_shape)\n self._check_size_of_dimensions(input_shape)\n self._create_input_spec(input_shape)\n\n self._add_gamma_weight(input_shape)\n self._add_beta_weight(input_shape)\n self.built = True\n super(GroupNormalization, self).build(input_shape)\n\n def call(self, inputs):\n\n input_shape = tf.keras.backend.int_shape(inputs)\n tensor_input_shape = tf.shape(inputs)\n\n reshaped_inputs, group_shape = self._reshape_into_groups(\n inputs, input_shape, tensor_input_shape)\n\n normalized_inputs = self._apply_normalization(reshaped_inputs,\n input_shape)\n\n outputs = tf.reshape(normalized_inputs, tensor_input_shape)\n\n return outputs\n\n def get_config(self):\n config = {\n 'groups':\n self.groups,\n 'axis':\n self.axis,\n 'epsilon':\n self.epsilon,\n 'center':\n self.center,\n 'scale':\n self.scale,\n 'beta_initializer':\n tf.keras.initializers.serialize(self.beta_initializer),\n 'gamma_initializer':\n tf.keras.initializers.serialize(self.gamma_initializer),\n 'beta_regularizer':\n tf.keras.regularizers.serialize(self.beta_regularizer),\n 'gamma_regularizer':\n tf.keras.regularizers.serialize(self.gamma_regularizer),\n 'beta_constraint':\n tf.keras.constraints.serialize(self.beta_constraint),\n 'gamma_constraint':\n tf.keras.constraints.serialize(self.gamma_constraint)\n }\n base_config = super(GroupNormalization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape):\n\n group_shape = [tensor_input_shape[i] for i in range(len(input_shape))]\n group_shape[self.axis] = input_shape[self.axis] // self.groups\n group_shape.insert(1, self.groups)\n group_shape = tf.stack(group_shape)\n reshaped_inputs = tf.reshape(inputs, group_shape)\n return reshaped_inputs, group_shape\n\n def _apply_normalization(self, reshaped_inputs, input_shape):\n\n group_shape = tf.keras.backend.int_shape(reshaped_inputs)\n group_reduction_axes = list(range(len(group_shape)))\n # Remember the ordering of the tensor is [batch, group , steps]. 
Jump\n # the first 2 to calculate the variance and the mean\n mean, variance = tf.nn.moments(\n reshaped_inputs, group_reduction_axes[2:], keepdims=True)\n\n gamma, beta = self._get_reshaped_weights(input_shape)\n normalized_inputs = tf.nn.batch_normalization(\n reshaped_inputs,\n mean=mean,\n variance=variance,\n scale=gamma,\n offset=beta,\n variance_epsilon=self.epsilon)\n return normalized_inputs\n\n def _get_reshaped_weights(self, input_shape):\n broadcast_shape = self._create_broadcast_shape(input_shape)\n gamma = None\n beta = None\n if self.scale:\n gamma = tf.reshape(self.gamma, broadcast_shape)\n\n if self.center:\n beta = tf.reshape(self.beta, broadcast_shape)\n return gamma, beta\n\n def _check_if_input_shape_is_none(self, input_shape):\n dim = input_shape[self.axis]\n if dim is None:\n raise ValueError('Axis ' + str(self.axis) + ' of '\n 'input tensor should have a defined dimension '\n 'but the layer received an input with shape ' +\n str(input_shape) + '.')\n\n def _set_number_of_groups_for_instance_norm(self, input_shape):\n dim = input_shape[self.axis]\n\n if self.groups == -1:\n self.groups = dim\n\n def _check_size_of_dimensions(self, input_shape):\n\n dim = input_shape[self.axis]\n if dim < self.groups:\n raise ValueError(\n 'Number of groups (' + str(self.groups) + ') cannot be '\n 'more than the number of channels (' + str(dim) + ').')\n\n if dim % self.groups != 0:\n raise ValueError(\n 'Number of groups (' + str(self.groups) + ') must be a '\n 'multiple of the number of channels (' + str(dim) + ').')\n\n def _check_axis(self):\n\n if self.axis == 0:\n raise ValueError(\n \"You are trying to normalize your batch axis. Do you want to \"\n \"use tf.layer.batch_normalization instead\")\n\n def _create_input_spec(self, input_shape):\n\n dim = input_shape[self.axis]\n self.input_spec = tf.keras.layers.InputSpec(\n ndim=len(input_shape), axes={self.axis: dim})\n\n def _add_gamma_weight(self, input_shape):\n\n dim = input_shape[self.axis]\n shape = (dim,)\n\n if self.scale:\n self.gamma = self.add_weight(\n shape=shape,\n name='gamma',\n initializer=self.gamma_initializer,\n regularizer=self.gamma_regularizer,\n constraint=self.gamma_constraint)\n else:\n self.gamma = None\n\n def _add_beta_weight(self, input_shape):\n\n dim = input_shape[self.axis]\n shape = (dim,)\n\n if self.center:\n self.beta = self.add_weight(\n shape=shape,\n name='beta',\n initializer=self.beta_initializer,\n regularizer=self.beta_regularizer,\n constraint=self.beta_constraint)\n else:\n self.beta = None\n\n def _create_broadcast_shape(self, input_shape):\n broadcast_shape = [1] * len(input_shape)\n broadcast_shape[self.axis] = input_shape[self.axis] // self.groups\n broadcast_shape.insert(1, self.groups)\n return broadcast_shape\n\n\[email protected]_keras_serializable(package='Addons')\nclass InstanceNormalization(GroupNormalization):\n \"\"\"Instance normalization layer.\n\n Instance Normalization is an specific case of ```GroupNormalization```since\n it normalizes all features of one channel. The Groupsize is equal to the\n channel size. 
Empirically, its accuracy is more stable than batch norm in a\n wide range of small batch sizes, if learning rate is adjusted linearly\n with batch sizes.\n\n Arguments\n axis: Integer, the axis that should be normalized.\n epsilon: Small float added to variance to avoid dividing by zero.\n center: If True, add offset of `beta` to normalized tensor.\n If False, `beta` is ignored.\n scale: If True, multiply by `gamma`.\n If False, `gamma` is not used.\n beta_initializer: Initializer for the beta weight.\n gamma_initializer: Initializer for the gamma weight.\n beta_regularizer: Optional regularizer for the beta weight.\n gamma_regularizer: Optional regularizer for the gamma weight.\n beta_constraint: Optional constraint for the beta weight.\n gamma_constraint: Optional constraint for the gamma weight.\n\n Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape\n Same shape as input.\n\n References\n - [Instance Normalization: The Missing Ingredient for Fast Stylization]\n (https://arxiv.org/abs/1607.08022)\n \"\"\"\n\n def __init__(self, **kwargs):\n if \"groups\" in kwargs:\n logging.warning(\"The given value for groups will be overwritten.\")\n\n kwargs[\"groups\"] = -1\n super(InstanceNormalization, self).__init__(**kwargs)\n"
] | [
[
"tensorflow.nn.batch_normalization",
"tensorflow.keras.constraints.get",
"tensorflow.shape",
"tensorflow.keras.constraints.serialize",
"tensorflow.keras.backend.int_shape",
"tensorflow.stack",
"tensorflow.keras.regularizers.get",
"tensorflow.reshape",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.nn.moments",
"tensorflow.keras.initializers.serialize",
"tensorflow.keras.regularizers.serialize",
"tensorflow.keras.initializers.get"
]
] |
mutazag/mdsi | [
"efecc8f650ddf6866154389f98d4ce0a9803db18"
] | [
"misc/learnpy/k-means/loadiris.py"
] | [
"import pandas as pd\nfrom sklearn import datasets\n\n\n# load iris data set\niris = datasets.load_iris()\nprint(iris)\n\nspecies = [iris.target_names[x] for x in iris.target]\n\niris = pd.DataFrame(iris['data'], columns = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width']) \niris['Species'] = species\n\n\niris.head()\niris.dtypes\n\n\n# quick count\niris['count'] = 1\niris[['Species', 'count']].groupby('Species').count()\niris.groupby('Species').count()\n\n\n\n# plot the data set \n# %matplotlib inline\ndef plot_iris(iris, col1, col2):\n print(\"plot_iris\")\n import seaborn as sns\n import matplotlib.pyplot as plt\n sns.lmplot(x = col1, y=col2, \n data = iris, \n hue = \"Species\", \n fit_reg=False)\n plt.xlabel(col1)\n plt.ylabel(col2)\n plt.title('Iris species show by color')\n plt.show() \n\nplot_iris(iris, 'Petal_Width', 'Sepal_Length') \n\nplot_iris(iris, 'Sepal_Width', 'Sepal_Length')\n\n\n# preparing numeric featurs by scaling\n\nfrom sklearn.preprocessing import scale\n\nimport pandas as pd \n\nnum_cols = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width']\niris_scaled = scale(iris[num_cols])\niris_scaled = pd.DataFrame(iris_scaled, columns = num_cols)\nprint(iris_scaled.describe().round(3))\n\n# coding string col 'species' as numeric using a dictionary \nlevels = {'setosa':0, \n 'versicolor':1, \n 'virginica':2}\n\n# add coded species to the new scaled iris data frame \niris_scaled['Species'] = [levels[x] for x in iris['Species']]\niris_scaled.head()\nplot_iris(iris_scaled, 'Sepal_Width', 'Sepal_Length')\n\n\n\n## split the data into training and tes using Bernoulli sampling \n\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\nnp.random.seed(3456) \niris_split = train_test_split(np.asmatrix(iris_scaled), test_size = 75)\n\niris_train_features = iris_split[0][:,:4]\niris_train_labels = np.ravel(iris_split[0][:,4])\n\niris_test_features = iris_split[1][:,:4]\niris_test_labels = np.ravel(iris_split[1][:,4])\n\nprint(iris_train_features.shape)\nprint(iris_train_labels.shape)\n\nprint(iris_test_features.shape)\nprint(iris_test_labels.shape)\n\n# Train and Eval KNN model \n\n#fit model \nfrom sklearn.neighbors import KNeighborsClassifier\nKNN_mod = KNeighborsClassifier(n_neighbors=3) # this is K \nKNN_mod.fit(iris_train_features, iris_train_labels)\n\n#test model on test data set\niris_test = pd.DataFrame(iris_test_features, columns = num_cols)\niris_test['predicted'] = KNN_mod.predict(iris_test_features)\niris_test['actuals'] = iris_test_labels\niris_test['correct'] = [1 if x == z else 0 for x, z in zip(iris_test['predicted'], iris_test_labels)]\n\n# calculate some accuracy measure \naccuracy = 100 * float(sum(iris_test['correct'])) / float(iris_test.shape[0])\nprint(accuracy)\n\niris_test[iris_test.correct != 1]\niris_test.loc[iris_test[\"correct\"] != 1]\n\n\n\n\n# plotting the predicted values and highliting incorrectly classified observations \n\nlevels = {0:'setosa', 1:'versicolor', 2:'virginica'}\niris_test['Species'] = [levels[x] for x in iris_test['predicted']]\nmarkers = {1:'^', 0:'o'}\ncolors = {'setosa':'blue', 'versicolor':'green', 'virginica':'red'}\ndef plot_shapes(df, col1,col2, markers, colors):\n import matplotlib.pyplot as plt\n import seaborn as sns\n ax = plt.figure(figsize=(6, 6)).gca() # define plot axis\n for m in markers: # iterate over marker dictioary keys\n for c in colors: # iterate over color dictionary keys\n df_temp = df[(df['correct'] == m) & (df['Species'] == c)]\n sns.regplot(x = col1, y = col2, \n 
data = df_temp, \n fit_reg = False, \n scatter_kws={'color': colors[c]},\n marker = markers[m],\n ax = ax)\n plt.xlabel(col1)\n plt.ylabel(col2)\n plt.title('Iris species by color')\n return 'Done'\nplot_shapes(iris_test, 'Petal_Width', 'Sepal_Length', markers, colors)\nplot_shapes(iris_test, 'Sepal_Width', 'Sepal_Length', markers, colors)"
] | [
[
"numpy.random.seed",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"sklearn.datasets.load_iris",
"pandas.DataFrame",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.asmatrix",
"matplotlib.pyplot.xlabel",
"numpy.ravel",
"sklearn.preprocessing.scale",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
urialon/bottleneck | [
"481fbb95edc6ae711da40b6305b40c12ce6a6d29"
] | [
"run-gat-2-8.py"
] | [
"import main\nfrom common import Task, STOP, GNN_TYPE\nfrom attrdict import AttrDict\nfrom experiment import Experiment\nimport torch\n\noverride_params = {\n 2: {'batch_size': 64, 'eval_every': 1000},\n 3: {'batch_size': 64},\n 4: {'batch_size': 1024},\n 5: {'batch_size': 1024},\n 6: {'batch_size': 1024},\n 7: {'batch_size': 2048},\n 8: {'batch_size': 1024, 'accum_grad': 2}, # effective batch size of 2048, with less GPU memory\n}\n\n\nclass Results:\n def __init__(self, train_acc, test_acc, epoch):\n self.train_acc = train_acc\n self.test_acc = test_acc\n self.epoch = epoch\n\n\nif __name__ == '__main__':\n\n task = Task.DICTIONARY\n gnn_type = GNN_TYPE.GAT\n stopping_criterion = STOP.TRAIN\n min_depth = 2\n max_depth = 8\n\n results_all_depths = {}\n for depth in range(min_depth, max_depth + 1):\n num_layers = depth + 1\n args = main.get_fake_args(task=task, depth=depth, num_layers=num_layers, loader_workers=7,\n type=gnn_type, stop=stopping_criterion,\n no_activation=True, no_residual=False)\n if depth in override_params:\n for key, value in AttrDict(override_params[depth]).items():\n args[key] = value\n train_acc, test_acc, epoch = Experiment(args).run()\n torch.cuda.empty_cache()\n results_all_depths[depth] = Results(train_acc=train_acc, test_acc=test_acc, epoch=epoch)\n print()\n\n print(f'Task: {task}')\n print('depth, train_acc, test_acc, epoch, train_acc, test_acc, epoch,')\n for depth in range(min_depth, max_depth + 1):\n res = results_all_depths[depth]\n print(f'{depth}, {res.train_acc}, {res.test_acc}, {res.epoch}')\n"
] | [
[
"torch.cuda.empty_cache"
]
] |
savinshynu/turbo_seti | [
"7d756f130af5a323403affcdcb9f9bfa62325836"
] | [
"test/fb_cases_util.py"
] | [
"r'''\nUtility functions for test_fb_cases.py\n'''\n\nfrom os import mkdir, remove\nfrom os.path import dirname\nfrom shutil import rmtree\nimport logging\nimport pandas as pd\nimport numpy as np\nimport setigen as stg\nfrom turbo_seti.find_doppler.find_doppler import FindDoppler\nfrom fb_cases_def import HERE, DEBUGGING, RTOL_DIFF, TestResultRecord, SetigenParms\n\nDF_REFERENCE = HERE + '/fb_dat_reference.txt'\nSEP = r'\\s+'\n\n\ndef initialize(arg_dir):\n r'''\n Recreate working directory, TESTDIR.\n Load result reference tables (2).\n '''\n rmtree(arg_dir, ignore_errors=True)\n mkdir(arg_dir)\n df = pd.read_csv(DF_REFERENCE, sep=SEP, engine='python', comment='#')\n nrows = len(df)\n if nrows < 1:\n raise ValueError('initialize: Empty reference table')\n if nrows % 2 != 0:\n raise ValueError('initialize: Reference table row count ({}) is not divisible by 2'\n .format(nrows))\n if DEBUGGING:\n print('initialize: Test case reference results: \\n', df)\n ref_tophit_1 = []\n ref_tophit_2 = []\n jj = 0\n while jj < nrows:\n record = TestResultRecord()\n record.fdir = int(df['fdir'][jj])\n record.drsign = int(df['drsign'][jj])\n record.tophit_id = int(df['tophit'][jj])\n record.drate = float(df['drate'][jj])\n record.snr = float(df['snr'][jj])\n record.freq = float(df['freq'][jj])\n record.index = int(df['index'][jj])\n ref_tophit_1.append(record)\n if DEBUGGING:\n print('initialize: appended for hit_1:\\n', record.to_string() )\n jj += 1\n del record\n record = TestResultRecord()\n record.fdir = int(df['fdir'][jj])\n record.drsign = int(df['drsign'][jj])\n record.tophit_id = int(df['tophit'][jj])\n record.drate = float(df['drate'][jj])\n record.snr = float(df['snr'][jj])\n record.freq = float(df['freq'][jj])\n record.index = int(df['index'][jj])\n ref_tophit_2.append(record)\n if DEBUGGING:\n print('initialize: appended for hit_2:\\n', record.to_string() )\n jj += 1\n if DEBUGGING:\n print('initialize: {} test cases loaded.'.format(len(ref_tophit_1)))\n return ref_tophit_1, ref_tophit_2\n\n\ndef generate_fil_file(outpath, flag_fascending, flag_sign_drift_rate):\n r'''\n Using setigen, generate a filterbank file.\n\n Parameters:\n outpath - full path of where to store the resultant filterbank file.\n flag_fascending - use an ascending (+1) or descending (-1) sequence of frequencies\n flag_sign_drift_rate - use a positive (+1) or negative (-1) drift rate\n '''\n if DEBUGGING:\n print('generate_fil_file: flag_fascending={}, flag_sign_drift_rate={}'\n .format(flag_fascending, flag_sign_drift_rate))\n\n # Set up setigne parameters\n stg_parms = SetigenParms()\n if flag_sign_drift_rate < 0:\n stg_parms.drift_rate_1 = -stg_parms.drift_rate_1\n stg_parms.drift_rate_2 = -stg_parms.drift_rate_2\n stg_parms.drift_rate_3 = -stg_parms.drift_rate_3\n stg_parms.drift_rate_4 = -stg_parms.drift_rate_4\n stg_parms.drift_rate_5 = -stg_parms.drift_rate_5\n\n # Instantiate a setigen Frame object\n frame = stg.Frame(fchans=stg_parms.fchans,\n tchans=stg_parms.tchans,\n df=stg_parms.df,\n dt=stg_parms.dt,\n fch1=stg_parms.fch1,\n ascending=(flag_fascending > 0))\n\n # Add noise to stg object.\n frame.add_noise(x_mean=0, x_std=stg_parms.noise_std, noise_type='gaussian')\n\n # Signal 1 will be detected.\n signal_1_intensity = frame.get_intensity(snr=stg_parms.snr_1)\n frame.add_constant_signal(f_start=frame.get_frequency(stg_parms.signal_start_1),\n drift_rate=stg_parms.drift_rate_1,\n level=signal_1_intensity,\n width=stg_parms.width_1,\n f_profile_type='gaussian')\n\n # Signal 2 will be detected.\n 
signal_2_intensity = frame.get_intensity(snr=stg_parms.snr_2)\n frame.add_constant_signal(f_start=frame.get_frequency(stg_parms.signal_start_2),\n drift_rate=stg_parms.drift_rate_2,\n level=signal_2_intensity,\n width=stg_parms.width_2,\n f_profile_type='gaussian')\n\n # Signal 3 is a symmetric signal with three Gaussians\n # that will fall below the SNR requirements.\n signal_3_intensity = frame.get_intensity(snr=stg_parms.snr_3)\n frame.add_signal(stg.constant_path(f_start=frame.get_frequency(stg_parms.signal_start_3),\n drift_rate=stg_parms.drift_rate_3),\n stg.constant_t_profile(level=1),\n stg.multiple_gaussian_f_profile(width=stg_parms.width_3),\n stg.constant_bp_profile(level=signal_3_intensity))\n\n # Signal 4 is a symmetric signal with three Gaussians\n # that will be drifting too quickly.\n signal_4_intensity = frame.get_intensity(snr=stg_parms.snr_4)\n frame.add_signal(stg.constant_path(f_start=frame.get_frequency(stg_parms.signal_start_4),\n drift_rate=stg_parms.drift_rate_4),\n stg.constant_t_profile(level=1),\n stg.multiple_gaussian_f_profile(width=stg_parms.width_4),\n stg.constant_bp_profile(level=signal_4_intensity))\n\n # Signal 5 is similar to signal 4 but drifting in the opposite direction.\n signal_5_intensity = frame.get_intensity(snr=stg_parms.snr_5)\n frame.add_signal(stg.constant_path(f_start=frame.get_frequency(stg_parms.signal_start_5),\n drift_rate=stg_parms.drift_rate_5),\n stg.constant_t_profile(level=1),\n stg.multiple_gaussian_f_profile(width=stg_parms.width_5),\n stg.constant_bp_profile(level=signal_5_intensity))\n\n # Save the frame as a filterbank file.\n frame.save_fil(filename=outpath)\n print(\"generate_fil_file: generated {}\".format(outpath))\n del frame\n\n\ndef make_one_dat_file(arg_path_fil, min_drift=0.0, max_drift=4.0, min_snr=25.0, remove_h5=True):\n r'''\n Make a single DAT file:\n * Instantiate the FindDoppler class object.\n * With the object, search the H5, creating the DAT file\n and a LOG file (not used).\n '''\n if max_drift is None:\n raise ValueError('make_one_dat_file: max_drift not set')\n woutdir = dirname(arg_path_fil)\n fdop = FindDoppler(datafile=arg_path_fil,\n min_drift=min_drift,\n max_drift=max_drift,\n snr=min_snr,\n log_level_int=logging.WARNING,\n out_dir=woutdir)\n fdop.search()\n path_h5_file = arg_path_fil.replace('.fil', '.h5')\n if remove_h5:\n remove(path_h5_file)\n\n\ndef get_case_results(arg_path_dat):\n r'''From the DAT file, extract the data for all top hits.'''\n df = pd.read_csv(arg_path_dat, header=None, sep=SEP, engine='python', comment='#')\n nrows = len(df)\n if nrows != 2:\n raise ValueError('get_case_results: Expected 2 rows in DAT but observed {} rows'\n .format(nrows))\n\n obs_tophit_1 = TestResultRecord()\n obs_tophit_1.tophit_id = int(df[0][0]) # 1st col, 1st row\n obs_tophit_1.drate = float(df[1][0])\n obs_tophit_1.snr = float(df[2][0])\n obs_tophit_1.freq = float(df[4][0])\n obs_tophit_1.index = int(df[5][0])\n\n obs_tophit_2 = TestResultRecord()\n obs_tophit_2.tophit_id = int(df[0][1]) # 1st col, 2nd row\n obs_tophit_2.drate = float(df[1][1])\n obs_tophit_2.snr = float(df[2][1])\n obs_tophit_2.freq = float(df[4][1])\n obs_tophit_2.index = int(df[5][1])\n\n return obs_tophit_1, obs_tophit_2\n\n\ndef case_comparison(obs_tophit, ref_tophit, max_drift):\n r'''Compare DAT file observations to the reference.'''\n if obs_tophit is None:\n if ref_tophit is None:\n return # success, both None\n # ref_tophit defined, obs_tophit is None\n raise ValueError('case_comparison: FAILED, max_drift={}\\nobs_tophit is 
None\\nref_tophit:::{}'\n .format(max_drift, ref_tophit.to_string()))\n if ref_tophit is None: # obs_tophit defined, ref_tophit is None\n raise ValueError('case_comparison: FAILED, max_drift={}\\nref_tophit is None\\nobs_tophit:::{}'\n .format(max_drift, obs_tophit.to_string()))\n\n if obs_tophit.tophit_id == ref_tophit.tophit_id \\\n and np.isclose(obs_tophit.drate, ref_tophit.drate, rtol=RTOL_DIFF) \\\n and np.isclose(obs_tophit.snr, ref_tophit.snr, rtol=RTOL_DIFF) \\\n and np.isclose(obs_tophit.freq, ref_tophit.freq, rtol=RTOL_DIFF) \\\n and obs_tophit.index == ref_tophit.index:\n return # success\n\n # Some field(s) did not compare correctly.\n raise ValueError('case_comparison: FAILED, max_drift={}\\nobs_tophit:::{}\\nref_tophit:::{}'\n .format(max_drift, obs_tophit.to_string(), ref_tophit.to_string()))\n\nif __name__ == '__main__':\n # __main__ is a developer unit test, not normally to be executed.\n from fb_cases_def import TESTDIR, PATH_FIL_FILE, MIN_SNR\n rmtree(TESTDIR, ignore_errors=True)\n mkdir(TESTDIR)\n generate_fil_file(PATH_FIL_FILE, -1, -1)\n make_one_dat_file(PATH_FIL_FILE, max_drift=5, min_snr=MIN_SNR)\n"
] | [
[
"pandas.read_csv",
"numpy.isclose"
]
] |
DebeshJha/tensorflow-1 | [
"2b5a225c49d25273532d11c424d37ce394d7579a",
"2b5a225c49d25273532d11c424d37ce394d7579a",
"2b5a225c49d25273532d11c424d37ce394d7579a",
"2b5a225c49d25273532d11c424d37ce394d7579a"
] | [
"tensorflow/python/ipu/utils.py",
"tensorflow/compiler/plugin/poplar/tests/bias_apply_graph_caching_test.py",
"tensorflow/python/ipu/horovod/ipu_horovod_strategy.py",
"tensorflow/compiler/plugin/poplar/tests/casts_elimination_test.py"
] | [
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"\nGeneral utilities\n~~~~~~~~~~~~~~~~~\n\"\"\"\n\nimport collections\nfrom enum import Enum\nimport os\nimport time\nimport numpy as np\n\nfrom tensorflow.compiler.plugin.poplar.driver.config_pb2 import IpuOptions\nfrom tensorflow.compiler.plugin.poplar.driver.trace_pb2 import IpuTraceEvent\nfrom tensorflow.compiler.plugin.poplar.driver import config_pb2\nfrom tensorflow.compiler.plugin.poplar.ops import gen_ipu_ops\n# pylint: disable=unused-import\n# These imports are only here to make it easier for the Tensorflow Wheel users\n# to use these functions:\n# ```\n# from tensorflow.python import ipu\n# ...\n# ipu.utils.export_variables_from_live_session(...)\n# ```\nfrom tensorflow.compiler.plugin.poplar.tools.tensorflow_weights_extractor import (\n export_variables_from_live_session, export_variables_from_live_model,\n import_data_in_live_session, import_data_in_live_model)\n# pylint: enable=unused-import\nfrom tensorflow.compat.v1 import executing_eagerly\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.ipu import ipu_infeed_queue\nfrom tensorflow.python.ipu import dataset_extractor\n\n\nclass SelectionOrder(Enum):\n \"\"\"Depending on the communication pattern of the model, the order in\n which the IPUs are selected and mapped to shards can impact the performance.\n\n For example, given a model which executes on multiple IPUs:\n\n .. code-block:: python\n\n def sharded_graph(pa, pb, pc, pd):\n with ipu.scopes.ipu_shard(0):\n o1 = pa + pb\n with ipu.scopes.ipu_shard(1):\n o2 = o1 + pc\n with ipu.scopes.ipu_shard(2):\n o3 = o2 + pd\n return o3\n\n and a typical machine with 8 Graphcore C2 cards:\n\n .. 
code-block:: none\n\n _______ _______\n | | | |\n | 14 |=============| 15 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 12 |=============| 13 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 10 |=============| 11 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 8 |=============| 9 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 6 |=============| 7 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 4 |=============| 5 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 2 |=============| 3 |\n |_______| |_______|\n || ||\n _______ _______\n | | | |\n | 0 |=============| 1 |\n |_______| |_______|\n\n (where each numbered square represents an IPU with the given device ID and the\n == and || connections represent IPUs being directly connected via IPU-Links)\n\n we can see that the `ipu_shard(0)` directly communicates with `ipu_shard(1)`\n and that `ipu_shard(1)` directly communicates with `ipu_shard(2)`.\n If the shards 0, 1, 2 were mapped to IPUs 0, 1, 2 in that order, then the\n communication between shards 1 and 2 would not have a direct connection via an\n IPU-Link and would have to perform a \"hop\" via an IPU.\n If the shards 0, 1, 2 were mapped to IPUs 0, 1, 3 in that order, then the\n communication between shards 1 and 2 would have a direct connection via an\n IPU-Link which will reduce the communication cost.\n\n This Enum class is used to control the order in which the IPUs are selected.\n Currently, the following IPU selection orderings are supported:\n\n * `AUTO`: automatically try and select the best selection given the network.\n * `ZIGZAG`: follow the natural ordering of IPUs. In the above example, the\n IPUs would be selected in the following order:\n `0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15`.\n * `SNAKE`: select IPUs such that each consecutive shard is directly\n connected via IPU-Links to the shard before and after. In the above example,\n the IPUs would be selected in the following order:\n `0, 1, 3, 2, 4, 5, 7, 6, 8, 9, 11, 10, 12, 13, 15, 14`.\n * `HOOF`: select IPUs such that each consecutive shard is directly\n connected via IPU-Links to the shard before and after and the last and first\n shard are on the same C2 cards. 
In the above example, the IPUs would be\n selected in the following order:\n `0, 2, 4, 6, 8, 10, 12, 14, 15, 13, 11, 9, 7, 5, 3, 1`.\n\n The `SNAKE` and `HOOF` IPU selection orders are particularly beneficial for\n pipelined models.\n \"\"\"\n AUTO = config_pb2.IpuSelectionOrder.Value(\"AUTO\")\n ZIGZAG = config_pb2.IpuSelectionOrder.Value(\"ZIGZAG\")\n SNAKE = config_pb2.IpuSelectionOrder.Value(\"SNAKE\")\n HOOF = config_pb2.IpuSelectionOrder.Value(\"HOOF\")\n\n\nclass ExecutionProfileType(Enum):\n \"\"\"The execution profile type indicates the desired information in the\n execution profile.\n\n * `NO_PROFILE` indicates that there should be no execution profiling.\n * `DEVICE_PROFILE` indicates that the execution profile should contain only\n device wide events.\n * `IPU_PROFILE` indicates that the profile should contain IPU level\n execution events.\n * `TILE_PROFILE` indicates that the profile should contain Tile level\n execution events.\n \"\"\"\n NO_PROFILE = config_pb2.IpuExecutionProfileType.Value(\"NO_PROFILE\")\n DEVICE_PROFILE = config_pb2.IpuExecutionProfileType.Value(\"DEVICE_PROFILE\")\n IPU_PROFILE = config_pb2.IpuExecutionProfileType.Value(\"IPU_PROFILE\")\n TILE_PROFILE = config_pb2.IpuExecutionProfileType.Value(\"TILE_PROFILE\")\n\n\nclass DeviceConnectionType(Enum):\n \"\"\"Enumeration to describe the mechanism used to attach to the Poplar\n device.\n\n * `ALWAYS` indicates that the system will attach when configuring the\n device.\n * `ON_DEMAND` will defer connection to when the IPU is needed.\n * `NEVER` will never try to attach to a device. Used when compiling offline.\n \"\"\"\n ALWAYS = config_pb2.IpuDeviceConnectionType.Value(\"ALWAYS\")\n ON_DEMAND = config_pb2.IpuDeviceConnectionType.Value(\"ON_DEMAND\")\n NEVER = config_pb2.IpuDeviceConnectionType.Value(\"NEVER\")\n\n\ndef configure_ipu_system(config, device=\"cpu\"):\n \"\"\"Configure an IPU system. Passing an IpuOptions protobuf created by the\n ``create_ipu_config`` function.\n\n Args:\n config: An IpuOptions configuration protobuf\n device: The CPU device which is local to the IPU hardware\n\n Returns:\n None\n \"\"\"\n if not isinstance(config, config_pb2.IpuOptions):\n raise Exception(\"`config` must be an IpuOptions instance\")\n\n g = ops.Graph()\n with g.as_default():\n with ops.device(device):\n cfg_op = gen_ipu_ops.ipu_configure_hardware(config.SerializeToString())\n\n with session_lib.Session(graph=g) as sess:\n sess.run(cfg_op)\n\n\ndef get_ipu_config(session=None):\n \"\"\"Get the configuration of an IPU system.\n\n Args:\n session: An optional session on which to execute.\n\n Returns:\n A list of IpuOption instances, one for each PoplarExecutor.\n \"\"\"\n configurations = None\n\n # Get the serialized output.\n if executing_eagerly():\n assert not session, \"No session is required for eager execution.\"\n configurations = gen_ipu_ops.ipu_get_configuration().numpy()\n else:\n s = session if session else session_lib.Session()\n configurations = s.run(gen_ipu_ops.ipu_get_configuration())\n\n # Deserialize and determine if a valid config exists,\n # i.e. 
user has succesfully called ipu_configure_hardware.\n deserialized = []\n valid = False\n for conf in configurations:\n # Deserialize.\n opt = IpuOptions()\n opt.ParseFromString(conf)\n deserialized.append(opt)\n\n valid |= len(opt.device_config) > 0\n\n if not valid:\n raise RuntimeError(\"No IPU devices configured.\")\n\n return deserialized\n\n\ndef get_num_of_ipus_in_device(ipu_device, device=\"cpu\"):\n \"\"\"Get the number of physical IPUs\n\n Args:\n ipu_device: The IPU device for which to get the number of devices for.\n device: The CPU device which is local to the IPU hardware.\n\n Returns:\n A number of physical IPUs configured for a particular TF device.\n \"\"\"\n\n g = ops.Graph()\n with g.as_default():\n with ops.device(device):\n cfg_op = gen_ipu_ops.ipu_get_num_devices(ipu_device)\n\n with session_lib.Session(graph=g) as sess:\n return sess.run(cfg_op)\n\n\ndef running_on_ipu_model():\n \"\"\" Check if XLA is configured to run on the ipu model.\n\n Returns:\n True if XLA is configured to run on the ipu model.\n False if XLA is configured to run on real hardware.\n \"\"\"\n return \"--use_ipu_model\" in os.environ.get(\"TF_POPLAR_FLAGS\", \"\")\n\n\[email protected]_args(None, \"Use set_optimization_options() instead.\",\n \"max_cross_replica_sum_buffer_size\",\n \"max_inter_ipu_copies_buffer_size\")\ndef create_ipu_config(profiling=False,\n enable_ipu_events=False,\n use_poplar_text_report=False,\n use_poplar_cbor_report=False,\n profile_execution=None,\n enable_poplar_serialized_graph=False,\n report_every_nth_execution=0,\n max_report_size=0x10000000,\n report_directory=\"\",\n scheduler_selection=\"\",\n always_rearrange_copies_on_the_host=False,\n merge_infeed_io_copies=False,\n disable_graph_convolution_caching=False,\n disable_graph_outlining=False,\n retain_control_dependencies=False,\n max_cross_replica_sum_buffer_size=0,\n max_inter_ipu_copies_buffer_size=0,\n max_scheduler_lookahead_depth=5,\n max_scheduler_search_space_size=64,\n prefetch_data_streams=True,\n selection_order=None,\n enable_experimental_remote_buffer_embedding=False):\n \"\"\"Create an empty IPU session configuration structure.\n\n Args:\n profiling: Enable compilation reports, and IPU trace events.\n enable_ipu_events: Enable IPU trace events without poplar reports.\n use_poplar_text_report: Enable the Poplar textual report summary.\n use_poplar_cbor_report: Enable the Poplar CBOR reports.\n profile_execution: Include Poplar execution profiles in the execution\n events. Can only be enabled if `profiling` is also enabled. If set, can be\n `True`, 'False`, or a member of the `ExecutionProfileType` enumeration.\n A `True` value indicates `ExecutionProfileType.DEVICE_PROFILE`.\n enable_poplar_serialized_graph: Create the Poplar serialized graph and\n include in the IPU compilation trace events.\n report_every_nth_execution: Only produce an execution report on every Nth\n execution. 0 = One report only.\n max_report_size: The maximum size of Poplar profiles to include in the\n profile events.\n report_directory: When set, reports will be written to files in this\n directory, instead of being written into the events. The events will\n contain the full paths of the report files.\n scheduler_selection: When set, this forces the compiler to use a specific\n scheduler when ordering the instructions. 
See the documentation for a\n list of valid schedulers.\n always_rearrange_copies_on_the_host: *** Experimental Flag ***\n The data which is streamed to/from the device might be stored in different\n layouts on the device and on the host. If that is the case the\n rearrangement is performed on the device by default. By enabling this\n option the rearrangement will be performed on the host at the expense of\n latency.\n merge_infeed_io_copies: When true, this flag will merge the streamed\n host->device input copies into one larger copy. This may reduce the time\n to copy data from the host, at the expense of increasing the live tensor\n memory on the device.\n disable_graph_convolution_caching: By default, the convolution operation\n searches for an equivalent cached operation, and uses this instead of\n creating a new convolution. Setting this flag forces the creation of a\n new convolution. This can improve runtime at the expense of graph size.\n disable_graph_outlining: By default, some operations, such as matrix\n multiplications, which occur in the graph multiple times but with\n different input tensors might be optimised to reduce the total code size\n of the graph at the expense of the execution time. Setting this flag will\n disable these optimisations. This option is not valid for the convolution\n operation (also see disable_graph_convolution_caching)\n retain_control_dependencies: Deprecated.\n max_cross_replica_sum_buffer_size: The maximum number of bytes that can be\n waiting before a cross replica sum op is scheduled.\n max_inter_ipu_copies_buffer_size: The maximum number of bytes that can be\n waiting before an inter IPU copy between IPUs is scheduled.\n max_scheduler_lookahead_depth: The maximum distance to look into the future\n when considering valid schedules.\n max_scheduler_search_space_size: The maximum number of nodes to consider\n when building the tree of future schedules.\n prefetch_data_streams: When set to true, the prefetching of data for data\n streams on the host will be overlapped with execution on the IPU.\n selection_order: the order in which IPUs are selected and mapped to physical\n IPU devices when using multi-IPU devices (see `SelectionOrder`). 
When\n not specified, then automatic selection order is used, otherwise an\n instance of `SelectionOrder`.\n enable_experimental_remote_buffer_embedding: When set to true,\n `HostEmbedding` will make use of poplar remote buffers.\n\n Returns:\n An IpuOptions configuration protobuf, suitable for passing to\n ``configure_ipu_system``\n \"\"\"\n if profiling and enable_ipu_events:\n raise Exception(\n \"`profiling` and `enable_ipu_events` are mutually exclusive\")\n\n if retain_control_dependencies:\n raise Exception(\"`retain_control_dependencies` is deprecated\")\n\n selection_order = selection_order if selection_order else SelectionOrder.AUTO\n profile_execution = profile_execution if profile_execution \\\n else ExecutionProfileType.NO_PROFILE\n\n if isinstance(profile_execution, (np.bool_, bool)):\n if profile_execution:\n profile_execution = ExecutionProfileType.DEVICE_PROFILE\n else:\n profile_execution = ExecutionProfileType.NO_PROFILE\n\n if (profile_execution != ExecutionProfileType.NO_PROFILE and not profiling):\n raise Exception(\"`profiling` is required when `profile_execution` is set\")\n\n if not isinstance(profile_execution, ExecutionProfileType):\n raise Exception(\"`profile_execution` must be True, False, or an \"\n \"ExecutionProfileType instance\")\n\n opts = config_pb2.IpuOptions()\n\n # Default initialize IpuOptions() attributes here.\n opts.creator_id = config_pb2.IpuOptionsCreator.IPU_UTILS\n opts.ipu_model_config.compile_ipu_code = True\n opts.enable_multi_slice_combiner = False\n opts.enable_matmul_combiner = False\n opts.enable_gather_simplifier = False\n opts.device_connection_type = DeviceConnectionType.ALWAYS.value\n opts.speed_size_config.allow_recompute = False\n\n # Configure IpuOptions according to the passed arguments.\n opts.profiling.enable_ipu_trace_events = profiling or enable_ipu_events\n opts.profiling.enable_compilation_trace = profiling\n opts.profiling.enable_io_trace = profiling\n opts.profiling.execution_trace_type = profile_execution.value\n opts.profiling.enable_poplar_reports_text = use_poplar_text_report\n opts.profiling.enable_poplar_reports_cbor = use_poplar_cbor_report\n opts.profiling.enable_poplar_graph = enable_poplar_serialized_graph\n opts.profiling.report_every_nth_execution = report_every_nth_execution\n opts.profiling.max_report_size = max_report_size\n opts.profiling.report_directory = report_directory\n\n opts.speed_size_config.always_rearrange_copies_on_the_host = \\\n always_rearrange_copies_on_the_host\n opts.speed_size_config.merge_infeed_io_copies = merge_infeed_io_copies\n opts.speed_size_config.disable_graph_convolution_caching = \\\n disable_graph_convolution_caching\n opts.speed_size_config.disable_graph_outlining = \\\n disable_graph_outlining\n opts.speed_size_config.scheduler_selection = scheduler_selection\n\n opts.max_cross_replica_sum_buffer_size = max_cross_replica_sum_buffer_size\n opts.max_inter_ipu_copies_buffer_size = max_inter_ipu_copies_buffer_size\n\n opts.max_scheduler_lookahead_depth = max_scheduler_lookahead_depth\n opts.max_scheduler_search_space_size = max_scheduler_search_space_size\n\n opts.prefetch_data_streams = prefetch_data_streams\n opts.selection_order = selection_order.value\n\n opts.verified_transfers.enabled = False\n opts = set_verification_options(opts, VerificationOptions())\n\n opts.enable_experimental_remote_buffer_embedding = \\\n enable_experimental_remote_buffer_embedding\n\n return opts\n\n\ndef set_serialization_options(opts, output_folder=\"\"):\n \"\"\" Enable / disable the 
serialization to disk of the compiled executables.\n\n .. code-block:: python\n\n # Create a device that will save to disk all the compiled executables.\n opts = create_ipu_config()\n opts = set_serialization_options(opts,\n output_folder=\"/tmp/my_network\")\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n output_folder: Where to save the compiled executables.\n Set to \"\" to disable serialization.\n\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n opts.serialization_folder = output_folder\n return opts\n\n\ndef set_optimization_options(opts,\n combine_embedding_lookups=False,\n combine_matmuls=False,\n max_cross_replica_sum_buffer_size=0,\n max_reduce_scatter_buffer_size=0,\n max_inter_ipu_copies_buffer_size=0,\n max_send_recv_cluster_size=0,\n gather_simplifier=False,\n triangular_solve_expander_block_size=0):\n \"\"\"Set the IPU options related to performance / optimizations.\n\n .. code-block:: python\n\n # Create a device with fusion for multiSlices sharing the same input\n # enabled.\n opts = create_ipu_config()\n opts = set_optimization_options(opts,\n combine_embedding_lookups=True)\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n combine_embedding_lookups: Fuse embedding lookups on the same tensor. This\n might improve performance but increase memory usage.\n combine_matmuls: Fuse matmul operations if they share the same weights or\n the same input.\n max_cross_replica_sum_buffer_size: The maximum number of bytes that can be\n waiting before a cross replica sum op is scheduled.\n max_reduce_scatter_buffer_size: The maximum number of bytes that can be\n waiting before a reduce scatter op is scheduled.\n max_inter_ipu_copies_buffer_size: The maximum number of bytes that can be\n waiting before a inter IPU copy between IPUs is scheduled.\n max_send_recv_cluster_size: The maximum number of bytes that can be waiting\n before a cluster of send/recv instructions to/from the host is scheduled.\n These are lowered to stream copies that can be merged by Poplar.\n gather_simplifier: Will enable more aggressive optimisation\n for embedding lookups.\n triangular_solve_expander_block_size: Defines size for triangular solver\n expander blocks. 0 - implementation defined default.\n\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n # Internally embedding lookups are implemented using multiSlice operations.\n opts.enable_multi_slice_combiner = combine_embedding_lookups\n opts.enable_matmul_combiner = combine_matmuls\n opts.max_cross_replica_sum_buffer_size = max_cross_replica_sum_buffer_size\n opts.max_reduce_scatter_buffer_size = max_reduce_scatter_buffer_size\n opts.max_inter_ipu_copies_buffer_size = max_inter_ipu_copies_buffer_size\n opts.max_send_recv_cluster_size = max_send_recv_cluster_size\n opts.enable_gather_simplifier = gather_simplifier\n opts.triangular_solve_expander_block_size = \\\n triangular_solve_expander_block_size\n\n return opts\n\n\ndef set_norm_options(opts, use_stable_statistics=False):\n \"\"\"Set the IPU options related to norms.\n\n Args:\n use_stable_statistics: If True, computes the mean first and subtracts\n the activations by it before computing the variance. 
The\n implementation with this flag set to True is slower than when set\n to False.\n\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n opts.use_stable_norm_statistics = use_stable_statistics\n\n return opts\n\n\ndef set_transfer_options(opts, use_verified_transfers=False):\n \"\"\"Set the IPU options related to Poplar data transfers.\n\n Args:\n opts: An IpuOptions session control protobuf.\n use_verified_transfers: If True, use Poplar's verified transfers.\n\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n opts.verified_transfers.enabled = use_verified_transfers\n\n return opts\n\n\nclass KeyId:\n def __init__(self, key=0, start_id=-1):\n self.key = key\n self.start_id = start_id\n\n\nclass VerificationOptions:\n \"\"\"Store pairs of key / id to use for each type of data used in the graph.\n Does nothing unless verified transfers have been enabled by calling\n `set_transfer_options(opts, use_verified_transfers=True)`\n and an instance of this class has been set by calling\n `set_verification_options`:\n\n .. code-block:: python\n\n o = VerificationOptions()\n o.inputs.key = 1\n o.infeeds[\"infeed\"].key = 3\n set_verification_options(opts, o)\n\n \"\"\"\n def __init__(self):\n self.inputs = KeyId()\n self.input_parameters = KeyId()\n self.outputs = KeyId()\n self.output_parameters = KeyId()\n self.infeeds = collections.defaultdict(KeyId)\n self.outfeeds = collections.defaultdict(KeyId)\n self.checkpoint_in = KeyId(0, 0)\n self.checkpoint_out = KeyId(0, 0)\n\n\ndef set_verification_options(opts, verification_options):\n \"\"\"Set the pairs or key / id to use for each type of data used in the graph\n when verified transfers are enabled.\n\n .. code-block:: python\n\n # Create a device which will use verified transfers with different keys.\n opts = create_ipu_config()\n opts = set_transfer_options(opts, use_verified_transfers=True)\n o = VerificationOptions()\n o.input_parameters = KeyId(1)\n o.infeeds[\"training_feed\"] = KeyId(2)\n opts = set_verification_options(opts, o)\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n verification_options: a VerificationOptions object that contains\n the keys / ids to use.\n \"\"\"\n if not isinstance(verification_options, VerificationOptions):\n raise Exception(\n \"`verification_options` must be of type VerificationOptions\")\n\n def _cp_key_and_id(src, dst):\n dst.key = src.key\n dst.start_id = src.start_id\n\n for attr in [\n \"inputs\", \"input_parameters\", \"outputs\", \"output_parameters\",\n \"checkpoint_in\", \"checkpoint_out\"\n ]:\n _cp_key_and_id(getattr(verification_options, attr),\n getattr(opts.verified_transfers, attr))\n\n for name, options in verification_options.infeeds.items():\n _cp_key_and_id(options, opts.verified_transfers.infeeds[name])\n\n for name, options in verification_options.outfeeds.items():\n _cp_key_and_id(options, opts.verified_transfers.outfeeds[name])\n\n return opts\n\n\ndef set_compilation_options(opts, compilation_options=None):\n \"\"\"Set the IPU compilation options for the session.\n\n .. 
code-block:: python\n\n # Create a device with debug execution profile flag set to \"compute_sets\"\n opts = create_ipu_config()\n opts = set_compilation_options(opts,\n compilation_options={\"debug.instrument\": \"true\",\n \"debug.allowOutOfMemory\": \"true\"})\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n compilation_options: A dictionary of poplar compilation option flags to be\n sent to the executor.\n\n Returns:\n The IpuOptions configuration protobuf, with engine compilation options set.\n \"\"\"\n if compilation_options:\n if not isinstance(compilation_options, dict):\n raise Exception(\"`compilation_options` must be a dictionary\")\n\n for (option_name, value) in compilation_options.items():\n compilation_option = opts.compilation_options.add()\n compilation_option.option = option_name\n compilation_option.value = value\n\n return opts\n\n\ndef set_convolution_options(opts, convolution_options=None):\n \"\"\"Set the IPU convolution options for the session.\n\n .. code-block:: python\n\n # Set \"availableMemoryProportion\" flag to \"0.1\"\n opts = create_ipu_config()\n opts = set_convolution_options(opts,\n convolution_options={\"availableMemoryProportion\": \"0.1\"})\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n convolution_options: A dictionary of poplar option flags for\n convolutions. The \"availableMemoryProportion\" flag indicates the\n proportion of tile memory to be made available as\n temporary memory for convolutions (float between 0 and 1.0).\n Less temporary memory will generally result in a convolution that\n takes more cycles to complete. However, because always live memory\n (such as control code and vertex state) is not tracked when planning it,\n a convolution using less temporary memory may use more memory overall,\n due to an increase of always live memory.\n\n Returns:\n The IpuOptions configuration protobuf, with convolution options set.\n \"\"\"\n if convolution_options:\n if not isinstance(convolution_options, dict):\n raise Exception(\"`convolution_options` must be a dictionary\")\n\n for (option_name, value) in convolution_options.items():\n opt = opts.convolution_options.add()\n opt.option = option_name\n opt.value = value\n\n return opts\n\n\ndef set_matmul_options(opts, matmul_options=None, clear_pass_type=False):\n \"\"\"Set the IPU matrix multiplication options for the session.\n\n .. code-block:: python\n\n # Set \"availableMemoryProportion\" flag to \"0.5\"\n opts = create_ipu_config()\n opts = set_matmul_options(opts,\n matmul_options={\"availableMemoryProportion\": \"0.5\"})\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n matmul_options: A dictionary containing the poplar option flag\n \"availableMemoryProportion\" for the matrix multiplication operations.\n It indicates the proportion of tile memory to be made available as\n temporary memory for the matrix multiplications (float between 0 and 1.0).\n Less temporary memory will generally result in a multiplication that\n takes more cycles to complete. 
However, because always live memory\n (like code and vertex state) is not tracked when planning it,\n a multiplication using less temporary memory may use more memory overall,\n due to an increase of always live memory.\n clear_pass_type: When set to True, the Pass type will not\n be set in the options passed to the poplar operation.\n\n Returns:\n The IpuOptions configuration protobuf, with matmul options set.\n \"\"\"\n if matmul_options:\n if not isinstance(matmul_options, dict):\n raise Exception(\"`matmul_options` must be a dictionary\")\n\n for (option_name, value) in matmul_options.items():\n opt = opts.matmul_options.add()\n opt.option = option_name\n opt.value = value\n\n opts.clear_matmul_pass_type = clear_pass_type\n\n return opts\n\n\ndef set_pooling_options(opts, pooling_options=None):\n \"\"\"Set the IPU pooling compilation options for the session.\n\n .. code-block:: python\n\n # Set \"poolUseIntrospectiveMapping\" flag to \"false\"\n opts = create_ipu_config()\n opts = set_pooling_options(opts,\n pooling_options={\"poolUseIntrospectiveMapping\": \"false\"})\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n pooling_options: A dictionary of poplar option flags for the pooling\n operation.\n\n Returns:\n The IpuOptions configuration protobuf, with pooling options set.\n \"\"\"\n if pooling_options:\n if not isinstance(pooling_options, dict):\n raise Exception(\"`pooling_options` must be a dictionary\")\n\n for (option_name, value) in pooling_options.items():\n opt = opts.pooling_options.add()\n opt.option = option_name\n opt.value = value\n\n return opts\n\n\[email protected]_args(\n None, \"report_options is deprecated, use graph_options and\"\n \" execution_options instead\", \"report_options\")\ndef set_report_options(opts,\n report_options=None,\n graph_options=None,\n execution_options=None):\n \"\"\"Set the options used to influence Poplar graph and execution reports\n generation.\n\n\n .. 
code-block:: python\n\n opts = create_ipu_config()\n opts = set_report_options(opts,\n report_options={\"reportOption1\": \"false\"},\n graph_options={\"graphOptions\": \"false\"},\n execution_options={\"executionOptions\": \"false\"})\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n report_options: (Deprecated) A dictionary of poplar option flags for\n the report generation.\n graph_options: A dictionary of poplar option flags for the graph report\n generation.\n execution_options: A dictionary of poplar option flags for the execution\n report generation.\n\n Returns:\n The IpuOptions configuration protobuf, with report options set.\n \"\"\"\n def use_report_options():\n if report_options:\n if not isinstance(report_options, dict):\n raise Exception(\"`report_options` must be a dictionary\")\n return report_options\n\n if not graph_options:\n graph_options = use_report_options()\n\n if graph_options:\n if not isinstance(graph_options, dict):\n raise Exception(\"`graph_options` must be a dictionary\")\n\n for (option_name, value) in graph_options.items():\n opt = opts.profiling.graph_options.add()\n opt.option = option_name\n opt.value = value\n\n if not execution_options:\n execution_options = use_report_options()\n\n if execution_options:\n if not isinstance(execution_options, dict):\n raise Exception(\"`execution_options` must be a dictionary\")\n\n for (option_name, value) in execution_options.items():\n opt = opts.profiling.execution_options.add()\n opt.option = option_name\n opt.value = value\n\n return opts\n\n\ndef set_ipu_model_options(opts, compile_ipu_code=True):\n \"\"\"Set the IPU Model options.\n\n Args:\n compile_ipu_code: Whether or not to actually compile real IPU code for\n modelling.\n\n Returns:\n The IpuOptions configuration protobuf, with IPU model options set.\n \"\"\"\n opts.ipu_model_config.compile_ipu_code = compile_ipu_code\n\n return opts\n\n\[email protected]_args(\n None,\n \"Pipelining recomputation will recompute all the non-stateful operations \"\n \"when recomputation is enabled.\",\n \"allow_stateful_recompute\",\n)\ndef set_recomputation_options(opts,\n allow_recompute=True,\n allow_stateful_recompute=None): # pylint: disable=unused-argument\n \"\"\"Set re-computation options.\n\n Args:\n allow_recompute: Whether or not to re-compute instructions during training.\n If this is enabled then we will attempt to pattern match\n instructions/pipeline stages in the forward pass and recompute them in the\n backward pass to avoid having to preserve activations which increase the\n maximum memory liveness. Enabling this option can reduce memory usage at\n the expense of extra computation.
Any stateful operations cannot be\n recomputed.\n allow_stateful_recompute: Deprecated.\n\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n\n opts.speed_size_config.allow_recompute = allow_recompute\n\n return opts\n\n\ndef set_floating_point_behaviour_options(opts,\n inv=True,\n div0=True,\n oflo=True,\n esr=True,\n nanoo=True):\n \"\"\"Set the IPU floating point control behaviour bits.\n\n See the Poplar API documentation for poplar::FloatingPointBehaviour.\n\n Args:\n inv: If true a floating point invalid operation (defined by IEEE 754)\n will cause an exception.\n div0: If true a floating point divide by zero operation will cause an\n exception.\n oflo: If true a floating point overflow will cause an exception.\n esr: Enable stochastic rounding.\n nanoo: Enable Not-a-Number on overflow mode.\n \"\"\"\n opts.floating_point_behaviour.flags_set = True\n opts.floating_point_behaviour.inv = inv\n opts.floating_point_behaviour.div0 = div0\n opts.floating_point_behaviour.oflo = oflo\n opts.floating_point_behaviour.esr = esr\n opts.floating_point_behaviour.nanoo = nanoo\n\n return opts\n\n\ndef set_gcl_options(opts, num_io_tiles=0, gcl_options=None):\n \"\"\"Set the IPU options for the Graphcore Communication Library.\n\n Args:\n num_io_tiles: Number of tiles to reserve per IPU for the GCL collective\n operations.\n gcl_options: A dictionary with options for configuring the GCL collective\n operations.\n\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n opts.gcl_num_io_tiles = num_io_tiles\n\n if gcl_options:\n if not isinstance(gcl_options, dict):\n raise TypeError(\"`gcl_options` must be a dictionary\")\n\n for (option_name, value) in gcl_options.items():\n opt = opts.gcl_options.add()\n opt.option = option_name\n opt.value = value\n\n return opts\n\n\ndef auto_select_ipus(opts, num_ipus):\n \"\"\"Configure the IPUs to be used by the session.\n\n The configuration describes a system consisting of multiple Tensorflow\n devices, each with control of one or more IPUs. The devices will be labeled\n ``/device:IPU:0``, ``/device:IPU:1`` and so on.\n\n Each device can control a specific number of IPUs, given by the ``num_ipus``\n parameter. The system will automatically select IPU configurations from the\n available IPUs, where they match the desired number of IPUs.\n\n Examples:\n\n\n .. code-block:: python\n\n # Create a single device, with one IPU\n opts = create_ipu_config()\n opts = auto_select_ipus(opts, num_ipus=1)\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n .. code-block:: python\n\n # Create two devices, with 2 IPUs per device.\n opts = create_ipu_config()\n opts = auto_select_ipus(opts, num_ipus=[2,2])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n ..
code-block:: python\n\n # Create two devices, with 1 IPU in the first device and 2 IPUs\n # in the second device.\n opts = create_ipu_config()\n opts = auto_select_ipus(opts, num_ipus=[1,2])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n num_ipus: List of IPUs per Tensorflow device\n\n Returns:\n The IpuOptions configuration protobuf, configured for auto-selecting a set\n of IPU devices.\n \"\"\"\n if opts.device_config:\n raise Exception(\"IPU devices have already been configured.\")\n\n if not isinstance(num_ipus, (int, list, tuple)):\n raise Exception(\"`num_ipus` must be an integer, list or tuple.\")\n\n if isinstance(num_ipus, int):\n dev = opts.device_config.add()\n dev.auto_count = num_ipus\n else:\n for n in num_ipus:\n dev = opts.device_config.add()\n dev.auto_count = n\n\n return opts\n\n\ndef select_ipus(opts, indices):\n \"\"\"Configure the IPUs to be used by the session.\n\n The configuration describes a system consisting of multiple Tensorflow\n devices, each with control of one or more IPUs. The Tensorflow devices will be\n labeled ``/device:IPU:0``, ``/device:IPU:1`` and so on.\n\n Each Tensorflow device uses a specific configuration consisting of one or more\n IPUs from the list of devices. These can be found by running the Graphcore\n utility ``gc-info -l``. For instance, the following listing shows the device\n configurations available on a system with 16 IPUs.\n\n .. code-block:: shell\n\n user@host:~$ gc-info -l\n Graphcore device listing:\n\n -+- Id: [0], type: [PCIe], PCI Domain: [0000:1a:00.0]\n -+- Id: [1], type: [PCIe], PCI Domain: [0000:1b:00.0]\n -+- Id: [2], type: [PCIe], PCI Domain: [0000:23:00.0]\n -+- Id: [3], type: [PCIe], PCI Domain: [0000:24:00.0]\n -+- Id: [4], type: [PCIe], PCI Domain: [0000:3d:00.0]\n -+- Id: [5], type: [PCIe], PCI Domain: [0000:3e:00.0]\n -+- Id: [6], type: [PCIe], PCI Domain: [0000:43:00.0]\n -+- Id: [7], type: [PCIe], PCI Domain: [0000:44:00.0]\n -+- Id: [8], type: [PCIe], PCI Domain: [0000:8b:00.0]\n -+- Id: [9], type: [PCIe], PCI Domain: [0000:8c:00.0]\n -+- Id: [10], type: [PCIe], PCI Domain: [0000:8e:00.0]\n -+- Id: [11], type: [PCIe], PCI Domain: [0000:8f:00.0]\n -+- Id: [12], type: [PCIe], PCI Domain: [0000:b8:00.0]\n -+- Id: [13], type: [PCIe], PCI Domain: [0000:b9:00.0]\n -+- Id: [14], type: [PCIe], PCI Domain: [0000:ba:00.0]\n -+- Id: [15], type: [PCIe], PCI Domain: [0000:bb:00.0]\n -+- Id: [16], type: [Multi IPU]\n |--- PCIe Id: [5], DNC Id: [0], PCI Domain: [0000:3e:00.0]\n |--- PCIe Id: [7], DNC Id: [1], PCI Domain: [0000:44:00.0]\n -+- Id: [17], type: [Multi IPU]\n |--- PCIe Id: [4], DNC Id: [0], PCI Domain: [0000:3d:00.0]\n |--- PCIe Id: [6], DNC Id: [1], PCI Domain: [0000:43:00.0]\n -+- Id: [18], type: [Multi IPU]\n |--- PCIe Id: [3], DNC Id: [0], PCI Domain: [0000:24:00.0]\n |--- PCIe Id: [1], DNC Id: [1], PCI Domain: [0000:1b:00.0]\n -+- Id: [19], type: [Multi IPU]\n |--- PCIe Id: [2], DNC Id: [0], PCI Domain: [0000:23:00.0]\n |--- PCIe Id: [0], DNC Id: [1], PCI Domain: [0000:1a:00.0]\n -+- Id: [20], type: [Multi IPU]\n |--- PCIe Id: [13], DNC Id: [0], PCI Domain: [0000:b9:00.0]\n |--- PCIe Id: [15], DNC Id: [1], PCI Domain: [0000:bb:00.0]\n -+- Id: [21], type: [Multi IPU]\n |--- PCIe Id: [12], DNC Id: [0], PCI Domain: [0000:b8:00.0]\n |--- PCIe Id: [14], DNC Id: [1], PCI Domain: [0000:ba:00.0]\n -+- Id: [22], type: [Multi IPU]\n |--- PCIe Id: [9], DNC Id: [0], PCI Domain: [0000:8c:00.0]\n |--- PCIe Id: [11], DNC Id: [1], PCI Domain:
[0000:8f:00.0]\n -+- Id: [23], type: [Multi IPU]\n |--- PCIe Id: [10], DNC Id: [0], PCI Domain: [0000:8e:00.0]\n |--- PCIe Id: [8], DNC Id: [1], PCI Domain: [0000:8b:00.0]\n -+- Id: [24], type: [Multi IPU]\n |--- PCIe Id: [5], DNC Id: [0], PCI Domain: [0000:3e:00.0]\n |--- PCIe Id: [7], DNC Id: [1], PCI Domain: [0000:44:00.0]\n |--- PCIe Id: [4], DNC Id: [2], PCI Domain: [0000:3d:00.0]\n |--- PCIe Id: [6], DNC Id: [3], PCI Domain: [0000:43:00.0]\n -+- Id: [25], type: [Multi IPU]\n |--- PCIe Id: [3], DNC Id: [0], PCI Domain: [0000:24:00.0]\n |--- PCIe Id: [1], DNC Id: [1], PCI Domain: [0000:1b:00.0]\n |--- PCIe Id: [2], DNC Id: [2], PCI Domain: [0000:23:00.0]\n |--- PCIe Id: [0], DNC Id: [3], PCI Domain: [0000:1a:00.0]\n -+- Id: [26], type: [Multi IPU]\n |--- PCIe Id: [13], DNC Id: [0], PCI Domain: [0000:b9:00.0]\n |--- PCIe Id: [15], DNC Id: [1], PCI Domain: [0000:bb:00.0]\n |--- PCIe Id: [12], DNC Id: [2], PCI Domain: [0000:b8:00.0]\n |--- PCIe Id: [14], DNC Id: [3], PCI Domain: [0000:ba:00.0]\n -+- Id: [27], type: [Multi IPU]\n |--- PCIe Id: [9], DNC Id: [0], PCI Domain: [0000:8c:00.0]\n |--- PCIe Id: [11], DNC Id: [1], PCI Domain: [0000:8f:00.0]\n |--- PCIe Id: [10], DNC Id: [2], PCI Domain: [0000:8e:00.0]\n |--- PCIe Id: [8], DNC Id: [3], PCI Domain: [0000:8b:00.0]\n -+- Id: [28], type: [Multi IPU]\n |--- PCIe Id: [5], DNC Id: [0], PCI Domain: [0000:3e:00.0]\n |--- PCIe Id: [7], DNC Id: [1], PCI Domain: [0000:44:00.0]\n |--- PCIe Id: [4], DNC Id: [2], PCI Domain: [0000:3d:00.0]\n |--- PCIe Id: [6], DNC Id: [3], PCI Domain: [0000:43:00.0]\n |--- PCIe Id: [3], DNC Id: [4], PCI Domain: [0000:24:00.0]\n |--- PCIe Id: [1], DNC Id: [5], PCI Domain: [0000:1b:00.0]\n |--- PCIe Id: [2], DNC Id: [6], PCI Domain: [0000:23:00.0]\n |--- PCIe Id: [0], DNC Id: [7], PCI Domain: [0000:1a:00.0]\n -+- Id: [29], type: [Multi IPU]\n |--- PCIe Id: [13], DNC Id: [0], PCI Domain: [0000:b9:00.0]\n |--- PCIe Id: [15], DNC Id: [1], PCI Domain: [0000:bb:00.0]\n |--- PCIe Id: [12], DNC Id: [2], PCI Domain: [0000:b8:00.0]\n |--- PCIe Id: [14], DNC Id: [3], PCI Domain: [0000:ba:00.0]\n |--- PCIe Id: [9], DNC Id: [4], PCI Domain: [0000:8c:00.0]\n |--- PCIe Id: [11], DNC Id: [5], PCI Domain: [0000:8f:00.0]\n |--- PCIe Id: [10], DNC Id: [6], PCI Domain: [0000:8e:00.0]\n |--- PCIe Id: [8], DNC Id: [7], PCI Domain: [0000:8b:00.0]\n -+- Id: [30], type: [Multi IPU]\n |--- PCIe Id: [5], DNC Id: [0], PCI Domain: [0000:3e:00.0]\n |--- PCIe Id: [7], DNC Id: [1], PCI Domain: [0000:44:00.0]\n |--- PCIe Id: [4], DNC Id: [2], PCI Domain: [0000:3d:00.0]\n |--- PCIe Id: [6], DNC Id: [3], PCI Domain: [0000:43:00.0]\n |--- PCIe Id: [3], DNC Id: [4], PCI Domain: [0000:24:00.0]\n |--- PCIe Id: [1], DNC Id: [5], PCI Domain: [0000:1b:00.0]\n |--- PCIe Id: [2], DNC Id: [6], PCI Domain: [0000:23:00.0]\n |--- PCIe Id: [0], DNC Id: [7], PCI Domain: [0000:1a:00.0]\n |--- PCIe Id: [13], DNC Id: [8], PCI Domain: [0000:b9:00.0]\n |--- PCIe Id: [15], DNC Id: [9], PCI Domain: [0000:bb:00.0]\n |--- PCIe Id: [12], DNC Id: [10], PCI Domain: [0000:b8:00.0]\n |--- PCIe Id: [14], DNC Id: [11], PCI Domain: [0000:ba:00.0]\n |--- PCIe Id: [9], DNC Id: [12], PCI Domain: [0000:8c:00.0]\n |--- PCIe Id: [11], DNC Id: [13], PCI Domain: [0000:8f:00.0]\n |--- PCIe Id: [10], DNC Id: [14], PCI Domain: [0000:8e:00.0]\n |--- PCIe Id: [8], DNC Id: [15], PCI Domain: [0000:8b:00.0]\n\n Examples based on the listing above:\n\n .. 
code-block:: python\n\n # Create a single device with 1 IPU at PCI address 0000:1a:00.0 by using\n # IPU configuration index 0\n opts = create_ipu_config()\n opts = select_ipus(opts, indices=[0])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n .. code-block:: python\n\n # Create a single device with 1 IPU at PCI address 0000:8b:00.0 by using\n # IPU configuration index 8\n opts = create_ipu_config()\n opts = select_ipus(opts, indices=[8])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n .. code-block:: python\n\n # Create two TensorFlow devices, with one IPU each, being devices at\n # indices 0 and 1\n opts = create_ipu_config()\n opts = select_ipus(opts, indices=[0, 1])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n .. code-block:: python\n\n # Create two TensorFlow devices, with four IPUs each. The device\n # configurations at indices 24 (0000:3e:00.0, 0000:44:00.0, 0000:3d:00.0,\n # 0000:43:00.0) and 25 (0000:24:00.0, 0000:1b:00.0, 0000:23:00.0,\n # 0000:1a:00.0)\n opts = create_ipu_config()\n opts = select_ipus(opts, indices=[24, 25])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n .. code-block:: python\n\n # Create four TensorFlow devices each with one IPU, at addresses\n # 0000:1a:00.0, 0000:1b:00.0, 0000:23:00.0, 0000:24:00.0.\n opts = create_ipu_config()\n opts = select_ipus(opts, indices=[0, 1, 2, 3])\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n indices: List of IPU configuration indices.\n\n Returns:\n The IpuOptions configuration protobuf, with a number of devices selected by\n IPU configuration index.\n \"\"\"\n\n if opts.device_config:\n raise Exception(\"IPU devices have already been configured.\")\n\n if not isinstance(indices, (list, tuple)):\n raise Exception(\"`indices` must be a list or tuple.\")\n\n if len(set(indices)) != len(indices):\n raise Exception(\"All device indices in `indices` must be unique.\")\n\n for i in indices:\n dev = opts.device_config.add()\n dev.cfg_index = i\n\n return opts\n\n\ndef set_ipu_connection_type(opts, connection_type=None, ipu_version=None):\n \"\"\" Configure when to attach to the device.\n\n .. code-block:: python\n\n # Compile without attaching to the device.\n opts = create_ipu_config()\n opts = set_ipu_connection_type(opts,\n DeviceConnectionType.ON_DEMAND)\n ipu.utils.configure_ipu_system(opts)\n with tf.Session() as s:\n ...\n\n Args:\n opts: An IpuOptions session control protobuf.\n connection_type: One of `DeviceConnectionType`.\n Defaults to `DeviceConnectionType.ALWAYS` if None.\n\n ipu_version: Version of the IPU hardware used.
Required if the\n `connection_type` provided is `DeviceConnectionType.NEVER`.\n\n Returns:\n The IpuOptions configuration protobuf.\n \"\"\"\n connection_type = connection_type if connection_type \\\n else DeviceConnectionType.ALWAYS\n\n if connection_type == DeviceConnectionType.NEVER and ipu_version is None:\n raise Exception(\"`ipu_version` must be set when `connection_type` is set \"\n \"to `DeviceConnectionType.NEVER`\")\n opts.device_connection_type = connection_type.value\n\n if ipu_version is not None:\n opts.ipu_version = ipu_version\n opts.has_ipu_version = True\n\n return opts\n\n\ndef reset_ipu_seed(seed, device=\"/device:IPU:0\", cpu_device=\"cpu\"):\n \"\"\"Reset the seed used to generate stateful random numbers and perform\n stochastic rounding.\n\n Args:\n seed: The new random number generator seed.\n device: The device to which the seed will be applied.\n cpu_device: The CPU device which is on the same hardware as the IPU device.\n\n Returns:\n None\n \"\"\"\n g = ops.Graph()\n with g.as_default():\n with ops.device(cpu_device):\n cfg_op = gen_ipu_ops.ipu_reset_seed(device, seed)\n\n with session_lib.Session(graph=g) as sess:\n sess.run(cfg_op)\n\n\ndef extract_all_strings_from_event_trace(events):\n \"\"\"Extract a concatenation of all data strings from an IPU event trace.\n\n Args:\n events: An array of IPU events as returned from the ``ipu_compile_summary``\n operation.\n\n Returns:\n A string containing the concatenation of all of the data fields of the\n events.\n\n \"\"\"\n result = \"\"\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n\n result = result + (\"-\" * 70) + \"\\n=> @ \" + \\\n time.strftime('%F %T %z', time.localtime(evt.timestamp)) + \": \"\n\n if evt.type == IpuTraceEvent.COMPILE_BEGIN:\n evt_str = \"Compile begin: \" + \\\n evt.compile_begin.module_name.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.COMPILE_END:\n evt_str = \"Compile end: \" + \\\n evt.compile_end.module_name.decode('utf-8') + \"\\n\" + \\\n \"Duration: \" + str(evt.compile_end.duration) + \" us\\n\" + \\\n evt.compile_end.compilation_report.decode('utf-8')\n elif evt.type == IpuTraceEvent.HOST_TO_DEVICE_TRANSFER:\n evt_str = \"Host->Device\\n\" + \\\n evt.data_transfer.data_transfer.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.DEVICE_TO_HOST_TRANSFER:\n evt_str = \"Device->Host\\n\" + \\\n evt.data_transfer.data_transfer.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.LOAD_ENGINE:\n evt_str = \"Load engine: \" + \\\n evt.load_engine.module_name.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.EXECUTE:\n evt_str = \"Execute: \" + \\\n evt.execute.module_name.decode('utf-8') + \"\\n\" + \\\n evt.execute.execution_report.decode('utf-8')\n else:\n evt_str = \"Unknown event\"\n\n result = result + evt_str + '\\n'\n\n return result\n\n\ndef extract_all_types_from_event_trace(events):\n \"\"\"Return a list of the types of each event in an event trace tensor\n\n Args:\n events: A tensor containing a list of IPU events as protobuf strings\n\n Returns:\n A list containing the type of each event\n \"\"\"\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n result += [evt.type]\n return result\n\n\ndef extract_all_events(events):\n \"\"\"Extract a list containing each event as an event object\n\n Args:\n events: A tensor containing a list of IPU events as protobuf strings\n\n Returns:\n A list containing IpuTraceEvent objects\n \"\"\"\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n result += [evt]\n 
return result\n\n\ndef extract_compile_reports(events):\n \"\"\"Get a list of all compiler reports in the event list.\n\n Args:\n events: A list of trace event serialized protobufs.\n\n Returns:\n A list of tuples containing the module name and report.\"\"\"\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n if evt.type == IpuTraceEvent.COMPILE_END:\n try:\n module = evt.compile_end.module_name.decode('utf-8')\n rep = evt.compile_end.compilation_report.decode('utf-8')\n if rep:\n result += [(module, rep)]\n except UnicodeDecodeError:\n pass\n return result\n\n\ndef extract_poplar_serialized_graphs(events):\n \"\"\"Get a list of all poplar serialized graphs in the event list.\n\n Args:\n events: A list of trace event serialized protobufs.\n\n Returns:\n A list of tuples containing the module name and report.\"\"\"\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n if evt.type == IpuTraceEvent.COMPILE_END:\n try:\n rep = evt.compile_end.poplar_graph.decode('utf-8')\n except UnicodeDecodeError:\n rep = evt.compile_end.poplar_graph\n\n module = evt.compile_end.module_name.decode('utf-8')\n if rep:\n result += [(module, rep)]\n return result\n\n\ndef extract_execute_reports(events):\n \"\"\"Get a list of all execution reports in the event list.\n\n Args:\n events: A list of trace event serialized protobufs.\n\n Returns:\n A list of tuples containing the module name and report.\"\"\"\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n if evt.type == IpuTraceEvent.EXECUTE:\n try:\n module = evt.execute.module_name.decode('utf-8')\n rep = evt.execute.execution_report.decode('utf-8')\n if rep:\n result += [(module, rep)]\n except UnicodeDecodeError:\n pass\n return result\n\n\ndef move_variable_initialization_to_cpu(graph=None):\n \"\"\"For all variables in the VARIABLES collection, move any initialization\n ops onto the CPU.\n\n Args:\n graph: Operations are moved around on this graph.
The default graph will be\n used if not specified.\n\n Returns:\n None\n \"\"\"\n if not graph:\n graph = ops.get_default_graph()\n\n with ops.device(\"/device:CPU:0\"):\n control_flow_ops.no_op(name=\"cpu\")\n variables = []\n for v in graph.get_collection('variables'):\n # We assume a distribution strategy knows better how to\n # initialize its own variables, so skip those.\n if not isinstance(v, values.DistributedVariable):\n variables.append(v)\n\n def _uses_resource(op):\n \"\"\" Helper to determine if an op uses a resource \"\"\"\n return any(input_tensor.dtype == 'resource' for input_tensor in op.inputs)\n\n init_ops = []\n dep_ops = [v.initializer.inputs[1].op for v in variables]\n visited = set()\n\n # Depth-first search up the graph starting from all variables in VARIABLES\n # Place all touched ops on the CPU, but do not touch or search ops that use\n # resource tensors, otherwise device colocation could be violated.\n while dep_ops:\n op = dep_ops.pop()\n if op not in visited and not _uses_resource(op):\n visited.add(op)\n init_ops += [op]\n dep_ops += [x.op for x in op.inputs]\n\n # pylint: disable=protected-access\n for op in init_ops:\n op._set_device('/device:CPU:0')\n op._set_attr(\n '_class',\n attr_value_pb2.AttrValue(list=attr_value_pb2.AttrValue.ListValue(\n s=[b'loc:@cpu'])))\n op._set_attr('_XlaCompile', attr_value_pb2.AttrValue(b=False))\n op._set_attr('_XlaScope', attr_value_pb2.AttrValue(s=b''))\n # pylint: enable=protected-access\n\n return\n\n\ndef export_dataset_to_file(dataset_or_infeed,\n output_filename,\n num_elements,\n feed_name=\"\",\n apply_options=True):\n \"\"\"Export as binary `num_elements` from the given `infeed` to the specified\n `output_filename`.\n\n If the infeed elements are tuples then one file per tuple element will be\n created.\n For example, if `dataset` looks like\n\n .. code-block:: python\n\n [{ \"a\": A_0, \"b\": B_0}, { \"a\": A_1, \"b\": B_1}, ...]\n\n then `export_dataset_to_file(dataset, \"my_dataset.bin\", 100)` will generate:\n\n .. 
code-block:: python\n\n my_dataset.0.bin # Contains tensors [ A_0, A_1, ..., A_99]\n my_dataset.1.bin # Contains tensors [ B_0, B_1, ..., B_99]\n\n Args:\n dataset_or_infeed: A unary dataset with the same input and output\n structure or an `IPUInfeedQueue`.\n output_filename: Where to export the tensors to.\n num_elements: Number of elements to export from the dataset.\n feed_name: Specify the feed name.\n apply_options: Whether to apply optimization options which can improve the\n dataset performance.\n \"\"\"\n assert isinstance(dataset_or_infeed,\n (dataset_ops.Dataset, ipu_infeed_queue.IPUInfeedQueue))\n if isinstance(dataset_or_infeed, ipu_infeed_queue.IPUInfeedQueue):\n dataset = dataset_or_infeed._dataset # pylint: disable=protected-access\n feed_name = feed_name or dataset_or_infeed._id # pylint: disable=protected-access\n else:\n dataset = dataset_or_infeed\n if apply_options:\n dataset = dataset._apply_options() # pylint: disable=protected-access\n\n extractor = dataset_extractor.dataset_extractor(dataset, num_elements,\n output_filename, feed_name)\n with ops.device(\"cpu\"), session_lib.Session() as sess:\n sess.run(extractor)\n\n\ndef export_inputs_to_file(inputs, output_filename, feed_dict):\n \"\"\"Export as binary the list of `inputs` provided to the specified\n `output_filename`.\n\n Args:\n inputs: List of graph inputs to export.\n output_filename: Where to export the tensors to.\n feed_dict: Feed dictionary containing the inputs' values.\n \"\"\"\n\n with ops.device(\"cpu\"), session_lib.Session() as sess:\n sess.run(dataset_extractor.export_variables(inputs, output_filename),\n feed_dict)\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport test_utils as tu\n\nfrom tensorflow.compiler.tests import xla_test\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\n\n\nclass BiasApplyGraphCachingTest(xla_test.XLATestCase):\n def testMatch(self):\n with self.session() as sess:\n with ops.device(\"/device:IPU:0\"):\n biases1 = array_ops.placeholder(np.float32, shape=[2])\n biases2 = array_ops.placeholder(np.float32, shape=[2])\n biases3 = array_ops.placeholder(np.float32, shape=[2])\n grads1 = array_ops.placeholder(np.float32, shape=[2, 10])\n grads2 = array_ops.placeholder(np.float32, shape=[2, 10])\n grads3 = array_ops.placeholder(np.float32, shape=[2, 10])\n vlr = array_ops.placeholder(np.float32, shape=[])\n\n def bias_apply(bias, grad, lr):\n return bias - math_ops.reduce_sum(grad, axis=1) * lr\n\n out = (bias_apply(biases1, grads1, vlr) +\n bias_apply(biases2, grads2, 0.1) +\n bias_apply(biases3, grads3, 0.2))\n\n report = tu.ReportJSON(self, sess)\n\n sess.run(variables.global_variables_initializer())\n\n report.reset()\n\n r = sess.run(\n out, {\n biases1: np.ones([2]),\n biases2: np.ones([2]),\n biases3: np.ones([2]),\n grads1: np.ones([2, 10]),\n grads2: np.ones([2, 10]),\n grads3: np.ones([2, 10]),\n vlr: 0.1\n })\n self.assertAllClose(r, [-1., -1.])\n report.parse_log()\n report.assert_compute_sets_matches(\"*ReduceOnTile*\", 1)\n\n def testMatchBecauseEvenWhenNotInplace(self):\n with self.session() as sess:\n with ops.device(\"/device:IPU:0\"):\n biases1 = array_ops.placeholder(np.float32, shape=[2])\n grads1 = array_ops.placeholder(np.float32, shape=[2, 10])\n grads2 = array_ops.placeholder(np.float32, shape=[2, 10])\n\n def bias_apply(bias, grad):\n return bias - math_ops.reduce_sum(grad, axis=1) * 0.1\n\n out = bias_apply(biases1, grads1) + bias_apply(biases1, grads2)\n\n report = tu.ReportJSON(self, sess)\n\n sess.run(variables.global_variables_initializer())\n\n report.reset()\n\n r = sess.run(\n out, {\n biases1: np.ones([2]),\n grads1: np.ones([2, 10]),\n grads2: np.ones([2, 10])\n })\n self.assertAllClose(r, [0., 0.])\n report.parse_log()\n report.assert_compute_sets_matches(\n \"*ReduceOnTile*\", 1,\n \"We should still reuse the code even though only one reduce is inplace\"\n )\n\n\nif __name__ == \"__main__\":\n googletest.main()\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute.cluster_resolver import cluster_resolver as cluster_resolver_lib\nfrom tensorflow.python.ipu import ipu_multi_worker_strategy\nfrom tensorflow.python.ipu.horovod import Sum, Average, size, allreduce, broadcast\nfrom tensorflow.python.training import server_lib\n\n\ndef _to_horovod_op(reduce_op):\n if reduce_op == reduce_util.ReduceOp.SUM:\n return Sum\n if reduce_op == reduce_util.ReduceOp.MEAN:\n return Average\n\n raise ValueError(\"Unsupported reduce op: {}\".format(reduce_op))\n\n\nclass IPUHorovodStrategy(distribute_lib.StrategyV1):\n \"\"\"This is a distribution strategy using Horovod.\n\n Usage is very similar to the `IPUMultiWorkerStrategy`, with the\n following differences:\n\n * There is no `cluster_resolver` argument, as Horovod's built-in\n cluster discovery is used. Hence the `TF_CONFIG` environment\n variable containing the cluster configuration is not needed.\n * As Horovod sets up the necessary communication channels,\n starting a `tf.distribute.Server` is not needed either.\n * Launching the cluster should be done with the `mpirun` tool.\n\n **Example using a custom training loop with pipelining**\n\n .. code-block:: python\n\n strategy = IPUHorovodStrategy()\n\n with strategy.scope():\n\n infeed_queue = ipu_infeed_queue.IPUInfeedQueue(dataset, \"infeed\")\n outfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue(\"outfeed\")\n\n def stage1(lr, images, labels):\n partial = keras.layers.Dense(256, activation=\"relu\")(images)\n partial = keras.layers.Dense(128, activation=\"relu\")(partial)\n return lr, partial, labels\n\n def stage2(lr, partial, labels):\n logits = keras.layers.Dense(10)(partial)\n per_example_loss = keras.losses.sparse_categorical_crossentropy(\n y_true=labels, y_pred=logits, from_logits=True)\n # In a custom training loop, the optimiser does an allreduce *sum*, not\n # average, of the gradients across the distributed workers. 
Therefore\n # we want to divide the loss here by the *global* batch size, which is\n # done by the `tf.nn.compute_average_loss()` function.\n loss = nn.compute_average_loss(per_example_loss)\n return lr, loss\n\n def optimizer_function(lr, loss):\n optimizer = GradientDescentOptimizer(lr)\n return pipelining_ops.OptimizerFunctionOutput(optimizer, loss)\n\n def model(lr):\n pipeline_op = pipelining_ops.pipeline(\n computational_stages=[stage1, stage2],\n pipeline_depth=pipeline_depth,\n inputs=[lr],\n infeed_queue=infeed_queue,\n outfeed_queue=outfeed_queue,\n optimizer_function=optimizer_function,\n name=\"Pipeline\")\n return pipeline_op\n\n def compiled_model(lr):\n with ipu_scope(\"/device:IPU:0\"):\n return ipu_compiler.compile(model, inputs=[lr])\n\n with ops.device(\"cpu\"):\n lr = array_ops.placeholder(np.float32, [])\n\n train_op = strategy.experimental_run_v2(compiled_model, args=[lr])\n\n _, per_worker_losses = outfeed_queue.dequeue()\n\n # Mean across the local `pipeline_depth` batches:\n per_worker_loss = math_ops.reduce_mean(per_worker_losses)\n\n # Global mean across the distributed workers (since it is already\n # divided by the global batch size above, we do a sum here):\n global_loss = strategy.reduce(ReduceOp.SUM, per_worker_loss)\n\n config = ipu_utils.create_ipu_config()\n config = ipu_utils.auto_select_ipus(config, num_ipus=2)\n ipu_utils.configure_ipu_system(config)\n ipu_utils.move_variable_initialization_to_cpu()\n\n with session.Session() as sess:\n sess.run(infeed_queue.initializer)\n sess.run(variables.global_variables_initializer())\n\n for _ in range(10):\n sess.run(train_op, {lr: 0.01})\n global_loss_val = sess.run(global_loss)\n \"\"\"\n def __init__(self, ipu_device=\"/device:IPU:0\", variables_on_host=False):\n # We create an empty cluster here since we will not be using gRPC for communication.\n # All the communication is delegated to Horovod (MPI) below.\n cluster_resolver = cluster_resolver_lib.SimpleClusterResolver(\n server_lib.ClusterSpec({}))\n\n super().__init__(\n IPUHorovodExtended(self, cluster_resolver, ipu_device,\n variables_on_host))\n\n\nclass IPUHorovodExtended(ipu_multi_worker_strategy.IPUMultiWorkerExtended):\n def __init__(self, container_strategy, cluster_resolver, ipu_device,\n variables_on_host):\n super().__init__(container_strategy, cluster_resolver, ipu_device,\n variables_on_host)\n self._num_workers = size()\n\n def _reduce_implementation(self, reduce_op, value, destinations):\n del destinations\n return allreduce(value, op=_to_horovod_op(reduce_op))\n\n def _batch_reduce_implementation(self, reduce_op, value_destination_pairs):\n op = _to_horovod_op(reduce_op)\n return [allreduce(v, op=op) for (v, _) in value_destination_pairs]\n\n def _broadcast_implementation(self, initial_value, device):\n del device\n return broadcast(initial_value, root_rank=0)\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nfrom test_utils import ReportJSON\n\nfrom tensorflow.compiler.tests import xla_test\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import math_ops\n\n\nclass IpuFuseOpsTest(xla_test.XLATestCase):\n def testReductionSumVectorF16NoConverts(self):\n with self.session() as sess:\n with ops.device(\"/device:IPU:0\"):\n pa = array_ops.placeholder(np.float16, [4096], name=\"a\")\n output = math_ops.reduce_sum(pa, axis=[0])\n\n report = ReportJSON(self, sess)\n report.reset()\n\n fd = {pa: np.ones([4096])}\n result = sess.run(output, fd)\n self.assertAllClose(result, 4096)\n\n report.parse_log()\n\n # Check that there are no casts to float at the beginning.\n ok = [\n '__seed*', 'host-exchange-local-copy-',\n 'Sum/reduce*/ReduceOnTile/InToIntermediateNoExchange/Reduce',\n 'Sum/reduce*/ReduceFinalStage/IntermediateToOutput/Reduce'\n ]\n\n report.assert_all_compute_sets_and_list(ok)\n\n def testNoCastsF32ToF16ToF32(self):\n with self.session() as sess:\n with ops.device(\"/device:IPU:0\"):\n pa = array_ops.placeholder(np.float32, [3])\n b = math_ops.cast(pa, np.float16)\n c = math_ops.cast(b, np.float32)\n\n report = ReportJSON(self, sess)\n report.reset()\n\n fd = {pa: [2.0, 0.5, 1.0]}\n result = sess.run(c, fd)\n self.assertAllClose(result, [2.0, 0.5, 1.0])\n\n report.parse_log(assert_len=0)\n report.assert_no_compute_set()\n\n def testNoCastsF16ReduceWithReshape(self):\n with self.session() as sess:\n with ops.device(\"/device:IPU:0\"):\n pa = array_ops.placeholder(np.float16, [3, 4])\n a = gen_array_ops.reshape(pa, [4, 3])\n a = math_ops.reduce_sum(a, axis=(1))\n\n report = ReportJSON(self, sess)\n report.reset()\n\n fd = {pa: np.ones([3, 4])}\n result = sess.run(a, fd)\n self.assertAllClose(result, [3.0, 3.0, 3.0, 3.0])\n\n report.parse_log()\n\n ok = [\n '__seed*',\n 'Sum/reduce*/Reduce',\n ]\n report.assert_all_compute_sets_and_list(ok)\n\n def testMultipleReduces(self):\n with self.session() as sess:\n with ops.device(\"/device:IPU:0\"):\n pa = array_ops.placeholder(np.float16, [3])\n pb = array_ops.placeholder(np.float16, [3])\n a = math_ops.cast(pa, np.float32)\n a = math_ops.reduce_sum(a)\n a = math_ops.cast(a, np.float16)\n b = math_ops.cast(pb, np.float32)\n b = math_ops.reduce_sum(b)\n b = math_ops.cast(b, np.float16)\n c = a + b\n\n report = ReportJSON(self, sess)\n report.reset()\n\n fd = {pa: [2.0, 0.5, 1.0], pb: [1.0, 1.0, 2.0]}\n result = sess.run(c, fd)\n self.assertAllClose(result, 7.5)\n\n report.parse_log()\n\n ok = [\n '__seed*', 'host-exchange-local-copy-', 
'Sum/reduce*/Reduce',\n 'Sum_1/reduce*/Reduce', 'add/add*/Add'\n ]\n report.assert_all_compute_sets_and_list(ok)\n\n def testNoCastsF16ToF32ToF16(self):\n with self.session() as sess:\n with ops.device(\"/device:IPU:0\"):\n pa = array_ops.placeholder(np.float16, [3])\n b = math_ops.cast(pa, np.float32)\n c = math_ops.cast(b, np.float16)\n\n report = ReportJSON(self, sess)\n report.reset()\n\n fd = {pa: [2.0, 0.5, 1.0]}\n result = sess.run(c, fd)\n self.assertAllClose(result, [2.0, 0.5, 1.0])\n\n report.parse_log(assert_len=0)\n report.assert_no_compute_set()\n\n def testDontRemoveCastsIfUsed(self):\n with self.session() as sess:\n with ops.device(\"/device:IPU:0\"):\n pa = array_ops.placeholder(np.float16, [3])\n b = math_ops.cast(pa, np.float32)\n const = array_ops.constant(1.0, np.float32)\n b = b + const\n c = math_ops.cast(b, np.float16)\n\n report = ReportJSON(self, sess)\n report.reset()\n\n fd = {pa: [2.0, 0.5, 1.0]}\n result = sess.run(c, fd)\n self.assertAllClose(result, [3.0, 1.5, 2.0])\n\n report.parse_log(assert_len=4)\n\n ok = [\n '__seed*', 'host-exchange-local-copy-', 'Cast/convert.*/Cast',\n 'add/fusion*/Add', 'Cast_1/convert.*/Cast'\n ]\n report.assert_all_compute_sets_and_list(ok)\n\n def testReduceMean(self):\n with self.session() as sess:\n shape = [2, 10000]\n with ops.device(\"/device:IPU:0\"):\n pa = array_ops.placeholder(np.float16, shape)\n output = math_ops.reduce_mean(pa, axis=[1])\n\n report = ReportJSON(self, sess)\n report.reset()\n\n val = np.finfo(np.float16).max / 2\n result = sess.run(output, {pa: np.full(shape, val)})\n self.assertAllClose(result, [val, val])\n\n report.parse_log(assert_len=4)\n\n ok = [\n '__seed*', 'host-exchange-local-copy-', 'Mean/fusion/Reduce',\n 'Mean/fusion*/Op/Multiply', 'Mean/convert*/Cast'\n ]\n report.assert_all_compute_sets_and_list(ok)\n\n def testReduceMax(self):\n with self.session() as sess:\n shape = [2, 10000]\n with ops.device(\"/device:IPU:0\"):\n pa = array_ops.placeholder(np.float16, shape)\n a = math_ops.cast(pa, np.float32)\n output = math_ops.reduce_max(a, axis=[1])\n\n report = ReportJSON(self, sess)\n report.reset()\n\n val = np.finfo(np.float16).max / 2\n result = sess.run(output, {pa: np.full(shape, val)})\n self.assertAllClose(result, [val, val])\n\n report.parse_log(assert_len=4)\n\n ok = [\n '__seed*', 'host-exchange-local-copy-', 'Max/reduce*/Reduce',\n 'Cast/convert*/Cast'\n ]\n report.assert_all_compute_sets_and_list(ok)\n\n\nif __name__ == \"__main__\":\n os.environ['TF_XLA_FLAGS'] = ('--tf_xla_min_cluster_size=2 ' +\n os.environ.get('TF_XLA_FLAGS', ''))\n googletest.main()\n"
] | [
[
"tensorflow.python.ipu.dataset_extractor.dataset_extractor",
"tensorflow.compat.v1.executing_eagerly",
"tensorflow.compiler.plugin.poplar.driver.config_pb2.IpuSelectionOrder.Value",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.compiler.plugin.poplar.driver.config_pb2.IpuExecutionProfileType.Value",
"tensorflow.compiler.plugin.poplar.ops.gen_ipu_ops.ipu_reset_seed",
"tensorflow.compiler.plugin.poplar.driver.config_pb2.IpuOptions",
"tensorflow.python.util.deprecation.deprecated_args",
"tensorflow.python.client.session.Session",
"tensorflow.core.framework.attr_value_pb2.AttrValue",
"tensorflow.python.ipu.dataset_extractor.export_variables",
"tensorflow.python.framework.ops.Graph",
"tensorflow.compiler.plugin.poplar.ops.gen_ipu_ops.ipu_get_num_devices",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.compiler.plugin.poplar.driver.config_pb2.IpuDeviceConnectionType.Value",
"tensorflow.compiler.plugin.poplar.ops.gen_ipu_ops.ipu_get_configuration",
"tensorflow.core.framework.attr_value_pb2.AttrValue.ListValue",
"tensorflow.compiler.plugin.poplar.driver.trace_pb2.IpuTraceEvent.FromString"
],
[
"numpy.ones",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.math_ops.reduce_sum"
],
[
"tensorflow.python.ipu.horovod.allreduce",
"tensorflow.python.ipu.horovod.broadcast",
"tensorflow.python.training.server_lib.ClusterSpec",
"tensorflow.python.ipu.horovod.size"
],
[
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.ops.math_ops.reduce_max",
"numpy.ones",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.math_ops.reduce_mean",
"numpy.finfo",
"numpy.full",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.gen_array_ops.reshape",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.math_ops.cast"
]
] |
Lakonik/EPro-PnP | [
"931df847190ce10eddd1dc3e3168ce1a2f295ffa",
"931df847190ce10eddd1dc3e3168ce1a2f295ffa",
"931df847190ce10eddd1dc3e3168ce1a2f295ffa",
"931df847190ce10eddd1dc3e3168ce1a2f295ffa"
] | [
"EPro-PnP-Det/epropnp_det/core/bbox_3d/misc.py",
"EPro-PnP-Det/epropnp_det/ops/pnp/cost_fun.py",
"EPro-PnP-6DoF/lib/models/resnet_rot_head.py",
"EPro-PnP-Det/epropnp_det/ops/deformable_attention_sampler.py"
] | [
"\"\"\"\nCopyright (C) 2010-2022 Alibaba Group Holding Limited.\nThis file is modified from\nhttps://github.com/tjiiv-cprg/MonoRUn\n\"\"\"\n\nimport math\nimport numpy as np\nimport torch\nfrom pytorch3d.structures.meshes import Meshes\n\nfrom epropnp_det.ops.iou3d.iou3d_utils import nms_gpu\n\n\ndef gen_unit_noc(num_pts, device=None):\n indices = torch.arange(0, num_pts, dtype=torch.float32, device=device) + 0.5\n phi = torch.arccos(1 - 2 * indices / num_pts)\n theta = math.pi * (1 + 5**0.5) * indices\n xyz = torch.stack(\n (torch.cos(theta) * torch.sin(phi),\n torch.sin(theta) * torch.sin(phi),\n torch.cos(phi)), dim=-1)\n return xyz\n\n\ndef project_to_image_r_mat(\n x3d, r_mat, t_vec, cam_intrinsic, img_shapes, z_min=0.5, allowed_border=200,\n return_z=False, return_clip_mask=False):\n \"\"\"\n Args:\n x3d (torch.Tensor): shape (*, num_points, 3)\n r_mat (torch.Tensor): shape (*, 3, 3)\n t_vec (torch.Tensor): shape (*, 3) in format [x, y, z]\n cam_intrinsic (torch.Tensor): shape (*, 3, 3)\n img_shapes (torch.Tensor): shape (*, 2)\n\n Returns:\n Tensor: x2d_proj, shape (*, num_points, 2)\n \"\"\"\n proj_r_mats = cam_intrinsic @ r_mat # (*, 3, 3)\n proj_t_vecs = cam_intrinsic @ t_vec.unsqueeze(-1) # (*, 3, 1)\n # (*, num_points, 3) = ((*, 3, 3) @ (*, 3, num_points) + (*, 3, 1)).T\n xyz_proj = (proj_r_mats @ x3d.transpose(-1, -2) + proj_t_vecs).transpose(-1, -2)\n z_proj = xyz_proj[..., 2:] # (*, num_points, 1)\n if return_clip_mask:\n z_clip_mask = z_proj < z_min\n z_proj = z_proj.clamp(min=z_min)\n x2d_proj = xyz_proj[..., :2] / z_proj # (*, num_points, 2)\n # clip to border\n x2d_min = -allowed_border - 0.5 # Number\n x2d_max = img_shapes[..., None, [1, 0]] + (allowed_border - 0.5) # (*, 1, 2)\n if return_clip_mask:\n x2d_clip_mask = (x2d_proj < x2d_min) | (x2d_proj > x2d_max)\n clip_mask = z_clip_mask.squeeze(-1) | x2d_clip_mask.any(-1) # (*, num_points)\n x2d_proj = torch.min(x2d_proj.clamp(min=x2d_min), x2d_max)\n if not return_z:\n if not return_clip_mask:\n return x2d_proj\n else:\n return x2d_proj, clip_mask\n else:\n if not return_clip_mask:\n return x2d_proj, z_proj\n else:\n return x2d_proj, z_proj, clip_mask\n\n\ndef project_to_image(\n x3d, pose, cam_intrinsic, img_shapes, z_min=0.5, allowed_border=200,\n return_z=False, return_clip_mask=False):\n \"\"\"\n Args:\n x3d (torch.Tensor): shape (*, num_points, 3)\n pose (torch.Tensor): shape (*, 4) in format [x, y, z, yaw]\n cam_intrinsic (torch.Tensor): shape (*, 3, 3)\n img_shapes (torch.Tensor): shape (*, 2)\n\n Returns:\n Tensor: x2d_proj, shape (*, num_points, 2)\n \"\"\"\n r_mat = yaw_to_rot_mat(pose[..., 3])\n t_vec = pose[..., :3]\n return project_to_image_r_mat(x3d, r_mat, t_vec, cam_intrinsic, img_shapes, z_min,\n allowed_border, return_z, return_clip_mask)\n\n\ndef yaw_to_rot_mat(yaw):\n \"\"\"\n Args:\n yaw: (*)\n\n Returns:\n rot_mats: (*, 3, 3)\n \"\"\"\n if isinstance(yaw, torch.Tensor):\n pkg = torch\n device_kwarg = dict(device=yaw.device)\n else:\n pkg = np\n device_kwarg = dict()\n sin_yaw = pkg.sin(yaw)\n cos_yaw = pkg.cos(yaw)\n # [[ cos_yaw, 0, sin_yaw],\n # [ 0, 1, 0],\n # [-sin_yaw, 0, cos_yaw]]\n rot_mats = pkg.zeros(yaw.shape + (3, 3), dtype=pkg.float32, **device_kwarg)\n rot_mats[..., 0, 0] = cos_yaw\n rot_mats[..., 2, 2] = cos_yaw\n rot_mats[..., 0, 2] = sin_yaw\n rot_mats[..., 2, 0] = -sin_yaw\n rot_mats[..., 1, 1] = 1\n return rot_mats\n\n\ndef rot_mat_to_yaw(rot_mat):\n \"\"\"\n Args:\n rot_mat: (*, 3, 3)\n\n Returns:\n yaw: (*)\n \"\"\"\n if isinstance(rot_mat, torch.Tensor):\n atan2 = 
torch.atan2\n else:\n atan2 = np.arctan2\n yaw = atan2(rot_mat[..., 0, 2] - rot_mat[..., 2, 0], rot_mat[..., 0, 0] + rot_mat[..., 2, 2])\n return yaw\n\n\ndef box_mesh():\n return Meshes(\n verts=[torch.tensor([[-1, -1, 1],\n [ 1, -1, 1],\n [-1, 1, 1],\n [ 1, 1, 1],\n [-1, -1, -1],\n [ 1, -1, -1],\n [-1, 1, -1],\n [ 1, 1, -1]], dtype=torch.float32)],\n faces=[torch.tensor([[0, 1, 2],\n [1, 3, 2],\n [2, 3, 7],\n [2, 7, 6],\n [1, 7, 3],\n [1, 5, 7],\n [6, 7, 4],\n [7, 5, 4],\n [0, 4, 1],\n [1, 4, 5],\n [2, 6, 4],\n [0, 2, 4]], dtype=torch.int)])\n\n\ndef compute_box_3d(bbox_3d):\n \"\"\"\n Args:\n bbox_3d: (*, 7)\n\n Returns:\n corners: (*, 8, 3)\n edge_corner_idx: (12, 2)\n \"\"\"\n bs = bbox_3d.shape[:-1]\n rotation_matrix = yaw_to_rot_mat(bbox_3d[..., 6]) # (*bs, 3, 3)\n edge_corner_idx = np.array([[0, 1],\n [1, 2],\n [2, 3],\n [3, 0],\n [4, 5],\n [5, 6],\n [6, 7],\n [7, 4],\n [0, 4],\n [1, 5],\n [2, 6],\n [3, 7]])\n corners = np.array([[ 0.5, 0.5, 0.5],\n [ 0.5, 0.5, -0.5],\n [-0.5, 0.5, -0.5],\n [-0.5, 0.5, 0.5],\n [ 0.5, -0.5, 0.5],\n [ 0.5, -0.5, -0.5],\n [-0.5, -0.5, -0.5],\n [-0.5, -0.5, 0.5]], dtype=np.float32)\n if isinstance(bbox_3d, torch.Tensor):\n edge_corner_idx = torch.from_numpy(edge_corner_idx).to(device=bbox_3d.device)\n corners = torch.from_numpy(corners).to(device=bbox_3d.device)\n corners = corners * bbox_3d[..., None, :3] # (*bs, 8, 3)\n corners = (rotation_matrix[..., None, :, :] @ corners[..., None]).reshape(*bs, 8, 3) \\\n + bbox_3d[..., None, 3:6]\n return corners, edge_corner_idx\n\n\ndef edge_intersection(corners, edge_corner_idx, clip_axis, clip_val, op, edge_valid_mask=None):\n \"\"\"\n Args:\n corners: (bs, 8, 3/2)\n edge_corner_idx: (12, 2)\n clip_val: (bs, )\n edge_valid_mask: (bs, 12)\n \"\"\"\n if op == 'greater':\n op = torch.greater\n elif op == 'less':\n op = torch.less\n if edge_valid_mask is None:\n edge_valid_mask = corners.new_ones(\n (corners.size(0), edge_corner_idx.size(0)), dtype=torch.bool)\n corners_inside = op(corners[..., clip_axis], clip_val[:, None]) # (bs, 8)\n # compute z intersection\n edges_0_inside = corners_inside[:, edge_corner_idx[:, 0]] # (bs, 12)\n edges_1_inside = corners_inside[:, edge_corner_idx[:, 1]] # (bs, 12)\n edges_clipped = (edges_0_inside ^ edges_1_inside) & edge_valid_mask # (bs, 12)\n edges_clipped_idx = edges_clipped.nonzero() # (num_nonzero, 2) in [bs_ind, edge_ind]\n if edges_clipped_idx.shape[0] > 0:\n edge_corner_idx_to_clip = edge_corner_idx[edges_clipped_idx[:, 1], :] # (num_nonzero, 2)\n edges_0 = corners[edges_clipped_idx[:, 0], edge_corner_idx_to_clip[:, 0], :] # (num_nonzero, 3)\n edges_1 = corners[edges_clipped_idx[:, 0], edge_corner_idx_to_clip[:, 1], :] # (num_nonzero, 3)\n axval0 = edges_0[:, clip_axis] # (num_nonzero, )\n axval1 = edges_1[:, clip_axis]\n clip_val_ = clip_val[edges_clipped_idx[:, 0]]\n weight_0 = axval1 - clip_val_ # (num_nonzero, )\n weight_1 = clip_val_ - axval0\n intersection = (edges_0 * weight_0[:, None] + edges_1 * weight_1[:, None]\n ) * (1 / (axval1 - axval0)).clamp(min=-1e6, max=1e6)[:, None] # (num_nonzero, 3)\n clip_idx = torch.where(op(axval0, clip_val_),\n edge_corner_idx_to_clip[:, 1],\n edge_corner_idx_to_clip[:, 0]) # (num_nonzero, )\n corners[edges_clipped_idx[:, 0], clip_idx, :] = intersection # replace clipped corners with intersection\n corners_inside[edges_clipped_idx[:, 0], clip_idx] = True\n edge_valid_mask &= corners_inside[:, edge_corner_idx[:, 0]] & corners_inside[:, edge_corner_idx[:, 1]]\n else:\n edge_valid_mask &= edges_0_inside & edges_1_inside\n return 
corners, corners_inside, edge_valid_mask\n\n\ndef bboxes_3d_to_2d(bbox_3d, cam_intrinsic, imsize, z_clip=0.1, min_size=4.0, clip=False):\n    \"\"\"\n    Args:\n        bbox_3d: (bs, 7)\n        cam_intrinsic: (bs, 3, 3)\n        imsize: (bs, 2) in [h, w]\n    \"\"\"\n    assert bbox_3d.dim() == 2\n    bs = bbox_3d.size(0)\n    if bs > 0:\n        # (bs, 8, 3), (12, 2)\n        corners, edge_corner_idx = compute_box_3d(bbox_3d)\n        corners, in_front, edge_valid_mask = edge_intersection(\n            corners, edge_corner_idx, 2, corners.new_tensor([z_clip]).expand(bs), 'greater')\n        pts_2d = corners @ cam_intrinsic.transpose(-1, -2)\n        pts_2d = pts_2d[..., :2] / pts_2d[..., 2:].clamp(min=z_clip) + 0.5 # (bs, 8, 2)\n        in_canvas = in_front\n        if clip:\n            pts_2d, in_canvas_x0, edge_valid_mask = edge_intersection(\n                pts_2d, edge_corner_idx, 0, corners.new_tensor([0]).expand(bs), 'greater', edge_valid_mask)\n            pts_2d, in_canvas_y0, edge_valid_mask = edge_intersection(\n                pts_2d, edge_corner_idx, 1, corners.new_tensor([0]).expand(bs), 'greater', edge_valid_mask)\n            pts_2d, in_canvas_x1, edge_valid_mask = edge_intersection(\n                pts_2d, edge_corner_idx, 0, imsize[:, 1], 'less', edge_valid_mask)\n            pts_2d, in_canvas_y1, edge_valid_mask = edge_intersection(\n                pts_2d, edge_corner_idx, 1, imsize[:, 0], 'less', edge_valid_mask)\n            in_canvas = in_canvas & in_canvas_x0 & in_canvas_x1 & in_canvas_y0 & in_canvas_y1 # (bs, 8)\n        not_in_canvas = ~in_canvas\n        pts_2d[not_in_canvas] = imsize[:, None, [1, 0]].expand(-1, 8, -1)[not_in_canvas]\n        x0y0 = pts_2d.min(dim=1)[0].clamp(min=0) # (bs, 2)\n        pts_2d[not_in_canvas] = 0\n        x1y1 = torch.minimum(pts_2d.max(dim=1)[0], imsize[:, [1, 0]])\n        bbox = torch.cat((x0y0, x1y1), dim=1) # (bs, 4)\n        bbox_valid_mask = (x1y1 - x0y0).min(dim=1)[0] >= min_size # (bs, )\n    else:\n        bbox = bbox_3d.new_empty((0, 4))\n        bbox_valid_mask = bbox_3d.new_empty((0, ), dtype=torch.bool)\n    return bbox, bbox_valid_mask\n\n\ndef xywhr2xyxyr(boxes_xywhr):\n    \"\"\"Convert rotated boxes in XYWHR format to XYXYR format.\n\n    Args:\n        boxes_xywhr (torch.Tensor): Rotated boxes in XYWHR format.\n\n    Returns:\n        torch.Tensor: Converted boxes in XYXYR format.\n    \"\"\"\n    boxes = torch.zeros_like(boxes_xywhr)\n    half_w = boxes_xywhr[:, 2] / 2 # l in bbox_3d\n    half_h = boxes_xywhr[:, 3] / 2 # w in bbox_3d\n    # x in cam coord\n    boxes[:, 0] = boxes_xywhr[:, 0] - half_w\n    # z in cam coord, mirrored_direction\n    boxes[:, 1] = boxes_xywhr[:, 1] - half_h\n    boxes[:, 2] = boxes_xywhr[:, 0] + half_w\n    boxes[:, 3] = boxes_xywhr[:, 1] + half_h\n    boxes[:, 4] = boxes_xywhr[:, 4]\n    return boxes\n\n\ndef batched_bev_nms(bbox_3d, batch_inds, nms_thr=0.25):\n    \"\"\"\n    Args:\n        bbox_3d (Tensor): tensor shape (N, 8+),\n            in format [l, h, w, x, y, z, ry, score, ind, *]\n        batch_inds (Tensor): tensor shape (N, )\n        nms_thr (float)\n\n    Returns:\n        Tuple:\n            bbox_3d_out (Tensor)\n            keep_inds (Tensor)\n    \"\"\"\n    n = bbox_3d.size(0)\n    if n > 1:\n        boxes_for_nms = xywhr2xyxyr(\n            bbox_3d[:, [3, 5, 0, 2, 6]])\n        offset_unit = (boxes_for_nms[:, :4].max() - boxes_for_nms[:, :4].min()) * 2\n        boxes_for_nms[:, :4] = boxes_for_nms[:, :4] + (offset_unit * batch_inds)[:, None]\n        keep_inds = nms_gpu(\n            boxes_for_nms, bbox_3d[:, 7], nms_thr)\n    else:\n        keep_inds = bbox_3d.new_zeros(0, dtype=torch.int64)\n    bbox_3d_out = bbox_3d[keep_inds]\n    return bbox_3d_out, keep_inds\n",
"\"\"\"\nCopyright (C) 2010-2022 Alibaba Group Holding Limited.\n\"\"\"\n\nimport torch\n\nfrom .builder import COSTFUN\n\n\ndef huber_kernel(s_sqrt, delta):\n half_rho = torch.where(s_sqrt <= delta,\n 0.5 * torch.square(s_sqrt),\n delta * s_sqrt - 0.5 * torch.square(delta))\n return half_rho\n\n\ndef huber_d_kernel(s_sqrt, delta, eps: float = 1e-10):\n if s_sqrt.requires_grad or delta.requires_grad:\n rho_d_sqrt = (delta.clamp(min=eps).sqrt() * s_sqrt.clamp(min=eps).rsqrt()).clamp(max=1.0)\n else:\n rho_d_sqrt = (delta / s_sqrt.clamp_(min=eps)).clamp_(max=1.0).sqrt_()\n return rho_d_sqrt\n\n\[email protected]_module()\nclass HuberPnPCost(object):\n\n def __init__(self, delta=1.0, eps=1e-10):\n super(HuberPnPCost, self).__init__()\n self.eps = eps\n self.delta = delta\n\n def set_param(self, *args, **kwargs):\n pass\n\n def compute(self, x2d_proj, x2d, w2d, jac_cam=None,\n out_residual=False, out_cost=False, out_jacobian=False):\n \"\"\"\n Args:\n x2d_proj: Shape (*, n, 2)\n x2d: Shape (*, n, 2)\n w2d: Shape (*, n, 2)\n jac_cam: Shape (*, n, 2, 4 or 6), Jacobian of x2d_proj w.r.t. pose\n out_residual (Tensor | bool): Shape (*, n*2) or equivalent shape\n out_cost (Tensor | bool): Shape (*, )\n out_jacobian (Tensor | bool): Shape (*, n*2, 4 or 6) or equivalent shape\n \"\"\"\n bs = x2d_proj.shape[:-2]\n pn = x2d_proj.size(-2)\n delta = self.delta\n if not isinstance(delta, torch.Tensor):\n delta = x2d.new_tensor(delta)\n delta = delta[..., None]\n\n residual = (x2d_proj - x2d) * w2d\n s_sqrt = residual.norm(dim=-1)\n\n if out_cost is not False:\n half_rho = huber_kernel(s_sqrt, delta)\n if not isinstance(out_cost, torch.Tensor):\n out_cost = None\n cost = torch.sum(half_rho, dim=-1, out=out_cost)\n else:\n cost = None\n\n # robust rescaling\n if out_residual is not False or out_jacobian is not False:\n rho_d_sqrt = huber_d_kernel(s_sqrt, delta, eps=self.eps)\n if out_residual is not False:\n if isinstance(out_residual, torch.Tensor):\n out_residual = out_residual.view(*bs, pn, 2)\n else:\n out_residual = None\n residual = torch.mul(\n residual, rho_d_sqrt[..., None],\n out=out_residual).view(*bs, pn * 2)\n if out_jacobian is not False:\n assert jac_cam is not None\n dof = jac_cam.size(-1)\n if isinstance(out_jacobian, torch.Tensor):\n out_jacobian = out_jacobian.view(*bs, pn, 2, dof)\n else:\n out_jacobian = None\n # rescaled jacobian\n jacobian = torch.mul(\n jac_cam, (w2d * rho_d_sqrt[..., None])[..., None],\n out=out_jacobian).view(*bs, pn * 2, dof)\n if out_residual is False:\n residual = None\n if out_jacobian is False:\n jacobian = None\n return residual, cost, jacobian\n\n def reshape_(self, *batch_shape):\n if isinstance(self.delta, torch.Tensor):\n self.delta = self.delta.reshape(*batch_shape)\n return self\n\n def expand_(self, *batch_shape):\n if isinstance(self.delta, torch.Tensor):\n self.delta = self.delta.expand(*batch_shape)\n return self\n\n def repeat_(self, *batch_repeat):\n if isinstance(self.delta, torch.Tensor):\n self.delta = self.delta.repeat(*batch_repeat)\n return self\n\n def shallow_copy(self):\n return HuberPnPCost(\n delta=self.delta,\n eps=self.eps)\n\n\[email protected]_module()\nclass AdaptiveHuberPnPCost(HuberPnPCost):\n\n def __init__(self,\n delta=None,\n relative_delta=0.5,\n eps=1e-10):\n super(HuberPnPCost, self).__init__()\n self.delta = delta\n self.relative_delta = relative_delta\n self.eps = eps\n\n def set_param(self, x2d, w2d):\n # compute dynamic delta\n x2d_std = torch.var(x2d, dim=-2).sum(dim=-1).sqrt() # (num_obj, )\n self.delta = 
w2d.mean(dim=(-2, -1)) * x2d_std * self.relative_delta # (num_obj, )\n\n def shallow_copy(self):\n return AdaptiveHuberPnPCost(\n delta=self.delta,\n relative_delta=self.relative_delta,\n eps=self.eps)\n",
"\"\"\"\nCopyright (C) 2010-2021 Alibaba Group Holding Limited.\nThis file is modified from\nhttps://github.com/LZGMatrix/CDPN_ICCV2019_ZhigangLi\n\"\"\"\n\nimport torch.nn as nn\nimport torch\n\n\nclass RotHeadNet(nn.Module):\n def __init__(self, in_channels, num_layers=3, num_filters=256, kernel_size=3, output_kernel_size=1,\n output_dim=5, freeze=False):\n super(RotHeadNet, self).__init__()\n\n self.freeze = freeze\n\n assert kernel_size == 2 or kernel_size == 3 or kernel_size == 4, 'Only support kenerl 2, 3 and 4'\n padding = 1\n output_padding = 0\n if kernel_size == 3:\n output_padding = 1\n elif kernel_size == 2:\n padding = 0\n\n assert output_kernel_size == 1 or output_kernel_size == 3, 'Only support kenerl 1 and 3'\n if output_kernel_size == 1:\n pad = 0\n elif output_kernel_size == 3:\n pad = 1\n\n self.features = nn.ModuleList()\n for i in range(num_layers):\n _in_channels = in_channels if i == 0 else num_filters\n self.features.append(\n nn.ConvTranspose2d(_in_channels, num_filters, kernel_size=kernel_size, stride=2, padding=padding,\n output_padding=output_padding, bias=False))\n self.features.append(nn.BatchNorm2d(num_filters))\n self.features.append(nn.ReLU(inplace=True))\n\n self.features.append(\n nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1, bias=False))\n self.features.append(nn.BatchNorm2d(num_filters))\n self.features.append(nn.ReLU(inplace=True))\n\n self.features.append(\n nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1, bias=False))\n self.features.append(nn.BatchNorm2d(num_filters))\n self.features.append(nn.ReLU(inplace=True))\n\n self.out_layer = nn.Conv2d(num_filters, output_dim, kernel_size=output_kernel_size, padding=pad, bias=True)\n\n self.scale_branch = nn.Linear(256, 2)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight, mean=0, std=0.001)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.ConvTranspose2d):\n nn.init.normal_(m.weight, mean=0, std=0.001)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, mean=0, std=0.001)\n\n def forward(self, x):\n if self.freeze:\n with torch.no_grad():\n for i, l in enumerate(self.features):\n x = l(x)\n x3d, w2d = self.out_layer(x).split([3, 2], dim=1)\n scale = self.scale_branch(x.flatten(2).mean(dim=-1)).exp()\n else:\n for i, l in enumerate(self.features):\n x = l(x)\n x3d, w2d = self.out_layer(x).split([3, 2], dim=1)\n scale = self.scale_branch(x.flatten(2).mean(dim=-1)).exp()\n return x3d, w2d, scale\n\n",
"\"\"\"\nCopyright (C) 2010-2022 Alibaba Group Holding Limited.\n\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmcv.cnn import xavier_init, build_norm_layer\nfrom mmcv.cnn.bricks.transformer import build_feedforward_network\nfrom mmcv.cnn.bricks.registry import ATTENTION\n\n\[email protected]_module()\nclass DeformableAttentionSampler(nn.Module):\n\n def __init__(self,\n embed_dims=256,\n num_heads=8,\n num_points=32,\n stride=4,\n ffn_cfg=dict(\n type='FFN',\n embed_dims=256,\n feedforward_channels=1024,\n num_fcs=2,\n ffn_drop=0.1,\n act_cfg=dict(type='ReLU', inplace=True)),\n norm_cfg=dict(type='LN'),\n init_cfg=None):\n super(DeformableAttentionSampler, self).__init__()\n self.embed_dims = embed_dims\n self.num_heads = num_heads\n self.num_points = num_points\n self.stride = stride\n self.ffn_cfg = ffn_cfg\n self.norm_cfg = norm_cfg\n\n self.sampling_offsets = nn.Linear(self.embed_dims, self.num_heads * self.num_points * 2)\n self.out_proj = nn.Linear(self.embed_dims, self.embed_dims)\n self.layer_norms = nn.ModuleList(\n [build_norm_layer(norm_cfg, self.embed_dims)[1] for _ in range(2)])\n self.ffn = build_feedforward_network(self.ffn_cfg, dict(type='FFN'))\n\n self.init_weights()\n\n def init_weights(self):\n xavier_init(self.sampling_offsets, gain=2.5, distribution='uniform')\n for m in [self.layer_norms, self.ffn]:\n if hasattr(m, 'init_weights'):\n m.init_weights()\n self._is_init = True\n\n def forward(self, query, obj_emb, key, value, img_dense_x2d, img_dense_x2d_mask,\n obj_xy_point, strides, obj_img_ind):\n \"\"\"\n Args:\n query: shape (num_obj, num_head, 1, head_emb_dim)\n obj_emb: shape (num_obj, embed_dim)\n key: shape (num_img, embed_dim, h, w)\n value: shape (num_img, embed_dim, h, w)\n img_dense_x2d: shape (num_img, 2, h, w)\n img_dense_x2d_mask: shape (num_img, 1, h, w)\n obj_xy_point: shape (num_obj, 2)\n strides: shape (num_obj, )\n obj_img_ind: shape (num_obj, )\n\n Returns:\n tuple[tensor]:\n output (num_obj_sample, embed_dim)\n v_samples (num_obj_sample, num_head, head_emb_dim, num_point)\n a_samples (num_obj_sample, num_head, 1, num_point)\n mask_samples (num_obj_sample, num_head, 1, num_point)\n x2d_samples (num_obj_sample, num_head, 2, num_point)\n \"\"\"\n num_obj_samples = query.size(0)\n num_img, _, h_out, w_out = key.size()\n head_emb_dim = self.embed_dims // self.num_heads\n\n offsets = self.sampling_offsets(obj_emb).reshape(\n num_obj_samples, self.num_heads, self.num_points, 2)\n # (num_obj_sample, num_head, num_point, 2)\n sampling_location = obj_xy_point[:, None, None] + offsets * strides[:, None, None, None]\n hw_img = key.new_tensor(key.shape[-2:]) * self.stride\n sampling_grid = sampling_location * (2 / hw_img[[1, 0]]) - 1\n sampling_grid = sampling_grid.transpose(1, 0).reshape(\n self.num_heads, num_obj_samples, self.num_points, 1, 2)\n img_ind_grid = (obj_img_ind.to(torch.float32) + 0.5) * (2 / num_img) - 1.0\n sampling_grid = torch.cat(\n (sampling_grid,\n img_ind_grid[None, :, None, None, None].expand(self.num_heads, -1, self.num_points, 1, 1)),\n dim=-1) # (num_head, num_obj_sample, num_point, 1, 3) in [img_ind, x, y]\n # (num_head, head_emb_dim, num_obj_sample, num_point, 1) ->\n # (num_obj_sample, num_head, head_emb_dim, num_point)\n k_samples = F.grid_sample(\n key.reshape(\n num_img, self.num_heads, head_emb_dim, h_out, w_out\n ).permute(1, 2, 0, 3, 4), # (num_head, head_emb_dim, num_img, h_out, w_out)\n sampling_grid,\n mode='bilinear',\n padding_mode='border',\n 
align_corners=False,\n ).squeeze(-1).permute(2, 0, 1, 3)\n v_samples = F.grid_sample(\n value.reshape(\n num_img, self.num_heads, head_emb_dim, h_out, w_out\n ).permute(1, 2, 0, 3, 4), # (num_head, head_emb_dim, num_img, h_out, w_out)\n sampling_grid,\n mode='bilinear',\n padding_mode='border',\n align_corners=False,\n ).squeeze(-1).permute(2, 0, 1, 3)\n x2d_samples = F.grid_sample(\n # (num_head, 2, num_img, h_out, w_out)\n img_dense_x2d.transpose(1, 0)[None].expand(self.num_heads, -1, -1, -1, -1),\n sampling_grid,\n mode='bilinear',\n padding_mode='border',\n align_corners=False\n ).squeeze(-1).permute(2, 0, 1, 3)\n mask_samples = F.grid_sample(\n img_dense_x2d_mask.transpose(1, 0)[None].expand(self.num_heads, -1, -1, -1, -1),\n sampling_grid,\n mode='bilinear',\n padding_mode='zeros',\n align_corners=False\n ).squeeze(-1).permute(2, 0, 1, 3)\n # (num_obj_sample, num_head, 1, num_point) = (num_obj_sample, num_head, 1, head_emb_dim)\n # @ (num_obj_sample, num_head, head_emb_dim, num_point)\n a_samples = query @ k_samples / np.sqrt(head_emb_dim)\n a_samples_softmax = a_samples.softmax(dim=-1) * mask_samples\n # (num_obj_sample, num_head, head_emb_dim, 1)\n # = (num_obj_sample, num_head, head_emb_dim, num_point)\n # @ (num_obj_sample, num_head, num_point, 1)\n output = v_samples @ a_samples_softmax.reshape(num_obj_samples, self.num_heads, self.num_points, 1)\n output = output.reshape(num_obj_samples, self.embed_dims)\n output = self.out_proj(output) + obj_emb\n output = self.layer_norms[0](output)\n output = self.ffn(output, output)\n output = self.layer_norms[1](output)\n return output, v_samples, a_samples, mask_samples, x2d_samples\n"
] | [
[
"torch.cat",
"torch.arccos",
"torch.sin",
"torch.zeros_like",
"torch.from_numpy",
"torch.tensor",
"torch.arange",
"numpy.array",
"torch.cos"
],
[
"torch.var",
"torch.sum",
"torch.mul",
"torch.square"
],
[
"torch.nn.ConvTranspose2d",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.no_grad",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"torch.nn.Linear",
"numpy.sqrt"
]
] |
mengjian0502/GroupLasso_Quant | [
"1c54c940739babf86e362ffc57752c2aa4c8986d",
"1c54c940739babf86e362ffc57752c2aa4c8986d"
] | [
"models/resnet_cifar_quant.py",
"models/resnet_cifar_w2_quant.py"
] | [
"\"\"\"\nResNet on CIFAR10\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nfrom .quant import ClippedReLU, int_conv2d, int_linear\nfrom .mpdr_score import get_mpdr_score\nimport math\n\nclass DownsampleA(nn.Module):\n\n def __init__(self, nIn, nOut, stride):\n super(DownsampleA, self).__init__()\n assert stride == 2\n self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)\n\n def forward(self, x):\n x = self.avg(x)\n return torch.cat((x, x.mul(0)), 1)\n\n\nclass ResNetBasicblock(nn.Module):\n expansion = 1\n \"\"\"\n RexNet basicblock (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua)\n \"\"\"\n def __init__(self, inplanes, planes, stride=1, downsample=None, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False):\n super(ResNetBasicblock, self).__init__() \n # self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False) # quantization\n self.conv_a = int_conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=push) # quantization\n self.bn_a = nn.BatchNorm2d(planes)\n self.relu1 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) # Clipped ReLU function 4 - bits\n # self.relu1 = nn.ReLU(inplace=True)\n\n self.conv_b = int_conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=push) # quantization\n # self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) # quantization\n self.bn_b = nn.BatchNorm2d(planes)\n self.relu2 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) # Clipped ReLU function 4 - bits\n self.downsample = downsample\n\n def forward(self, x):\n residual = x\n\n basicblock = self.conv_a(x)\n basicblock = self.bn_a(basicblock)\n basicblock = self.relu1(basicblock)\n\n basicblock = self.conv_b(basicblock)\n basicblock = self.bn_b(basicblock)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n \n return self.relu2(residual + basicblock)\n\n\nclass CifarResNet(nn.Module):\n \"\"\"\n ResNet optimized for the Cifar dataset, as specified in\n https://arxiv.org/abs/1512.03385.pdf\n \"\"\"\n def __init__(self, depth, num_classes, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False):\n \"\"\" Constructor\n Args:\n depth: number of layers.\n num_classes: number of classes\n base_width: base width\n \"\"\"\n super(CifarResNet, self).__init__()\n\n block = ResNetBasicblock\n \n\n #Model type specifies number of layers for CIFAR-10 and CIFAR-100 model\n assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'\n layer_blocks = (depth - 2) // 6\n print ('CifarResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks))\n self.num_classes = num_classes\n self.ch_group = ch_group\n # self.conv_1_3x3 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.conv_1_3x3 = int_conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False) # skip the push process for the first conv layer\n self.relu0 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True)\n self.bn_1 = nn.BatchNorm2d(16)\n\n self.inplanes = 16\n self.stage_1 = self._make_layer(block, 16, layer_blocks, 1, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push)\n self.stage_2 = self._make_layer(block, 32, layer_blocks, 2, 
wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push)\n self.stage_3 = self._make_layer(block, 64, layer_blocks, 2, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push)\n self.avgpool = nn.AvgPool2d(8)\n self.classifier = int_linear(64*block.expansion, num_classes, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False) # skip the push process for the last fc layer\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n #m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n init.kaiming_normal_(m.weight)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, push=False):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n int_conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, push=push))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv_1_3x3(x)\n x = self.relu0(self.bn_1(x))\n x = self.stage_1(x)\n x = self.stage_2(x)\n x = self.stage_3(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n return self.classifier(x)\n\n def get_group_val(self):\n val = torch.Tensor()\n\n if torch.cuda.is_available():\n val = val.cuda()\n\n count = 0\n for m in self.modules():\n if isinstance(m, int_conv2d):\n kw = m.weight.size(2)\n if kw != 1:\n if not count in [0]:\n w_l = m.weight\n num_group = w_l.size(0) * w_l.size(1) // self.ch_group\n w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw)\n w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw))\n \n g = w_l.pow(2).sum(dim=1).pow(1/2)\n val = torch.cat((val.view(-1), g.view(-1)))\n count += 1\n return val\n\n def get_global_thre(self, ratio):\n grp_val = self.get_group_val()\n # grp_mean = grp_val.mean()\n\n # threshold = ratio * grp_mean\n sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1))\n thre_index = int(grp_val.data.numel() * ratio)\n threshold = sorted_block_values[thre_index]\n return threshold\n\n def get_group_mp(self):\n val = torch.Tensor()\n\n if torch.cuda.is_available():\n val = val.cuda()\n\n count = 0\n for m in self.modules():\n if isinstance(m, int_conv2d):\n kw = m.weight.size(2)\n if kw != 1:\n if not count in [0]:\n w_l = m.weight\n num_group = w_l.size(0) * w_l.size(1) // self.ch_group\n w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw)\n w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw))\n\n g = w_l.abs().mean(dim=1)\n val = torch.cat((val.view(-1), g.view(-1)))\n count += 1\n return val\n\n def get_global_mp_thre(self, ratio):\n grp_val = self.get_group_mp()\n sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1))\n thre_index = int(grp_val.data.numel() * ratio)\n 
threshold = sorted_block_values[thre_index]\n return threshold\n\n def get_group_mpdr(self):\n val = torch.Tensor()\n\n if torch.cuda.is_available():\n val = val.cuda()\n\n count = 0\n for m in self.modules():\n if isinstance(m, int_conv2d):\n kw = m.weight.size(2)\n if kw != 1:\n if not count in [0]:\n w_l = get_mpdr_score(m.weight)\n\n num_group = w_l.size(0) * w_l.size(1) // self.ch_group\n w_l = w_l.view(w_l.size(0), w_l.size(1) // self.ch_group, self.ch_group, kw, kw)\n w_l = w_l.contiguous().view((num_group, self.ch_group*kw*kw))\n\n g = w_l.mean(dim=1) # compute the mean of the mpdr score\n val = torch.cat((val.view(-1), g.view(-1)))\n count += 1\n return val\n\n def get_global_mpdr_thre(self, ratio):\n grp_val = self.get_group_mpdr()\n sorted_block_values, indices = torch.sort(grp_val.contiguous().view(-1))\n thre_index = int(grp_val.data.numel() * ratio)\n threshold = sorted_block_values[thre_index]\n return threshold\n\nclass resnet20_quant:\n base=CifarResNet\n args = list()\n kwargs = {'depth': 20}\n\nclass resnet32_quant:\n base=CifarResNet\n args = list()\n kwargs = {'depth': 32}\n",
"\"\"\"\nResNet on CIFAR10\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nfrom .quant import ClippedReLU, Conv2d_2bit, Conv2d_W2_IP\nimport math\n\nclass DownsampleA(nn.Module):\n\n def __init__(self, nIn, nOut, stride):\n super(DownsampleA, self).__init__()\n assert stride == 2\n self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)\n\n def forward(self, x):\n x = self.avg(x)\n return torch.cat((x, x.mul(0)), 1)\n\n\nclass ResNetBasicblock(nn.Module):\n expansion = 1\n \"\"\"\n RexNet basicblock (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua)\n \"\"\"\n def __init__(self, inplanes, planes, stride=1, downsample=None, wbit=2, abit=2, alpha_init=10, mode='mean', k=2, ch_group=16, gamma=0.3):\n super(ResNetBasicblock, self).__init__() \n # self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False) # quantization\n # self.conv_a = Conv2d_2bit(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, mode=mode, k=k) # 2bit quantization\n self.conv_a = Conv2d_W2_IP(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, nbit=wbit, mode=mode, k=k, skp_group=ch_group, gamma=gamma)\n self.bn_a = nn.BatchNorm2d(planes)\n self.relu1 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) # Clipped ReLU function 4 - bits\n # self.relu1 = nn.ReLU(inplace=True)\n\n # self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) # quantization\n # self.conv_b = Conv2d_2bit(planes, planes, kernel_size=3, stride=1, padding=1, bias=False, mode=mode, k=k)\n self.conv_b = Conv2d_W2_IP(planes, planes, kernel_size=3, stride=1, padding=1, bias=False, nbit=wbit, mode=mode, k=k, skp_group=ch_group, gamma=gamma)\n self.bn_b = nn.BatchNorm2d(planes)\n self.relu2 = ClippedReLU(num_bits=abit, alpha=alpha_init, inplace=True) # Clipped ReLU function 4 - bits\n self.downsample = downsample\n\n def forward(self, x):\n residual = x\n\n basicblock = self.conv_a(x)\n basicblock = self.bn_a(basicblock)\n basicblock = self.relu1(basicblock)\n\n basicblock = self.conv_b(basicblock)\n basicblock = self.bn_b(basicblock)\n basicblock = self.relu2(basicblock) # Pre-ACT\n\n if self.downsample is not None:\n residual = self.downsample(x)\n \n return residual + basicblock\n\n\nclass CifarResNet(nn.Module):\n \"\"\"\n ResNet optimized for the Cifar dataset, as specified in\n https://arxiv.org/abs/1512.03385.pdf\n \"\"\"\n def __init__(self, depth, num_classes, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, gamma=0.3):\n \"\"\" Constructor\n Args:\n depth: number of layers.\n num_classes: number of classes\n base_width: base width\n \"\"\"\n super(CifarResNet, self).__init__()\n\n block = ResNetBasicblock\n\n #Model type specifies number of layers for CIFAR-10 and CIFAR-100 model\n assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'\n layer_blocks = (depth - 2) // 6\n print ('CifarResNet : Depth : {} , Layers for each block : {}'.format(depth, layer_blocks))\n self.num_classes = num_classes\n self.conv_1_3x3 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n \n self.relu0 = nn.ReLU(inplace=True)\n self.bn_1 = nn.BatchNorm2d(16)\n\n self.inplanes = 16\n self.stage_1 = self._make_layer(block, 16, layer_blocks, 1, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, gamma=gamma)\n self.stage_2 = self._make_layer(block, 32, layer_blocks, 2, wbit=wbit, abit=abit, 
alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, gamma=gamma)\n self.stage_3 = self._make_layer(block, 64, layer_blocks, 2, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, gamma=gamma)\n self.avgpool = nn.AvgPool2d(8)\n self.classifier = nn.Linear(64*block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n #m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n init.kaiming_normal_(m.weight)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1, wbit=4, abit=4, alpha_init=10, mode='mean', k=2, ch_group=16, gamma=0.3):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n # int_conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False, nbit=wbit, mode=mode, k=k, ch_group=ch_group, push=False),\n nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), # full precision short connections\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, gamma=gamma))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, wbit=wbit, abit=abit, alpha_init=alpha_init, mode=mode, k=k, ch_group=ch_group, gamma=gamma))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv_1_3x3(x)\n x = self.relu0(self.bn_1(x))\n x = self.stage_1(x)\n x = self.stage_2(x)\n x = self.stage_3(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n return self.classifier(x)\n\n\nclass resnet20_W2_quant:\n base=CifarResNet\n args = list()\n kwargs = {'depth': 20}\n\nclass resnet32_W2_quant:\n base=CifarResNet\n args = list()\n kwargs = {'depth': 32}\n\n# def resnet20_quant(num_classes=10):\n# \"\"\"Constructs a ResNet-20 model for CIFAR-10 (by default)\n# Args:\n# num_classes (uint): number of classes\n# \"\"\"\n# model = CifarResNet(ResNetBasicblock, 20, num_classes)\n# return model\n\n\n# def resnet32_quant(num_classes=10):\n# \"\"\"Constructs a ResNet-32 model for CIFAR-10 (by default)\n# Args:\n# num_classes (uint): number of classes\n# \"\"\"\n# model = CifarResNet(ResNetBasicblock, 32, num_classes)\n# return model\n\n"
] | [
[
"torch.nn.Sequential",
"torch.Tensor",
"torch.nn.AvgPool2d",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_"
],
[
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] |
clabrugere/numpy-basics | [
"81efb4b8ac58fc17dc8f6c676004bbc3a99a92c3"
] | [
"models/utils.py"
] | [
"import numpy as np\n\n\ndef confusion_matrix(y_true, y_hat, threshold=.5):\n \n def _to_class(y):\n return np.array([1 if i >= threshold else 0 for i in y])\n \n n_classes = len(np.unique(y_true))\n cm = np.zeros((n_classes, n_classes))\n y_hat = _to_class(y_hat)\n \n for a, p in zip(y_true, y_hat):\n cm[a, p] += 1\n \n return cm\n\ndef f1_score(cm):\n precision = cm[0, 0] / cm[0, :].sum()\n recall = cm[0, 0] / cm[:, 0].sum()\n return 2 * (precision * recall) / (precision + recall)"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.unique"
]
] |
sergevkim/sonata | [
"2250b60174628ee76fb7d54bf50e4b8b07b505d5"
] | [
"sonata/datamodules/base_datamodule.py"
] | [
"from abc import ABC, abstractmethod\nfrom pathlib import Path\n\nimport torch\nfrom torch import Tensor\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass BaseDataModule(ABC):\n def __init__(\n self,\n data_path: Path,\n batch_size: int,\n num_workers: int,\n ):\n super().__init__()\n self.data_path = data_path\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n @staticmethod\n def prepare_data(\n data_path: Path,\n ):\n pass\n\n @abstractmethod\n def setup(\n self,\n val_ratio: float,\n ) -> None:\n pass\n\n def train_dataloader(self) -> DataLoader:\n train_dataloader = DataLoader(\n dataset=self.train_dataset,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n )\n\n return train_dataloader\n\n def val_dataloader(self) -> DataLoader:\n val_dataloader = DataLoader(\n dataset=self.val_dataset,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n )\n\n return val_dataloader\n\n def test_dataloader(self):\n pass\n\n"
] | [
[
"torch.utils.data.DataLoader"
]
] |
gpescia/MyNetKet | [
"958510966a5870d9d491de0628903cf1fc210921",
"958510966a5870d9d491de0628903cf1fc210921",
"958510966a5870d9d491de0628903cf1fc210921",
"958510966a5870d9d491de0628903cf1fc210921",
"958510966a5870d9d491de0628903cf1fc210921",
"958510966a5870d9d491de0628903cf1fc210921",
"958510966a5870d9d491de0628903cf1fc210921"
] | [
"netket/operator/boson.py",
"Examples/Ising2d/plot_ising.py",
"netket/operator/_der_local_values.py",
"netket/legacy/stats/mc_stats.py",
"netket/stats/mc_stats.py",
"netket/legacy/machine/density_matrix/rbm.py",
"Examples/Legacy/RealMachines/j1j2.py"
] | [
"# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom netket.utils.types import DType\n\nfrom netket.hilbert import AbstractHilbert\n\nfrom ._local_operator import LocalOperator as _LocalOperator\n\n\ndef destroy(\n hilbert: AbstractHilbert, site: int, dtype: DType = float\n) -> _LocalOperator:\n \"\"\"\n Builds the boson destruction operator :math:`\\\\hat{a}` acting on the `site`-th of the\n Hilbert space `hilbert`.\n\n If `hilbert` is a non-Bosonic space of local dimension M, it is considered\n as a bosonic space of local dimension M.\n\n Args:\n hilbert: The hilbert space\n site: the site on which this operator acts\n\n Returns:\n The resulting Local Operator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n\n D = np.array([np.sqrt(m) for m in np.arange(1, N)])\n mat = np.diag(D, 1)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef create(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n Builds the boson creation operator :math:`\\\\hat{a}^\\\\dagger` acting on the `site`-th of the\n Hilbert space `hilbert`.\n\n If `hilbert` is a non-Bosonic space of local dimension M, it is considered\n as a bosonic space of local dimension M.\n\n Args:\n hilbert: The hilbert space\n site: the site on which this operator acts\n\n Returns:\n The resulting Local Operator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n\n D = np.array([np.sqrt(m) for m in np.arange(1, N)])\n mat = np.diag(D, -1)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef number(hilbert: AbstractHilbert, site: int, dtype: DType = float) -> _LocalOperator:\n \"\"\"\n Builds the number operator :math:`\\\\hat{a}^\\\\dagger\\\\hat{a}` acting on the `site`-th of the\n Hilbert space `hilbert`.\n\n If `hilbert` is a non-Bosonic space of local dimension M, it is considered\n as a bosonic space of local dimension M.\n\n Args:\n hilbert: The hilbert space\n site: the site on which this operator acts\n\n Returns:\n The resulting Local Operator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n\n D = np.array([m for m in np.arange(0, N)])\n mat = np.diag(D, 0)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\ndef proj(\n hilbert: AbstractHilbert, site: int, n: int, dtype: DType = float\n) -> _LocalOperator:\n \"\"\"\n Builds the projector operator :math:`|n\\\\rangle\\\\langle n |` acting on the `site`-th of the\n Hilbert space `hilbert` and collapsing on the state with `n` bosons.\n\n If `hilbert` is a non-Bosonic space of local dimension M, it is considered\n as a bosonic space of local dimension M.\n\n Args:\n hilbert: The hilbert space\n site: the site on which this operator acts\n n: the state on which to project\n\n Returns:\n the resulting operator\n \"\"\"\n import numpy as np\n\n N = hilbert.size_at_index(site)\n\n if n >= N:\n raise ValueError(\"Cannot project on a state above the cutoff.\")\n\n D = np.array([0 for m in 
np.arange(0, N)])\n D[n] = 1\n mat = np.diag(D, 0)\n return _LocalOperator(hilbert, mat, [site], dtype=dtype)\n\n\n# clean up the module\ndel AbstractHilbert, DType\n",
"import numpy as np\nimport matplotlib.pyplot as plt\nimport json\n\nplt.ion()\n\n# 4x4 lattice\n# exact=-3.21550807082536*16\n\n# 5x5 lattice\nexact = -80.13310152422413\n\n\nwhile True:\n plt.clf()\n plt.ylabel(\"Energy\")\n plt.xlabel(\"Iteration #\")\n\n data = json.load(open(\"test.log\"))\n iters = data[\"Energy\"][\"iters\"]\n energy = data[\"Energy\"][\"Mean\"]\n sigma = data[\"Energy\"][\"Sigma\"]\n evar = data[\"Energy\"][\"Variance\"]\n\n nres = len(iters)\n cut = 60\n if nres > cut:\n\n fitx = iters[-cut:-1]\n fity = energy[-cut:-1]\n z = np.polyfit(fitx, fity, deg=0)\n p = np.poly1d(z)\n\n plt.xlim([nres - cut, nres])\n maxval = np.max(energy[-cut:-1])\n plt.ylim([exact - (np.abs(exact) * 0.01), maxval + np.abs(maxval) * 0.01])\n error = (z[0] - exact) / -exact\n plt.gca().text(\n 0.95,\n 0.8,\n \"Relative Error : \" + \"{:.2e}\".format(error),\n verticalalignment=\"bottom\",\n horizontalalignment=\"right\",\n color=\"green\",\n fontsize=15,\n transform=plt.gca().transAxes,\n )\n\n plt.plot(fitx, p(fitx))\n\n plt.errorbar(iters, energy, yerr=sigma, color=\"red\")\n plt.axhline(y=exact, xmin=0, xmax=iters[-1], linewidth=2, color=\"k\", label=\"Exact\")\n\n plt.legend(frameon=False)\n plt.pause(1)\n # plt.draw()\n\nplt.ioff()\nplt.show()\n",
"# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom numba import jit\n\nfrom netket.legacy.machine import Jax as _Jax\n\nfrom ._der_local_values_jax import der_local_values_jax\n\n\n@jit(nopython=True)\ndef _der_local_values_kernel(\n log_vals, log_val_p, mels, der_log, der_log_p, sections, out\n):\n low_range = 0\n for i, s in enumerate(sections):\n out[i, :] = (\n np.expand_dims(\n mels[low_range:s] * np.exp(log_val_p[low_range:s] - log_vals[i]), 1\n )\n * (der_log_p[low_range:s, :] - der_log[i, :])\n ).sum(axis=0)\n low_range = s\n\n\ndef _der_local_values_impl(op, machine, v, log_vals, der_log_vals, out, batch_size=64):\n sections = np.empty(v.shape[0], dtype=np.int32)\n v_primes, mels = op.get_conn_flattened(v, sections)\n\n log_val_primes = machine.log_val(v_primes)\n\n # Compute the der_log in small batches and not in one go.\n # For C++ machines there is a 100% slowdown when the batch is too big.\n n_primes = len(log_val_primes)\n der_log_primes = np.empty((n_primes, machine.n_par), dtype=np.complex128)\n\n for s in range(0, n_primes, batch_size):\n end = min(s + batch_size, n_primes)\n der_log_primes[s:end, :] = machine.der_log(v_primes[s:end])\n\n _der_local_values_kernel(\n log_vals, log_val_primes, mels, der_log_vals, der_log_primes, sections, out\n )\n\n\n@jit(nopython=True)\ndef _der_local_values_notcentered_kernel(\n log_vals, log_val_p, mels, der_log_p, sections, out\n):\n low_range = 0\n for i, s in enumerate(sections):\n out[i, :] = (\n np.expand_dims(\n mels[low_range:s] * np.exp(log_val_p[low_range:s] - log_vals[i]), 1\n )\n * der_log_p[low_range:s, :]\n ).sum(axis=0)\n low_range = s\n\n\ndef _der_local_values_notcentered_impl(op, machine, v, log_vals, out, batch_size=64):\n sections = np.empty(v.shape[0], dtype=np.int32)\n v_primes, mels = op.get_conn_flattened(v, sections)\n\n log_val_primes = machine.log_val(v_primes)\n\n # Compute the der_log in small batches and not in one go.\n # For C++ machines there is a 100% slowdown when the batch is too big.\n n_primes = len(log_val_primes)\n der_log_primes = np.empty((n_primes, machine.n_par), dtype=np.complex128)\n\n for s in range(0, n_primes, batch_size):\n end = min(s + batch_size, n_primes)\n der_log_primes[s:end, :] = machine.der_log(v_primes[s:end])\n\n _der_local_values_notcentered_kernel(\n log_vals, log_val_primes, mels, der_log_primes, sections, out\n )\n\n\ndef der_local_values(\n op,\n machine,\n v,\n log_vals=None,\n der_log_vals=None,\n out=None,\n center_derivative=True,\n batch_size=64,\n):\n r\"\"\"\n Computes the derivative of local values of the operator `op` for all `samples`.\n\n The local value is defined as\n .. 
math:: O_{\\mathrm{loc}}(x) = \\langle x | O | \\Psi \\rangle / \\langle x | \\Psi \\rangle\n\n\n    Args:\n        op: Hermitian operator.\n        v: A numpy array or matrix containing either a single\n            :math:`V = v` or a batch of visible\n            configurations :math:`V = v_1,\\dots v_M`.\n            In the latter case, each row of the matrix corresponds to a\n            visible configuration.\n        machine: Wavefunction :math:`\\Psi`.\n        log_vals: A scalar/numpy array containing the value(s) :math:`\\Psi(V)`.\n            If not given, it is computed from scratch.\n            Defaults to None.\n        der_log_vals: A numpy tensor containing the vector of log-derivative(s) :math:`O_i(V)`.\n            If not given, it is computed from scratch.\n            Defaults to None.\n        out: A scalar or a numpy array of local values of the operator.\n            If not given, it is allocated from scratch and then returned.\n            Defaults to None.\n        center_derivative: Whether to center the derivatives or not. In the formula above,\n            when this is true/false it is equivalent to setting :math:`\\alpha=\\{1 / 2\\}`.\n            By default `center_derivative=True`, meaning that it returns the correct\n            derivative of the local values. False is mainly used when dealing with liouvillians.\n\n    Returns:\n        If samples is given in batches, a numpy ndarray of derivatives of local values\n        of the operator, otherwise a 1D array.\n    \"\"\"\n    if isinstance(machine, _Jax):\n        return der_local_values_jax(\n            op,\n            machine,\n            v,\n            log_vals=log_vals,\n            center_derivative=center_derivative,\n        )\n\n    if v.ndim != 2:\n        raise RuntimeError(\"Invalid input shape, expected a 2d array\")\n\n    assert (\n        v.shape[1] == op.hilbert.size\n    ), \"samples has wrong shape: {}; expected (?, {})\".format(v.shape, op.hilbert.size)\n\n    if out is None:\n        out = np.empty((v.shape[0], machine.n_par), dtype=np.complex128)\n\n    if log_vals is None:\n        log_vals = machine.log_val(v)\n\n    if der_log_vals is None and center_derivative is True:\n        der_log_vals = machine.der_log(v)\n\n    if center_derivative is True:\n        _der_local_values_impl(\n            op, machine, v, log_vals, der_log_vals, out, batch_size=batch_size\n        )\n    else:\n        _der_local_values_notcentered_impl(\n            op, machine, v, log_vals, out, batch_size=batch_size\n        )\n\n    return out\n",
"import math\n\nfrom numba import jit\nimport numpy as _np\nfrom . import mean as _mean\nfrom . import var as _var\nfrom . import total_size as _total_size\n\n\ndef _format_decimal(value, std, var):\n if math.isfinite(std) and std > 1e-7:\n decimals = max(int(_np.ceil(-_np.log10(std))), 0)\n return (\n \"{0:.{1}f}\".format(value, decimals + 1),\n \"{0:.{1}f}\".format(std, decimals + 1),\n \"{0:.{1}f}\".format(var, decimals + 1),\n )\n else:\n return (\n \"{0:.3e}\".format(value),\n \"{0:.3e}\".format(std),\n \"{0:.3e}\".format(var),\n )\n\n\nclass Stats:\n \"\"\"A dict-compatible class containing the result of the statistics function.\"\"\"\n\n _NaN = float(\"NaN\")\n\n def __init__(\n self,\n mean=_NaN,\n error_of_mean=_NaN,\n variance=_NaN,\n tau_corr=_NaN,\n R_hat=_NaN,\n ):\n self.mean = complex(mean) if _np.iscomplexobj(mean) else float(mean)\n self.error_of_mean = float(error_of_mean)\n self.variance = float(variance)\n self.tau_corr = float(tau_corr)\n self.R_hat = float(R_hat)\n\n def to_json(self):\n jsd = {}\n jsd[\"Mean\"] = self.mean.real\n jsd[\"Variance\"] = self.variance\n jsd[\"Sigma\"] = self.error_of_mean\n jsd[\"R_hat\"] = self.R_hat\n jsd[\"TauCorr\"] = self.tau_corr\n return jsd\n\n def __repr__(self):\n mean, err, var = _format_decimal(self.mean, self.error_of_mean, self.variance)\n if not math.isnan(self.R_hat):\n ext = \", R̂={:.4f}\".format(self.R_hat)\n else:\n ext = \"\"\n return \"{} ± {} [σ²={}{}]\".format(mean, err, var, ext)\n\n def __getitem__(self, name):\n if name in (\"mean\", \"Mean\"):\n return self.mean\n elif name in (\"variance\", \"Variance\"):\n return self.variance\n elif name in (\"error_of_mean\", \"Sigma\"):\n return self.error_of_mean\n elif name in (\"R_hat\", \"R\"):\n return self.R_hat\n elif name in (\"tau_corr\", \"TauCorr\"):\n return self.tau_corr\n\n\n@jit(nopython=True)\ndef _get_blocks(data, l):\n n_blocks = int(_np.floor(data.shape[1] / float(l)))\n blocks = _np.empty(data.shape[0] * n_blocks, dtype=data.dtype)\n k = 0\n for i in range(data.shape[0]):\n for b in range(n_blocks):\n blocks[k] = data[i, b * l : (b + 1) * l].mean()\n k += 1\n return blocks\n\n\ndef _block_variance(data, l):\n blocks = _get_blocks(data, l)\n ts = _total_size(blocks)\n if ts > 0:\n return _var(blocks), ts\n else:\n return _np.nan, 0\n\n\ndef _batch_variance(data):\n b_means = _np.mean(data, axis=1)\n ts = _total_size(b_means)\n return _var(b_means), ts\n\n\ndef statistics(data):\n r\"\"\"\n Returns statistics of a given array (or matrix, see below) containing a stream of data.\n This is particularly useful to analyze Markov Chain data, but it can be used\n also for other type of time series.\n\n Args:\n data (vector or matrix): The input data. It can be real or complex valued.\n * if a vector, it is assumed that this is a time\n series of data (not necessarily independent).\n * if a matrix, it is assumed that that rows data[i]\n contain independent time series.\n\n Returns:\n Stats: A dictionary-compatible class containing the average (mean),\n the variance (variance),\n the error of the mean (error_of_mean), and an estimate of the\n autocorrelation time (tau_corr). In addition to accessing the elements with the standard\n dict sintax (e.g. res['mean']), one can also access them directly with the dot operator\n (e.g. 
res.mean).\n \"\"\"\n\n stats = Stats()\n data = _np.atleast_1d(data)\n if data.ndim == 1:\n data = data.reshape((1, -1))\n\n if data.ndim > 2:\n raise NotImplementedError(\"Statistics are implemented only for ndim<=2\")\n\n mean = _mean(data)\n variance = _var(data)\n\n ts = _total_size(data)\n\n bare_var = variance\n\n batch_var, n_batches = _batch_variance(data)\n\n b_s = 32\n l_block = max(1, data.shape[1] // b_s)\n\n block_var, n_blocks = _block_variance(data, l_block)\n\n tau_batch = ((ts / n_batches) * batch_var / bare_var - 1) * 0.5\n tau_block = ((ts / n_blocks) * block_var / bare_var - 1) * 0.5\n\n block_good = n_blocks >= b_s and tau_block < 6 * l_block\n batch_good = n_batches >= b_s and tau_batch < 6 * data.shape[1]\n\n if batch_good:\n error_of_mean = _np.sqrt(batch_var / n_batches)\n tau_corr = max(0, tau_batch)\n elif block_good:\n error_of_mean = _np.sqrt(block_var / n_blocks)\n tau_corr = max(0, tau_block)\n else:\n error_of_mean = _np.nan\n tau_corr = _np.nan\n\n if n_batches > 1:\n N = data.shape[-1]\n\n # V_loc = _np.var(data, axis=-1, ddof=0)\n # W_loc = _np.mean(V_loc)\n # W = _mean(W_loc)\n # # This approximation seems to hold well enough for larger n_samples\n W = variance\n\n R_hat = _np.sqrt((N - 1) / N + batch_var / W)\n else:\n R_hat = float(\"nan\")\n\n return Stats(mean, error_of_mean, variance, tau_corr, R_hat)\n",
"# Copyright 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\n\nfrom typing import Union\n\nfrom functools import partial\n\nfrom flax import struct\nimport jax\nfrom jax import numpy as jnp\n\nimport numpy as np\n\nfrom netket import jax as nkjax\n\nfrom . import mean as _mean\nfrom . import var as _var\nfrom . import total_size as _total_size\n\n\ndef _format_decimal(value, std, var):\n if math.isfinite(std) and std > 1e-7:\n decimals = max(int(np.ceil(-np.log10(std))), 0)\n return (\n \"{0:.{1}f}\".format(value, decimals + 1),\n \"{0:.{1}f}\".format(std, decimals + 1),\n \"{0:.{1}f}\".format(var, decimals + 1),\n )\n else:\n return (\n \"{0:.3e}\".format(value),\n \"{0:.3e}\".format(std),\n \"{0:.3e}\".format(var),\n )\n\n\n_NaN = float(\"NaN\")\n\n\[email protected]\nclass Stats:\n \"\"\"A dict-compatible class containing the result of the statistics function.\"\"\"\n\n mean: Union[float, complex] = _NaN\n \"\"\"The mean value\"\"\"\n error_of_mean: float = _NaN\n variance: float = _NaN\n tau_corr: float = _NaN\n R_hat: float = _NaN\n\n def to_dict(self):\n jsd = {}\n jsd[\"Mean\"] = self.mean.item()\n jsd[\"Variance\"] = self.variance.item()\n jsd[\"Sigma\"] = self.error_of_mean.item()\n jsd[\"R_hat\"] = self.R_hat.item()\n jsd[\"TauCorr\"] = self.tau_corr.item()\n return jsd\n\n def to_compound(self):\n return \"Mean\", self.to_dict()\n\n # Remove this method once we remove legacy.\n def to_json(self):\n jsd = {}\n jsd[\"Mean\"] = float(self.mean.real)\n jsd[\"Variance\"] = float(self.variance)\n jsd[\"Sigma\"] = float(self.error_of_mean)\n jsd[\"R_hat\"] = float(self.R_hat)\n jsd[\"TauCorr\"] = float(self.tau_corr)\n return jsd\n\n def __repr__(self):\n mean, err, var = _format_decimal(self.mean, self.error_of_mean, self.variance)\n if not math.isnan(self.R_hat):\n ext = \", R̂={:.4f}\".format(self.R_hat)\n else:\n ext = \"\"\n return \"{} ± {} [σ²={}{}]\".format(mean, err, var, ext)\n\n # Alias accessors\n def __getattr__(self, name):\n if name in (\"mean\", \"Mean\"):\n return self.mean\n elif name in (\"variance\", \"Variance\"):\n return self.variance\n elif name in (\"error_of_mean\", \"Sigma\"):\n return self.error_of_mean\n elif name in (\"R_hat\", \"R\"):\n return self.R_hat\n elif name in (\"tau_corr\", \"TauCorr\"):\n return self.tau_corr\n else:\n raise AttributeError(\n \"'Stats' object object has no attribute '{}'\".format(name)\n )\n\n\ndef _get_blocks(data, block_size):\n chain_length = data.shape[1]\n\n n_blocks = int(np.floor(chain_length / float(block_size)))\n\n return data[:, 0 : n_blocks * block_size].reshape((-1, block_size)).mean(axis=1)\n\n\ndef _block_variance(data, l):\n blocks = _get_blocks(data, l)\n ts = _total_size(blocks)\n if ts > 0:\n return _var(blocks), ts\n else:\n return jnp.nan, 0\n\n\ndef _batch_variance(data):\n b_means = data.mean(axis=1)\n ts = _total_size(b_means)\n return _var(b_means), ts\n\n\n# this is not batch_size maybe?\ndef statistics(data, batch_size=32):\n r\"\"\"\n 
Returns statistics of a given array (or matrix, see below) containing a stream of data.\n    This is particularly useful to analyze Markov Chain data, but it can be used\n    also for other types of time series.\n    Assumes same shape on all MPI processes.\n\n    Args:\n        data (vector or matrix): The input data. It can be real or complex valued.\n            * if a vector, it is assumed that this is a time\n              series of data (not necessarily independent).\n            * if a matrix, it is assumed that the rows data[i]\n              contain independent time series.\n\n    Returns:\n       Stats: A dictionary-compatible class containing the average (mean),\n             the variance (variance),\n             the error of the mean (error_of_mean), and an estimate of the\n             autocorrelation time (tau_corr). In addition to accessing the elements with the standard\n             dict syntax (e.g. res['mean']), one can also access them directly with the dot operator\n             (e.g. res.mean).\n    \"\"\"\n    return _statistics(data, batch_size)\n\n\n@partial(jax.jit, static_argnums=1)\ndef _statistics(data, batch_size):\n    data = jnp.atleast_1d(data)\n    if data.ndim == 1:\n        data = data.reshape((1, -1))\n\n    if data.ndim > 2:\n        raise NotImplementedError(\"Statistics are implemented only for ndim<=2\")\n\n    mean = _mean(data)\n    variance = _var(data)\n\n    ts = _total_size(data)\n\n    bare_var = variance\n\n    batch_var, n_batches = _batch_variance(data)\n\n    l_block = max(1, data.shape[1] // batch_size)\n\n    block_var, n_blocks = _block_variance(data, l_block)\n\n    tau_batch = ((ts / n_batches) * batch_var / bare_var - 1) * 0.5\n    tau_block = ((ts / n_blocks) * block_var / bare_var - 1) * 0.5\n\n    batch_good = (tau_batch < 6 * data.shape[1]) * (n_batches >= batch_size)\n    block_good = (tau_block < 6 * l_block) * (n_blocks >= batch_size)\n\n    stat_dtype = nkjax.dtype_real(data.dtype)\n\n    # if batch_good:\n    #    error_of_mean = jnp.sqrt(batch_var / n_batches)\n    #    tau_corr = jnp.max(0, tau_batch)\n    # elif block_good:\n    #    error_of_mean = jnp.sqrt(block_var / n_blocks)\n    #    tau_corr = jnp.max(0, tau_block)\n    # else:\n    #    error_of_mean = jnp.nan\n    #    tau_corr = jnp.nan\n    # jax style\n\n    def batch_good_err(args):\n        batch_var, tau_batch, *_ = args\n        error_of_mean = jnp.sqrt(batch_var / n_batches)\n        tau_corr = jnp.clip(tau_batch, 0)\n        return jnp.asarray(error_of_mean, dtype=stat_dtype), jnp.asarray(\n            tau_corr, dtype=stat_dtype\n        )\n\n    def block_good_err(args):\n        _, _, block_var, tau_block = args\n        error_of_mean = jnp.sqrt(block_var / n_blocks)\n        tau_corr = jnp.clip(tau_block, 0)\n        return jnp.asarray(error_of_mean, dtype=stat_dtype), jnp.asarray(\n            tau_corr, dtype=stat_dtype\n        )\n\n    def nan_err(args):\n        return jnp.asarray(jnp.nan, dtype=stat_dtype), jnp.asarray(\n            jnp.nan, dtype=stat_dtype\n        )\n\n    def batch_not_good(args):\n        batch_var, tau_batch, block_var, tau_block, block_good = args\n        return jax.lax.cond(\n            block_good,\n            block_good_err,\n            nan_err,\n            (batch_var, tau_batch, block_var, tau_block),\n        )\n\n    error_of_mean, tau_corr = jax.lax.cond(\n        batch_good,\n        batch_good_err,\n        batch_not_good,\n        (batch_var, tau_batch, block_var, tau_block, block_good),\n    )\n\n    if n_batches > 1:\n        N = data.shape[-1]\n\n        # V_loc = _np.var(data, axis=-1, ddof=0)\n        # W_loc = _np.mean(V_loc)\n        # W = _mean(W_loc)\n        # # This approximation seems to hold well enough for larger n_samples\n        W = variance\n\n        R_hat = jnp.sqrt((N - 1) / N + batch_var / W)\n    else:\n        R_hat = jnp.nan\n\n    res = Stats(mean, error_of_mean, variance, tau_corr, R_hat)\n\n    return res\n    ##\n",
"from . import AbstractDensityMatrix\nfrom .. import RbmSpin as PureRbmSpin\nimport numpy as _np\n\n\nclass RbmSpin(AbstractDensityMatrix):\n def __init__(\n self,\n hilbert,\n n_hidden=None,\n alpha=None,\n use_visible_bias=True,\n use_hidden_bias=True,\n automorphisms=None,\n dtype=complex,\n ):\n super().__init__(hilbert, dtype=dtype)\n\n if automorphisms is not None:\n if isinstance(automorphisms, netket.graph.AbstractGraph):\n automorphisms = automorphisms.automorphisms()\n import itertools\n\n automorphisms = [\n prod[0] + prod[1] for prod in itertools.product(autom, autom)\n ]\n\n input_like = _np.zeros(hilbert.size * 2)\n self._prbm = PureRbmSpin(\n input_like,\n n_hidden,\n alpha,\n use_visible_bias,\n use_hidden_bias,\n automorphisms,\n dtype,\n )\n self._plog_val = self._prbm.log_val\n self._pder_log = self._prbm.der_log\n\n def log_val(self, xr, xc=None, out=None):\n r\"\"\"Computes the logarithm of the density matrix for a batch of visible\n quantum numbers `(xr,xc)` and stores the result into `out`.\n Specifically, for each element of the batch i, this function should compute\n out[i]=log(rho(xr[i],xc[i])).\n If xr is None, it is assumed that xr has twice as many quantum numbers and\n contains both row and columns, stored contigously.\n\n Args:\n xr: A matrix of `float64` of shape `(*, self.n_visible)` if xc is given.\n If xc is None, then this should be a matrix of `float64` of shape `(*, 2*self.n_visible)`.\n xc (optional): A matrix of `float64` of shape `(*, self.n_visible)`.\n out: Destination vector of `complex128`. The length of `out` should be `xr.shape[0]`.\n\n Returns:\n A vector out[i]=log(rho(xr[i],xc[i])).\n \"\"\"\n if xc is None:\n return self._plog_val(xr, out)\n else:\n return self._plog_val(_np.hstack((xr, xc)), out)\n\n def der_log(self, xr, xc=None, out=None):\n r\"\"\"Computes the gradient of the logarithm of the density matrix for a\n batch of visible configurations `(xr,xc)` and stores the result into `out`.\n\n Args:\n xr: A matrix of `float64` of shape `(*, self.n_visible)` if xc is given.\n If xc is None, then this should be a matrix of `float64` of shape `(*, 2*self.n_visible)`.\n xc (optional): A matrix of `float64` of shape `(*, self.n_visible)`.\n out: Destination tensor of `complex128`.\n `out` should be a matrix of shape `(x.shape[0], self.n_par)`.\n\n Returns:\n `out`\n \"\"\"\n if xc is None:\n return self._pder_log(xr, out)\n else:\n return self._pder_log(_np.hstack((xr, xc)), out)\n\n @property\n def state_dict(self):\n r\"\"\"A dictionary containing the parameters of this machine\"\"\"\n return self._prbm.state_dict\n",
"# Copyright 2018-2019 The Simons Foundation, Inc. - All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom netket import legacy as nk\n\n# Sigma^z*Sigma^z interactions\nsigmaz = np.array([[1, 0], [0, -1]])\nmszsz = np.kron(sigmaz, sigmaz)\n\n# Exchange interactions\nexchange = np.asarray([[0, 0, 0, 0], [0, 0, 2, 0], [0, 2, 0, 0], [0, 0, 0, 0]])\n\n# Couplings J1 and J2\nJ = [1, 0.5]\n\nL = 20\n\nmats = []\nsites = []\nfor i in range(L):\n\n for d in [0, 1]:\n # \\sum_i J*sigma^z(i)*sigma^z(i+d)\n mats.append((J[d] * mszsz).tolist())\n sites.append([i, (i + d + 1) % L])\n\n # \\sum_i J*(sigma^x(i)*sigma^x(i+d) + sigma^y(i)*sigma^y(i+d))\n mats.append(((-1.0) ** (d + 1) * J[d] * exchange).tolist())\n sites.append([i, (i + d + 1) % L])\n\n# Custom Graph\ng = nk.graph.Hypercube(length=L, n_dim=1, pbc=True)\n\n# Spin based Hilbert Space\nhi = nk.hilbert.Spin(s=1 / 2, total_sz=0.0, N=g.n_nodes)\n\n# Custom Hamiltonian operator\nop = nk.operator.LocalOperator(hi)\nfor mat, site in zip(mats, sites):\n op += nk.operator.LocalOperator(hi, mat, site)\n\n# Restricted Boltzmann Machine in the phase representation\nma = nk.machine.RbmSpinPhase(hi, alpha=1)\nma.init_random_parameters(seed=1234, sigma=0.1)\n\n# Sampler\nsa = nk.sampler.MetropolisExchange(machine=ma, graph=g.n_nodes)\n\n# Optimizer\nopt = nk.optimizer.Sgd(learning_rate=0.01)\n\n# Variational Monte Carlo\ngs = nk.variational.Vmc(\n hamiltonian=op, sampler=sa, optimizer=opt, n_samples=1000, method=\"Sr\"\n)\n\ngs.run(out=\"test\", n_iter=10000)\n"
] | [
[
"numpy.diag",
"numpy.arange",
"numpy.sqrt"
],
[
"matplotlib.pyplot.legend",
"numpy.polyfit",
"matplotlib.pyplot.axhline",
"numpy.poly1d",
"matplotlib.pyplot.gca",
"numpy.abs",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.clf",
"numpy.max",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"numpy.exp",
"numpy.empty"
],
[
"numpy.sqrt",
"numpy.atleast_1d",
"numpy.log10",
"numpy.mean",
"numpy.iscomplexobj",
"numpy.empty"
],
[
"numpy.log10"
],
[
"numpy.hstack",
"numpy.zeros"
],
[
"numpy.asarray",
"numpy.array",
"numpy.kron"
]
] |
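A minimal usage sketch for the statistics() helper in the NetKet file above (assumptions: NetKet and its JAX dependencies are installed, and the function is exposed as netket.stats.statistics; the AR(1) chains are synthetic, purely for illustration):

import numpy as np
from netket.stats import statistics  # assumed public import path

rng = np.random.default_rng(0)
chains = rng.normal(size=(4, 1000))      # 4 independent chains of length 1000
for t in range(1, chains.shape[1]):      # inject autocorrelation (AR(1), rho = 0.9)
    chains[:, t] = 0.9 * chains[:, t - 1] + np.sqrt(1 - 0.9**2) * chains[:, t]

stats = statistics(chains)               # each row is treated as an independent chain
print(stats)                             # repr like "0.01 ± 0.03 [σ²=1.00, R̂=1.01]"
print(stats.mean, stats.error_of_mean, stats.tau_corr)

With only four chains, n_batches < batch_size in _statistics, so the error bar here falls through to the block-variance branch rather than the batch-variance one.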
kingoflolz/DALL-E | [
"d3f3e9a57a31b1e1cc74a449a9e6e5a0442f0ac7"
] | [
"examples/pure_jax.py"
] | [
"import io\n\nimport jax\nimport requests\nimport PIL\nfrom PIL import ImageOps\n\nimport numpy as np\nimport jax.numpy as jnp\n\nfrom dall_e_jax import get_encoder, get_decoder, map_pixels, unmap_pixels\n\ntarget_image_size = 256\n\n\ndef download_image(url):\n resp = requests.get(url)\n resp.raise_for_status()\n return PIL.Image.open(io.BytesIO(resp.content))\n\n\ndef preprocess(img):\n img = ImageOps.fit(img, [target_image_size,] * 2, method=0, bleed=0.0, centering=(0.5, 0.5))\n\n img = np.expand_dims(np.transpose(np.array(img).astype(np.float32)/255, (2, 0, 1)), 0)\n return map_pixels(img)\n\n\njax_enc_fn, jax_enc_params = get_encoder(\"encoder.pkl\")\njax_dec_fn, jax_dec_params = get_decoder(\"decoder.pkl\")\n\nx = preprocess(download_image('https://assets.bwbx.io/images/users/iqjWHBFdfxIU/iKIWgaiJUtss/v2/1000x-1.jpg'))\n\nz_logits = jax_enc_fn(jax_enc_params, x)\n\nz = jnp.argmax(z_logits, axis=1)\nz = jnp.transpose(jax.nn.one_hot(z, num_classes=8192), (0, 3, 1, 2))\n\nx_stats = jax_dec_fn(jax_dec_params, z)\n\nx_rec = unmap_pixels(jax.nn.sigmoid(x_stats[:, :3]))\nx_rec = np.transpose((np.array(x_rec[0]) * 255).astype(np.uint8), (1, 2, 0))\n\nPIL.Image.fromarray(x_rec).save('reconstructed.png')\n"
] | [
[
"numpy.array"
]
] |
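The only nontrivial glue in pure_jax.py above is the codebook round-trip: encoder logits over 8192 codes are collapsed to token ids, then re-expanded to one-hot channel maps for the decoder. A standalone sketch of just that reshaping (the zero logits are dummies and the spatial grid is shrunk to 4x4 for brevity; 8192 matches the num_classes used above):

import jax
import jax.numpy as jnp

z_logits = jnp.zeros((1, 8192, 4, 4))    # stand-in for jax_enc_fn output
z = jnp.argmax(z_logits, axis=1)         # (1, 4, 4) integer token ids
z = jnp.transpose(jax.nn.one_hot(z, num_classes=8192), (0, 3, 1, 2))
print(z.shape)                           # (1, 8192, 4, 4), ready for jax_dec_fn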
williamsashbee/Confident_classifier | [
"cba3ef862b310afc3af6c4a62b524f032f45549e",
"cba3ef862b310afc3af6c4a62b524f032f45549e"
] | [
"src/run_joint_confidence_cdcOriginalGan.py",
"src/run_joint_confidence_condgan.py"
] | [
"##############################################\n# This code is based on samples from pytorch #\n##############################################\n# Writer: Kimin Lee \n\nfrom __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport data_loader\nimport numpy as np\nimport torchvision.utils as vutils\nimport models\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\n\nimport os\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"5\"\n\n# Training settings\nparser = argparse.ArgumentParser(description='Training code - joint confidence')\nparser.add_argument('--batch-size', type=int, default=128, help='input batch size for training')\nparser.add_argument('--epochs', type=int, default=100, help='number of epochs to train')\nparser.add_argument('--lr', type=float, default=0.0002, help='learning rate')\nparser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=1, help='random seed')\nparser.add_argument('--log-interval', type=int, default=100,\n help='how many batches to wait before logging training status')\nparser.add_argument('--dataset', default='mnist', help='cifar10 | svhn')\nparser.add_argument('--dataroot', required=True, help='path to dataset')\nparser.add_argument('--imageSize', type=int, default=32, help='the height / width of the input image to network')\nparser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')\nparser.add_argument('--wd', type=float, default=0.0, help='weight decay')\nparser.add_argument('--droprate', type=float, default=0.1, help='learning rate decay')\nparser.add_argument('--decreasing_lr', default='60', help='decreasing strategy')\nparser.add_argument('--num_classes', type=int, default=10, help='the # of classes')\nparser.add_argument('--beta', type=float, default=1, help='penalty parameter for KL term')\n\nargs = parser.parse_args()\n\nif args.dataset == 'cifar10':\n args.beta = 0.1\n args.batch_size = 64\n\nprint(args)\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\nprint(\"Random Seed: \", args.seed)\ntorch.manual_seed(args.seed)\n\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\n\nprint('load data: ', args.dataset)\nif args.dataset=='mnist':\n transform = transforms.Compose([\n transforms.Scale(32),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.repeat(3, 1, 1)),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n ])\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('data', train=True, download=True, transform=transform),\n batch_size=128, shuffle=True)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('data', train=False, download=True, transform=transform),\n batch_size=128, shuffle=True)\nelse:\n train_loader, test_loader = data_loader.getTargetDataSet(args.dataset, args.batch_size, args.imageSize, args.dataroot)\n\n\ntransform = transforms.Compose([\n transforms.Scale(32),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.repeat(3, 1, 1)),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n])\n\ntrain_loader_mnist = torch.utils.data.DataLoader(\n datasets.MNIST('data', train=True, download=True, transform=transform),\n batch_size=128, shuffle=True)\n\nprint('Load model')\nmodel = 
models.vgg13()\nprint(model)\n\nprint('load GAN')\nnz = 100\nG = models.cdcOriginalGenerator(1, nz, 64, 3) # ngpu, nz, ngf, nc\nD = models.cdcOriginalDiscriminator(1, 3, 64) # ngpu, nc, ndf\nG.weight_init(mean=0.0, std=0.02)\nD.weight_init(mean=0.0, std=0.02)\n\n# Initial setup for GAN\nreal_label = 1\nfake_label = 0\ncriterion = nn.BCELoss()\nnz = 100\n\n\nprint('Setup optimizer')\nlr = 0.0002\nbatch_size = 128\noptimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)\nG_optimizer = optim.Adam(G.parameters(), lr=lr, betas=(0.5, 0.999))\nD_optimizer = optim.Adam(D.parameters(), lr=lr, betas=(0.5, 0.999))\n\ndecreasing_lr = list(map(int, args.decreasing_lr.split(',')))\n\nonehot = torch.zeros(10, 10).cuda()\nonehot = onehot.scatter_(1, torch.cuda.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).view(10, 1), 1).view(10, 10, 1, 1)\nimg_size = 32\nnum_labels = 10\nfraction = 1\nfill = torch.zeros([num_labels, num_labels, img_size / fraction, img_size / fraction]).cuda()\nfor i in range(num_labels):\n fill[i, i, :, :] = 1\nfill = fill.cuda()\n# os.environ[\"CUDA_LAUNCH_BLOCKING\"]=\"1\"\n\n# Binary Cross Entropy loss\nBCE_loss = nn.BCELoss()\n# fixed_noise = torch.FloatTensor(64, nz, 1, 1).normal_(0, 1)\nfixed_noise = torch.randn((64, 100)).view(-1, 100, 1, 1)\nfixed_label = None\n\nif args.cuda:\n model.cuda()\n D.cuda()\n G.cuda()\n criterion.cuda()\n fixed_noise = fixed_noise.cuda()\n\nfirst = True\ndef train(epoch):\n model.train()\n # D_train_loss = 0\n # G_train_loss = 3\n trg = 0\n trd = 0\n i = 0\n\n for batch_idx, (data, y_labels) in enumerate(train_loader):\n uniform_dist = torch.Tensor(data.size(0), args.num_classes).fill_((1. / args.num_classes)).cuda()\n x_ = data.cuda()\n assert x_[0, :, :, :].shape == (3, 32, 32)\n global first\n if first:\n global fixed_noise\n global fixed_label\n\n first = False\n fixed_label = onehot[y_labels.squeeze()[:64]]\n print(\"saving fixed_label!\")\n vutils.save_image(data[:64],\n '{}/{}jointConfidencerealReference{}.png'.format(args.outf, args.dataset, epoch),\n normalize=True)\n\n # train discriminator D\n D.zero_grad()\n y_ = y_labels\n mini_batch = x_.size()[0]\n\n y_real_ = torch.ones(mini_batch)\n y_fake_ = torch.zeros(mini_batch)\n y_real_, y_fake_ = Variable(y_real_.cuda()), Variable(y_fake_.cuda())\n\n y_fill_ = fill[y_.squeeze().tolist()]\n # y_fill_ = fill[y_]\n\n assert y_fill_[0, y_.squeeze().tolist()[0], :, :].sum() == (img_size / fraction) ** 2\n assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch\n\n x_, y_fill_ = Variable(x_.cuda()), Variable(y_fill_.cuda())\n\n D_result = D(x_, y_fill_).squeeze()\n D_real_loss = BCE_loss(D_result, y_real_)\n\n z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1)\n y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.LongTensor).squeeze()\n y_label_ = onehot[y_]\n y_fill_ = fill[y_]\n assert y_label_[0, y_[0]] == 1\n assert y_label_.shape == (mini_batch, 10, 1, 1)\n\n assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2\n assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch\n\n z_, y_label_, y_fill_ = Variable(z_.cuda()), Variable(y_label_.cuda()), Variable(y_fill_.cuda())\n\n G_result = G(z_, y_label_)\n D_result = D(G_result, y_fill_).squeeze()\n\n D_fake_loss = BCE_loss(D_result, y_fake_)\n D_fake_score = D_result.data.mean()\n\n D_train_loss = D_real_loss + D_fake_loss\n trg += 1\n if D_train_loss > .1:\n trd += 1\n D_train_loss.backward()\n D_optimizer.step()\n\n # D_losses.append(D_train_loss.item())\n\n # train generator G\n 
G.zero_grad()\n\n z_ = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1)\n y_ = (torch.rand(mini_batch, 1) * num_labels).type(torch.LongTensor).squeeze()\n y_label_ = onehot[y_]\n y_fill_ = fill[y_]\n\n z_, y_label_, y_fill_ = Variable(z_.cuda()), Variable(y_label_.cuda()), Variable(y_fill_.cuda())\n\n assert y_label_[0, y_[0]] == 1\n assert y_label_.shape == (mini_batch, 10, 1, 1)\n\n assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2\n assert y_fill_.sum() == (img_size / fraction) ** 2 * mini_batch\n\n G_result = G(z_, y_label_)\n D_result = D(G_result, y_fill_).squeeze()\n\n G_train_loss = BCE_loss(D_result, y_real_)\n\n # minimize the true distribution\n KL_fake_output = F.log_softmax(model(G_result))\n errG_KL = F.kl_div(KL_fake_output, uniform_dist) * args.num_classes\n generator_loss = G_train_loss + args.beta * errG_KL # 12.0, .65, 0e-8\n generator_loss.backward()\n\n G_optimizer.step()\n # G_losses.append(G_train_loss.item())\n ###########################\n # (3) Update classifier #\n ###########################\n # cross entropy loss\n\n optimizer.zero_grad()\n x_ = Variable(x_)\n\n output = F.log_softmax(model(x_))\n loss = F.nll_loss(output.cuda(), y_labels.type(torch.cuda.LongTensor).squeeze())\n\n # KL divergence\n\n ####\n z_ = torch.randn((data.shape[0], 100)).view(-1, 100, 1, 1).cuda()\n y_ = (torch.rand(data.shape[0], 1) * num_labels).type(torch.LongTensor).squeeze().cuda()\n y_label_ = onehot[y_]\n y_fill_ = fill[y_]\n\n assert y_label_[0, y_[0]] == 1\n assert y_label_.shape == (data.shape[0], 10, 1, 1)\n\n assert y_fill_[0, y_[0], :, :].sum() == (img_size / fraction) ** 2\n assert y_fill_.sum() == (img_size / fraction) ** 2 * data.shape[0]\n\n G_result = G(z_, y_label_)\n # !!!#D_result = D(G_result, y_fill_).squeeze()\n\n ####\n KL_fake_output = F.log_softmax(model(G_result))\n KL_loss_fake = F.kl_div(KL_fake_output, uniform_dist) * args.num_classes\n\n total_loss = loss + args.beta * KL_loss_fake\n # total_loss = loss\n total_loss.backward()\n optimizer.step()\n\n if batch_idx % args.log_interval == 0:\n print(\n \"Epoch {} , Descriminator loss {:.6f} Generator loss {:.6f} traingenerator {:.6f} traindiscriminator {:.6f}\".format(\n epoch, D_train_loss, G_train_loss, trg, trd))\n print('Classification Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}, KL fake Loss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.data.item(), KL_loss_fake.data.item()))\n\n # print('Classification Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}, KL fake Loss: {:.6f}'.format(\n # epoch, batch_idx * len(data), len(train_loader.dataset),\n # 100. 
* batch_idx / len(train_loader), loss.data.item(), KL_loss_fake.data.item()))\n fake = G(fixed_noise.cuda(), fixed_label)\n vutils.save_image(fake.data, '%s/MNISTcDCgan_samples_epoch_%03d.png' % (args.outf, epoch), normalize=True)\n\n\ndef test(epoch):\n model.eval()\n test_loss = 0\n correct = 0\n total = 0\n for data, target in test_loader:\n total += data.size(0)\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n # data, target = Variable(data, volatile=True), Variable(target)\n output = F.log_softmax(model(data))\n target = target.type(\n torch.LongTensor) # https://discuss.pytorch.org/t/runtimeerror-multi-target-not-supported-newbie/10216/4\n if args.cuda:\n output = output.cuda()\n target = target.cuda()\n target = torch.squeeze(target)\n\n test_loss += F.nll_loss(output, target).data.item()\n pred = output.data.max(1)[1] # get the index of the max log-probability\n correct += pred.eq(target.data).cpu().sum()\n\n test_loss = test_loss\n test_loss /= len(test_loader) # loss function already averages over batch size\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, total,\n 100. * correct / total))\n\n\nfor epoch in range(1, args.epochs + 1):\n train(epoch)\n test(epoch)\n if epoch in decreasing_lr:\n G_optimizer.param_groups[0]['lr'] *= args.droprate\n D_optimizer.param_groups[0]['lr'] *= args.droprate\n optimizer.param_groups[0]['lr'] *= args.droprate\n if epoch % 20 == 0:\n # do checkpointing\n torch.save(G.state_dict(), '%s/netG_epoch_%d.pth' % (args.outf, epoch))\n torch.save(D.state_dict(), '%s/netD_epoch_%d.pth' % (args.outf, epoch))\n torch.save(model.state_dict(), '%s/model_epoch_%d.pth' % (args.outf, epoch))\n",
"##############################################\n# This code is based on samples from pytorch #\n##############################################\n# Writer: Kimin Lee \n\nfrom __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport data_loader\nimport numpy as np\nimport torchvision.utils as vutils\nimport models\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\n\nimport os\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"5\"\n\n# Training settings\nparser = argparse.ArgumentParser(description='Training code - joint confidence')\nparser.add_argument('--batch-size', type=int, default=128, help='input batch size for training')\nparser.add_argument('--epochs', type=int, default=100, help='number of epochs to train')\nparser.add_argument('--lr', type=float, default=0.0002, help='learning rate')\nparser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=1, help='random seed')\nparser.add_argument('--log-interval', type=int, default=100,\n help='how many batches to wait before logging training status')\nparser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')\nparser.add_argument('--ngf', type=int, default=180)\nparser.add_argument('--ndf', type=int, default=80)\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\nparser.add_argument('--netG', default='', help=\"path to netG (to continue training)\")\nparser.add_argument('--netD', default='', help=\"path to netD (to continue training)\")\n\nparser.add_argument('--dataset', default='svhn', help='cifar10 | svhn')\nparser.add_argument('--dataroot', required=True, help='path to dataset')\nparser.add_argument('--imageSize', type=int, default=32, help='the height / width of the input image to network')\nparser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')\nparser.add_argument('--wd', type=float, default=0.0, help='weight decay')\nparser.add_argument('--droprate', type=float, default=0.1, help='learning rate decay')\nparser.add_argument('--decreasing_lr', default='60', help='decreasing strategy')\nparser.add_argument('--num_classes', type=int, default=10, help='the # of classes')\nparser.add_argument('--beta', type=float, default=1, help='penalty parameter for KL term')\n\nargs = parser.parse_args()\n\nif args.dataset == 'cifar10':\n args.beta = 0.1\n args.batch_size = 64\n\nprint(args)\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\nprint(\"Random Seed: \", args.seed)\ntorch.manual_seed(args.seed)\n\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\nkwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}\n\nprint('load data: ', args.dataset)\nif args.dataset=='mnist':\n transform = transforms.Compose([\n transforms.Scale(32),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.repeat(3, 1, 1)),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n ])\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('data', train=True, download=True, transform=transform),\n batch_size=128, shuffle=True)\n test_loader = None\nelse:\n train_loader, test_loader = data_loader.getTargetDataSet(args.dataset, args.batch_size, args.imageSize, args.dataroot)\n\n\n\nprint('Load model')\nmodel = 
models.vgg13()\nprint(model)\n\nprint('load GAN')\n\n\nnz = int(args.nz)\nngf = int(args.ngf)\nndf = int(args.ndf)\nif args.dataset == 'mnist':\n #nc = 1\n nc=3\n nb_label = 10\nelse:\n nc = 3\n nb_label = 10\n\nnetG = models.acnetG(nz, ngf, nc)\n\nif args.netG != '':\n netG.load_state_dict(torch.load(args.netG))\nprint(netG)\n\nnetD = models.acnetD(ndf, nc, nb_label)\n\nif args.netD != '':\n netD.load_state_dict(torch.load(args.netD))\nprint(netD)\n\n\nprint('Setup optimizer')\noptimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)\n\ndecreasing_lr = list(map(int, args.decreasing_lr.split(',')))\n\nnum_labels = 10\n# os.environ[\"CUDA_LAUNCH_BLOCKING\"]=\"1\"\nbatchSize = 128\nimageSize = 32\ninput = torch.FloatTensor(batchSize, 3, imageSize, imageSize)\nnoise = torch.FloatTensor(batchSize, nz, 1, 1)\nfixed_noise = torch.FloatTensor(batchSize, nz, 1, 1).normal_(0, 1)\ns_label = torch.FloatTensor(batchSize)\nc_label = torch.LongTensor(batchSize)\n\nreal_label = 1\nfake_label = 0\n\n\ns_criterion = nn.BCELoss()\nc_criterion = nn.NLLLoss()\n\nif args.cuda:\n netD.cuda()\n netG.cuda()\n s_criterion.cuda()\n c_criterion.cuda()\n input, s_label = input.cuda(), s_label.cuda()\n c_label = c_label.cuda()\n noise, fixed_noise = noise.cuda(), fixed_noise.cuda()\n\ninput = Variable(input)\ns_label = Variable(s_label)\nc_label = Variable(c_label)\nnoise = Variable(noise)\nfixed_noise = Variable(fixed_noise)\nfixed_noise_ = np.random.normal(0, 1, (batchSize, nz))\nrandom_label = np.random.randint(0, nb_label, batchSize)\nprint('fixed label:{}'.format(random_label))\nrandom_onehot = np.zeros((batchSize, nb_label))\nrandom_onehot[np.arange(batchSize), random_label] = 1\nfixed_noise_[np.arange(batchSize), :nb_label] = random_onehot[np.arange(batchSize)]\n\n\nfixed_noise_ = (torch.from_numpy(fixed_noise_))\nfixed_noise_ = fixed_noise_.resize_(batchSize, nz, 1, 1)\nfixed_noise.data.copy_(fixed_noise_)\n\n# setup optimizer\noptimizerD = optim.Adam(netD.parameters(), lr=args.lr, betas=(args.beta1, 0.999))\noptimizerG = optim.Adam(netG.parameters(), lr=args.lr, betas=(args.beta1, 0.999))\n\n\ndef train(epoch):\n model.train()\n # D_train_loss = 0\n # G_train_loss = 3\n trg = 0\n trd = 0\n\n global first\n global fixed_noise\n global fixed_label\n global fixed_label_base\n global one_hot_zero\n for batch_idx, (img, label) in enumerate(train_loader):\n ###########################\n # (1) Update D network\n ###########################\n # train with real\n if img.shape[0] != batchSize:\n print('shape problem')\n break\n netD.zero_grad()\n batch_size = img.size(0)\n input.data.resize_(img.size()).copy_(img)\n s_label.data.resize_(batch_size).fill_(real_label)\n c_label.data.resize_(batch_size).copy_(label.squeeze())\n s_output, c_output = netD(input)\n s_errD_real = s_criterion(s_output, s_label)\n c_errD_real = c_criterion(c_output, c_label)\n errD_real = s_errD_real + 2.0*c_errD_real\n errD_real.backward()\n D_x = s_output.data.mean()\n\n #correct, length = test(c_output, c_label)\n\n # train with fake\n noise.data.resize_(batch_size, nz, 1, 1)\n noise.data.normal_(0, 1)\n\n label = np.random.randint(0, nb_label, batch_size)\n noise_ = np.random.normal(0, 1, (batch_size, nz))\n label_onehot = np.zeros((batch_size, nb_label))\n label_onehot[np.arange(batch_size), label] = 1\n noise_[np.arange(batch_size), :nb_label] = label_onehot[np.arange(batch_size)]\n\n noise_ = (torch.from_numpy(noise_))\n noise_ = noise_.resize_(batch_size, nz, 1, 1)\n noise.data.copy_(noise_)\n\n 
c_label.data.resize_(batch_size).copy_(torch.from_numpy(label))\n\n fake = netG(noise)\n s_label.data.fill_(fake_label)\n s_output, c_output = netD(fake.detach())\n s_errD_fake = s_criterion(s_output, s_label)\n c_errD_fake = c_criterion(c_output, c_label)\n errD_fake = s_errD_fake + 2.0*c_errD_fake\n\n errD_fake.backward()\n D_G_z1 = s_output.data.mean()\n errD = s_errD_real + s_errD_fake\n optimizerD.step()\n trd += 1\n ###########################\n # (2) Update G network\n ###########################\n netG.zero_grad()\n s_label.data.fill_(real_label) # fake labels are real for generator cost\n s_output, c_output = netD(fake)\n s_errG = s_criterion(s_output, s_label)\n c_errG = c_criterion(c_output, c_label)\n\n errG = s_errG + 2.0*c_errG\n errG.backward()\n D_G_z2 = s_output.data.mean()\n\n if errG > 0:\n optimizerG.step()\n trg+=1\n # minimize the true distribution\n # KL_fake_output = F.log_softmax(model(G_result))\n # errG_KL = F.kl_div(KL_fake_output, uniform_dist)*args.num_classes\n # generator_loss = G_train_loss + args.beta*errG_KL # 12.0, .65, 0e-8\n # generator_loss.backward()\n #G_train_loss.backward()\n #G_optimizer.step()\n # G_losses.append(G_train_loss.item())\n ###########################\n # (3) Update classifier #\n ###########################\n # cross entropy loss\n \"\"\" \n optimizer.zero_grad()\n x_ = Variable(x_)\n\n output = F.log_softmax(model(x_))\n loss = F.nll_loss(output.cuda(), label.type(torch.cuda.LongTensor).squeeze())\n\n # KL divergence\n\n ####\n z_ = torch.randn((img.shape[0], 100)).view(-1, 100, 1, 1).cuda()\n y_ = (torch.rand(img.shape[0], 1) * num_labels).type(torch.LongTensor).squeeze().cuda()\n y_label_ = onehot[y_]\n y_fill_ = fill[y_]\n\n assert y_label_[0, y_[0]] == 1\n assert y_label_.shape == (data.shape[0], 10, 1, 1)\n\n assert y_fill_[0, y_[0], :, :].sum() == (img_size ) ** 2\n assert y_fill_.sum() == (img_size ) ** 2 * data.shape[0]\n\n G_result = G(z_, y_label_)\n # !!!#D_result = D(G_result, y_fill_).squeeze()\n\n ####\n KL_fake_output = F.log_softmax(model(G_result))\n KL_loss_fake = F.kl_div(KL_fake_output, uniform_dist) * args.num_classes\n\n total_loss = loss + args.beta * KL_loss_fake\n # total_loss = loss\n total_loss.backward()\n optimizer.step()\n \"\"\"\n if batch_idx % args.log_interval == 0:\n print(\n \"Epoch {} , Descriminator loss {:.6f} Generator loss {:.6f} traingenerator {:.6f} traindiscriminator {:.6f}\".format(\n epoch, errD, errG, trg, trd))\n #print('Classification Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}, KL fake Loss: {:.6f}'.format(\n # epoch, batch_idx * len(data), len(train_loader.dataset),\n # 100. * batch_idx / len(train_loader), loss.data.item(), KL_loss_fake.data.item()))\n\n # print('Classification Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}, KL fake Loss: {:.6f}'.format(\n # epoch, batch_idx * len(data), len(train_loader.dataset),\n # 100. 
* batch_idx / len(train_loader), loss.data.item(), KL_loss_fake.data.item()))\n fake = netG(fixed_noise)\n vutils.save_image(fake.data, '%s/SVHNcDCgan_samples_epoch_%03d.png' % (args.outf, epoch), normalize=True)\n\n\n\ndef test(epoch):\n model.eval()\n test_loss = 0\n correct = 0\n total = 0\n for data, target in test_loader:\n total += data.size(0)\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n # data, target = Variable(data, volatile=True), Variable(target)\n output = F.log_softmax(model(data))\n target = target.type(\n torch.LongTensor) # https://discuss.pytorch.org/t/runtimeerror-multi-target-not-supported-newbie/10216/4\n if args.cuda:\n output = output.cuda()\n target = target.cuda()\n target = torch.squeeze(target)\n\n test_loss += F.nll_loss(output, target).data.item()\n pred = output.data.max(1)[1] # get the index of the max log-probability\n correct += pred.eq(target.data).cpu().sum()\n\n test_loss = test_loss\n test_loss /= len(test_loader) # loss function already averages over batch size\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, total,\n 100. * correct / total))\n\n\nfor epoch in range(1, args.epochs + 1):\n train(epoch)\n# test(epoch)\n if epoch in decreasing_lr:\n optimizerG.param_groups[0]['lr'] *= args.droprate\n optimizerD.param_groups[0]['lr'] *= args.droprate\n optimizer.param_groups[0]['lr'] *= args.droprate\n if epoch % 20 == 0:\n # do checkpointing\n torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (args.outf, epoch))\n torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (args.outf, epoch))\n torch.save(model.state_dict(), '%s/model_epoch_%d.pth' % (args.outf, epoch))\n"
] | [
[
"torch.nn.functional.kl_div",
"torch.ones",
"torch.cuda.manual_seed",
"torch.zeros",
"torch.nn.functional.nll_loss",
"torch.manual_seed",
"torch.randn",
"torch.cuda.LongTensor",
"torch.nn.BCELoss",
"torch.rand",
"torch.cuda.is_available",
"torch.squeeze",
"torch.autograd.Variable"
],
[
"torch.LongTensor",
"torch.nn.NLLLoss",
"torch.cuda.manual_seed",
"torch.load",
"torch.nn.functional.nll_loss",
"torch.manual_seed",
"numpy.arange",
"torch.from_numpy",
"torch.nn.BCELoss",
"numpy.random.normal",
"torch.FloatTensor",
"numpy.random.randint",
"torch.cuda.is_available",
"numpy.zeros",
"torch.squeeze",
"torch.autograd.Variable"
]
] |
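Both training scripts above condition the GAN through two precomputed lookup tensors: onehot (per-class label vectors fed to the generator) and fill (per-class constant feature maps fed to the discriminator). A CPU-only sketch of that indexing trick (num_labels and img_size mirror the scripts; torch.eye is equivalent to the scatter_-based construction used there, and the .cuda() calls are dropped for portability):

import torch

num_labels, img_size = 10, 32
onehot = torch.eye(num_labels).view(num_labels, num_labels, 1, 1)
fill = torch.zeros(num_labels, num_labels, img_size, img_size)
for i in range(num_labels):
    fill[i, i, :, :] = 1                 # channel i is all ones for class i

y = torch.tensor([3, 7])                 # a mini-batch of class labels
print(onehot[y].shape)                   # torch.Size([2, 10, 1, 1])  -> generator label input
print(fill[y].shape)                     # torch.Size([2, 10, 32, 32]) -> discriminator label maps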
7125messi/rencommend_system_learning | [
"4a8bcef241c4c0357cfbe4d1a9828b847974b69c"
] | [
"Chapter2/LFM.py"
] | [
"# 导入包\nimport random\nimport math\nimport numpy as np\nimport time\nfrom tqdm import tqdm\nfrom tqdm import trange\n\n# 1 通用函数定义\n## 定义装饰器,监控运行时间\ndef timmer(func):\n def wrapper(*args, **kwargs):\n start_time = time.time()\n res = func(*args, **kwargs)\n stop_time = time.time()\n print('Func {},run time:{}'.format(func.__name__,stop_time - start_time))\n return res\n return wrapper\n\n## 数据处理相关\n### load data\n### split data\nclass Dataset():\n def __init__(self,fp):\n self.data = self.loadData(fp)\n\n @timmer\n def loadData(self,fp):\n data = []\n for l in open(fp):\n data.append(tuple(map(int, l.strip().split('::')[:2])))\n return data\n\n @timmer\n def splitData(self, M, k, seed=1):\n '''\n :params: data, 加载的所有(user, item)数据条目\n :params: M, 划分的数目,最后需要取M折的平均\n :params: k, 本次是第几次划分,k~[0, M)\n :params: seed, random的种子数,对于不同的k应设置成一样的\n :return: train, test\n '''\n train , test = [], []\n random.seed(seed)\n for user, item in self.data:\n # 这里与书中的不一致,本人认为取M-1较为合理,因randint是左右都覆盖的\n if random.randint(0, M-1) == k: \n test.append((user, item))\n else:\n train.append((user, item))\n\n # 处理成字典的形式,user->set(items)\n def convert_dict(data):\n data_dict = {}\n for user, item in data:\n if user not in data_dict:\n data_dict[user] = set()\n data_dict[user].add(item)\n data_dict = {k: list(data_dict[k]) for k in data_dict}\n return data_dict\n\n return convert_dict(train), convert_dict(test)\n\n## 评价指标\n### Precision\n### Recall\n### Coverage\n### Popularity(Novelty)\n\nclass Metric():\n def __init__(self, train, test, GetRecommendation):\n '''\n :params: train, 训练数据\n :params: test, 测试数据\n :params: GetRecommendation, 为某个用户获取推荐物品的接口函数\n '''\n self.train = train\n self.test = test\n self.GetRecommendation = GetRecommendation\n self.recs = self.getRec()\n \n # 为test中的每个用户进行推荐\n def getRec(self):\n recs = {}\n for user in self.test:\n rank = self.GetRecommendation(user)\n recs[user] = rank\n return recs\n \n # 定义精确率指标计算方式\n def precision(self):\n all, hit = 0, 0\n for user in self.test:\n test_items = set(self.test[user])\n rank = self.recs[user]\n for item, score in rank:\n if item in test_items:\n hit += 1\n all += len(rank)\n return round(hit / all * 100, 2)\n \n # 定义召回率指标计算方式\n def recall(self):\n all, hit = 0, 0\n for user in self.test:\n test_items = set(self.test[user])\n rank = self.recs[user]\n for item, score in rank:\n if item in test_items:\n hit += 1\n all += len(test_items)\n return round(hit / all * 100, 2)\n \n # 定义覆盖率指标计算方式\n def coverage(self):\n all_item, recom_item = set(), set()\n for user in self.test:\n for item in self.train[user]:\n all_item.add(item)\n rank = self.recs[user]\n for item, score in rank:\n recom_item.add(item)\n return round(len(recom_item) / len(all_item) * 100, 2)\n \n # 定义新颖度指标计算方式\n def popularity(self):\n # 计算物品的流行度\n item_pop = {}\n for user in self.train:\n for item in self.train[user]:\n if item not in item_pop:\n item_pop[item] = 0\n item_pop[item] += 1\n\n num, pop = 0, 0\n for user in self.test:\n rank = self.recs[user]\n for item, score in rank:\n # 取对数,防止因长尾问题带来的被流行物品所主导\n pop += math.log(1 + item_pop[item])\n num += 1\n return round(pop / num, 6)\n \n def eval(self):\n metric = {'Precision': self.precision(),\n 'Recall': self.recall(),\n 'Coverage': self.coverage(),\n 'Popularity': self.popularity()}\n print('Metric:', metric)\n return metric\n\n# 2 LFM算法实现\ndef LFM(train,ratio,K,lr,step,lmbda,N):\n '''\n :params: train, 训练数据\n :params: ratio, 负采样的正负比例\n :params: K, 隐语义个数\n :params: lr, 初始学习率\n :params: step, 迭代次数\n :params: lmbda, 正则化系数\n :params: 
N, 推荐TopN物品的个数\n :return: GetRecommendation, 获取推荐结果的接口\n '''\n all_items = {}\n for user in train:\n for item in train[user]:\n if item not in all_items:\n all_items[item] = 0\n all_items[item] += 1\n\n all_items = list(all_items.items())\n items = [x[0] for x in all_items]\n pops = [x[1] for x in all_items]\n\n # 负采样函数(按照流行度就行采样)\n def nSample(data,ratio):\n new_data = {}\n # 正样本\n for user in data:\n if user not in new_data:\n new_data[user] = {}\n for item in data[user]:\n new_data[user][item] = 1\n # 负样本\n for user in new_data:\n seen = set(new_data[user])\n pos_num = len(seen)\n item = np.random.choice(items, int(pos_num * ratio * 3), pops)\n item = [x for x in item if x not in seen][:int(pos_num * ratio)]\n new_data[user].update({x: 0 for x in item})\n \n return new_data\n\n # 训练\n P, Q = {}, {}\n for user in train:\n P[user] = np.random.random(K)\n for item in items:\n Q[item] = np.random.random(K)\n \n for s in trange(step):\n data = nSample(train, ratio)\n for user in data:\n for item in data[user]:\n eui = data[user][item] - (P[user] * Q[item]).sum()\n P[user] += lr * (Q[item] * eui - lmbda * P[user])\n Q[item] += lr * (P[user] * eui - lmbda * Q[item])\n lr *= 0.9 # 调整学习率\n \n # 获取接口函数\n def GetRecommendation(user):\n seen_items = set(train[user])\n recs = {}\n for item in items:\n if item not in seen_items:\n recs[item] = (P[user] * Q[item]).sum()\n recs = list(sorted(recs.items(), key=lambda x: x[1], reverse=True))[:N]\n return recs\n \n return GetRecommendation\n\n# 3 LFM实验\n## M=8, N=10, ratio=[1, 2, 3, 5, 10, 20]\n\nclass Experiment():\n def __init__(self, M, N, ratio=1,\n K=100, lr=0.02, step=100, lmbda=0.01, fp='../dataset/ml-1m/ratings.dat'):\n '''\n :params: M, 进行多少次实验\n :params: N, TopN推荐物品的个数\n :params: ratio, 正负样本比例\n :params: K, 隐语义个数\n :params: lr, 学习率\n :params: step, 训练步数\n :params: lmbda, 正则化系数\n :params: fp, 数据文件路径\n '''\n self.M = M\n self.K = K\n self.N = N\n self.ratio = ratio\n self.lr = lr\n self.step = step\n self.lmbda = lmbda\n self.fp = fp\n self.alg = LFM\n \n # 定义单次实验\n @timmer\n def worker(self, train, test):\n '''\n :params: train, 训练数据集\n :params: test, 测试数据集\n :return: 各指标的值\n '''\n getRecommendation = self.alg(train, self.ratio, self.K, \n self.lr, self.step, self.lmbda, self.N)\n metric = Metric(train, test, getRecommendation)\n return metric.eval()\n \n # 多次实验取平均\n @timmer\n def run(self):\n metrics = {'Precision': 0, 'Recall': 0, \n 'Coverage': 0, 'Popularity': 0}\n dataset = Dataset(self.fp)\n for ii in range(self.M):\n train, test = dataset.splitData(self.M, ii)\n print('Experiment {}:'.format(ii))\n metric = self.worker(train, test)\n metrics = {k: metrics[k]+metric[k] for k in metrics}\n metrics = {k: metrics[k] / self.M for k in metrics}\n print('Average Result (M={}, N={}, ratio={}): {}'.format(\\\n self.M, self.N, self.ratio, metrics))\n\n# LFM实验(运行时间较长,这里没贴实验结果)\nM, N = 8, 10\nfor r in [1, 2, 3, 5, 10, 20]:\n exp = Experiment(M, N, ratio=r)\n exp.run()"
] | [
[
"numpy.random.random"
]
] |
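The heart of LFM() above is a two-line SGD step on the user/item latent factors. Isolated below with toy values (K is shrunk from the default 100 for readability; lr and lmbda mirror the defaults, and the update order, where Q_i sees the already-updated P_u, follows the training loop):

import numpy as np

K, lr, lmbda = 4, 0.02, 0.01
rng = np.random.default_rng(1)
P_u, Q_i = rng.random(K), rng.random(K)  # user / item latent vectors

label = 1                                # 1 = positive sample, 0 = sampled negative
eui = label - (P_u * Q_i).sum()          # prediction error
P_u += lr * (Q_i * eui - lmbda * P_u)    # L2-regularized gradient step
Q_i += lr * (P_u * eui - lmbda * Q_i)    # uses updated P_u, as in the original loop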
JCSDA/mpas-jedi | [
"e0780d1fd295912ee4cfb758854c52b6764d4ab9",
"e0780d1fd295912ee4cfb758854c52b6764d4ab9"
] | [
"graphics/basic_plot_functions.py",
"graphics/plot_modelspace_ts_1d_aggr.py"
] | [
"#!/usr/bin/env python3\n\nfrom copy import deepcopy\nimport cartopy.crs as ccrs\nimport datetime as dt\nimport logging\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\nimport matplotlib\nmatplotlib.use('AGG')\nimport matplotlib.axes as maxes\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\nfrom matplotlib.colors import BoundaryNorm\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport numpy as np\nimport plot_utils as pu\nimport var_utils as vu\nimport os\n\n_logger = logging.getLogger(__name__)\n\ncmGray = plt.cm.get_cmap(\"gist_gray\")\ncmRainbow = plt.cm.get_cmap(\"gist_rainbow\")\ncmSpectral = plt.cm.get_cmap(\"nipy_spectral\")\ncmHeat = plt.cm.get_cmap(\"gist_heat\")\ncmOcean = plt.cm.get_cmap(\"ocean\")\ncmNCAR = plt.cm.get_cmap(\"gist_ncar\")\n\nWhiteBlack1 = cmGray(np.linspace(1.0,0.0,17)) # white to black (-90 to -74 C)\nBlackRed = cmHeat(np.linspace(0.0,0.5,10)) #black to red (-74 to -65 C)\nROYG = cmSpectral(np.linspace(0.9,0.43,27)) # red, orange, yellow, green, blue (-65 to -39 C)\n#GreenBlue = cmNCAR(np.linspace(0.05,0.1,8)) # green to blue (-39 to -32 C)\n#BlueCyan = cmRainbow(np.linspace(0.8,0.6,13)) # blue to cyan (-32 to -20 C)\nGreenBlueCyan = cmNCAR(np.linspace(0.05,0.2,20)) # green to blue (-39 to -20 C)\n#WhiteBlack2 = cmGray(np.linspace(0.9,0.0,51)) # white to black (-20 to 30 C)\nMVW = cmNCAR(np.linspace(0.8,0.98,21)) # magenta to violet to white (-20 to 0 C)\nWhiteBlack2 = cmGray(np.linspace(0.9,0.0,31)) # white to black (0 to 30 C)\n\n#btcolors = np.concatenate((WhiteBlack1, BlackRed, ROYG, GreenBlue, BlueCyan, WhiteBlack2))\n#btcolors = np.concatenate((WhiteBlack1, BlackRed, ROYG, GreenBlueCyan, WhiteBlack2))\nbtcolors = np.concatenate((WhiteBlack1, BlackRed, ROYG, GreenBlueCyan, MVW, WhiteBlack2))\n\nbtCMap = colors.ListedColormap(btcolors)\n\n#This script includes basic plotting functions.\n\ndistriZooms = {}\n\n#Full Earth\ndistriZooms['default'] = {\n 'cLon': None,\n 'minLon': -180,\n 'maxLon': 180,\n 'minLat': -90,\n 'maxLat': 90,\n}\ndistriZooms['abi'] = {\n 'cLon': -75.2,\n 'minLon': None,\n 'maxLon': None,\n 'minLat': None,\n 'maxLat': None,\n}\ndistriZooms['ahi'] = {\n 'cLon': 140.7,\n 'minLon': None,\n 'maxLon': None,\n 'minLat': None,\n 'maxLat': None,\n}\n\ndef plotDistri(lats,lons,values, \\\n ObsType,VarName,var_unit,out_name,nstation,levbin, \\\n dmin=None,dmax=None,dotsize=6,color=\"rainbow\"):\n#================================================================\n#INPUTS:\n# lats - latitude\n# lons - longitude\n# values - values will be plotted\n# ObsType - observation type\n# VarName - variable name\n# var_unit - variable units\n# out_name - will be included in output file name. 
It can be experiment name.\n# nstation - station numbers for sondes.\n# levbin - plot all levels together (levbin=all); or plot every level.\n# dmin, dmax - min/max values of colorbars, optional\n# dotsize - dot size, optional\n# color - color scheme, optional\n#================================================================\n# For some plots that need to change longitude from [-180,180] to [0,360]\n# tmp = np.logical_not(lons > 0)\n# lons[tmp] = lons[tmp] + 360\n\n#set map=======================================================================\n cLon = distriZooms['default']['cLon']\n minLon = distriZooms['default']['minLon']\n maxLon = distriZooms['default']['maxLon']\n minLat = distriZooms['default']['minLat']\n maxLat = distriZooms['default']['maxLat']\n\n for key, val in distriZooms.items():\n if key in ObsType:\n cLon = val['cLon']\n minLon = val['minLon']\n maxLon = val['maxLon']\n minLat = val['minLat']\n maxLat = val['maxLat']\n\n if cLon is not None:\n fig = plt.figure(figsize=(5,5))\n ax = fig.add_subplot(projection=ccrs.Orthographic(cLon))\n else:\n fig = plt.figure(figsize=(8,8))\n ax = fig.add_subplot(projection=ccrs.PlateCarree())\n\n ax.set_global()\n\n#draw points onto map =========================================================\n if color == \"BT\":\n if (\"abi\" in ObsType or \"ahi\" in ObsType):\n cm = btCMap\n if dmin is None: dmin = 183\n if dmax is None: dmax = 303\n else:\n cm = plt.cm.get_cmap(\"gist_ncar\")\n if dmin is None: dmin = 190\n if dmax is None: dmax = 270\n else:\n cm = plt.cm.get_cmap(color)\n\n finite = np.isfinite(values)\n if (((\"abi\" in ObsType or \"ahi\" in ObsType)\n and finite.sum() > 4e4)\n or \"model\" in ObsType):\n # option 1: smoothed contours (note: color bar is not quite right)\n # sc=m.contourf(lons[finite], lats[finite], values[finite],\n # cm.N, cmap = cm, vmin = dmin, vmax = dmax,\n # latlon = True, tri = True, extend='both')\n\n # option 2: pixel contours\n # first sort by longitude to avoid bug for cyclic projections in basemap\n lonsPlot = lons[finite]\n lonsPlot[lonsPlot > 180.0] -= 360.0 # fixes latitude swap bug for cyclic projections\n latsPlot = lats[finite]\n valuesPlot = values[finite]\n lonSort = np.argsort(lonsPlot)\n\n p = plt.pcolor(lonsPlot[lonSort], latsPlot[lonSort], valuesPlot[lonSort],\n transform = ccrs.PlateCarree(),\n cmap = cm, vmin = dmin, vmax = dmax,\n latlon = True, tri = True)\n\n else:\n p=ax.scatter(lons[finite], lats[finite], c=values[finite],\n transform = ccrs.PlateCarree(),\n cmap= cm, s = dotsize)\n ax.gridlines(draw_labels=True, xlocs=np.arange(-180,180,60),linestyle='--')\n\n ax.coastlines()\n\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"bottom\",size=\"5%\", pad=0.3,axes_class=plt.Axes)\n\n #fig.add_axes(cax)\n plt.colorbar(p,cax=cax,orientation='horizontal') #,cax=cax,ax=ax,orientation='horizontal')\n\n#set title ===================================================================\n if nstation == 0 or ObsType == 'satwind':\n plt.text(0.5, 1.15, '%s %s %s nlocs:%s' \\\n %(ObsType,VarName,var_unit,len(values[~np.isnan(values)])), \\\n horizontalalignment='center', \\\n fontsize=12, transform = ax.transAxes)\n else:\n if ObsType[:6] == 'gnssro':\n plt.text(0.5, 1.15, '%s %s %s nlocs:%s nprofile:%s' \\\n %(ObsType,VarName,var_unit,len(values[~np.isnan(values)]),nstation), \\\n horizontalalignment='center', \\\n fontsize=12, transform = ax.transAxes)\n elif ObsType == 'aircraft':\n plt.text(0.5, 1.15, '%s %s %s nlocs:%s nflight:%s' \\\n 
%(ObsType,VarName,var_unit,len(values[~np.isnan(values)]),nstation), \\\n horizontalalignment='center', \\\n fontsize=12, transform = ax.transAxes)\n else:\n plt.text(0.5, 1.15, '%s %s %s nlocs:%s nstation:%s' \\\n %(ObsType,VarName,var_unit,len(values[~np.isnan(values)]),nstation), \\\n horizontalalignment='center', \\\n fontsize=12, transform = ax.transAxes)\n\n plt.savefig('distri_%s_%s_%s.png'%(VarName,out_name,levbin),dpi=200,bbox_inches='tight')\n plt.close()\n\n\ndef scatterMapFields(\n lonVals, latVals, fields,\n filename,\n minLon = -180., maxLon = 180.,\n minLat = -90., maxLat = 90.,\n cLon = None,\n projection = 'default',\n dmin = None, dmax = None,\n markers = {},\n sizes = {},\n cmap = 'gist_ncar',\n cbarType = None,\n c = {},\n logVLim = 1.e-12,\n ):\n\n # setup map\n cLons = np.asarray([])\n lonVals_180 = {}\n\n for name in lonVals.keys():\n cLon = None\n\n # 0 < longitude <= 360\n lonVals_360 = deepcopy(lonVals[name])\n while np.max(lonVals_360) >= 360.0:\n lonVals_360[lonVals_360 >= 360.0] -= 360.0\n while np.min(lonVals_360) < 0.0:\n lonVals_360[lonVals_360 < 0.0] += 360.0\n\n # -180 < longitude <= 180\n lonVals_180[name] = deepcopy(lonVals_360)\n lonVals_180[name][lonVals_180[name] > 180.0] -= 360.0\n\n for lon in [lonVals_360, lonVals_180[name]]:\n if np.max(lon) - np.min(lon) <= 180.0:\n cLon = 0.5*(np.max(lon) + np.min(lon))\n\n cLons = np.append(cLons, cLon)\n\n anycLonNone = np.any([c is None for c in cLons])\n\n if anycLonNone:\n # plot entire Earth\n fig = plt.figure(figsize=(5,5))\n ax = fig.add_subplot(projection=ccrs.Mollweide(0.0))\n\n else:\n # plot single projected side of Earth\n cLon = cLons[0]\n if cLon > 180.0: cLon-=360.0\n fig = plt.figure(figsize=(5,5))\n ax = fig.add_subplot(projection=ccrs.Orthographic(cLon))\n\n assert (cbarType is None or cbarType in ['Log', 'SymLog']), \\\n 'scatterMapFields: invalid cbarType: '+cbarType\n\n for name, field in fields.items():\n f = c=c.get(name, field)\n finite = np.isfinite(f)\n lons = lonVals_180[name][finite]\n lats = latVals[name][finite]\n f = f[finite]\n\n ## transform to pcolormesh and cartopy conventions\n # longitude monotonically increasing\n lonSort = np.argsort(lons)\n lons = lons[lonSort]\n lats = lats[lonSort]\n f = f[lonSort]\n\n if dmin is None:\n vmin = f.min()\n else:\n vmin = dmin\n if dmax is None:\n vmax = f.max()\n else:\n vmax = dmax\n\n if cbarType is None:\n norm = None\n elif cbarType == 'Log':\n if vmin <= logVLim: vmin = logVLim\n f[f < vmin] = vmin\n norm=colors.LogNorm(vmin=vmin, vmax=vmax)\n elif cbarType == 'SymLog':\n norm=colors.SymLogNorm(vmin=vmin, vmax=vmax,\n linthresh=1.e-4*vmax, linscale=1.0, base=10)\n\n sc = ax.scatter(lons, lats, c=f,\n s = sizes.get(name, 1),\n cmap = cmap,\n norm = norm,\n marker = markers.get(name, '.'), linewidth = 0,\n transform=ccrs.PlateCarree(),\n )\n\n # show full projection extent\n ax.set_global()\n\n # add coastlines\n ax.coastlines()\n\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"bottom\",size=\"5%\", pad=0.3,axes_class=plt.Axes)\n cb = plt.colorbar(sc, cax=cax, orientation='horizontal')\n\n plt.savefig(filename, dpi=200, bbox_inches='tight')\n plt.close()\n\ndef plotTimeserial2D(Stats,xlabeltime,ylevels,VarName):\n#================================================================\n#INPUTS:\n# Stats - statistics\n# xlabeltime - time labels for x-axis \n# ylevels - vertical levels for y-axis\n# VarName - variable name\n#================================================================\n zgrid = 
np.loadtxt(\"/glade/work/jban/pandac/fix_input/graphics/zgrid_v55.txt\")\n\n fig, ax1 = plt.subplots()\n\n xarray = range(len(xlabeltime))\n valuemin = np.amin(Stats)\n valuemax = np.amax(Stats)\n # yonggangyu introduce epsilon and xi for plotting absolutely zero field,\n # solving vmin, vcenter, vmax ascending order issue\n epsilon = 1.e-8\n if (valuemin > 0 or valuemax < 0):\n color = 'rainbow'\n plt.contourf(xarray,ylevels,Stats,40,vmin=valuemin, vmax=valuemax,cmap=color)\n xi=-1\n else:\n cmap = 'coolwarm'\n if ( -valuemin < epsilon and valuemax < epsilon ):\n xi=1\n valuemin = -epsilon\n valuemax = epsilon\n elif ( -valuemin < epsilon and valuemax > epsilon ):\n xi=2\n valuemin = -epsilon\n elif ( -valuemin > epsilon and valuemax < epsilon ):\n xi=3\n valuemax = epsilon\n else:\n xi=4\n #print('xi= '+str(xi)+' valuemin= ',str(valuemin)+' valuemax= ',str(valuemax))\n norm = matplotlib.colors.DivergingNorm(vmin=valuemin, vcenter=0, vmax=valuemax)\n plt.contourf(xarray,ylevels,Stats,40,vmin=valuemin, vmax=valuemax,norm=norm,cmap=cmap)\n xarray = range(len(xlabeltime))\n major_ticks = np.arange(0, 56, 5)\n ax1.set_yticks(major_ticks)\n ax1.set_ylim([0,54])\n ax1.set_ylabel('Vertical level',fontsize=15)\n\n ax2 = ax1.twinx()\n ax2.set_yticks(major_ticks-1)\n ax2.set_yticklabels((zgrid[::5]).astype(int))\n\n ax2.set_ylabel('Height (m)',fontsize=13)\n\n FCDay = ''.join(VarName.split(\"_\")[1:][:-3])\n if (FCDay == 'day0.0'):\n ax1.set_xlabel('Analysis Time',fontsize=15)\n ax1.set_xticks(xarray[::4])\n ax1.set_xticklabels(xlabeltime[::4],rotation=90)\n elif (FCDay == 'day0.25'):\n ax1.set_xlabel( '6h Forecast',fontsize=15)\n ax1.set_xticks(xarray[::4])\n ax1.set_xticklabels(xlabeltime[::4],rotation=90)\n else:\n ax1.set_xlabel( 'Lead Time',fontsize=15)\n\n plt.colorbar(extend='both',orientation=\"horizontal\",pad=0.2)\n ax1.grid(True)\n region = ''.join(VarName.split(\"_\")[2:][:-2])\n var = ''.join(VarName.split(\"_\")[3:][:-1])\n stats = ''.join(VarName.split(\"_\")[4:])\n plt.title(stats+' variable:'+vu.varDictModel[var][1]+'('+ vu.varDictModel[var][0]+') '+region, fontsize = 12)\n plt.savefig(VarName+'_TS_2d.png',dpi=200,bbox_inches='tight')\n plt.close()\n\nmaxLegendEntries = 12\n\n###############################################################################\nlenWarnSer = 0\nnanWarnSer = 0\ndef plotSeries(fig, \\\n linesVals, xVals, \\\n linesLabel, \\\n title=\"\", dataLabel=\"y\", \\\n sciticks=False, logscale= False, signdef=False, \\\n indepLabel=\"x\", invert_ind_axis=False, \\\n ny=1, nx=1, nplots=1, iplot=0, \\\n linesValsMinCI=None, linesValsMaxCI=None, \\\n dmin=np.NaN, dmax=np.NaN, \\\n lineAttribOffset=0, \\\n legend_inside=True,\n interiorLabels=True):\n\n# ARGUMENTS\n# fig - matplotlib figure object\n# linesVals - dependent variable (list of arrays)\n# xVals - independent variable on x-axis (array)\n# linesLabel - legend label for linesVals (list)\n\n# title - subplot title, optional\n# dataLabel - label for linesVals, optional\n# sciticks - whether linesVals needs scientific formatting for ticks, optional\n# logscale - y-axis is scaled logarithmically, optional, overrides sciticks\n# signdef - whether linesVals is positive/negative definite, optional\n# indepLabel - label for xVals, optional\n# invert_ind_axis - whether to invert x-axis orientation, optional\n\n# ny, nx - number of subplots in x/y direction, optional\n# nplots - total number of subplots, optional\n# iplot - this subplot index (starting at 0), optional\n\n# linesValsMinCI - minimum error bound for 
linesVals (list of arrays), optional\n# linesValsMaxCI - maximum error bound for linesVals (list of arrays), optional\n# Note: linesValsMinCI and linesValsMaxCI must be specified together\n\n# lineAttribOffset - offset for selecting line attributes, optional\n# dmin, dmax - min/max values of linesVals, optional\n# legend_inside - whether legend should be placed inside the subplot, optional\n\n ax = fig.add_subplot(ny, nx, iplot+1)\n\n #title\n ax.set_title(title,fontsize=5)\n\n #add lines\n plotVals = np.asarray([])\n nLines = 0\n for iline, lineVals in enumerate(linesVals):\n if np.all(np.isnan(lineVals)):\n global nanWarnSer\n if nanWarnSer==0:\n _logger.warning(\"skipping all-NaN data\")\n _logger.warning(title+\"; \"+indepLabel+\"; \"+linesLabel[iline])\n nanWarnSer=nanWarnSer+1\n continue\n if len(lineVals)!=len(xVals):\n global lenWarnSer\n if lenWarnSer==0:\n _logger.warning(\"skipping data where len(x)!=len(y)\")\n _logger.warning(title+\"; \"+indepLabel+\"; \"+linesLabel[iline])\n lenWarnSer=lenWarnSer+1\n continue\n\n # Plot line for each lineVals that has non-missing data\n pColor = pu.plotColor(len(linesVals),iline+lineAttribOffset)\n\n ax.plot(xVals, lineVals, \\\n color=pColor, \\\n label=linesLabel[iline], \\\n ls=pu.plotLineStyle(len(linesVals),iline+lineAttribOffset), \\\n linewidth=0.5)\n nLines += 1\n plotVals = np.append(plotVals, lineVals)\n\n # Add shaded error regions if specified\n if linesValsMinCI is not None and \\\n linesValsMaxCI is not None:\n\n # test statistical significance versus zero\n if signdef:\n significant = np.empty(len(lineVals))\n significant[:] = np.NaN\n else:\n significant = np.multiply(linesValsMinCI[iline], linesValsMaxCI[iline])\n significant = np.array([x if np.isfinite(x) else -1.0 for x in significant])\n\n lineArr = np.array(lineVals)\n xArr = np.array(xVals)\n negsiginds = np.array([i for i,x in enumerate(significant)\n if (x > 0.0 and lineArr[i] < 0.0)],dtype=int)\n if len(negsiginds) > 0:\n ax.plot(xArr[negsiginds], lineArr[negsiginds], \\\n color=pColor, \\\n ls='', \\\n marker='v', \\\n markersize=1.5)\n\n possiginds = np.array([i for i,x in enumerate(significant)\n if (x > 0.0 and lineArr[i] > 0.0)],dtype=int)\n if len(possiginds) > 0:\n ax.plot(xArr[possiginds], lineArr[possiginds], \\\n color=pColor, \\\n ls='', \\\n marker='^', \\\n markersize=1.5)\n\n ax.plot(xVals, linesValsMinCI[iline], \\\n color=pColor, \\\n alpha=0.4, \\\n ls='-', \\\n linewidth=0.5)\n ax.plot(xVals, linesValsMaxCI[iline], \\\n color=pColor, \\\n alpha=0.4, \\\n ls='-', \\\n linewidth=0.5)\n ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \\\n color=pColor, \\\n edgecolor=pColor, \\\n linewidth=0.0, alpha = 0.1)\n ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \\\n where=significant > 0.0, \\\n color=pColor, \\\n edgecolor=pColor, \\\n linewidth=0.2, alpha = 0.3)\n\n if nLines == 0:\n ax.tick_params(axis='x',labelbottom=False)\n ax.tick_params(axis='y',labelleft=False)\n return\n\n # add horizontal zero line for unbounded quantities\n if not signdef:\n ax.plot([xVals[0], xVals[-1]], [0., 0.], ls=\"--\", c=\".3\", \\\n linewidth=0.7,markersize=0)\n\n # standardize x-limits\n mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,plotVals,signdef)\n\n #axes settings\n ax.xaxis.set_tick_params(labelsize=3)\n ax.yaxis.set_tick_params(labelsize=3)\n\n isLogScale = logscale\n if logscale:\n nonzero = np.logical_and(np.greater(np.abs(plotVals), 0.), np.isfinite(plotVals))\n if nonzero.sum() > 0:\n vmin = 
np.nanmin(np.abs(plotVals[nonzero]))\n vmax = np.nanmax(np.abs(plotVals[nonzero]))\n if signdef:\n # log tick labels look bad for single decade\n if vmax / vmin > 10.0:\n ax.set_yscale('log')\n else:\n isLogScale = False\n else:\n ax.set_yscale('symlog')\n else:\n isLogScale = False\n\n if isLogScale and np.isfinite(maxdval) and maxdval > 0.:\n ax.set_ylim(None, maxdval)\n if np.abs(vmin) > 0.:\n ax.set_ylim(vmin, None)\n\n if not isLogScale:\n if sciticks:\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n if (np.isfinite(mindval) and\n np.isfinite(maxdval)):\n ax.set_ylim(mindval,maxdval)\n if maxdval-mindval < 1.0 or \\\n maxdval-mindval > 100.0:\n ax.tick_params(axis='y',rotation=-35)\n ax.yaxis.get_offset_text().set_fontsize(3)\n\n #handle interior subplot ticks/labels\n ix = int(iplot)%int(nx)\n iy = int(iplot)/int(nx)\n if not interiorLabels \\\n and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )):\n ax.tick_params(axis='x',labelbottom=False)\n if interiorLabels or ix == 0:\n ax.set_xlabel(indepLabel,fontsize=4)\n if interiorLabels or iy == ny-1:\n ax.set_ylabel(dataLabel,fontsize=4)\n\n #legend\n if nLines <= maxLegendEntries:\n if legend_inside:\n #INSIDE AXES\n lh = ax.legend(loc='best',fontsize=3,frameon=True,\\\n framealpha=0.4,ncol=1)\n lh.get_frame().set_linewidth(0.0)\n elif ix==nx-1 or iplot==nplots-1:\n #OUTSIDE AXES\n ax.legend(loc='upper left',fontsize=3,frameon=False, \\\n bbox_to_anchor=(1.02, 1), borderaxespad=0)\n\n if invert_ind_axis:\n ax.invert_xaxis()\n\n ax.grid()\n\n return\n\n###############################################################################\nlenWarnProf = 0\nnanWarnProf = 0\ndef plotProfile(fig, \\\n linesVals, yVals, \\\n linesLabel, \\\n title=\"\", dataLabel=\"x\", \\\n sciticks=False, logscale=False, signdef=False, \\\n indepLabel=\"y\", invert_ind_axis=False, \\\n ny=1, nx=1, nplots=1, iplot=0, \\\n linesValsMinCI=None, linesValsMaxCI=None, \\\n dmin=np.NaN, dmax=np.NaN, \\\n lineAttribOffset=0, \\\n legend_inside=True,\n interiorLabels=True):\n\n# ARGUMENTS\n# fig - matplotlib figure object\n# linesVals - dependent variable (list of arrays)\n# yVals - independent variable on y-axis (array)\n# linesLabel - legend label for linesVals (list)\n\n# title - subplot title, optional\n# dataLabel - label for linesVals, optional\n# sciticks - whether linesVals needs scientific formatting for ticks, optional\n# logscale - x-axis is scaled logarithmically, optional, overrides sciticks\n# signdef - whether linesVals is positive/negative definite, optional\n# indepLabel - label for yVals, optional\n# invert_ind_axis - whether to invert y-axis orientation, optional\n\n# ny, nx - number of subplots in x/y direction, optional\n# nplots - total number of subplots, optional\n# iplot - this subplot index (starting at 0), optional\n\n# linesValsMinCI - minimum error bound for linesVals (list of arrays), optional\n# linesValsMaxCI - maximum error bound for linesVals (list of arrays), optional\n# Note: linesValsMinCI and linesValsMaxCI must be specified together\n\n# lineAttribOffset - offset for selecting line attributes, optional\n# dmin, dmax - min/max values of linesVals, optional\n# legend_inside - whether legend should be placed inside the subplot, optional\n\n ax = fig.add_subplot(ny, nx, iplot+1)\n\n #title\n ax.set_title(title,fontsize=5)\n\n #add lines\n plotVals = np.asarray([])\n nLines = 0\n for iline, lineVals in enumerate(linesVals):\n if np.all(np.isnan(lineVals)):\n global nanWarnProf\n 
if nanWarnProf==0:\n _logger.warning(\"skipping all-NaN data\")\n _logger.warning(title+\"; \"+dataLabel+\"; \"+linesLabel[iline])\n nanWarnProf=nanWarnProf+1\n continue\n if len(lineVals)!=len(yVals):\n global lenWarnProf\n if lenWarnProf==0:\n _logger.warning(\"skipping data where len(x)!=len(y)\")\n _logger.warning(title+\"; \"+dataLabel+\"; \"+linesLabel[iline])\n lenWarnProf=lenWarnProf+1\n continue\n\n # Plot line for each lineVals that has non-missing data\n pColor = pu.plotColor(len(linesVals),iline+lineAttribOffset)\n\n ax.plot(lineVals, yVals, \\\n color=pColor, \\\n label=linesLabel[iline], \\\n ls=pu.plotLineStyle(len(linesVals),iline+lineAttribOffset), \\\n linewidth=0.5)\n nLines += 1\n plotVals = np.append(plotVals,lineVals)\n\n # Add shaded error regions if specified\n if linesValsMinCI is not None and \\\n linesValsMaxCI is not None:\n\n # test statistical significance versus zero\n if signdef:\n significant = np.empty(len(lineVals))\n significant[:] = np.NaN\n else:\n significant = np.multiply(linesValsMinCI[iline], linesValsMaxCI[iline])\n significant = np.array([x if np.isfinite(x) else -1.0 for x in significant])\n\n lineArr = np.array(lineVals)\n yArr = np.array(yVals)\n negsiginds = np.array([i for i,x in enumerate(significant)\n if (x > 0.0 and lineArr[i] < 0.0)],dtype=int)\n if len(negsiginds) > 0:\n ax.plot(lineArr[negsiginds], yArr[negsiginds], \\\n color=pColor, \\\n ls='', \\\n marker='<', \\\n markersize=1.5)\n\n possiginds = np.array([i for i,x in enumerate(significant)\n if (x > 0.0 and lineArr[i] > 0.0)],dtype=int)\n if len(possiginds) > 0:\n ax.plot(lineArr[possiginds], yArr[possiginds], \\\n color=pColor, \\\n ls='', \\\n marker='>', \\\n markersize=1.5)\n\n ax.plot(linesValsMinCI[iline], yVals, \\\n color=pColor, \\\n alpha=0.4, \\\n ls='-', \\\n linewidth=0.5)\n ax.plot(linesValsMaxCI[iline], yVals, \\\n color=pColor, \\\n alpha=0.4, \\\n ls='-', \\\n linewidth=0.5)\n ax.fill_betweenx(yVals, linesValsMinCI[iline], linesValsMaxCI[iline], \\\n color=pColor, \\\n edgecolor=pColor, \\\n linewidth=0.0, alpha = 0.1)\n ax.fill_betweenx(yVals, linesValsMinCI[iline], linesValsMaxCI[iline], \\\n where=significant > 0.0, \\\n color=pColor, \\\n edgecolor=pColor, \\\n linewidth=0.2, alpha = 0.3)\n\n if nLines == 0:\n ax.tick_params(axis='x',labelbottom=False)\n ax.tick_params(axis='y',labelleft=False)\n return\n\n # add vertical zero line for unbounded quantities\n if not signdef:\n ax.plot([0., 0.], [yVals[0], yVals[-1]], ls=\"--\", c=\".3\", \\\n linewidth=0.7,markersize=0)\n\n # standardize x-limits\n mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,plotVals,signdef)\n\n #axes settings\n ax.xaxis.set_tick_params(labelsize=3)\n ax.yaxis.set_tick_params(labelsize=3)\n\n isLogScale = logscale\n if logscale:\n nonzero = np.logical_and(np.greater(np.abs(plotVals), 0.), np.isfinite(plotVals))\n if nonzero.sum() > 0:\n vmin = np.nanmin(np.abs(plotVals[nonzero]))\n vmax = np.nanmax(np.abs(plotVals[nonzero]))\n if signdef:\n # log tick labels look bad for single decade\n if vmax / vmin > 10.0:\n ax.set_xscale('log')\n else:\n isLogScale = False\n else:\n ax.set_xscale('symlog')\n else:\n isLogScale = False\n\n if isLogScale and np.isfinite(maxdval) and maxdval > 0.:\n ax.set_xlim(None, maxdval)\n if np.abs(mindval) > 0.:\n ax.set_xlim(mindval, None)\n\n if not isLogScale:\n if sciticks:\n ax.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n if (np.isfinite(mindval) and\n np.isfinite(maxdval)):\n ax.set_xlim(mindval,maxdval)\n if maxdval-mindval < 1.0 or \\\n 
maxdval-mindval > 100.0:\n ax.tick_params(axis='x',rotation=-35)\n ax.xaxis.get_offset_text().set_fontsize(3)\n\n\n #handle interior subplot ticks/labels\n ix = int(iplot)%int(nx)\n iy = int(iplot)/int(nx)\n if not interiorLabels \\\n and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )):\n ax.tick_params(axis='x',labelbottom=False)\n if interiorLabels or ix == 0:\n ax.set_xlabel(dataLabel,fontsize=4)\n if interiorLabels or iy == ny-1:\n ax.set_ylabel(indepLabel,fontsize=4)\n\n #legend\n if nLines <= maxLegendEntries:\n if legend_inside:\n #INSIDE AXES\n lh = ax.legend(loc='best',fontsize=3,frameon=True,\\\n framealpha=0.4,ncol=1)\n lh.get_frame().set_linewidth(0.0)\n elif ix==nx-1 or iplot==nplots-1:\n #OUTSIDE AXES\n ax.legend(loc='upper left',fontsize=3,frameon=False, \\\n bbox_to_anchor=(1.02, 1), borderaxespad=0)\n\n if invert_ind_axis:\n ax.invert_yaxis()\n\n ax.grid()\n\n return\n\n\n###############################################################################\nlenWarnTS=0\nnanWarnTS=0\ndef plotTimeSeries(fig, \\\n xsDates, linesVals, \\\n linesLabel, \\\n title=\"\", dataLabel=\"\", \\\n sciticks=False, logscale = False, signdef=False, \\\n ny=1, nx=1, nplots=1, iplot=0, \\\n linesValsMinCI=None, linesValsMaxCI=None, \\\n dmin=np.NaN, dmax=np.NaN, \\\n lineAttribOffset=0, \\\n legend_inside=True,\n interiorLabels=True):\n\n# ARGUMENTS\n# fig - matplotlib figure object\n# xsDates - date x-values (list/array or list of lists/arrays\n# of float seconds, dt.timedelta, dt.datetime)\n# linesVals - dependent variable (list of arrays)\n# linesLabel - legend label for linesVals (list)\n\n# title - subplot title, optional\n# dataLabel - label for linesVals, optional\n# sciticks - whether linesVals needs scientific formatting for ticks, optional\n# logscale - y-axis is scaled logarithmically, optional, overrides sciticks\n# signdef - whether linesVals is positive/negative definite, optional\n\n# ny, nx - number of subplots in x/y direction, optional\n# nplots - total number of subplots, optional\n# iplot - this subplot index (starting at 0), optional\n\n# linesValsMinCI - minimum error bound for linesVals (list of arrays), optional\n# linesValsMaxCI - maximum error bound for linesVals (list of arrays), optional\n# Note: linesValsMinCI and linesValsMaxCI must be specified together\n\n# lineAttribOffset - offset for selecting line attributes, optional\n# dmin, dmax - min/max values of linesVals, optional\n# legend_inside - whether legend should be placed inside the subplot, optional\n\n ax = fig.add_subplot(ny, nx, iplot+1)\n\n #title\n ax.set_title(title,fontsize=5)\n\n #add lines\n plotVals = np.asarray([])\n nLines = 0\n jline = 0\n for iline, lineVals in enumerate(linesVals):\n if np.all(np.isnan(lineVals)):\n global nanWarnTS\n if nanWarnTS==0:\n _logger.warning(\"skipping all-NaN data\")\n _logger.warning(title+\"; \"+dataLabel+\"; \"+linesLabel[iline])\n nanWarnTS=nanWarnTS+1\n continue\n\n #float xVals\n if isinstance(xsDates[0],(list,np.ndarray)):\n xVals = pu.TDeltas2Seconds(xsDates[min([iline,len(xsDates)-1])])\n else:\n xVals = pu.TDeltas2Seconds(xsDates)\n\n if len(lineVals)!=len(xVals):\n global lenWarnTS\n if lenWarnTS==0:\n _logger.warning(\"skipping data where len(x)!=len(y)\")\n _logger.warning(title+\"; \"+dataLabel+\"; \"+linesLabel[iline])\n lenWarnTS=lenWarnTS+1\n continue\n\n if jline == 0:\n minX = xVals[0]\n maxX = xVals[-1]\n else:\n minX = min([xVals[0], minX])\n maxX = max([xVals[-1], maxX])\n jline += 1\n\n # Plot line 
for each lineVals that has non-missing data\n pColor = pu.plotColor(len(linesVals),iline+lineAttribOffset)\n\n ax.plot(xVals, lineVals, \\\n label=linesLabel[iline], \\\n color=pColor, \\\n ls=pu.plotLineStyle(len(linesVals),iline+lineAttribOffset), \\\n linewidth=0.5)\n nLines += 1\n plotVals = np.append(plotVals, lineVals)\n\n # Add shaded CI regions if specified\n if linesValsMinCI is not None and \\\n linesValsMaxCI is not None:\n\n # test statistical significance versus zero\n if signdef:\n significant = np.empty(len(lineVals))\n significant[:] = np.NaN\n else:\n significant = np.multiply(linesValsMinCI[iline], linesValsMaxCI[iline])\n significant = np.array([x if np.isfinite(x) else -1.0 for x in significant])\n\n lineArr = np.array(lineVals)\n xArr = np.array(xVals)\n negsiginds = np.array([i for i,x in enumerate(significant)\n if (x > 0.0 and lineArr[i] < 0.0)],dtype=int)\n if len(negsiginds) > 0:\n ax.plot(xArr[negsiginds], lineArr[negsiginds], \\\n color=pColor, \\\n ls='', \\\n marker='v', \\\n markersize=1.5)\n\n possiginds = np.array([i for i,x in enumerate(significant)\n if (x > 0.0 and lineArr[i] > 0.0)],dtype=int)\n if len(possiginds) > 0:\n ax.plot(xArr[possiginds], lineArr[possiginds], \\\n color=pColor, \\\n ls='', \\\n marker='^', \\\n markersize=1.5)\n\n ax.plot(xVals, linesValsMinCI[iline], \\\n color=pColor, \\\n alpha=0.4, \\\n ls='-', \\\n linewidth=0.5)\n ax.plot(xVals, linesValsMaxCI[iline], \\\n color=pColor, \\\n alpha=0.4, \\\n ls='-', \\\n linewidth=0.5)\n ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \\\n color=pColor, \\\n edgecolor=pColor, \\\n linewidth=0.0, alpha = 0.1)\n ax.fill_between(xVals, linesValsMinCI[iline], linesValsMaxCI[iline], \\\n where=significant > 0.0, \\\n color=pColor, \\\n edgecolor=pColor, \\\n linewidth=0.2, alpha = 0.3)\n\n if nLines == 0:\n ax.tick_params(axis='x',labelbottom=False)\n ax.tick_params(axis='y',labelleft=False)\n return\n\n # standardize y-limits\n mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,plotVals,signdef)\n\n # add horizontal zero line for unbounded quantities\n if not signdef:\n ax.plot([minX, maxX], [0., 0.], ls=\"--\", c=\".3\", \\\n linewidth=0.7,markersize=0)\n\n #axes settings\n if isinstance(xsDates[0],(list,np.ndarray)):\n pu.format_x_for_dates(ax, xsDates[0])\n else:\n pu.format_x_for_dates(ax, xsDates)\n\n ax.xaxis.set_tick_params(labelsize=3)\n ax.yaxis.set_tick_params(labelsize=3)\n isLogScale = logscale\n if logscale:\n nonzero = np.logical_and(np.greater(np.abs(plotVals), 0.), np.isfinite(plotVals))\n if nonzero.sum() > 0:\n vmin = np.nanmin(np.abs(plotVals[nonzero]))\n vmax = np.nanmax(np.abs(plotVals[nonzero]))\n if signdef:\n # log tick labels look bad for single decade\n if vmax / vmin > 10.0:\n ax.set_yscale('log')\n else:\n isLogScale = False\n else:\n ax.set_yscale('symlog')\n else:\n isLogScale = False\n\n if isLogScale and np.isfinite(maxdval) and maxdval > 0.:\n ax.set_ylim(None, maxdval)\n if np.abs(vmin) > 0.:\n ax.set_ylim(vmin, None)\n\n if not isLogScale:\n if sciticks:\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n if (np.isfinite(mindval) and\n np.isfinite(maxdval)):\n ax.set_ylim(mindval,maxdval)\n if maxdval-mindval < 1.0 or \\\n maxdval-mindval > 100.0:\n ax.tick_params(axis='y',rotation=-35)\n ax.yaxis.get_offset_text().set_fontsize(3)\n\n ax.grid()\n\n #handle interior subplot ticks/labels\n ix = int(iplot)%int(nx)\n iy = int(iplot)/int(nx)\n if not interiorLabels \\\n and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or 
ix <= (int(nplots)%int(nx) - 1)) )):\n ax.tick_params(axis='x',labelbottom=False)\n if interiorLabels or ix == 0:\n ax.set_ylabel(dataLabel,fontsize=4)\n\n #legend\n if nLines <= maxLegendEntries:\n if legend_inside:\n #INSIDE AXES\n nlcol = np.int(np.ceil(np.sqrt(nLines)))\n lh = ax.legend(loc='best',fontsize=3,frameon=True,\\\n framealpha=0.4,ncol=nlcol)\n lh.get_frame().set_linewidth(0.0)\n elif ix==nx-1 or iplot==nplots-1:\n #OUTSIDE AXES\n ax.legend(loc='upper left',fontsize=3,frameon=False, \\\n bbox_to_anchor=(1.02, 1), borderaxespad=0)\n\n\n return\n\n\n###############################################################################\ndef plotTimeSeries2D(fig, \\\n xDates, yVals, contourVals, \\\n title=\"\", clabel=\"\", \\\n sciticks=False, logscale=False, signdef=False, \\\n dataLabel=\"y\", invert_ind_axis=False, \\\n ny=1, nx=1, nplots=1, iplot=0, \\\n dmin=np.NaN, dmax=np.NaN,\n interiorLabels=True):\n\n# ARGUMENTS\n# fig - matplotlib figure object\n# xDates - date x-values (array of float seconds, dt.timedelta, dt.datetime)\n# yVals - second independent variable\n# contourVals - dependent variable (2d array)\n\n# title - subplot title, optional\n# clabel - label for dependent variable, optional\n# sciticks - whether contourVals needs scientific formatting for ticks, optional\n# logscale - whether contours are spaced logarithmically, optional, overrides sciticks\n# signdef - whether contourVals is positive/negative definite, optional\n# dataLabel - label for yVals, optional\n# invert_ind_axis - whether to invert y-axis orientation, optional\n\n# ny, nx - number of subplots in x/y direction, optional\n# nplots - total number of subplots, optional\n# iplot - this subplot index (starting at 0), optional\n\n# dmin, dmax - min/max values of contourVals, optional\n\n ax = fig.add_subplot(ny, nx, iplot+1)\n\n if (np.isnan(contourVals)).all():\n ax.tick_params(axis='x',labelbottom=False)\n ax.tick_params(axis='y',labelleft=False)\n return\n\n xVals = pu.TDeltas2Seconds(xDates)\n\n # standardize c-limits\n mindval, maxdval = pu.get_clean_ax_limits(dmin,dmax,contourVals,signdef)\n if signdef:\n cmapName = 'BuPu'\n nlevs = 18\n\n # scientific contours\n cint = contourVals.astype(int)\n isInt = np.all((contourVals - cint) == 0)\n if isInt:\n minscid = np.nanmax(np.array([1., dmin]))\n else:\n minscid = maxdval*1.e-5\n lognorm = colors.LogNorm(vmin=minscid, vmax=maxdval)\n else:\n cmapName = 'seismic'\n nlevs = 28\n\n # scientific contours\n lognorm = colors.SymLogNorm(vmin=mindval, vmax=maxdval,\n linthresh=1.e-3*maxdval, linscale=1.3, base=10)\n\n # plot contour\n # option 1: smoothed contours\n #cp = ax.contourf(xVals, yVals, contourVals, nlevs, cmap=cmapName, extend='both', \\\n # vmin=mindval, vmax=maxdval)\n\n # option 2: pixel contours\n cmap = plt.get_cmap(cmapName)\n cmap.set_bad(color = 'k', alpha = 1.0)\n if logscale:\n norm = lognorm\n else:\n levels = mticker.MaxNLocator(nbins=nlevs).tick_values(mindval,maxdval)\n norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)\n xVals_pcolor, yVals_pcolor = transformXY_for_pcolor(xVals,yVals)\n cp = ax.pcolormesh(xVals_pcolor, yVals_pcolor, contourVals, cmap=cmap, norm=norm)\n\n #title\n ax.set_title(title,fontsize=5)\n\n #axes settings\n pu.format_x_for_dates(ax, xDates)\n ax.xaxis.set_tick_params(labelsize=3)\n ax.yaxis.set_tick_params(labelsize=3)\n\n #handle interior subplot ticks/labels\n ix = int(iplot)%int(nx)\n iy = int(iplot)/int(nx)\n if not interiorLabels \\\n and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix 
<= (int(nplots)%int(nx) - 1)) )):\n ax.tick_params(axis='x',labelbottom=False)\n if interiorLabels or ix == 0:\n ax.set_ylabel(dataLabel,fontsize=4)\n if interiorLabels or ix == nx-1:\n #colorbar\n m = plt.cm.ScalarMappable(cmap=cmap)\n m.set_array(contourVals)\n m.set_norm(norm)\n if (np.isfinite(mindval) and\n np.isfinite(maxdval) and\n not logscale):\n m.set_clim(mindval,maxdval)\n cb = plt.colorbar(m, ax=ax)\n #scientific formatting\n if sciticks and not logscale:\n cb.ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n cb.ax.yaxis.get_offset_text().set_fontsize(3)\n\n cb.ax.tick_params(labelsize=3)\n cb.set_label(clabel,fontsize=5)\n\n if invert_ind_axis:\n ax.invert_yaxis()\n\n # optionally add a grid\n #ax.grid()\n\n return\n\n\n###############################################################################\ndef transformXY_for_pcolor(xs,ys):\n # adjust centered x and y values to edges to work with pcolormesh \n # note: works best for regularly spaced data\n xs_diff = xs[1] - xs[0]\n # extend xs by 2\n # fill in first endpoint\n xs_extend = [xs[0]-xs_diff]\n # fill in internal values\n for x in xs: xs_extend.append(x)\n # fill in last endpoint\n xs_extend.append(xs_extend[-1]+(xs[-1]-xs[-2]))\n # calculate the midpoints\n xs_pcolormesh_midpoints = []\n for ii, x in enumerate(xs_extend[:-1]):\n xs_pcolormesh_midpoints.append(x+0.5*(xs_extend[ii+1] - xs_extend[ii]))\n\n ys_diff = ys[1] - ys[0]\n # extend ys by 2\n # fill in first endpoint\n ys_extend = [ys[0]-ys_diff]\n # fill in internal values\n for y in ys: ys_extend.append(y)\n # fill in last endpoint\n ys_extend.append(ys_extend[-1]+(ys[-1]-ys[-2]))\n # calculate the midpoints\n ys_pcolormesh_midpoints = []\n for ii, y in enumerate(ys_extend[:-1]):\n ys_pcolormesh_midpoints.append(y+0.5*(ys_extend[ii+1] - ys_extend[ii]))\n\n return xs_pcolormesh_midpoints, ys_pcolormesh_midpoints\n\n\n###############################################################################\nlenWarnPDF = 0\nnanWarnPDF = 0\ndef plotPDF(fig,\n countsVals, xVals,\n countsLabel,\n title=\"\",\n indepLabel=\"x\",\n ny=1, nx=1, nplots=1, iplot=0,\n lineAttribOffset=1,\n legend_inside=True,\n interiorLabels=True):\n\n# ARGUMENTS\n# fig - matplotlib figure object\n# countsVals - list of arrays, each containing counts across xVals\n# xVals - independent variable on x-axis (array)\n# countsLabel - legend label for countsVals (list)\n\n# title - subplot title, optional\n# indepLabel - label for xVals, optional\n\n# ny, nx - number of subplots in x/y direction, optional\n# nplots - total number of subplots, optional\n# iplot - this subplot index (starting at 0), optional\n\n# lineAttribOffset - offset for selecting line attributes, optional\n# legend_inside - whether legend should be placed inside the subplot, optional\n\n ax = fig.add_subplot(ny, nx, iplot+1)\n\n #title\n ax.set_title(title,fontsize=5)\n\n #add counts\n plotVals = []\n nPDFs = 0\n for ihist, countVals in enumerate(countsVals):\n if np.all(np.isnan(countVals)):\n global nanWarnPDF\n if nanWarnPDF==0:\n _logger.warning(\"skipping all-NaN data\")\n _logger.warning(title+\"; \"+indepLabel+\"; \"+countsLabel[ihist])\n nanWarnPDF=nanWarnPDF+1\n continue\n if len(countVals)!=len(xVals):\n global lenWarnPDF\n if lenWarnPDF==0:\n _logger.warning(\"skipping data where len(x)!=len(y)\")\n _logger.warning(title+\"; \"+indepLabel+\"; \"+countsLabel[ihist])\n lenWarnPDF=lenWarnPDF+1\n continue\n\n # Plot line for each countVals that has non-missing data\n\n # assume constant dx between bins\n dx = 
xVals[1] - xVals[0]\n\n ax.plot(xVals, np.divide(countVals,np.sum(countVals)*dx),\n color=pu.plotColor(len(countsVals),ihist+lineAttribOffset),\n label=countsLabel[ihist],\n ls=pu.plotLineStyle(len(countsVals),ihist+lineAttribOffset),\n linewidth=0.5)\n nPDFs = nPDFs + 1\n plotVals.append(countVals)\n\n\n if nPDFs == 0:\n ax.tick_params(axis='x',labelbottom=False)\n ax.tick_params(axis='y',labelleft=False)\n return\n\n # add a standard normal pdf\n from scipy.stats import norm\n ax.plot(xVals, norm.pdf(xVals),\n color='k',\n ls='-',\n linewidth=0.35,\n label='N(0,1)'\n )\n\n #axes settings\n ax.xaxis.set_tick_params(labelsize=3)\n ax.yaxis.set_tick_params(labelsize=3)\n plt.yscale('log')\n ax.set_ylim(bottom=1.e-6)\n\n #handle interior subplot ticks/labels\n ix = int(iplot)%int(nx)\n iy = int(iplot)/int(nx)\n if not interiorLabels \\\n and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )):\n ax.tick_params(axis='x',labelbottom=False)\n if interiorLabels or ix == 0:\n ax.set_xlabel(indepLabel,fontsize=4)\n ax.set_ylabel('PDF',fontsize=4)\n\n #legend\n if legend_inside:\n #INSIDE AXES\n lh = ax.legend(loc='best',fontsize=3,frameon=True,\\\n framealpha=0.4,ncol=1)\n lh.get_frame().set_linewidth(0.0)\n elif ix==nx-1 or iplot==nplots-1:\n #OUTSIDE AXES\n ax.legend(loc='upper left',fontsize=3,frameon=False, \\\n bbox_to_anchor=(1.02, 1), borderaxespad=0)\n\n ax.grid()\n\n return\n\n\n###############################################################################\nlenWarnRamp = 0\nnanWarnRamp = 0\ndef plotfitRampComposite(fig,\n xVals,\n countVals,\n meanVals,\n rmsVals,\n stdVals,\n title=\"\", dataLabel=\"y\", \\\n indepLabel=\"x\",\n ny=1, nx=1, nplots=1, iplot=0,\n lineAttribOffset=1,\n legend_inside=True,\n interiorLabels=True):\n\n# ARGUMENTS\n# fig - matplotlib figure object\n# countVals - Count of quantity (array)\n# meanVals - Mean of quantity (array)\n# rmsVals - RMS of quantity (array)\n# stdVals - STD of quantity (array)\n\n# xVals - independent variable on x-axis (array)\n\n# title - subplot title, optional\n# dataLabel - label for y-axis, optional\n# indepLabel - label for xVals, optional\n\n# ny, nx - number of subplots in x/y direction, optional\n# nplots - total number of subplots, optional\n# iplot - this subplot index (starting at 0), optional\n\n# lineAttribOffset - offset for selecting line attributes, optional\n# legend_inside - whether legend should be placed inside the subplot, optional\n\n ax = fig.add_subplot(ny, nx, iplot+1)\n ix = int(iplot)%int(nx)\n iy = int(iplot)/int(nx)\n\n #title\n ax.set_title(title,fontsize=5)\n\n #add lines\n plotVals = []\n nLines = 0\n linesLabel = ['RMS','STD','Mean']\n for iline, lineVals in enumerate([rmsVals,stdVals,meanVals]):\n if np.all(np.isnan(lineVals)):\n global nanWarnRamp\n if nanWarnRamp==0:\n _logger.warning(\"skipping all-NaN data\")\n _logger.warning(title+\"; \"+indepLabel+\"; \"+linesLabel[iline])\n nanWarnRamp=nanWarnRamp+1\n continue\n if len(lineVals)!=len(xVals):\n global lenWarnRamp\n if lenWarnRamp==0:\n _logger.warning(\"skipping data where len(x)!=len(y)\")\n _logger.warning(title+\"; \"+indepLabel+\"; \"+linesLabel[iline])\n lenWarnRamp=lenWarnRamp+1\n continue\n\n # Plot line for each lineVals that has non-missing data\n pColor = pu.plotColor(4,iline+lineAttribOffset)\n\n ax.plot(xVals, lineVals,\n color=pColor,\n label=linesLabel[iline],\n ls=pu.plotLineStyle(4,iline+lineAttribOffset),\n linewidth=0.6)\n nLines += 1\n plotVals.append(lineVals)\n\n if nLines == 0:\n 
ax.tick_params(axis='x',labelbottom=False)\n ax.tick_params(axis='y',labelleft=False)\n return\n\n # Add fit for stdVals here using info from countVals\n ind0 = np.argmax(countVals)\n\n indexMaxX4Std = 0\n for ii, std in enumerate(stdVals):\n if np.isfinite(std): indexMaxX4Std = ii\n indexMaxX = indexMaxX4Std\n maxCount = 0\n for ii, count in enumerate(countVals):\n if count > maxCount: maxCount = count\n if count < 0.002*maxCount:\n indexMaxX = ii\n break\n if indexMaxX > indexMaxX4Std:\n ind1 = np.argmax(stdVals[0:indexMaxX4Std])\n else:\n ind1 = np.argmax(stdVals[0:indexMaxX])\n\n weights = [0.2]*(ind1-ind0+1)\n weights[0] = 1.0\n p = np.polyfit(xVals[ind0:ind1+1],stdVals[ind0:ind1+1],1,\n w=weights)\n\n X0 = xVals[ind0]\n ERR0 = X0 * p[0] + p[1]\n\n # X1 = xVals[ind1]\n # ERR1 = X1 * p[0] + p[1]\n ERR1 = stdVals[ind1]\n X1 = (ERR1 - p[1]) / p[0]\n\n\n ERRfitDict = {\n 'bu':{\n 'X': [round(X0,2), round(X1,2)],\n 'ERR': [round(ERR0,2), round(ERR1,2)],\n },\n 'YAML':{\n 'X0': [round(X0,2)],\n 'X1': [round(X1,2)],\n 'ERR0': [round(ERR0,2)],\n 'ERR1': [round(ERR1,2)],\n },\n }\n\n fitX = np.asarray([0.0] + ERRfitDict['bu']['X'] + [xVals[indexMaxX4Std]])\n fitERR = np.asarray([ERR0] + ERRfitDict['bu']['ERR'] + [ERR1])\n\n plotVals.append(fitERR)\n\n pColor = pu.plotColor(4,1+lineAttribOffset)\n\n ax.plot(fitX, fitERR,\n color=pColor,\n label='Fit-STD',\n ls='-.',\n linewidth=1.2,\n marker='+',\n ms=1.5\n )\n\n #axes settings\n ax.xaxis.set_tick_params(labelsize=3)\n ax.yaxis.set_tick_params(labelsize=3)\n\n # standardize x-limits\n mindval, maxdval = pu.get_clean_ax_limits(plotVals=plotVals)\n if (np.isfinite(mindval) and\n np.isfinite(maxdval)):\n ax.set_ylim(mindval,maxdval)\n\n #handle interior subplot ticks/labels\n if not interiorLabels \\\n and (iy < ny-2 or ( iy == ny-2 and (int(nplots)%int(nx)==0 or ix <= (int(nplots)%int(nx) - 1)) )):\n ax.tick_params(axis='x',labelbottom=False)\n if interiorLabels or ix == 0:\n ax.set_xlabel(indepLabel,fontsize=4)\n if interiorLabels or iy == ny-1:\n ax.set_ylabel(dataLabel,fontsize=4)\n\n #legend\n if legend_inside:\n #INSIDE AXES\n lh = ax.legend(loc='best',fontsize=3,frameon=True,\\\n framealpha=0.4,ncol=1)\n lh.get_frame().set_linewidth(0.0)\n elif ix==nx-1 or iplot==nplots-1:\n #OUTSIDE AXES\n ax.legend(loc='upper left',fontsize=3,frameon=False, \\\n bbox_to_anchor=(1.02, 1), borderaxespad=0)\n\n ax.grid()\n\n # Add count on RHS y-axis\n ax2 = ax.twinx()\n color = 'black'\n if interiorLabels or ix == nx:\n ax2.set_ylabel('Count',fontsize=4,color=color)\n ax2.plot(xVals[:indexMaxX4Std], countVals[:indexMaxX4Std],\n color=color,\n label='Count',\n ls=':',\n linewidth=0.5)\n ax2.tick_params(axis='y', labelcolor=color)\n ax2.yaxis.set_tick_params(labelsize=3)\n plt.yscale('log')\n ax2.set_ylim(bottom=100.)\n\n return ERRfitDict\n",
"import os\nimport sys\nimport numpy\nimport numpy as np\nfrom netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/\nimport matplotlib\nmatplotlib.use('pdf')\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport datetime as dt\nimport plot_utils as pu\nimport var_utils as vu\nimport modelsp_utils as mu\n\ndef readdata():\n for metrics in mu.allFileStats:\n for varName in mu.varNames2d:\n for latBand in mu.latBands:\n arraylist = []\n for iexp, expName in enumerate(mu.expNames):\n xlabeltime = []\n alldata = []\n alldiffdata = []\n alldiffdata_rmsdiv = [] \n nc_file = mu.expDirectory+'/'+mu.expLongNames[iexp]+'/FC2DIAG/expmgfs.nc'\n nc_fid = Dataset(nc_file, \"r\", format=\"NETCDF4\")\n for fcTDelta in np.arange(0,mu.fcRange+mu.interval,mu.interval):\n varNamesList = ['expmgfs_day'+str(fcTDelta)+'_'+ latBand +'_'+ varName + '_' + metrics]\n # data1: exp1-GFSANA \n data = np.array( nc_fid.variables[''.join(varNamesList)][:]) \n alldata = np.append(alldata, data)\n xlabeltime = np.append(xlabeltime,fcTDelta)\n varNamesListUse = 'expmgfs_fc_'+ latBand +'_'+ varName + '_' + metrics\n if (iexp == 0):\n arraylist = [alldata]\n else:\n arraylist= arraylist + [alldata]\n plotTimeSerial(arraylist,xlabeltime,varNamesListUse) \n#TODO: move this part to basic_plot_functions.py\ndef plotTimeSerial(linesVals,xlabeltime,VarName):\n\n fig,ax1 = plt.subplots(1,sharex=True)\n plt.grid(True)\n xarray = range(len(xlabeltime))\n major_ticks = np.arange(0, len(xlabeltime), 1)\n\n if (VarName == 'qv' or VarName == 'rho' or VarName == 'q2'):\n ax1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n nx = mu.nExp\n for iexp in range(0,mu.nExp):\n ax1.plot(xarray,linesVals[iexp],pu.plotSpecs[iexp],markersize=5)\n\n ax1.set_xticks(major_ticks[::4])\n # upper right\n ax1.legend(mu.expNames, loc='upper left',fontsize=12,frameon=False)\n\n FCDay = VarName.split(\"_\")[1]\n if (FCDay == 'day0.0'):\n ax1.set_xlabel('Analysis Time',fontsize=15)\n ax1.set_xticks(xarray[::4])\n ax1.set_xticklabels(xlabeltime[::4],rotation=90)\n elif (FCDay == 'day0.25'):\n ax1.set_xlabel( '6h Forecast',fontsize=15)\n ax1.set_xticks(xarray[::4])\n ax1.set_xticklabels(xlabeltime[::4],rotation=90)\n else:\n ax1.set_xlabel( 'Lead Time (day)',fontsize=15)\n ax1.set_xticks(major_ticks[::1])\n ax1.set_xticks(xarray[::1])\n ax1.set_xticklabels(xlabeltime[::1],rotation=0)\n\n ax1.grid()\n #ax1.set_xticklabels(xlabeltime[::4])\n plt.xticks(rotation=90)\n plt.grid(True)\n region = VarName.split(\"_\")[2]\n var = '_'.join(VarName.split(\"_\")[3:][:-1]) # surface_pressure\n stats = ''.join(VarName.split(\"_\")[-1:])\n ax1.set_ylabel(stats,fontsize=15)\n plt.title(stats+' variable:'+vu.varDictModel[var][1]+'('+ vu.varDictModel[var][0]+') '+region, fontsize = 12)\n plt.savefig(VarName+'.png',dpi=300,bbox_inches='tight')\n plt.close()\ndef main():\n readdata()\n\nif __name__ == '__main__': main()\n"
] | [
[
"numpy.amax",
"numpy.polyfit",
"matplotlib.pyplot.contourf",
"matplotlib.colors.BoundaryNorm",
"numpy.linspace",
"matplotlib.colors.SymLogNorm",
"numpy.asarray",
"numpy.sqrt",
"matplotlib.pyplot.get_cmap",
"numpy.concatenate",
"numpy.all",
"numpy.max",
"numpy.any",
"numpy.arange",
"matplotlib.colors.DivergingNorm",
"numpy.argmax",
"pandas.plotting.register_matplotlib_converters",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.title",
"numpy.amin",
"numpy.min",
"numpy.isnan",
"numpy.multiply",
"matplotlib.pyplot.savefig",
"numpy.append",
"matplotlib.colors.ListedColormap",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.cm.ScalarMappable",
"matplotlib.colors.LogNorm",
"numpy.abs",
"numpy.isfinite",
"matplotlib.use",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"matplotlib.ticker.MaxNLocator",
"numpy.loadtxt"
],
[
"matplotlib.pyplot.title",
"matplotlib.use",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.append",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xticks"
]
] |
Christoper-Harvey/1st-Capstone | [
"93630a4d5f4a2d939c8b5f74f11b5b33052e3f72"
] | [
"DeepReinforcementLearning/funcs.py"
] | [
"import numpy as np\nimport random\n\nimport loggers as lg\n\nfrom game import Game, GameState\nfrom model import Residual_CNN\n\nfrom agent import Agent, User\n\nimport config\n\ndef playMatchesBetweenVersions(env, run_version, player1version, player2version, EPISODES, logger, turns_until_tau0, goes_first = 0):\n \n if player1version == -1:\n player1 = User('player1', env.state_size, env.action_size)\n else:\n player1_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE, env.input_shape, env.action_size, config.HIDDEN_CNN_LAYERS)\n\n if player1version > 0:\n player1_network = player1_NN.read(env.name, run_version, player1version)\n player1_NN.model.set_weights(player1_network.get_weights()) \n player1 = Agent('player1', env.state_size, env.action_size, config.p1_MCTS_SIMS, config.CPUCT, player1_NN)\n\n if player2version == -1:\n player2 = User('player2', env.state_size, env.action_size)\n else:\n player2_NN = Residual_CNN(config.REG_CONST, config.LEARNING_RATE, env.input_shape, env.action_size, config.HIDDEN_CNN_LAYERS)\n \n if player2version > 0:\n player2_network = player2_NN.read(env.name, run_version, player2version)\n player2_NN.model.set_weights(player2_network.get_weights())\n player2 = Agent('player2', env.state_size, env.action_size, config.p2_MCTS_SIMS, config.CPUCT, player2_NN)\n\n scores, memory, points, sp_scores = playMatches(player1, player2, EPISODES, logger, turns_until_tau0, None, goes_first)\n\n return (scores, memory, points, sp_scores)\n\n\ndef playMatches(player1, player2, EPISODES, logger, turns_until_tau0, memory = None, goes_first = 0):\n\n env = Game()\n scores = {player1.name:0, \"drawn\": 0, player2.name:0}\n sp_scores = {'sp':0, \"drawn\": 0, 'nsp':0}\n points = {player1.name:[], player2.name:[]}\n\n for e in range(EPISODES):\n\n logger.info('====================')\n logger.info('EPISODE %d OF %d', e+1, EPISODES)\n logger.info('====================')\n\n print (str(e+1) + ' ', end='')\n\n state = env.reset()\n \n done = 0\n turn = 0\n player1.mcts = None\n player2.mcts = None\n\n if goes_first == 0:\n player1Starts = random.randint(0,1) * 2 - 1\n else:\n player1Starts = goes_first\n\n if player1Starts == 1:\n players = {1:{\"agent\": player1, \"name\":player1.name}\n , -1: {\"agent\": player2, \"name\":player2.name}\n }\n logger.info(player1.name + ' plays as X')\n else:\n players = {1:{\"agent\": player2, \"name\":player2.name}\n , -1: {\"agent\": player1, \"name\":player1.name}\n }\n logger.info(player2.name + ' plays as X')\n logger.info('--------------')\n\n env.gameState.render(logger)\n\n while done == 0:\n turn = turn + 1\n \n #### Run the MCTS algo and return an action\n if turn < turns_until_tau0:\n action, pi, MCTS_value, NN_value = players[state.playerTurn]['agent'].act(state, 1)\n else:\n action, pi, MCTS_value, NN_value = players[state.playerTurn]['agent'].act(state, 0)\n\n if memory != None:\n ####Commit the move to memory\n memory.commit_stmemory(env.identities, state, pi)\n\n\n logger.info('action: %d', action)\n for r in range(env.grid_shape[0]):\n logger.info(['----' if x == 0 else '{0:.2f}'.format(np.round(x,2)) for x in pi[env.grid_shape[1]*r : (env.grid_shape[1]*r + env.grid_shape[1])]])\n logger.info('MCTS perceived value for %s: %f', state.pieces[str(state.playerTurn)] ,np.round(MCTS_value,2))\n logger.info('NN perceived value for %s: %f', state.pieces[str(state.playerTurn)] ,np.round(NN_value,2))\n logger.info('====================')\n\n ### Do the action\n state, value, done, _ = env.step(action) #the value of the newState from the 
POV of the new playerTurn i.e. -1 if the previous player played a winning move\n \n env.gameState.render(logger)\n\n if done == 1: \n if memory != None:\n #### If the game is finished, assign the values correctly to the game moves\n for move in memory.stmemory:\n if move['playerTurn'] == state.playerTurn:\n move['value'] = value\n else:\n move['value'] = -value\n \n memory.commit_ltmemory()\n \n if value == 1:\n logger.info('%s WINS!', players[state.playerTurn]['name'])\n scores[players[state.playerTurn]['name']] = scores[players[state.playerTurn]['name']] + 1\n if state.playerTurn == 1: \n sp_scores['sp'] = sp_scores['sp'] + 1\n else:\n sp_scores['nsp'] = sp_scores['nsp'] + 1\n\n elif value == -1:\n logger.info('%s WINS!', players[-state.playerTurn]['name'])\n scores[players[-state.playerTurn]['name']] = scores[players[-state.playerTurn]['name']] + 1\n \n if state.playerTurn == 1: \n sp_scores['nsp'] = sp_scores['nsp'] + 1\n else:\n sp_scores['sp'] = sp_scores['sp'] + 1\n\n else:\n logger.info('DRAW...')\n scores['drawn'] = scores['drawn'] + 1\n sp_scores['drawn'] = sp_scores['drawn'] + 1\n\n pts = state.score\n points[players[state.playerTurn]['name']].append(pts[0])\n points[players[-state.playerTurn]['name']].append(pts[1])\n\n return (scores, memory, points, sp_scores)\n"
] | [
[
"numpy.round"
]
] |
sebasj13/topas-create-graphs | [
"5ccdbcbbe39461917cc015aa59805e518421431c"
] | [
"topasgraphsim/src/functions/dp.py"
] | [
"import numpy as np\nimport scipy.integrate as integrate\nimport scipy.interpolate as interpolate\n\n\ndef calculate_parameters(axis, dose, cax=False):\n\n \"\"\"\n A function to calculate the relevant\n descriptive parameters of dose profiles.\n \"\"\"\n\n interpolated_axis = np.linspace(axis[0], axis[-1], len(axis) * 100)\n akima_dose_interpolator = interpolate.Akima1DInterpolator(axis, dose)\n interpolated_dose = np.flip(akima_dose_interpolator.__call__(interpolated_axis))\n\n D0 = (\n interpolated_dose[int(len(interpolated_dose) / 2)]\n + interpolated_dose[int(len(interpolated_dose) / 2) - 1]\n ) / 2\n XL20 = interpolated_axis[: int(len(interpolated_axis) / 2)][\n (\n np.abs(\n interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.2 * max(dose)\n )\n ).argmin()\n ]\n XL50 = interpolated_axis[: int(len(interpolated_axis) / 2)][\n (\n np.abs(\n interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.5 * max(dose)\n )\n ).argmin()\n ]\n XL80 = interpolated_axis[: int(len(interpolated_axis) / 2)][\n (\n np.abs(\n interpolated_dose[: int(len(interpolated_axis) / 2)] - 0.8 * max(dose)\n )\n ).argmin()\n ]\n XR20 = interpolated_axis[int(len(interpolated_axis) / 2) :][\n (\n np.abs(\n interpolated_dose[\n int(len(interpolated_axis) / 2) : len(interpolated_axis)\n ]\n - 0.2 * max(dose)\n )\n ).argmin()\n ]\n XR50 = interpolated_axis[int(len(interpolated_axis) / 2) :][\n (\n np.abs(\n interpolated_dose[\n int(len(interpolated_axis) / 2) : len(interpolated_axis)\n ]\n - 0.5 * max(dose)\n )\n ).argmin()\n ]\n XR80 = interpolated_axis[int(len(interpolated_axis) / 2) :][\n (\n np.abs(\n interpolated_dose[\n int(len(interpolated_axis) / 2) : len(interpolated_axis)\n ]\n - 0.8 * max(dose)\n )\n ).argmin()\n ]\n\n HWB = round(abs(XR50 - XL50), 3)\n CAXdev = round(XL50 + 0.5 * HWB, 3)\n\n Dose80 = [value for value in dose if value >= 0.8 * max(dose)]\n\n if cax == True:\n return CAXdev\n\n flat_krieger = round(\n max([value for value in dose if value >= 0.95 * max(dose)])\n - min([value for value in dose if value >= 0.95 * max(dose)]) / D0,\n 5,\n )\n flat_stddev = round(np.std(Dose80), 3)\n\n if len(Dose80) % 2 != 0:\n Dose80 = (\n Dose80[0 : int(len(Dose80) / 2)]\n + Dose80[int(len(Dose80) / 2) + 1 : len(Dose80)]\n )\n\n S = round(\n max(\n [Dose80[i - 1] / Dose80[len(Dose80) - i] for i in range(1, len(Dose80) + 1)]\n ),\n 3,\n )\n\n Lpenumbra = round(abs(XL80 - XL20 + CAXdev), 3)\n Rpenumbra = round(abs(XR80 - XR20 + CAXdev), 3)\n\n XL20index = np.where(interpolated_axis == XL20)[0][0]\n XL80index = np.where(interpolated_axis == XL80)[0][0]\n XR20index = np.where(interpolated_axis == XR20)[0][0]\n XR80index = np.where(interpolated_axis == XR80)[0][0]\n Lintegral = round(\n abs(\n integrate.simps(\n interpolated_dose[XL20index:XL80index],\n interpolated_axis[XL20index:XL80index],\n )\n ),\n 3,\n )\n Rintegral = round(\n abs(\n integrate.simps(\n interpolated_dose[XR80index:XR20index],\n interpolated_axis[XR80index:XR20index],\n )\n ),\n 3,\n )\n\n if CAXdev > 150:\n raise Exception\n\n return [\n HWB,\n CAXdev,\n flat_krieger,\n flat_stddev,\n S,\n Lpenumbra,\n Rpenumbra,\n Lintegral,\n Rintegral,\n ]\n"
] | [
[
"scipy.integrate.simps",
"numpy.std",
"scipy.interpolate.Akima1DInterpolator",
"numpy.where"
]
] |
simondlevy/gym-copter | [
"7236769b7586b92026d4b47f12363258c84d9508"
] | [
"nengo/copter.py"
] | [
"'''\nQuadcopter class for Nengo adaptive controller\n\nCopyright (C) 2021 Xuan Choo, Simon D. Levy\n\nMIT License\n'''\n\nimport nengo\nimport gym\nimport numpy as np\n\nfrom adaptive import run\n\n\nclass Copter:\n\n def __init__(self, seed=None):\n\n self.env = gym.make('gym_copter:Hover1D-v0')\n self.reset(seed)\n\n def reset(self, seed):\n\n self.state = self.env.reset()\n\n def step(self, u):\n\n u = np.clip(u, 0, 1)\n\n self.env.render()\n\n z, dz, = self.state\n\n # Negate for NED => ENU\n z, dz = -z, -dz\n\n print('%f | %+3.3f %+3.3f' % (u, z, dz))\n\n self.state, _reward, _done, _info = self.env.step((u,))\n\n return z, dz\n\n def set_extra_force(self, force):\n\n self.extra_mass = force\n\n def generate_html(self, desired):\n '''\n Copter is simulated externally\n '''\n return None\n\n\nwith nengo.Network(seed=3) as model:\n\n run(Copter, 'Copter', 'Position', 'Wind Force')\n"
] | [
[
"numpy.clip"
]
] |
chasingegg/Data_Science | [
"a499866ff92aa1107057b20563564bdd89fc370f"
] | [
"Python/textrank/textrank.py"
] | [
"#!/usr/src/env python\n# -*- coding: utf-8 -*-\n# TextRank 博客 http://xiaosheng.me/2017/04/08/article49/\n# 从PageRank转变而来,可以用来做关键字的提取。TextRank的计算公式其实跟PageRank可以认为是一样的\n# 只不过就是要考虑权重的因素(算PageRank的时候就是均摊权值)\n# 在TextRank构建的图中,节点是句子,权值就是两个句子的相似程度 \n\n# 提取关键字的时候,单词作为图的节点,把权值都设成1,此时其实退化成PageRank\n# 把文本拆分成单词,将这一些单词设定一个简单的滑动窗口,每个窗口内的任意两个单词之间存在一条边\n\n# 如果是要提取关键句,一般认为所有句子都是相邻的,不需要窗口提取。相似程度的计算公式一般是重合\n# 单词数量除以总单词数量\n\nimport sys\nimport pandas as pd\nimport jieba.analyse\n\ndef textrank(data, topK):\n idList, titleList, abstractList = data['id'], data['title'], data['abstract']\n ids, title, keys = [], [], []\n for i in range(len(idList)):\n text = '%s。%s' % (titleList[i], abstractList[i]) #拼接\n jieba.analyse.set_stop_words('data/stopWord.txt')\n print(\"\\\"\", titleList[i], \"\\\"\", \" 10 keywords - TextRank :\")\n keywords = jieba.analyse.textrank(text, topK = topK, allowPOS=('n','nz','v','vd','vn','l','a','d'))\n word_split = \" \".join(keywords)\n print(word_split)\n keys.append(word_split.encode(\"utf-8\"))\n ids.append(idList[i])\n title.append(titleList[i])\n result = pd.DataFrame({\"id\":ids, \"title\":title, \"key\":keys}, columns=['id', 'title', 'key'])\n return result\n\nif __name__ == \"__main__\":\n dataFile = 'data/sample_data.csv'\n data = pd.read_csv(dataFile)\n result = textrank(data, 10)\n result.to_csv(\"result/keys_textrank.csv\", index=False)"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] |
1in1/Python-Baseball | [
"4c76d65330ff7eb88c87057be02bbddb50dd325b"
] | [
"stats/data.py"
] | [
"import os\nimport glob\nimport pandas as pd\n\ngame_files = glob.glob(os.path.join(os.getcwd(), 'games', '*.EVE'))\ngame_files.sort()\n\ngame_frames = []\nfor game_file in game_files:\n game_frame = pd.read_csv(game_file, names=['type','multi2','multi3','multi4','multi5','multi6','event'])\n game_frames.append(game_frame)\n\ngames = pd.concat(game_frames)\ngames.loc[games['multi5'] == '??', ['multi5']] = ''\n\nidentifiers = games['multi2'].str.extract(r'(.LS(\\d{4})\\d{5})')\nidentifiers = identifiers.fillna(method='ffill')\nidentifiers.columns = ['game_id', 'year']\ngames = pd.concat([games, identifiers], axis=1, sort=False)\ngames = games.fillna(' ')\ngames.loc[:, 'type'] = pd.Categorical(games.loc[:, 'type'])\nprint(games.head())\n\n"
] | [
[
"pandas.Categorical",
"pandas.concat",
"pandas.read_csv"
]
] |
RobertRosca/PyFstat | [
"1c9568bb3dc87c3d33aeb41b3f572e9990665372",
"1c9568bb3dc87c3d33aeb41b3f572e9990665372"
] | [
"examples/other_examples/PyFstat_example_twoF_cumulative.py",
"examples/other_examples/PyFstat_example_spectrogram.py"
] | [
"\"\"\"\nCumulative coherent 2F\n======================\n\nCompute the cumulative coherent F-statistic of a signal candidate.\n\"\"\"\n\n\nimport os\nimport numpy as np\nimport pyfstat\n\nfrom pyfstat.helper_functions import get_predict_fstat_parameters_from_dict\n\nlabel = \"PyFstat_example_twoF_cumulative\"\noutdir = os.path.join(\"PyFstat_example_data\", label)\n\n# Properties of the GW data\ngw_data = {\n \"sqrtSX\": 1e-23,\n \"tstart\": 1000000000,\n \"duration\": 100 * 86400,\n \"detectors\": \"H1,L1\",\n \"Band\": 4,\n \"Tsft\": 1800,\n}\n\n# Properties of the signal\ndepth = 100\nphase_parameters = {\n \"F0\": 30.0,\n \"F1\": -1e-10,\n \"F2\": 0,\n \"Alpha\": np.radians(83.6292),\n \"Delta\": np.radians(22.0144),\n \"tref\": gw_data[\"tstart\"],\n \"asini\": 10,\n \"period\": 10 * 3600 * 24,\n \"tp\": gw_data[\"tstart\"] + gw_data[\"duration\"] / 2.0,\n \"ecc\": 0,\n \"argp\": 0,\n}\namplitude_parameters = {\n \"h0\": gw_data[\"sqrtSX\"] / depth,\n \"cosi\": 1,\n \"phi\": np.pi,\n \"psi\": np.pi / 8,\n}\n\nPFS_input = get_predict_fstat_parameters_from_dict(\n {**phase_parameters, **amplitude_parameters}\n)\n\n# Let me grab tref here, since it won't really be needed in phase_parameters\ntref = phase_parameters.pop(\"tref\")\ndata = pyfstat.BinaryModulatedWriter(\n label=label,\n outdir=outdir,\n tref=tref,\n **gw_data,\n **phase_parameters,\n **amplitude_parameters,\n)\ndata.make_data()\n\n# The predicted twoF, given by lalapps_predictFstat can be accessed by\ntwoF = data.predict_fstat()\nprint(\"Predicted twoF value: {}\\n\".format(twoF))\n\n# Create a search object for each of the possible SFT combinations\n# (H1 only, L1 only, H1 + L1).\nifo_constraints = [\"L1\", \"H1\", None]\ncompute_fstat_per_ifo = [\n pyfstat.ComputeFstat(\n sftfilepattern=os.path.join(\n data.outdir,\n (f\"{ifo_constraint[0]}*.sft\" if ifo_constraint is not None else \"*.sft\"),\n ),\n tref=data.tref,\n binary=phase_parameters.get(\"asini\", 0),\n minCoverFreq=-0.5,\n maxCoverFreq=-0.5,\n )\n for ifo_constraint in ifo_constraints\n]\n\nfor ind, compute_f_stat in enumerate(compute_fstat_per_ifo):\n compute_f_stat.plot_twoF_cumulative(\n label=label + (f\"_{ifo_constraints[ind]}\" if ind < 2 else \"_H1L1\"),\n outdir=outdir,\n savefig=True,\n CFS_input=phase_parameters,\n PFS_input=PFS_input,\n custom_ax_kwargs={\n \"title\": \"How does 2F accumulate over time?\",\n \"label\": \"Cumulative 2F\"\n + (f\" {ifo_constraints[ind]}\" if ind < 2 else \" H1 + L1\"),\n },\n )\n",
"\"\"\"\nCompute a spectrogram\n==========================\n\nCompute the spectrogram of a set of SFTs. This is useful to produce\nvisualizations of the Doppler modulation of a CW signal.\n\"\"\"\n\nimport os\nimport matplotlib.pyplot as plt\n\nimport pyfstat\n\n# not github-action compatible\n# plt.rcParams[\"font.family\"] = \"serif\"\n# plt.rcParams[\"font.size\"] = 18\n# plt.rcParams[\"text.usetex\"] = True\n\nlabel = \"PyFstat_example_spectrogram\"\noutdir = os.path.join(\"PyFstat_example_data\", label)\n\ndepth = 5\n\ndata_parameters = {\n \"sqrtSX\": 1e-23,\n \"tstart\": 1000000000,\n \"duration\": 2 * 365 * 86400,\n \"detectors\": \"H1\",\n \"Tsft\": 1800,\n}\n\nsignal_parameters = {\n \"F0\": 100.0,\n \"F1\": 0,\n \"F2\": 0,\n \"Alpha\": 0.0,\n \"Delta\": 0.5,\n \"tp\": data_parameters[\"tstart\"],\n \"asini\": 25.0,\n \"period\": 50 * 86400,\n \"tref\": data_parameters[\"tstart\"],\n \"h0\": data_parameters[\"sqrtSX\"] / depth,\n \"cosi\": 1.0,\n}\n\n# making data\ndata = pyfstat.BinaryModulatedWriter(\n label=label, outdir=outdir, **data_parameters, **signal_parameters\n)\ndata.make_data()\n\nprint(\"Loading SFT data and computing normalized power...\")\ntimes, freqs, sft_data = pyfstat.helper_functions.get_sft_array(data.sftfilepath)\nnormalized_power = (\n 2 * sft_data ** 2 / (data_parameters[\"Tsft\"] * data_parameters[\"sqrtSX\"] ** 2)\n)\n\nplotfile = os.path.join(outdir, label + \".png\")\nprint(f\"Plotting to file: {plotfile}\")\nfig, ax = plt.subplots(figsize=(0.8 * 16, 0.8 * 9))\nax.grid(which=\"both\")\nax.set(xlabel=\"Time [days]\", ylabel=\"Frequency [Hz]\", ylim=(99.98, 100.02))\nc = ax.pcolormesh(\n (times - times[0]) / 86400,\n freqs,\n normalized_power,\n cmap=\"inferno_r\",\n shading=\"nearest\",\n)\nfig.colorbar(c, label=\"Normalized Power\")\nplt.tight_layout()\nfig.savefig(plotfile)\n"
] | [
[
"numpy.radians"
],
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots"
]
] |
bugface/transformers | [
"ba286fe7d51db12ad663effac83bed8199dd7141",
"ba286fe7d51db12ad663effac83bed8199dd7141",
"ba286fe7d51db12ad663effac83bed8199dd7141",
"ba286fe7d51db12ad663effac83bed8199dd7141",
"ba286fe7d51db12ad663effac83bed8199dd7141",
"ba286fe7d51db12ad663effac83bed8199dd7141",
"ba286fe7d51db12ad663effac83bed8199dd7141",
"ba286fe7d51db12ad663effac83bed8199dd7141",
"ba286fe7d51db12ad663effac83bed8199dd7141"
] | [
"src/transformers/models/unispeech/modeling_unispeech.py",
"src/transformers/models/luke/modeling_luke.py",
"tests/models/clip/test_modeling_tf_clip.py",
"examples/flax/token-classification/run_flax_ner.py",
"examples/research_projects/bertology/run_bertology.py",
"tests/models/t5/test_modeling_t5.py",
"tests/models/big_bird/test_tokenization_big_bird.py",
"src/transformers/models/lxmert/modeling_tf_lxmert.py",
"src/transformers/models/rembert/modeling_rembert.py"
] | [
"# coding=utf-8\n# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch UniSpeech model.\"\"\"\n\nimport math\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...activations import ACT2FN\nfrom ...deepspeed import is_deepspeed_zero3_enabled\nfrom ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, Wav2Vec2BaseModelOutput\nfrom ...modeling_utils import PreTrainedModel\nfrom ...pytorch_utils import torch_int_div\nfrom ...utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n logging,\n replace_return_docstrings,\n)\nfrom .configuration_unispeech import UniSpeechConfig\n\n\nlogger = logging.get_logger(__name__)\n\n\n_HIDDEN_STATES_START_POSITION = 2\n\n# General docstring\n_CONFIG_FOR_DOC = \"UniSpeechConfig\"\n_PROCESSOR_FOR_DOC = \"Wav2Vec2Processor\"\n\n# Base docstring\n_CHECKPOINT_FOR_DOC = \"patrickvonplaten/unispeech-large-1500h-cv-timit\"\n_EXPECTED_OUTPUT_SHAPE = [1, 292, 1024]\n\n# CTC docstring\n_CTC_EXPECTED_OUTPUT = \"'mister quilter is the apposl of the midle classes and weare glad to welcom his gosepl'\"\n_CTC_EXPECTED_LOSS = 17.17\n\n# Audio class docstring\n_FEAT_EXTRACTOR_FOR_DOC = \"Wav2Vec2FeatureExtractor\"\n_SEQ_CLASS_CHECKPOINT = \"hf-internal-testing/tiny-random-unispeech\"\n_SEQ_CLASS_EXPECTED_OUTPUT = \"'LABEL_0'\" # TODO(anton) - could you quickly fine-tune a KS WavLM Model\n_SEQ_CLASS_EXPECTED_LOSS = 0.66 # TODO(anton) - could you quickly fine-tune a KS WavLM Model\n\nUNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"microsoft/unispeech-large-1500h-cv\",\n \"microsoft/unispeech-large-multi-lingual-1500h-cv\",\n # See all UniSpeech models at https://huggingface.co/models?filter=unispeech\n]\n\n\n@dataclass\nclass UniSpeechForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of [`UniSpeechForPreTrainingOutput`], with potential hidden states and attentions.\n\n Args:\n loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`):\n Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official\n paper](https://arxiv.org/pdf/2006.11477.pdf) . 
(classification) loss.\n projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):\n Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked\n projected quantized states.\n projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):\n Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive\n target vectors for contrastive loss.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n projected_states: torch.FloatTensor = None\n projected_quantized_states: torch.FloatTensor = None\n codevector_perplexity: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices\ndef _compute_mask_indices(\n shape: Tuple[int, int],\n mask_prob: float,\n mask_length: int,\n attention_mask: Optional[torch.LongTensor] = None,\n min_masks: int = 0,\n) -> np.ndarray:\n \"\"\"\n Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for\n ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on\n CPU as part of the preprocessing during training.\n\n Args:\n shape: The shape for which to compute masks. This should be of a tuple of size 2 where\n the first element is the batch size and the second element is the length of the axis to span.\n mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of\n independently generated mask spans of length `mask_length` is computed by\n `mask_prob*shape[1]/mask_length`. 
Note that due to overlaps, `mask_prob` is an upper bound and the\n actual percentage will be smaller.\n mask_length: size of the mask\n min_masks: minimum number of masked spans\n attention_mask: A (right-padded) attention mask which independently shortens the feature axis of\n each batch dimension.\n \"\"\"\n batch_size, sequence_length = shape\n\n if mask_length < 1:\n raise ValueError(\"`mask_length` has to be bigger than 0.\")\n\n if mask_length > sequence_length:\n raise ValueError(\n f\"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}\"\n f\" and `sequence_length`: {sequence_length}`\"\n )\n\n # epsilon is used for probabilistic rounding\n epsilon = np.random.rand(1).item()\n\n def compute_num_masked_span(input_length):\n \"\"\"Given input length, compute how many spans should be masked\"\"\"\n num_masked_span = int(mask_prob * input_length / mask_length + epsilon)\n num_masked_span = max(num_masked_span, min_masks)\n\n # make sure num masked span <= sequence_length\n if num_masked_span * mask_length > sequence_length:\n num_masked_span = sequence_length // mask_length\n\n # make sure num_masked span is also <= input_length - (mask_length - 1)\n if input_length - (mask_length - 1) < num_masked_span:\n num_masked_span = max(input_length - (mask_length - 1), 0)\n\n return num_masked_span\n\n # compute number of masked spans in batch\n input_lengths = (\n attention_mask.sum(-1).detach().tolist()\n if attention_mask is not None\n else [sequence_length for _ in range(batch_size)]\n )\n\n # SpecAugment mask to fill\n spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)\n spec_aug_mask_idxs = []\n\n max_num_masked_span = compute_num_masked_span(sequence_length)\n\n if max_num_masked_span == 0:\n return spec_aug_mask\n\n for input_length in input_lengths:\n # compute num of masked spans for this input\n num_masked_span = compute_num_masked_span(input_length)\n\n # get random indices to mask\n spec_aug_mask_idx = np.random.choice(\n np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False\n )\n\n # pick first sampled index that will serve as a dummy index to pad vector\n # to ensure same dimension for all batches due to probabilistic rounding\n # Picking first sample just pads those vectors twice.\n if len(spec_aug_mask_idx) == 0:\n # this case can only happen if `input_length` is strictly smaller then\n # `sequence_length` in which case the last token has to be a padding\n # token which we can use as a dummy mask id\n dummy_mask_idx = sequence_length - 1\n else:\n dummy_mask_idx = spec_aug_mask_idx[0]\n\n spec_aug_mask_idx = np.concatenate(\n [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]\n )\n spec_aug_mask_idxs.append(spec_aug_mask_idx)\n\n spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)\n\n # expand masked indices to masked spans\n spec_aug_mask_idxs = np.broadcast_to(\n spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)\n )\n spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)\n\n # add offset to the starting indexes so that that indexes now create a span\n offsets = np.arange(mask_length)[None, None, :]\n offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(\n batch_size, max_num_masked_span * mask_length\n )\n spec_aug_mask_idxs = spec_aug_mask_idxs + offsets\n\n # ensure that we cannot have indices larger than sequence_length\n if 
spec_aug_mask_idxs.max() > sequence_length - 1:\n spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1\n\n # scatter indices to mask\n np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)\n\n return spec_aug_mask\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->UniSpeech\nclass UniSpeechNoLayerNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->UniSpeech\nclass UniSpeechLayerNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n\n hidden_states = hidden_states.transpose(-2, -1)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = hidden_states.transpose(-2, -1)\n\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->UniSpeech\nclass UniSpeechGroupNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.activation = ACT2FN[config.feat_extract_activation]\n\n self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->UniSpeech\nclass UniSpeechPositionalConvEmbedding(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.conv = nn.Conv1d(\n config.hidden_size,\n config.hidden_size,\n kernel_size=config.num_conv_pos_embeddings,\n padding=config.num_conv_pos_embeddings // 2,\n groups=config.num_conv_pos_embedding_groups,\n )\n\n if is_deepspeed_zero3_enabled():\n import deepspeed\n\n with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):\n self.conv = nn.utils.weight_norm(self.conv, name=\"weight\", dim=2)\n 
deepspeed.zero.register_external_parameter(self, self.conv.weight_v)\n            deepspeed.zero.register_external_parameter(self, self.conv.weight_g)\n        else:\n            self.conv = nn.utils.weight_norm(self.conv, name=\"weight\", dim=2)\n\n        self.padding = UniSpeechSamePadLayer(config.num_conv_pos_embeddings)\n        self.activation = ACT2FN[config.feat_extract_activation]\n\n    def forward(self, hidden_states):\n        hidden_states = hidden_states.transpose(1, 2)\n\n        hidden_states = self.conv(hidden_states)\n        hidden_states = self.padding(hidden_states)\n        hidden_states = self.activation(hidden_states)\n\n        hidden_states = hidden_states.transpose(1, 2)\n        return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->UniSpeech\nclass UniSpeechSamePadLayer(nn.Module):\n    def __init__(self, num_conv_pos_embeddings):\n        super().__init__()\n        self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0\n\n    def forward(self, hidden_states):\n        if self.num_pad_remove > 0:\n            hidden_states = hidden_states[:, :, : -self.num_pad_remove]\n        return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->UniSpeech\nclass UniSpeechFeatureEncoder(nn.Module):\n    \"\"\"Construct the features from raw audio waveform\"\"\"\n\n    def __init__(self, config):\n        super().__init__()\n\n        if config.feat_extract_norm == \"group\":\n            conv_layers = [UniSpeechGroupNormConvLayer(config, layer_id=0)] + [\n                UniSpeechNoLayerNormConvLayer(config, layer_id=i + 1)\n                for i in range(config.num_feat_extract_layers - 1)\n            ]\n        elif config.feat_extract_norm == \"layer\":\n            conv_layers = [\n                UniSpeechLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)\n            ]\n        else:\n            raise ValueError(\n                f\"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']\"\n            )\n        self.conv_layers = nn.ModuleList(conv_layers)\n        self.gradient_checkpointing = False\n        self._requires_grad = True\n\n    def _freeze_parameters(self):\n        for param in self.parameters():\n            param.requires_grad = False\n        self._requires_grad = False\n\n    def forward(self, input_values):\n        hidden_states = input_values[:, None]\n\n        # make sure hidden_states require grad for gradient_checkpointing\n        if self._requires_grad and self.training:\n            hidden_states.requires_grad = True\n\n        for conv_layer in self.conv_layers:\n            if self._requires_grad and self.gradient_checkpointing and self.training:\n\n                def create_custom_forward(module):\n                    def custom_forward(*inputs):\n                        return module(*inputs)\n\n                    return custom_forward\n\n                hidden_states = torch.utils.checkpoint.checkpoint(\n                    create_custom_forward(conv_layer),\n                    hidden_states,\n                )\n            else:\n                hidden_states = conv_layer(hidden_states)\n\n        return hidden_states\n\n\nclass UniSpeechFeatureExtractor(UniSpeechFeatureEncoder):\n    def __init__(self, config):\n        super().__init__(config)\n        warnings.warn(\n            f\"The class `{self.__class__.__name__}` has been deprecated \"\n            \"and will be removed in Transformers v5. 
\"\n f\"Use `{self.__class__.__bases__[0].__name__}` instead.\",\n FutureWarning,\n )\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->UniSpeech\nclass UniSpeechFeatureProjection(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)\n self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)\n self.dropout = nn.Dropout(config.feat_proj_dropout)\n\n def forward(self, hidden_states):\n # non-projected hidden states are needed for quantization\n norm_hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.projection(norm_hidden_states)\n hidden_states = self.dropout(hidden_states)\n return hidden_states, norm_hidden_states\n\n\n# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->UniSpeech\nclass UniSpeechAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n is_decoder: bool = False,\n bias: bool = True,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n\n if (self.head_dim * num_heads) != self.embed_dim:\n raise ValueError(\n f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}\"\n f\" and `num_heads`: {num_heads}).\"\n )\n self.scaling = self.head_dim**-0.5\n self.is_decoder = is_decoder\n\n self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n key_value_states: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.Tensor] = None,\n layer_head_mask: Optional[torch.Tensor] = None,\n output_attentions: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"Input shape: Batch x Time x Channel\"\"\"\n\n # if key_value_states are provided this layer is used as a cross-attention layer\n # for the decoder\n is_cross_attention = key_value_states is not None\n\n bsz, tgt_len, _ = hidden_states.size()\n\n # get query proj\n query_states = self.q_proj(hidden_states) * self.scaling\n # get key, value proj\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_states = past_key_value[0]\n value_states = past_key_value[1]\n elif is_cross_attention:\n # cross_attentions\n key_states = self._shape(self.k_proj(key_value_states), -1, bsz)\n value_states = self._shape(self.v_proj(key_value_states), -1, bsz)\n elif past_key_value is not None:\n # reuse k, v, self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n key_states = torch.cat([past_key_value[0], key_states], dim=2)\n value_states = torch.cat([past_key_value[1], value_states], dim=2)\n else:\n # self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n\n if self.is_decoder:\n # 
if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n            # Further calls to cross_attention layer can then reuse all cross-attention\n            # key/value_states (first \"if\" case)\n            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n            # all previous decoder key/value_states. Further calls to uni-directional self-attention\n            # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n            # if encoder bi-directional self-attention `past_key_value` is always `None`\n            past_key_value = (key_states, value_states)\n\n        proj_shape = (bsz * self.num_heads, -1, self.head_dim)\n        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)\n        key_states = key_states.view(*proj_shape)\n        value_states = value_states.view(*proj_shape)\n\n        src_len = key_states.size(1)\n        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))\n\n        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):\n            raise ValueError(\n                f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is\"\n                f\" {attn_weights.size()}\"\n            )\n\n        if attention_mask is not None:\n            if attention_mask.size() != (bsz, 1, tgt_len, src_len):\n                raise ValueError(\n                    f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}\"\n                )\n            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask\n            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n        attn_weights = nn.functional.softmax(attn_weights, dim=-1)\n\n        if layer_head_mask is not None:\n            if layer_head_mask.size() != (self.num_heads,):\n                raise ValueError(\n                    f\"Head mask for a single layer should be of size {(self.num_heads,)}, but is\"\n                    f\" {layer_head_mask.size()}\"\n                )\n            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n        if output_attentions:\n            # this operation is a bit awkward, but it's required to\n            # make sure that attn_weights keeps its gradient.\n            # In order to do so, attn_weights have to be reshaped\n            # twice and have to be reused in the following\n            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)\n        else:\n            attn_weights_reshaped = None\n\n        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)\n\n        attn_output = torch.bmm(attn_probs, value_states)\n\n        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):\n            raise ValueError(\n                f\"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is\"\n                f\" {attn_output.size()}\"\n            )\n\n        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)\n        attn_output = attn_output.transpose(1, 2)\n\n        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be\n        # partitioned across GPUs when using tensor-parallelism.\n        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)\n\n        attn_output = self.out_proj(attn_output)\n\n        return attn_output, attn_weights_reshaped, past_key_value\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->UniSpeech\nclass UniSpeechFeedForward(nn.Module):\n    def __init__(self, config):\n        super().__init__()\n        
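# position-wise FFN: project to `intermediate_size`, apply the configured\n        # activation, then project back to `hidden_size`, with dropout after each stage\n        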
self.intermediate_dropout = nn.Dropout(config.activation_dropout)\n\n self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.output_dropout = nn.Dropout(config.hidden_dropout)\n\n def forward(self, hidden_states):\n hidden_states = self.intermediate_dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n hidden_states = self.intermediate_dropout(hidden_states)\n\n hidden_states = self.output_dense(hidden_states)\n hidden_states = self.output_dropout(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->UniSpeech\nclass UniSpeechEncoderLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.attention = UniSpeechAttention(\n embed_dim=config.hidden_size,\n num_heads=config.num_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=False,\n )\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.feed_forward = UniSpeechFeedForward(config)\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, attention_mask=None, output_attentions=False):\n attn_residual = hidden_states\n hidden_states, attn_weights, _ = self.attention(\n hidden_states, attention_mask=attention_mask, output_attentions=output_attentions\n )\n hidden_states = self.dropout(hidden_states)\n hidden_states = attn_residual + hidden_states\n\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = hidden_states + self.feed_forward(hidden_states)\n hidden_states = self.final_layer_norm(hidden_states)\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->UniSpeech\nclass UniSpeechEncoderLayerStableLayerNorm(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.attention = UniSpeechAttention(\n embed_dim=config.hidden_size,\n num_heads=config.num_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=False,\n )\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.feed_forward = UniSpeechFeedForward(config)\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, attention_mask=None, output_attentions=False):\n attn_residual = hidden_states\n hidden_states = self.layer_norm(hidden_states)\n hidden_states, attn_weights, _ = self.attention(\n hidden_states, attention_mask=attention_mask, output_attentions=output_attentions\n )\n hidden_states = self.dropout(hidden_states)\n hidden_states = attn_residual + hidden_states\n hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->UniSpeech\nclass UniSpeechEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n 
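# convolutional relative position embeddings are added to the features once,\n        # before the stack of (post-norm) transformer layers\n        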
self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layers = nn.ModuleList([UniSpeechEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n if attention_mask is not None:\n # make sure padded tokens output 0\n hidden_states[~attention_mask] = 0.0\n\n # extend attention_mask\n attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0\n attention_mask = attention_mask.expand(\n attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]\n )\n\n position_embeddings = self.pos_conv_embed(hidden_states)\n hidden_states = hidden_states + position_embeddings\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()\n\n for layer in self.layers:\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = np.random.uniform(0, 1)\n\n skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False\n if not skip_the_layer or deepspeed_zero3_is_enabled:\n # under deepspeed zero3 all gpus must run in sync\n if self.gradient_checkpointing and self.training:\n # create gradient checkpointing function\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer),\n hidden_states,\n attention_mask,\n )\n else:\n layer_outputs = layer(\n hidden_states, attention_mask=attention_mask, output_attentions=output_attentions\n )\n hidden_states = layer_outputs[0]\n\n if skip_the_layer:\n layer_outputs = (None, None)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->UniSpeech\nclass UniSpeechEncoderStableLayerNorm(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layers = nn.ModuleList(\n [UniSpeechEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]\n )\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if 
output_attentions else None\n\n if attention_mask is not None:\n # make sure padded tokens are not attended to\n hidden_states[~attention_mask] = 0\n\n # extend attention_mask\n attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0\n attention_mask = attention_mask.expand(\n attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]\n )\n\n position_embeddings = self.pos_conv_embed(hidden_states)\n hidden_states = hidden_states + position_embeddings\n hidden_states = self.dropout(hidden_states)\n\n deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()\n\n for layer in self.layers:\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = np.random.uniform(0, 1)\n\n skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False\n if not skip_the_layer or deepspeed_zero3_is_enabled:\n # under deepspeed zero3 all gpus must run in sync\n # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication\n if self.gradient_checkpointing and self.training:\n # create gradient checkpointing function\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer),\n hidden_states,\n attention_mask,\n )\n else:\n layer_outputs = layer(\n hidden_states, attention_mask=attention_mask, output_attentions=output_attentions\n )\n hidden_states = layer_outputs[0]\n\n if skip_the_layer:\n layer_outputs = (None, None)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n\n hidden_states = self.layer_norm(hidden_states)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\nclass UniSpeechGumbelVectorQuantizer(nn.Module):\n \"\"\"\n Vector quantization using gumbel softmax. 
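Each feature vector is split into\n    `num_groups` groups and each group is snapped to one of `num_vars` learnable codewords; the straight-through\n    Gumbel-softmax keeps this discrete choice differentiable during training. 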
See [CATEGORICAL REPARAMETERIZATION WITH\n    GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.\n    \"\"\"\n\n    def __init__(self, config):\n        super().__init__()\n        self.num_groups = config.num_codevector_groups\n        self.num_vars = config.num_codevectors_per_group\n\n        if config.codevector_dim % self.num_groups != 0:\n            raise ValueError(\n                f\"`config.codevector_dim` {config.codevector_dim} must be divisible by `config.num_codevector_groups`\"\n                f\" {self.num_groups} for concatenation\"\n            )\n\n        # storage for codebook variables (codewords)\n        self.codevectors = nn.Parameter(\n            torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)\n        )\n        self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)\n\n        # can be decayed for training\n        self.temperature = 2\n\n    @staticmethod\n    def _compute_perplexity(probs):\n        marginal_probs = probs.mean(dim=0)\n        perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()\n        return perplexity\n\n    def forward(self, hidden_states):\n        batch_size, sequence_length, hidden_size = hidden_states.shape\n\n        # project to codevector dim\n        hidden_states = self.weight_proj(hidden_states)\n        hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)\n\n        if self.training:\n            # sample code vector probs via gumbel in a differentiable way\n            codevector_probs = nn.functional.gumbel_softmax(\n                hidden_states.float(), tau=self.temperature, hard=True\n            ).type_as(hidden_states)\n\n            # compute perplexity\n            codevector_soft_dist = torch.softmax(\n                hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1\n            )\n            perplexity = self._compute_perplexity(codevector_soft_dist)\n        else:\n            # take argmax in non-differentiable way\n            # compute hard codevector distribution (one hot)\n            codevector_idx = hidden_states.argmax(dim=-1)\n            codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(\n                -1, codevector_idx.view(-1, 1), 1.0\n            )\n            codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)\n\n            perplexity = self._compute_perplexity(codevector_probs)\n\n        codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)\n        # use probs to retrieve codevectors\n        codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors\n        codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)\n        codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)\n\n        return codevectors, perplexity\n\n\nclass UniSpeechPreTrainedModel(PreTrainedModel):\n    \"\"\"\n    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n    models.\n    \"\"\"\n\n    config_class = UniSpeechConfig\n    base_model_prefix = \"unispeech\"\n    main_input_name = \"input_values\"\n    _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n    supports_gradient_checkpointing = True\n\n    def _init_weights(self, module):\n        \"\"\"Initialize the weights\"\"\"\n        # gumbel softmax requires special init\n        if isinstance(module, UniSpeechGumbelVectorQuantizer):\n            module.weight_proj.weight.data.normal_(mean=0.0, std=1)\n            module.weight_proj.bias.data.zero_()\n            nn.init.uniform_(module.codevectors)\n        elif isinstance(module, UniSpeechPositionalConvEmbedding):\n            nn.init.normal_(\n                module.conv.weight,\n                mean=0,\n                std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),\n            )\n            nn.init.constant_(module.conv.bias, 0)\n        elif 
isinstance(module, UniSpeechFeatureProjection):\n            k = math.sqrt(1 / module.projection.in_features)\n            nn.init.uniform_(module.projection.weight, a=-k, b=k)\n            nn.init.uniform_(module.projection.bias, a=-k, b=k)\n        elif isinstance(module, nn.Linear):\n            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n\n            if module.bias is not None:\n                module.bias.data.zero_()\n        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):\n            module.bias.data.zero_()\n            module.weight.data.fill_(1.0)\n        elif isinstance(module, nn.Conv1d):\n            nn.init.kaiming_normal_(module.weight)\n\n            if module.bias is not None:\n                k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))\n                nn.init.uniform_(module.bias, a=-k, b=k)\n\n    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):\n        \"\"\"\n        Computes the output length of the convolutional layers\n        \"\"\"\n\n        def _conv_out_length(input_length, kernel_size, stride):\n            # 1D convolutional layer output length formula taken\n            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html\n            return torch_int_div(input_length - kernel_size, stride) + 1\n\n        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):\n            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)\n\n        return input_lengths\n\n    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):\n        # Effectively attention_mask.sum(-1), but not inplace to be able to run\n        # on inference mode.\n        non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]\n        output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)\n        batch_size = attention_mask.shape[0]\n\n        attention_mask = torch.zeros(\n            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device\n        )\n        # these two operations make sure that all values before the output lengths idxs are attended to\n        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1\n        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()\n        return attention_mask\n\n    def _set_gradient_checkpointing(self, module, value=False):\n        if isinstance(module, (UniSpeechEncoder, UniSpeechEncoderStableLayerNorm, UniSpeechFeatureEncoder)):\n            module.gradient_checkpointing = value\n\n\nUNISPEECH_START_DOCSTRING = r\"\"\"\n    UniSpeech was proposed in [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled\n    Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei,\n    Michael Zeng, Xuedong Huang.\n\n    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n    library implements for all its models (such as downloading or saving etc.).\n\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`UniSpeechConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\n\nUNISPEECH_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file\n into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install\n soundfile*). To prepare the array into *input_values*, the [`UniSpeechProcessor`] should be used for\n padding and conversion into a tensor of type *torch.FloatTensor*. See [`UniSpeechProcessor.__call__`] for\n details.\n attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,\n 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n <Tip warning={true}>\n\n `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==\n True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should\n **not** be passed to avoid degraded performance when doing batched inference. For such models\n `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these\n models also yield slightly different results depending on whether `input_values` is padded or not.\n\n </Tip>\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top.\",\n UNISPEECH_START_DOCSTRING,\n)\nclass UniSpeechModel(UniSpeechPreTrainedModel):\n def __init__(self, config: UniSpeechConfig):\n super().__init__(config)\n self.config = config\n self.feature_extractor = UniSpeechFeatureEncoder(config)\n self.feature_projection = UniSpeechFeatureProjection(config)\n\n if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:\n self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())\n\n if config.do_stable_layer_norm:\n self.encoder = UniSpeechEncoderStableLayerNorm(config)\n else:\n self.encoder = UniSpeechEncoder(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states\n def _mask_hidden_states(\n self,\n hidden_states: torch.FloatTensor,\n mask_time_indices: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n ):\n \"\"\"\n Masks extracted features along time axis and/or along feature axis according to\n [SpecAugment](https://arxiv.org/abs/1904.08779).\n \"\"\"\n\n # `config.apply_spec_augment` can set masking to False\n if not getattr(self.config, \"apply_spec_augment\", True):\n return hidden_states\n\n # generate indices & apply SpecAugment along time axis\n batch_size, sequence_length, hidden_size = hidden_states.size()\n\n if mask_time_indices is not None:\n # apply SpecAugment along time axis with given mask_time_indices\n hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)\n elif self.config.mask_time_prob > 0 and self.training:\n mask_time_indices = _compute_mask_indices(\n (batch_size, sequence_length),\n mask_prob=self.config.mask_time_prob,\n mask_length=self.config.mask_time_length,\n attention_mask=attention_mask,\n min_masks=self.config.mask_time_min_masks,\n )\n mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)\n hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)\n\n if self.config.mask_feature_prob > 0 and self.training:\n # generate indices & apply SpecAugment along feature axis\n mask_feature_indices = _compute_mask_indices(\n (batch_size, hidden_size),\n mask_prob=self.config.mask_feature_prob,\n mask_length=self.config.mask_feature_length,\n min_masks=self.config.mask_feature_min_masks,\n )\n mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)\n mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)\n hidden_states[mask_feature_indices] = 0\n\n return hidden_states\n\n @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_PROCESSOR_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=Wav2Vec2BaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_EXPECTED_OUTPUT_SHAPE,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n mask_time_indices: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n 
return_dict: Optional[bool] = None,\n ) -> Union[Tuple, Wav2Vec2BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n extract_features = self.feature_extractor(input_values)\n extract_features = extract_features.transpose(1, 2)\n\n if attention_mask is not None:\n # compute reduced attention_mask corresponding to feature vectors\n attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)\n\n hidden_states, extract_features = self.feature_projection(extract_features)\n hidden_states = self._mask_hidden_states(\n hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask\n )\n\n encoder_outputs = self.encoder(\n hidden_states,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = encoder_outputs[0]\n\n if not return_dict:\n return (hidden_states, extract_features) + encoder_outputs[1:]\n\n return Wav2Vec2BaseModelOutput(\n last_hidden_state=hidden_states,\n extract_features=extract_features,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"UniSpeech Model with a vector-quantization module and ctc loss for pre-training.\"\"\", UNISPEECH_START_DOCSTRING\n)\nclass UniSpeechForPreTraining(UniSpeechPreTrainedModel):\n def __init__(self, config: UniSpeechConfig):\n super().__init__(config)\n self.unispeech = UniSpeechModel(config)\n self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)\n\n self.quantizer = UniSpeechGumbelVectorQuantizer(config)\n self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)\n self.project_hid = nn.Linear(config.proj_codevector_dim, config.hidden_size)\n\n self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes)\n self.dropout = nn.Dropout(config.final_dropout)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def set_gumbel_temperature(self, temperature: int):\n \"\"\"\n Set the Gumbel softmax temperature to a given value. 
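Lower temperatures make the hard Gumbel-softmax samples\n        concentrate on the highest-scoring codeword, so the temperature is typically decayed over pre-training.\n        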
Only necessary for training.\n        \"\"\"\n        self.quantizer.temperature = temperature\n\n    def freeze_feature_extractor(self):\n        \"\"\"\n        Calling this function will disable the gradient computation for the feature encoder so that its parameters will\n        not be updated during training.\n        \"\"\"\n        warnings.warn(\n            \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. \"\n            \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n            FutureWarning,\n        )\n        self.freeze_feature_encoder()\n\n    def freeze_feature_encoder(self):\n        \"\"\"\n        Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n        not be updated during training.\n        \"\"\"\n        self.unispeech.feature_extractor._freeze_parameters()\n\n    @staticmethod\n    def compute_contrastive_logits(\n        target_features: torch.FloatTensor,\n        negative_features: torch.FloatTensor,\n        predicted_features: torch.FloatTensor,\n        temperature: int = 1,\n    ):\n        \"\"\"\n        Compute logits for contrastive loss using cosine similarity as the distance measure between\n        `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.\n        \"\"\"\n        target_features = torch.cat([target_features, negative_features], dim=0)\n\n        logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1)\n        logits = logits.type_as(target_features)\n\n        # apply temperature\n        logits = logits / temperature\n        return logits\n\n    @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)\n    @replace_return_docstrings(output_type=UniSpeechForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n    def forward(\n        self,\n        input_values: Optional[torch.Tensor],\n        attention_mask: Optional[torch.Tensor] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n    ) -> Union[Tuple, UniSpeechForPreTrainingOutput]:\n        r\"\"\"\n        mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):\n            Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict\n            masked extracted features in *config.proj_codevector_dim* space.\n        sampled_negative_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_negatives)`, *optional*):\n            Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss.\n            Required input for pre-training.\n\n        Returns:\n\n        Example:\n\n        ```python\n        >>> import torch\n        >>> from transformers import Wav2Vec2FeatureExtractor, UniSpeechForPreTraining\n        >>> from transformers.models.unispeech.modeling_unispeech import _compute_mask_indices\n\n        >>> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(\n        ...     \"hf-internal-testing/tiny-random-unispeech-sat\"\n        ... 
)\n >>> model = UniSpeechForPreTraining.from_pretrained(\"microsoft/unispeech-large-1500h-cv\")\n >>> # TODO: Add full pretraining example\n ```\"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.unispeech(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n transformer_features = outputs[0]\n\n # quantize all (unmasked) extracted features and project to final vq dim\n extract_features = self.dropout_features(outputs[1])\n quantized_features, codevector_perplexity = self.quantizer(extract_features)\n\n # project quantized features twice\n quantized_features = self.project_q(quantized_features)\n quantized_features = self.project_hid(quantized_features)\n\n prob_replace_matrix = torch.empty(transformer_features.size(0), transformer_features.size(1)).fill_(\n self.config.replace_prob\n )\n prob_replace_matrix = prob_replace_matrix.transpose(0, 1)\n sampled_replace_matrix = torch.bernoulli(prob_replace_matrix).bool().to(transformer_features.device)\n sampled_replace_matrix = sampled_replace_matrix.transpose(0, 1)\n sampled_replace_matrix = sampled_replace_matrix.unsqueeze(-1)\n logits = transformer_features.masked_fill(sampled_replace_matrix, 0.0) + (\n quantized_features.masked_fill(~sampled_replace_matrix, 0.0)\n )\n\n # project to ctc units\n logits = self.dropout(logits)\n logits = self.ctc_proj(logits)\n\n # TODO(PVP) - add negative sampling & loss computation\n loss = None\n if not return_dict:\n if loss is not None:\n return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]\n return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]\n\n return UniSpeechForPreTrainingOutput(\n loss=loss,\n projected_states=transformer_features,\n projected_quantized_states=quantized_features,\n codevector_perplexity=codevector_perplexity,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\"\"\",\n UNISPEECH_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->UniSpeech, wav2vec2->unispeech, WAV_2_VEC_2->UNISPEECH\nclass UniSpeechForCTC(UniSpeechPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.unispeech = UniSpeechModel(config)\n self.dropout = nn.Dropout(config.final_dropout)\n\n if config.vocab_size is None:\n raise ValueError(\n f\"You are trying to instantiate {self.__class__} with a configuration that \"\n \"does not define the vocabulary size of the language model head. Please \"\n \"instantiate the model as follows: `UniSpeechForCTC.from_pretrained(..., vocab_size=vocab_size)`. 
\"\n \"or define `vocab_size` of your model's configuration.\"\n )\n output_hidden_size = (\n config.output_hidden_size if hasattr(config, \"add_adapter\") and config.add_adapter else config.hidden_size\n )\n self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.unispeech.feature_extractor._freeze_parameters()\n\n @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_PROCESSOR_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=CausalLMOutput,\n config_class=_CONFIG_FOR_DOC,\n expected_output=_CTC_EXPECTED_OUTPUT,\n expected_loss=_CTC_EXPECTED_LOSS,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[torch.Tensor] = None,\n ) -> Union[Tuple, CausalLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):\n Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to\n the sequence length of the output logits. 
Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.\n All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,\n config.vocab_size - 1]`.\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.unispeech(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n hidden_states = self.dropout(hidden_states)\n\n logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n\n if labels.max() >= self.config.vocab_size:\n raise ValueError(f\"Label values must be <= vocab_size: {self.config.vocab_size}\")\n\n # retrieve loss input_lengths from attention_mask\n attention_mask = (\n attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)\n )\n input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)\n\n # assuming that padded tokens are filled with -100\n # when not being attended to\n labels_mask = labels >= 0\n target_lengths = labels_mask.sum(-1)\n flattened_targets = labels.masked_select(labels_mask)\n\n # ctc_loss doesn't support fp16\n log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)\n\n with torch.backends.cudnn.flags(enabled=False):\n loss = nn.functional.ctc_loss(\n log_probs,\n flattened_targets,\n input_lengths,\n target_lengths,\n blank=self.config.pad_token_id,\n reduction=self.config.ctc_loss_reduction,\n zero_infinity=self.config.ctc_zero_infinity,\n )\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions\n )\n\n\n@add_start_docstrings(\n \"\"\"\n UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n \"\"\",\n UNISPEECH_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->UniSpeech, wav2vec2->unispeech, WAV_2_VEC_2->UNISPEECH\nclass UniSpeechForSequenceClassification(UniSpeechPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if hasattr(config, \"add_adapter\") and config.add_adapter:\n raise ValueError(\n \"Sequence classification does not support the use of UniSpeech adapters (config.add_adapter=True)\"\n )\n self.unispeech = UniSpeechModel(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)\n self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameters will\n not be updated during training.\n \"\"\"\n warnings.warn(\n \"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.\"\n \"Please use the equivalent `freeze_feature_encoder` method instead.\",\n FutureWarning,\n )\n 
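# the deprecated entry point simply forwards to the renamed method\n        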
self.freeze_feature_encoder()\n\n def freeze_feature_encoder(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature encoder so that its parameter will\n not be updated during training.\n \"\"\"\n self.unispeech.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. Only the classification head will be updated.\n \"\"\"\n for param in self.unispeech.parameters():\n param.requires_grad = False\n\n @add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_SEQ_CLASS_CHECKPOINT,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,\n expected_loss=_SEQ_CLASS_EXPECTED_LOSS,\n )\n def forward(\n self,\n input_values: Optional[torch.Tensor],\n attention_mask: Optional[torch.Tensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n labels: Optional[torch.Tensor] = None,\n ) -> Union[Tuple, SequenceClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.unispeech(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n hidden_states = self.projector(hidden_states)\n if attention_mask is None:\n pooled_output = hidden_states.mean(dim=1)\n else:\n padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)\n hidden_states[~padding_mask] = 0.0\n pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)\n\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n",
"# coding=utf-8\n# Copyright Studio Ousia and The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch LUKE model.\"\"\"\n\nimport math\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple, Union\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\n\nfrom ...activations import ACT2FN, gelu\nfrom ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling\nfrom ...modeling_utils import PreTrainedModel\nfrom ...pytorch_utils import apply_chunking_to_forward\nfrom ...utils import (\n ModelOutput,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n logging,\n replace_return_docstrings,\n)\nfrom .configuration_luke import LukeConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"LukeConfig\"\n_TOKENIZER_FOR_DOC = \"LukeTokenizer\"\n_CHECKPOINT_FOR_DOC = \"studio-ousia/luke-base\"\n\nLUKE_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"studio-ousia/luke-base\",\n \"studio-ousia/luke-large\",\n # See all LUKE models at https://huggingface.co/models?filter=luke\n]\n\n\n@dataclass\nclass BaseLukeModelOutputWithPooling(BaseModelOutputWithPooling):\n \"\"\"\n Base class for outputs of the LUKE model.\n\n Args:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`):\n Sequence of entity hidden-states at the output of the last layer of the model.\n pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):\n Last layer hidden-state of the first token of the sequence (classification token) further processed by a\n Linear layer and a Tanh activation function.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer\n plus the initial embedding outputs.\n entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each\n layer plus the initial entity embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length +\n entity_length, sequence_length + entity_length)`. 
Attentions weights after the attention softmax, used to\n compute the weighted average in the self-attention heads.\n \"\"\"\n\n entity_last_hidden_state: torch.FloatTensor = None\n entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass BaseLukeModelOutput(BaseModelOutput):\n \"\"\"\n Base class for model's outputs, with potential hidden states and attentions.\n\n Args:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`):\n Sequence of entity hidden-states at the output of the last layer of the model.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each\n layer plus the initial entity embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n entity_last_hidden_state: torch.FloatTensor = None\n entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass LukeMaskedLMOutput(ModelOutput):\n \"\"\"\n Base class for model's outputs, with potential hidden states and attentions.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n The sum of masked language modeling (MLM) loss and entity prediction loss.\n mlm_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Masked language modeling (MLM) loss.\n mep_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Masked entity prediction (MEP) loss.\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n entity_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n entity_hidden_states 
(`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each\n layer plus the initial entity embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n mlm_loss: Optional[torch.FloatTensor] = None\n mep_loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n entity_logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass EntityClassificationOutput(ModelOutput):\n \"\"\"\n Outputs of entity classification models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\n Classification scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer\n plus the initial embedding outputs.\n entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each\n layer plus the initial entity embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in\n the self-attention heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass EntityPairClassificationOutput(ModelOutput):\n \"\"\"\n Outputs of entity pair classification models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\n Classification scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer\n plus the initial embedding outputs.\n entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each\n layer plus the initial entity embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in\n the self-attention heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass EntitySpanClassificationOutput(ModelOutput):\n \"\"\"\n Outputs of entity span classification models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\n Classification scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer\n plus the initial embedding outputs.\n entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, entity_length, hidden_size)`. 
Entity hidden-states of the model at the output of each\n            layer plus the initial entity embedding outputs.\n        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in\n            the self-attention heads.\n    \"\"\"\n\n    loss: Optional[torch.FloatTensor] = None\n    logits: torch.FloatTensor = None\n    hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n    entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n    attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\nclass LukeEmbeddings(nn.Module):\n    \"\"\"\n    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.\n    \"\"\"\n\n    def __init__(self, config):\n        super().__init__()\n        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)\n        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n        # any TensorFlow checkpoint file\n        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n        self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n        # End copy\n        self.padding_idx = config.pad_token_id\n        self.position_embeddings = nn.Embedding(\n            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx\n        )\n\n    def forward(\n        self,\n        input_ids=None,\n        token_type_ids=None,\n        position_ids=None,\n        inputs_embeds=None,\n    ):\n        if position_ids is None:\n            if input_ids is not None:\n                # Create the position ids from the input token ids. Any padded tokens remain padded.\n                position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)\n            else:\n                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)\n\n        if input_ids is not None:\n            input_shape = input_ids.size()\n        else:\n            input_shape = inputs_embeds.size()[:-1]\n\n        if token_type_ids is None:\n            # `position_ids` is always defined at this point (no `position_ids` buffer is registered on\n            # this module), so use its device for the zero-filled token type ids.\n            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=position_ids.device)\n\n        if inputs_embeds is None:\n            inputs_embeds = self.word_embeddings(input_ids)\n\n        position_embeddings = self.position_embeddings(position_ids)\n        token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n        embeddings = inputs_embeds + position_embeddings + token_type_embeddings\n        embeddings = self.LayerNorm(embeddings)\n        embeddings = self.dropout(embeddings)\n        return embeddings\n\n    def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n        \"\"\"\n        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.\n\n        Args:\n            inputs_embeds: torch.Tensor\n\n        Returns: torch.Tensor\n        \"\"\"\n        input_shape = inputs_embeds.size()[:-1]\n        sequence_length = input_shape[1]\n\n        position_ids = torch.arange(\n            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device\n        )\n        return position_ids.unsqueeze(0).expand(input_shape)\n
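A quick, runnable illustration of the position-id convention implemented by `LukeEmbeddings` above (a minimal sketch; the tensor sizes below are made up for the demo): when only `inputs_embeds` are given, padding cannot be inferred, so the ids simply run sequentially starting at `padding_idx + 1`.

```python
import torch

padding_idx = 1  # RoBERTa-style pad token id, matching the module above

# Mirrors LukeEmbeddings.create_position_ids_from_inputs_embeds
inputs_embeds = torch.randn(2, 4, 8)  # (batch, seq_len, hidden) -- demo values
seq_len = inputs_embeds.size(1)
position_ids = torch.arange(padding_idx + 1, seq_len + padding_idx + 1)
print(position_ids)  # tensor([2, 3, 4, 5])
```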
\n\nclass LukeEntityEmbeddings(nn.Module):\n    def __init__(self, config: LukeConfig):\n        super().__init__()\n        self.config = config\n\n        self.entity_embeddings = nn.Embedding(config.entity_vocab_size, config.entity_emb_size, padding_idx=0)\n        if config.entity_emb_size != config.hidden_size:\n            self.entity_embedding_dense = nn.Linear(config.entity_emb_size, config.hidden_size, bias=False)\n\n        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n        self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n    def forward(\n        self, entity_ids: torch.LongTensor, position_ids: torch.LongTensor, token_type_ids: torch.LongTensor = None\n    ):\n        if token_type_ids is None:\n            token_type_ids = torch.zeros_like(entity_ids)\n\n        entity_embeddings = self.entity_embeddings(entity_ids)\n        if self.config.entity_emb_size != self.config.hidden_size:\n            entity_embeddings = self.entity_embedding_dense(entity_embeddings)\n\n        # Each entity spans several word positions; average the position embeddings over the\n        # valid (non -1) positions of the mention.\n        position_embeddings = self.position_embeddings(position_ids.clamp(min=0))\n        position_embedding_mask = (position_ids != -1).type_as(position_embeddings).unsqueeze(-1)\n        position_embeddings = position_embeddings * position_embedding_mask\n        position_embeddings = torch.sum(position_embeddings, dim=-2)\n        position_embeddings = position_embeddings / position_embedding_mask.sum(dim=-2).clamp(min=1e-7)\n\n        token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n        embeddings = entity_embeddings + position_embeddings + token_type_embeddings\n        embeddings = self.LayerNorm(embeddings)\n        embeddings = self.dropout(embeddings)\n\n        return embeddings\n\n\nclass LukeSelfAttention(nn.Module):\n    def __init__(self, config):\n        super().__init__()\n        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n            raise ValueError(\n                f\"The hidden size {config.hidden_size} is not a multiple of the number of attention \"\n                f\"heads {config.num_attention_heads}.\"\n            )\n\n        self.num_attention_heads = config.num_attention_heads\n        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n        self.all_head_size = self.num_attention_heads * self.attention_head_size\n        self.use_entity_aware_attention = config.use_entity_aware_attention\n\n        self.query = nn.Linear(config.hidden_size, self.all_head_size)\n        self.key = nn.Linear(config.hidden_size, self.all_head_size)\n        self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n        if self.use_entity_aware_attention:\n            self.w2e_query = nn.Linear(config.hidden_size, self.all_head_size)\n            self.e2w_query = nn.Linear(config.hidden_size, self.all_head_size)\n            self.e2e_query = nn.Linear(config.hidden_size, self.all_head_size)\n\n        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n    def transpose_for_scores(self, x):\n        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n        x = x.view(*new_x_shape)\n        return x.permute(0, 2, 1, 3)\n\n    def forward(\n        self,\n
word_hidden_states,\n entity_hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n ):\n word_size = word_hidden_states.size(1)\n\n if entity_hidden_states is None:\n concat_hidden_states = word_hidden_states\n else:\n concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1)\n\n key_layer = self.transpose_for_scores(self.key(concat_hidden_states))\n value_layer = self.transpose_for_scores(self.value(concat_hidden_states))\n\n if self.use_entity_aware_attention and entity_hidden_states is not None:\n # compute query vectors using word-word (w2w), word-entity (w2e), entity-word (e2w), entity-entity (e2e)\n # query layers\n w2w_query_layer = self.transpose_for_scores(self.query(word_hidden_states))\n w2e_query_layer = self.transpose_for_scores(self.w2e_query(word_hidden_states))\n e2w_query_layer = self.transpose_for_scores(self.e2w_query(entity_hidden_states))\n e2e_query_layer = self.transpose_for_scores(self.e2e_query(entity_hidden_states))\n\n # compute w2w, w2e, e2w, and e2e key vectors used with the query vectors computed above\n w2w_key_layer = key_layer[:, :, :word_size, :]\n e2w_key_layer = key_layer[:, :, :word_size, :]\n w2e_key_layer = key_layer[:, :, word_size:, :]\n e2e_key_layer = key_layer[:, :, word_size:, :]\n\n # compute attention scores based on the dot product between the query and key vectors\n w2w_attention_scores = torch.matmul(w2w_query_layer, w2w_key_layer.transpose(-1, -2))\n w2e_attention_scores = torch.matmul(w2e_query_layer, w2e_key_layer.transpose(-1, -2))\n e2w_attention_scores = torch.matmul(e2w_query_layer, e2w_key_layer.transpose(-1, -2))\n e2e_attention_scores = torch.matmul(e2e_query_layer, e2e_key_layer.transpose(-1, -2))\n\n # combine attention scores to create the final attention score matrix\n word_attention_scores = torch.cat([w2w_attention_scores, w2e_attention_scores], dim=3)\n entity_attention_scores = torch.cat([e2w_attention_scores, e2e_attention_scores], dim=3)\n attention_scores = torch.cat([word_attention_scores, entity_attention_scores], dim=2)\n\n else:\n query_layer = self.transpose_for_scores(self.query(concat_hidden_states))\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask is (precomputed for all layers in LukeModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.functional.softmax(attention_scores, dim=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n output_word_hidden_states = context_layer[:, :word_size, :]\n if entity_hidden_states is None:\n output_entity_hidden_states = None\n else:\n output_entity_hidden_states = context_layer[:, word_size:, :]\n\n if output_attentions:\n outputs = (output_word_hidden_states, output_entity_hidden_states, attention_probs)\n else:\n outputs = 
(output_word_hidden_states, output_entity_hidden_states)\n\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfOutput\nclass LukeSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass LukeAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = LukeSelfAttention(config)\n self.output = LukeSelfOutput(config)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n raise NotImplementedError(\"LUKE does not support the pruning of attention heads\")\n\n def forward(\n self,\n word_hidden_states,\n entity_hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n ):\n word_size = word_hidden_states.size(1)\n self_outputs = self.self(\n word_hidden_states,\n entity_hidden_states,\n attention_mask,\n head_mask,\n output_attentions,\n )\n if entity_hidden_states is None:\n concat_self_outputs = self_outputs[0]\n concat_hidden_states = word_hidden_states\n else:\n concat_self_outputs = torch.cat(self_outputs[:2], dim=1)\n concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1)\n\n attention_output = self.output(concat_self_outputs, concat_hidden_states)\n\n word_attention_output = attention_output[:, :word_size, :]\n if entity_hidden_states is None:\n entity_attention_output = None\n else:\n entity_attention_output = attention_output[:, word_size:, :]\n\n # add attentions if we output them\n outputs = (word_attention_output, entity_attention_output) + self_outputs[2:]\n\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate\nclass LukeIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput\nclass LukeOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass LukeLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = LukeAttention(config)\n self.intermediate = LukeIntermediate(config)\n self.output = LukeOutput(config)\n\n def forward(\n self,\n 
word_hidden_states,\n entity_hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n ):\n word_size = word_hidden_states.size(1)\n\n self_attention_outputs = self.attention(\n word_hidden_states,\n entity_hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n )\n if entity_hidden_states is None:\n concat_attention_output = self_attention_outputs[0]\n else:\n concat_attention_output = torch.cat(self_attention_outputs[:2], dim=1)\n\n outputs = self_attention_outputs[2:] # add self attentions if we output attention weights\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, concat_attention_output\n )\n word_layer_output = layer_output[:, :word_size, :]\n if entity_hidden_states is None:\n entity_layer_output = None\n else:\n entity_layer_output = layer_output[:, word_size:, :]\n\n outputs = (word_layer_output, entity_layer_output) + outputs\n\n return outputs\n\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\nclass LukeEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layer = nn.ModuleList([LukeLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(\n self,\n word_hidden_states,\n entity_hidden_states,\n attention_mask=None,\n head_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_word_hidden_states = () if output_hidden_states else None\n all_entity_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_word_hidden_states = all_word_hidden_states + (word_hidden_states,)\n all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n if self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n word_hidden_states,\n entity_hidden_states,\n attention_mask,\n layer_head_mask,\n )\n else:\n layer_outputs = layer_module(\n word_hidden_states,\n entity_hidden_states,\n attention_mask,\n layer_head_mask,\n output_attentions,\n )\n\n word_hidden_states = layer_outputs[0]\n\n if entity_hidden_states is not None:\n entity_hidden_states = layer_outputs[1]\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_word_hidden_states = all_word_hidden_states + (word_hidden_states,)\n all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n word_hidden_states,\n all_word_hidden_states,\n all_self_attentions,\n entity_hidden_states,\n all_entity_hidden_states,\n ]\n if v is not None\n )\n return BaseLukeModelOutput(\n last_hidden_state=word_hidden_states,\n hidden_states=all_word_hidden_states,\n attentions=all_self_attentions,\n entity_last_hidden_state=entity_hidden_states,\n entity_hidden_states=all_entity_hidden_states,\n )\n\n\n# Copied from 
transformers.models.bert.modeling_bert.BertPooler\nclass LukePooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass EntityPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.entity_emb_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = nn.LayerNorm(config.entity_emb_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass EntityPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.transform = EntityPredictionHeadTransform(config)\n self.decoder = nn.Linear(config.entity_emb_size, config.entity_vocab_size, bias=False)\n self.bias = nn.Parameter(torch.zeros(config.entity_vocab_size))\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states) + self.bias\n\n return hidden_states\n\n\nclass LukePreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = LukeConfig\n base_model_prefix = \"luke\"\n supports_gradient_checkpointing = True\n\n def _init_weights(self, module: nn.Module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n if module.embedding_dim == 1: # embedding for bias parameters\n module.weight.data.zero_()\n else:\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, LukeEncoder):\n module.gradient_checkpointing = value\n\n\nLUKE_START_DOCSTRING = r\"\"\"\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`LukeConfig`]): Model configuration class with all the parameters of the\n model. Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nLUKE_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`LukeTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n\n entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):\n Indices of entity tokens in the entity vocabulary.\n\n Indices can be obtained using [`LukeTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):\n Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:\n\n - 1 for entity tokens that are **not masked**,\n - 0 for entity tokens that are **masked**.\n\n entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):\n Segment token indices to indicate first and second portions of the entity token inputs. Indices are\n selected in `[0, 1]`:\n\n - 0 corresponds to a *portion A* entity token,\n - 1 corresponds to a *portion B* entity token.\n\n entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):\n Indices of positions of each input entity in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any\"\n \" specific head on top.\",\n LUKE_START_DOCSTRING,\n)\nclass LukeModel(LukePreTrainedModel):\n\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def __init__(self, config: LukeConfig, add_pooling_layer: bool = True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = LukeEmbeddings(config)\n self.entity_embeddings = LukeEntityEmbeddings(config)\n self.encoder = LukeEncoder(config)\n\n self.pooler = LukePooler(config) if add_pooling_layer else None\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def get_entity_embeddings(self):\n return self.entity_embeddings.entity_embeddings\n\n def set_entity_embeddings(self, value):\n self.entity_embeddings.entity_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n raise NotImplementedError(\"LUKE does not support the pruning of attention heads\")\n\n @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=BaseLukeModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n entity_ids: Optional[torch.LongTensor] = None,\n entity_attention_mask: Optional[torch.FloatTensor] = None,\n entity_token_type_ids: Optional[torch.LongTensor] = None,\n entity_position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseLukeModelOutputWithPooling]:\n r\"\"\"\n\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import LukeTokenizer, LukeModel\n\n >>> tokenizer = LukeTokenizer.from_pretrained(\"studio-ousia/luke-base\")\n >>> model = LukeModel.from_pretrained(\"studio-ousia/luke-base\")\n # Compute the contextualized entity representation corresponding to the entity mention \"Beyoncé\"\n\n >>> text = \"Beyoncé lives in Los Angeles.\"\n >>> entity_spans = [(0, 7)] # character-based entity span corresponding to \"Beyoncé\"\n\n >>> encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors=\"pt\")\n >>> outputs = model(**encoding)\n >>> word_last_hidden_state = outputs.last_hidden_state\n >>> entity_last_hidden_state = outputs.entity_last_hidden_state\n # Input Wikipedia entities to obtain enriched contextualized representations of word tokens\n\n >>> text = \"Beyoncé lives in Los Angeles.\"\n >>> entities = [\n ... \"Beyoncé\",\n ... \"Los Angeles\",\n ... ] # Wikipedia entity titles corresponding to the entity mentions \"Beyoncé\" and \"Los Angeles\"\n >>> entity_spans = [\n ... (0, 7),\n ... (17, 28),\n ... ] # character-based entity spans corresponding to \"Beyoncé\" and \"Los Angeles\"\n\n >>> encoding = tokenizer(\n ... 
text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors=\"pt\"\n ... )\n >>> outputs = model(**encoding)\n >>> word_last_hidden_state = outputs.last_hidden_state\n >>> entity_last_hidden_state = outputs.entity_last_hidden_state\n ```\"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n batch_size, seq_length = input_shape\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones((batch_size, seq_length), device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n if entity_ids is not None:\n entity_seq_length = entity_ids.size(1)\n if entity_attention_mask is None:\n entity_attention_mask = torch.ones((batch_size, entity_seq_length), device=device)\n if entity_token_type_ids is None:\n entity_token_type_ids = torch.zeros((batch_size, entity_seq_length), dtype=torch.long, device=device)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n # First, compute word embeddings\n word_embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n )\n\n # Second, compute extended attention mask\n extended_attention_mask = self.get_extended_attention_mask(attention_mask, entity_attention_mask)\n\n # Third, compute entity embeddings and concatenate with word embeddings\n if entity_ids is None:\n entity_embedding_output = None\n else:\n entity_embedding_output = self.entity_embeddings(entity_ids, entity_position_ids, entity_token_type_ids)\n\n # Fourth, send embeddings through the model\n encoder_outputs = self.encoder(\n word_embedding_output,\n entity_embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n # Fifth, get the output. 
LukeModel outputs the same as BertModel, namely sequence_output of shape (batch_size, seq_len, hidden_size)\n        sequence_output = encoder_outputs[0]\n\n        # Sixth, we compute the pooled_output, word_sequence_output and entity_sequence_output based on the sequence_output\n        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n        if not return_dict:\n            return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n        return BaseLukeModelOutputWithPooling(\n            last_hidden_state=sequence_output,\n            pooler_output=pooled_output,\n            hidden_states=encoder_outputs.hidden_states,\n            attentions=encoder_outputs.attentions,\n            entity_last_hidden_state=encoder_outputs.entity_last_hidden_state,\n            entity_hidden_states=encoder_outputs.entity_hidden_states,\n        )\n\n    def get_extended_attention_mask(\n        self, word_attention_mask: torch.LongTensor, entity_attention_mask: Optional[torch.LongTensor]\n    ):\n        \"\"\"\n        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\n        Arguments:\n            word_attention_mask (`torch.LongTensor`):\n                Attention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore.\n            entity_attention_mask (`torch.LongTensor`, *optional*):\n                Attention mask for entity tokens with ones indicating tokens to attend to, zeros for tokens to ignore.\n\n        Returns:\n            `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.\n        \"\"\"\n        attention_mask = word_attention_mask\n        if entity_attention_mask is not None:\n            attention_mask = torch.cat([attention_mask, entity_attention_mask], dim=-1)\n\n        if attention_mask.dim() == 3:\n            extended_attention_mask = attention_mask[:, None, :, :]\n        elif attention_mask.dim() == 2:\n            extended_attention_mask = attention_mask[:, None, None, :]\n        else:\n            raise ValueError(f\"Wrong shape for attention_mask (shape {attention_mask.shape})\")\n\n        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility\n        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n        return extended_attention_mask\n\n\ndef create_position_ids_from_input_ids(input_ids, padding_idx):\n    \"\"\"\n    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\n    are ignored. This is modified from fairseq's `utils.make_positions`.\n\n    Args:\n        input_ids (`torch.LongTensor`): Input token ids.\n        padding_idx (`int`): Id of the padding token.\n\n    Returns: torch.LongTensor\n    \"\"\"\n    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.\n    mask = input_ids.ne(padding_idx).int()\n    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask\n    return incremental_indices.long() + padding_idx\n
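To make the cumulative-sum trick in `create_position_ids_from_input_ids` concrete, here is a tiny worked example (the input values are invented for the demo): real tokens count up from `padding_idx + 1`, while padded slots fall back to `padding_idx`.

```python
import torch

padding_idx = 1
input_ids = torch.tensor([[5, 7, 9, padding_idx, padding_idx]])

mask = input_ids.ne(padding_idx).int()                        # tensor([[1, 1, 1, 0, 0]])
incremental = torch.cumsum(mask, dim=1).type_as(mask) * mask  # tensor([[1, 2, 3, 0, 0]])
position_ids = incremental.long() + padding_idx               # tensor([[2, 3, 4, 1, 1]])
```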
\n\n# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead\nclass LukeLMHead(nn.Module):\n    \"\"\"Roberta Head for masked language modeling.\"\"\"\n\n    def __init__(self, config):\n        super().__init__()\n        self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n        self.decoder = nn.Linear(config.hidden_size, config.vocab_size)\n        self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n        self.decoder.bias = self.bias\n\n    def forward(self, features, **kwargs):\n        x = self.dense(features)\n        x = gelu(x)\n        x = self.layer_norm(x)\n\n        # project back to size of vocabulary with bias\n        x = self.decoder(x)\n\n        return x\n\n    def _tie_weights(self):\n        # To tie those two weights if they get disconnected (on TPU or when the bias is resized)\n        self.bias = self.decoder.bias\n\n\n@add_start_docstrings(\n    \"\"\"\n    The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and\n    masked entity prediction.\n    \"\"\",\n    LUKE_START_DOCSTRING,\n)\nclass LukeForMaskedLM(LukePreTrainedModel):\n    _keys_to_ignore_on_save = [\n        r\"lm_head.decoder.weight\",\n        r\"lm_head.decoder.bias\",\n        r\"entity_predictions.decoder.weight\",\n    ]\n    _keys_to_ignore_on_load_missing = [\n        r\"position_ids\",\n        r\"lm_head.decoder.weight\",\n        r\"lm_head.decoder.bias\",\n        r\"entity_predictions.decoder.weight\",\n    ]\n\n    def __init__(self, config):\n        super().__init__(config)\n\n        self.luke = LukeModel(config)\n\n        self.lm_head = LukeLMHead(config)\n        self.entity_predictions = EntityPredictionHead(config)\n\n        self.loss_fn = nn.CrossEntropyLoss(ignore_index=-1)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def tie_weights(self):\n        super().tie_weights()\n        self._tie_or_clone_weights(self.entity_predictions.decoder, self.luke.entity_embeddings.entity_embeddings)\n\n    def get_output_embeddings(self):\n        return self.lm_head.decoder\n\n    def set_output_embeddings(self, new_embeddings):\n        self.lm_head.decoder = new_embeddings\n\n    @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n    @replace_return_docstrings(output_type=LukeMaskedLMOutput, config_class=_CONFIG_FOR_DOC)\n    def forward(\n        self,\n        input_ids: Optional[torch.LongTensor] = None,\n        attention_mask: Optional[torch.FloatTensor] = None,\n        token_type_ids: Optional[torch.LongTensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        entity_ids: Optional[torch.LongTensor] = None,\n        entity_attention_mask: Optional[torch.LongTensor] = None,\n        entity_token_type_ids: Optional[torch.LongTensor] = None,\n        entity_position_ids: Optional[torch.LongTensor] = None,\n        labels: Optional[torch.LongTensor] = None,\n        entity_labels: Optional[torch.LongTensor] = None,\n        head_mask: Optional[torch.FloatTensor] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n    ) -> Union[Tuple, LukeMaskedLMOutput]:\n        r\"\"\"\n        labels (`torch.LongTensor` of shape `(batch_size, 
sequence_length)`, *optional*):\n            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,\n            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the\n            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n        entity_labels (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):\n            Labels for computing the masked entity prediction (MEP) loss. Indices should be in `[-100, 0, ...,\n            config.entity_vocab_size]` (see `entity_ids` docstring) Entities with indices set to `-100` are ignored\n            (masked), the loss is only computed for the entities with labels in `[0, ..., config.entity_vocab_size]`\n\n        Returns:\n\n        \"\"\"\n\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        outputs = self.luke(\n            input_ids=input_ids,\n            attention_mask=attention_mask,\n            token_type_ids=token_type_ids,\n            position_ids=position_ids,\n            entity_ids=entity_ids,\n            entity_attention_mask=entity_attention_mask,\n            entity_token_type_ids=entity_token_type_ids,\n            entity_position_ids=entity_position_ids,\n            head_mask=head_mask,\n            inputs_embeds=inputs_embeds,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=True,\n        )\n\n        loss = None\n\n        mlm_loss = None\n        logits = self.lm_head(outputs.last_hidden_state)\n        if labels is not None:\n            mlm_loss = self.loss_fn(logits.view(-1, self.config.vocab_size), labels.view(-1))\n            if loss is None:\n                loss = mlm_loss\n\n        mep_loss = None\n        entity_logits = self.entity_predictions(outputs.entity_last_hidden_state)\n        if entity_labels is not None:\n            mep_loss = self.loss_fn(entity_logits.view(-1, self.config.entity_vocab_size), entity_labels.view(-1))\n            if loss is None:\n                loss = mep_loss\n            else:\n                loss = loss + mep_loss\n\n        if not return_dict:\n            output = (logits, entity_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions)\n            if mlm_loss is not None and mep_loss is not None:\n                return (loss, mlm_loss, mep_loss) + output\n            elif mlm_loss is not None:\n                return (loss, mlm_loss) + output\n            elif mep_loss is not None:\n                return (loss, mep_loss) + output\n            else:\n                return output\n\n        return LukeMaskedLMOutput(\n            loss=loss,\n            mlm_loss=mlm_loss,\n            mep_loss=mep_loss,\n            logits=logits,\n            entity_logits=entity_logits,\n            hidden_states=outputs.hidden_states,\n            entity_hidden_states=outputs.entity_hidden_states,\n            attentions=outputs.attentions,\n        )\n\n\n@add_start_docstrings(\n    \"\"\"\n    The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity\n    token) for entity classification tasks, such as Open Entity.\n    \"\"\",\n    LUKE_START_DOCSTRING,\n)\nclass LukeForEntityClassification(LukePreTrainedModel):\n    def __init__(self, config):\n        super().__init__(config)\n\n        self.luke = LukeModel(config)\n\n        self.num_labels = config.num_labels\n        self.dropout = nn.Dropout(config.hidden_dropout_prob)\n        self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n    @replace_return_docstrings(output_type=EntityClassificationOutput, config_class=_CONFIG_FOR_DOC)\n    def forward(\n        self,\n        input_ids: Optional[torch.LongTensor] = None,\n        attention_mask: Optional[torch.FloatTensor] = None,\n        token_type_ids: Optional[torch.LongTensor] = None,\n        position_ids: Optional[torch.LongTensor] = None,\n        entity_ids: 
Optional[torch.LongTensor] = None,\n entity_attention_mask: Optional[torch.FloatTensor] = None,\n entity_token_type_ids: Optional[torch.LongTensor] = None,\n entity_position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, EntityClassificationOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):\n Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is\n used for the single-label classification. In this case, labels should contain the indices that should be in\n `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy\n loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0\n and 1 indicate false and true, respectively.\n\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import LukeTokenizer, LukeForEntityClassification\n\n >>> tokenizer = LukeTokenizer.from_pretrained(\"studio-ousia/luke-large-finetuned-open-entity\")\n >>> model = LukeForEntityClassification.from_pretrained(\"studio-ousia/luke-large-finetuned-open-entity\")\n\n >>> text = \"Beyoncé lives in Los Angeles.\"\n >>> entity_spans = [(0, 7)] # character-based entity span corresponding to \"Beyoncé\"\n >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n >>> predicted_class_idx = logits.argmax(-1).item()\n >>> print(\"Predicted class:\", model.config.id2label[predicted_class_idx])\n Predicted class: person\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.luke(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n entity_ids=entity_ids,\n entity_attention_mask=entity_attention_mask,\n entity_token_type_ids=entity_token_type_ids,\n entity_position_ids=entity_position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=True,\n )\n\n feature_vector = outputs.entity_last_hidden_state[:, 0, :]\n feature_vector = self.dropout(feature_vector)\n logits = self.classifier(feature_vector)\n\n loss = None\n if labels is not None:\n # When the number of dimension of `labels` is 1, cross entropy is used as the loss function. 
The binary\n # cross entropy is used otherwise.\n if labels.ndim == 1:\n loss = nn.functional.cross_entropy(logits, labels)\n else:\n loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))\n\n if not return_dict:\n output = (\n logits,\n outputs.hidden_states,\n outputs.entity_hidden_states,\n outputs.attentions,\n )\n return ((loss,) + output) if loss is not None else output\n\n return EntityClassificationOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n entity_hidden_states=outputs.entity_hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity\n tokens) for entity pair classification tasks, such as TACRED.\n \"\"\",\n LUKE_START_DOCSTRING,\n)\nclass LukeForEntityPairClassification(LukePreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.luke = LukeModel(config)\n\n self.num_labels = config.num_labels\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels, False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=EntityPairClassificationOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n entity_ids: Optional[torch.LongTensor] = None,\n entity_attention_mask: Optional[torch.FloatTensor] = None,\n entity_token_type_ids: Optional[torch.LongTensor] = None,\n entity_position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, EntityPairClassificationOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):\n Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is\n used for the single-label classification. In this case, labels should contain the indices that should be in\n `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy\n loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0\n and 1 indicate false and true, respectively.\n\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import LukeTokenizer, LukeForEntityPairClassification\n\n >>> tokenizer = LukeTokenizer.from_pretrained(\"studio-ousia/luke-large-finetuned-tacred\")\n >>> model = LukeForEntityPairClassification.from_pretrained(\"studio-ousia/luke-large-finetuned-tacred\")\n\n >>> text = \"Beyoncé lives in Los Angeles.\"\n >>> entity_spans = [\n ... (0, 7),\n ... (17, 28),\n ... 
] # character-based entity spans corresponding to \"Beyoncé\" and \"Los Angeles\"\n >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n >>> predicted_class_idx = logits.argmax(-1).item()\n >>> print(\"Predicted class:\", model.config.id2label[predicted_class_idx])\n Predicted class: per:cities_of_residence\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.luke(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n entity_ids=entity_ids,\n entity_attention_mask=entity_attention_mask,\n entity_token_type_ids=entity_token_type_ids,\n entity_position_ids=entity_position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=True,\n )\n\n feature_vector = torch.cat(\n [outputs.entity_last_hidden_state[:, 0, :], outputs.entity_last_hidden_state[:, 1, :]], dim=1\n )\n feature_vector = self.dropout(feature_vector)\n logits = self.classifier(feature_vector)\n\n loss = None\n if labels is not None:\n # When the number of dimension of `labels` is 1, cross entropy is used as the loss function. The binary\n # cross entropy is used otherwise.\n if labels.ndim == 1:\n loss = nn.functional.cross_entropy(logits, labels)\n else:\n loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))\n\n if not return_dict:\n output = (\n logits,\n outputs.hidden_states,\n outputs.entity_hidden_states,\n outputs.attentions,\n )\n return ((loss,) + output) if loss is not None else output\n\n return EntityPairClassificationOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n entity_hidden_states=outputs.entity_hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks\n such as named entity recognition.\n \"\"\",\n LUKE_START_DOCSTRING,\n)\nclass LukeForEntitySpanClassification(LukePreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.luke = LukeModel(config)\n\n self.num_labels = config.num_labels\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=EntitySpanClassificationOutput, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask=None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n entity_ids: Optional[torch.LongTensor] = None,\n entity_attention_mask: Optional[torch.LongTensor] = None,\n entity_token_type_ids: Optional[torch.LongTensor] = None,\n entity_position_ids: Optional[torch.LongTensor] = None,\n entity_start_positions: Optional[torch.LongTensor] = None,\n entity_end_positions: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: 
Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, EntitySpanClassificationOutput]:\n r\"\"\"\n entity_start_positions (`torch.LongTensor`):\n The start positions of entities in the word token sequence.\n\n entity_end_positions (`torch.LongTensor`):\n The end positions of entities in the word token sequence.\n\n labels (`torch.LongTensor` of shape `(batch_size, entity_length)` or `(batch_size, entity_length, num_labels)`, *optional*):\n Labels for computing the classification loss. If the shape is `(batch_size, entity_length)`, the cross\n entropy loss is used for the single-label classification. In this case, labels should contain the indices\n that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, entity_length,\n num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case,\n labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively.\n\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import LukeTokenizer, LukeForEntitySpanClassification\n\n >>> tokenizer = LukeTokenizer.from_pretrained(\"studio-ousia/luke-large-finetuned-conll-2003\")\n >>> model = LukeForEntitySpanClassification.from_pretrained(\"studio-ousia/luke-large-finetuned-conll-2003\")\n\n >>> text = \"Beyoncé lives in Los Angeles\"\n # List all possible entity spans in the text\n\n >>> word_start_positions = [0, 8, 14, 17, 21] # character-based start positions of word tokens\n >>> word_end_positions = [7, 13, 16, 20, 28] # character-based end positions of word tokens\n >>> entity_spans = []\n >>> for i, start_pos in enumerate(word_start_positions):\n ... for end_pos in word_end_positions[i:]:\n ... entity_spans.append((start_pos, end_pos))\n\n >>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n >>> predicted_class_indices = logits.argmax(-1).squeeze().tolist()\n >>> for span, predicted_class_idx in zip(entity_spans, predicted_class_indices):\n ... if predicted_class_idx != 0:\n ... print(text[span[0] : span[1]], model.config.id2label[predicted_class_idx])\n Beyoncé PER\n Los Angeles LOC\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.luke(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n entity_ids=entity_ids,\n entity_attention_mask=entity_attention_mask,\n entity_token_type_ids=entity_token_type_ids,\n entity_position_ids=entity_position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=True,\n )\n hidden_size = outputs.last_hidden_state.size(-1)\n\n entity_start_positions = entity_start_positions.unsqueeze(-1).expand(-1, -1, hidden_size)\n start_states = torch.gather(outputs.last_hidden_state, -2, entity_start_positions)\n entity_end_positions = entity_end_positions.unsqueeze(-1).expand(-1, -1, hidden_size)\n end_states = torch.gather(outputs.last_hidden_state, -2, entity_end_positions)\n feature_vector = torch.cat([start_states, end_states, outputs.entity_last_hidden_state], dim=2)\n\n feature_vector = self.dropout(feature_vector)\n logits = self.classifier(feature_vector)\n\n loss = None\n if labels is not None:\n # When the number of dimension of `labels` is 2, cross entropy is used as the loss function. 
The binary\n # cross entropy is used otherwise.\n if labels.ndim == 2:\n loss = nn.functional.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1))\n else:\n loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))\n\n if not return_dict:\n output = (\n logits,\n outputs.hidden_states,\n outputs.entity_hidden_states,\n outputs.attentions,\n )\n return ((loss,) + output) if loss is not None else output\n\n return EntitySpanClassificationOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n entity_hidden_states=outputs.entity_hidden_states,\n attentions=outputs.attentions,\n )\n",
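Before moving on to the next file, a compact, self-contained shape sketch of the two pieces that distinguish the LUKE encoder above from standard BERT attention: the four query projections (w2w/w2e/e2w/e2e) whose score blocks are concatenated into a single attention matrix in `LukeSelfAttention.forward`, and the additive `-10000.0` padding mask built by `LukeModel.get_extended_attention_mask`. All sizes and the random tensors below are placeholder values for illustration, not real model weights; only the tensor manipulations mirror the code above.

```python
import torch

# Placeholder sizes for the sketch
batch, heads, head_dim = 2, 4, 16
word_len, entity_len = 5, 2

# Per-head query tensors, shaped (batch, heads, query_len, head_dim), standing in
# for the outputs of transpose_for_scores() on the four query projections.
w2w_q = torch.randn(batch, heads, word_len, head_dim)
w2e_q = torch.randn(batch, heads, word_len, head_dim)
e2w_q = torch.randn(batch, heads, entity_len, head_dim)
e2e_q = torch.randn(batch, heads, entity_len, head_dim)

# One key tensor over the concatenated word+entity sequence, split back into its
# word part and entity part, as in LukeSelfAttention.forward.
key = torch.randn(batch, heads, word_len + entity_len, head_dim)
word_key, entity_key = key[..., :word_len, :], key[..., word_len:, :]

w2w = w2w_q @ word_key.transpose(-1, -2)    # (batch, heads, word_len, word_len)
w2e = w2e_q @ entity_key.transpose(-1, -2)  # (batch, heads, word_len, entity_len)
e2w = e2w_q @ word_key.transpose(-1, -2)    # (batch, heads, entity_len, word_len)
e2e = e2e_q @ entity_key.transpose(-1, -2)  # (batch, heads, entity_len, entity_len)

# Stitch the four blocks into one (word+entity) x (word+entity) score matrix.
word_scores = torch.cat([w2w, w2e], dim=3)
entity_scores = torch.cat([e2w, e2e], dim=3)
scores = torch.cat([word_scores, entity_scores], dim=2)
assert scores.shape == (batch, heads, word_len + entity_len, word_len + entity_len)

# Additive masking as in get_extended_attention_mask: keep -> 0.0, pad -> -10000.0
attention_mask = torch.ones(batch, word_len + entity_len)
attention_mask[:, -1] = 0  # pretend the last entity slot is padding
extended = (1.0 - attention_mask[:, None, None, :]) * -10000.0
probs = torch.softmax(scores / head_dim**0.5 + extended, dim=-1)
```

After the softmax, the masked column contributes (effectively) zero probability, which is why the model can simply add the extended mask to the raw scores instead of indexing the padded positions out.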
"# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Testing suite for the TensorFlow CLIP model. \"\"\"\n\n\nimport inspect\nimport os\nimport tempfile\nimport unittest\nfrom importlib import import_module\n\nimport requests\nfrom transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig\nfrom transformers.testing_utils import require_tf, require_vision, slow\nfrom transformers.utils import is_tf_available, is_vision_available\n\nfrom ...test_configuration_common import ConfigTester\nfrom ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask\n\n\nif is_tf_available():\n import tensorflow as tf\n\n from transformers import TFCLIPModel, TFCLIPTextModel, TFCLIPVisionModel, TFSharedEmbeddings\n from transformers.models.clip.modeling_tf_clip import TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST\n\n\nif is_vision_available():\n from PIL import Image\n\n from transformers import CLIPProcessor\n\n\nclass TFCLIPVisionModelTester:\n def __init__(\n self,\n parent,\n batch_size=12,\n image_size=30,\n patch_size=2,\n num_channels=3,\n is_training=True,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n intermediate_size=37,\n dropout=0.1,\n attention_dropout=0.1,\n initializer_range=0.02,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.image_size = image_size\n self.patch_size = patch_size\n self.num_channels = num_channels\n self.is_training = is_training\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.initializer_range = initializer_range\n self.scope = scope\n\n def prepare_config_and_inputs(self):\n pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])\n config = self.get_config()\n\n return config, pixel_values\n\n def get_config(self):\n return CLIPVisionConfig(\n image_size=self.image_size,\n patch_size=self.patch_size,\n num_channels=self.num_channels,\n hidden_size=self.hidden_size,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n dropout=self.dropout,\n attention_dropout=self.attention_dropout,\n initializer_range=self.initializer_range,\n )\n\n def create_and_check_model(self, config, pixel_values):\n model = TFCLIPVisionModel(config=config)\n result = model(pixel_values, training=False)\n # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)\n image_size = (self.image_size, self.image_size)\n patch_size = (self.patch_size, self.patch_size)\n num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))\n 
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n config, pixel_values = config_and_inputs\n inputs_dict = {\"pixel_values\": pixel_values}\n return config, inputs_dict\n\n\n@require_tf\nclass TFCLIPVisionModelTest(TFModelTesterMixin, unittest.TestCase):\n \"\"\"\n Here we also overwrite some of the tests of test_modeling_common.py, as CLIP does not use input_ids, inputs_embeds,\n attention_mask and seq_length.\n \"\"\"\n\n all_model_classes = (TFCLIPVisionModel,) if is_tf_available() else ()\n\n test_pruning = False\n test_resize_embeddings = False\n test_head_masking = False\n test_onnx = False\n\n def setUp(self):\n self.model_tester = TFCLIPVisionModelTester(self)\n self.config_tester = ConfigTester(self, config_class=CLIPVisionConfig, has_text_modality=False, hidden_size=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_inputs_embeds(self):\n # CLIP does not use inputs_embeds\n pass\n\n def test_graph_mode_with_inputs_embeds(self):\n # CLIP does not use inputs_embeds\n pass\n\n def test_model_common_attributes(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))\n x = model.get_output_embeddings()\n self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))\n\n def test_forward_signature(self):\n config, _ = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n signature = inspect.signature(model.call)\n # signature.parameters is an OrderedDict => so arg_names order is deterministic\n arg_names = [*signature.parameters.keys()]\n\n expected_arg_names = [\"pixel_values\"]\n self.assertListEqual(arg_names[:1], expected_arg_names)\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n def test_attention_outputs(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.return_dict = True\n\n # in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)\n image_size = (self.model_tester.image_size, self.model_tester.image_size)\n patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)\n num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])\n seq_len = num_patches + 1\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_attentions\"] = True\n inputs_dict[\"output_hidden_states\"] = False\n config.return_dict = True\n model = model_class(config)\n outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)\n attentions = outputs.attentions\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n\n # check that output_attentions also work using config\n del inputs_dict[\"output_attentions\"]\n config.output_attentions = True\n model = model_class(config)\n outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)\n attentions = outputs.attentions\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n\n out_len = len(outputs)\n\n # Check attention is always last and order is fine\n inputs_dict[\"output_attentions\"] = True\n 
inputs_dict[\"output_hidden_states\"] = True\n model = model_class(config)\n outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)\n\n added_hidden_states = 1\n self.assertEqual(out_len + added_hidden_states, len(outputs))\n\n self_attentions = outputs.attentions\n\n self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)\n\n self.assertListEqual(\n list(self_attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, seq_len, seq_len],\n )\n\n def test_hidden_states_output(self):\n def check_hidden_states_output(inputs_dict, config, model_class):\n model = model_class(config)\n\n outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)\n\n hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states\n\n expected_num_layers = getattr(\n self.model_tester, \"expected_num_hidden_layers\", self.model_tester.num_hidden_layers + 1\n )\n self.assertEqual(len(hidden_states), expected_num_layers)\n\n # CLIP has a different seq_length\n image_size = (self.model_tester.image_size, self.model_tester.image_size)\n patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)\n num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])\n seq_length = num_patches + 1\n\n self.assertListEqual(\n list(hidden_states[0].shape[-2:]),\n [seq_length, self.model_tester.hidden_size],\n )\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_hidden_states\"] = True\n check_hidden_states_output(inputs_dict, config, model_class)\n\n # check that output_hidden_states also work using config\n del inputs_dict[\"output_hidden_states\"]\n config.output_hidden_states = True\n\n check_hidden_states_output(inputs_dict, config, model_class)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = TFCLIPVisionModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n @slow\n def test_saved_model_creation_extended(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.output_hidden_states = True\n config.output_attentions = True\n\n if hasattr(config, \"use_cache\"):\n config.use_cache = True\n\n # in CLIP, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)\n image_size = (self.model_tester.image_size, self.model_tester.image_size)\n patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)\n num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])\n seq_len = num_patches + 1\n\n for model_class in self.all_model_classes:\n class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)\n model = model_class(config)\n num_out = len(model(class_inputs_dict))\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n model.save_pretrained(tmpdirname, saved_model=True)\n saved_model_dir = os.path.join(tmpdirname, \"saved_model\", \"1\")\n model = tf.keras.models.load_model(saved_model_dir)\n outputs = model(class_inputs_dict)\n output_hidden_states = outputs[\"hidden_states\"]\n output_attentions = outputs[\"attentions\"]\n\n # Check num outputs\n self.assertEqual(len(outputs), num_out)\n\n # Check num layers\n expected_num_layers = getattr(\n self.model_tester, \"expected_num_hidden_layers\", self.model_tester.num_hidden_layers + 1\n )\n\n self.assertEqual(len(output_hidden_states), 
expected_num_layers)\n self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)\n\n # Check attention outputs\n image_size = (self.model_tester.image_size, self.model_tester.image_size)\n patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)\n num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])\n seq_len = num_patches + 1\n\n self.assertListEqual(\n list(output_attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, seq_len, seq_len],\n )\n\n # Check hidden states\n self.assertListEqual(\n list(output_hidden_states[0].shape[-2:]),\n [seq_len, self.model_tester.hidden_size],\n )\n\n\nclass TFCLIPTextModelTester:\n def __init__(\n self,\n parent,\n batch_size=12,\n seq_length=7,\n is_training=True,\n use_input_mask=True,\n use_labels=True,\n vocab_size=99,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n intermediate_size=37,\n dropout=0.1,\n attention_dropout=0.1,\n max_position_embeddings=512,\n initializer_range=0.02,\n scope=None,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_input_mask = use_input_mask\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.max_position_embeddings = max_position_embeddings\n self.initializer_range = initializer_range\n self.scope = scope\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = random_attention_mask([self.batch_size, self.seq_length])\n # make sure the first token has attention mask `1` to ensure that, after combining the causal mask, there\n # is still at least one token being attended to for each batch.\n # TODO: Change `random_attention_mask` in PT/TF/Flax common test file, after a discussion with the team.\n input_mask = tf.concat(\n [tf.ones_like(input_mask[:, :1], dtype=input_mask.dtype), input_mask[:, 1:]], axis=-1\n )\n\n config = self.get_config()\n\n return config, input_ids, input_mask\n\n def get_config(self):\n return CLIPTextConfig(\n vocab_size=self.vocab_size,\n hidden_size=self.hidden_size,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n dropout=self.dropout,\n attention_dropout=self.attention_dropout,\n max_position_embeddings=self.max_position_embeddings,\n initializer_range=self.initializer_range,\n )\n\n def create_and_check_model(self, config, input_ids, input_mask):\n model = TFCLIPTextModel(config=config)\n result = model(input_ids, attention_mask=input_mask, training=False)\n result = model(input_ids, training=False)\n self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))\n self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n config, input_ids, input_mask = config_and_inputs\n inputs_dict = {\"input_ids\": input_ids, \"attention_mask\": input_mask}\n return config, inputs_dict\n\n\n@require_tf\nclass TFCLIPTextModelTest(TFModelTesterMixin, unittest.TestCase):\n\n 
all_model_classes = (TFCLIPTextModel,) if is_tf_available() else ()\n test_pruning = False\n test_head_masking = False\n test_onnx = False\n\n def setUp(self):\n self.model_tester = TFCLIPTextModelTester(self)\n self.config_tester = ConfigTester(self, config_class=CLIPTextConfig, hidden_size=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n def test_inputs_embeds(self):\n # CLIP does not use inputs_embeds\n pass\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = TFCLIPTextModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n @slow\n def test_saved_model_creation_extended(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.output_hidden_states = True\n config.output_attentions = True\n\n if hasattr(config, \"use_cache\"):\n config.use_cache = True\n\n for model_class in self.all_model_classes:\n class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)\n model = model_class(config)\n num_out = len(model(class_inputs_dict))\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n model.save_pretrained(tmpdirname, saved_model=True)\n saved_model_dir = os.path.join(tmpdirname, \"saved_model\", \"1\")\n model = tf.keras.models.load_model(saved_model_dir)\n outputs = model(class_inputs_dict)\n output_hidden_states = outputs[\"hidden_states\"]\n output_attentions = outputs[\"attentions\"]\n\n # Check number of outputs\n self.assertEqual(len(outputs), num_out)\n\n # Check number of layers\n expected_num_layers = getattr(\n self.model_tester, \"expected_num_hidden_layers\", self.model_tester.num_hidden_layers + 1\n )\n\n # Check hidden states\n self.assertEqual(len(output_hidden_states), expected_num_layers)\n self.assertListEqual(\n list(output_hidden_states[0].shape[-2:]),\n [self.model_tester.seq_length, self.model_tester.hidden_size],\n )\n\n # Check attention outputs\n self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)\n\n seq_length = self.model_tester.seq_length\n key_length = getattr(self.model_tester, \"key_length\", seq_length)\n\n self.assertListEqual(\n list(output_attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, seq_length, key_length],\n )\n\n\nclass TFCLIPModelTester:\n def __init__(self, parent, is_training=True):\n self.parent = parent\n self.text_model_tester = TFCLIPTextModelTester(parent)\n self.vision_model_tester = TFCLIPVisionModelTester(parent)\n self.is_training = is_training\n\n def prepare_config_and_inputs(self):\n text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()\n vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs()\n\n config = self.get_config()\n\n return config, input_ids, attention_mask, pixel_values\n\n def get_config(self):\n return CLIPConfig.from_text_vision_configs(\n self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64\n )\n\n def create_and_check_model(self, config, input_ids, attention_mask, pixel_values):\n model = TFCLIPModel(config)\n result = model(input_ids, pixel_values, attention_mask, training=False)\n self.parent.assertEqual(\n result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size)\n )\n self.parent.assertEqual(\n 
result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size)\n )\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n config, input_ids, attention_mask, pixel_values = config_and_inputs\n inputs_dict = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"pixel_values\": pixel_values,\n \"return_loss\": True,\n }\n return config, inputs_dict\n\n\n@require_tf\nclass TFCLIPModelTest(TFModelTesterMixin, unittest.TestCase):\n all_model_classes = (TFCLIPModel,) if is_tf_available() else ()\n test_head_masking = False\n test_pruning = False\n test_resize_embeddings = False\n test_attention_outputs = False\n test_onnx = False\n\n def setUp(self):\n self.model_tester = TFCLIPModelTester(self)\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n # hidden_states are tested in individual model tests\n def test_hidden_states_output(self):\n pass\n\n # input_embeds are tested in individual model tests\n def test_inputs_embeds(self):\n pass\n\n # CLIPModel does not have input/output embeddings\n def test_model_common_attributes(self):\n pass\n\n # overwrite from common since `TFCLIPModelTester` set `return_loss` to `True` and causes the preparation of\n # `symbolic_inputs` failed.\n def test_keras_save_load(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n # remove `return_loss` to make code work\n if self.__class__.__name__ == \"TFCLIPModelTest\":\n inputs_dict.pop(\"return_loss\", None)\n\n tf_main_layer_classes = set(\n module_member\n for model_class in self.all_model_classes\n for module in (import_module(model_class.__module__),)\n for module_member_name in dir(module)\n if module_member_name.endswith(\"MainLayer\")\n # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.\n and module_member_name[: -len(\"MainLayer\")] == model_class.__name__[: -len(\"Model\")]\n for module_member in (getattr(module, module_member_name),)\n if isinstance(module_member, type)\n and tf.keras.layers.Layer in module_member.__bases__\n and getattr(module_member, \"_keras_serializable\", False)\n )\n for main_layer_class in tf_main_layer_classes:\n # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter\n if \"T5\" in main_layer_class.__name__:\n # Take the same values than in TFT5ModelTester for this shared layer\n shared = TFSharedEmbeddings(99, 32, name=\"shared\")\n config.use_cache = inputs_dict.pop(\"use_cache\", None)\n main_layer = main_layer_class(config, embed_tokens=shared)\n else:\n main_layer = main_layer_class(config)\n\n symbolic_inputs = {\n name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()\n }\n\n model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))\n outputs = model(inputs_dict)\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n filepath = os.path.join(tmpdirname, \"keras_model.h5\")\n model.save(filepath)\n if \"T5\" in main_layer_class.__name__:\n model = tf.keras.models.load_model(\n filepath,\n custom_objects={\n main_layer_class.__name__: main_layer_class,\n \"TFSharedEmbeddings\": TFSharedEmbeddings,\n },\n )\n else:\n model = tf.keras.models.load_model(\n filepath, custom_objects={main_layer_class.__name__: main_layer_class}\n )\n assert isinstance(model, 
tf.keras.Model)\n after_outputs = model(inputs_dict)\n self.assert_outputs_same(after_outputs, outputs)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = TFCLIPModel.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n @unittest.skip(reason=\"Currently `saved_model` doesn't work with nested outputs.\")\n @slow\n def test_saved_model_creation_extended(self):\n pass\n\n\n# We will verify our results on an image of cute cats\ndef prepare_img():\n url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n im = Image.open(requests.get(url, stream=True).raw)\n return im\n\n\n@require_vision\n@require_tf\nclass TFCLIPModelIntegrationTest(unittest.TestCase):\n @slow\n def test_inference(self):\n model_name = \"openai/clip-vit-base-patch32\"\n model = TFCLIPModel.from_pretrained(model_name)\n processor = CLIPProcessor.from_pretrained(model_name)\n\n image = prepare_img()\n inputs = processor(\n text=[\"a photo of a cat\", \"a photo of a dog\"], images=image, padding=True, return_tensors=\"tf\"\n )\n\n outputs = model(**inputs, training=False)\n\n # verify the logits\n self.assertEqual(\n outputs.logits_per_image.shape,\n tf.TensorShape((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),\n )\n self.assertEqual(\n outputs.logits_per_text.shape,\n tf.TensorShape((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),\n )\n\n expected_logits = tf.constant([[24.5701, 19.3049]])\n\n tf.debugging.assert_near(outputs.logits_per_image, expected_logits, atol=1e-3)\n",
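The integration test above only asserts raw logit values; in use, `logits_per_image` is typically turned into caption probabilities with a softmax over the text axis. A hedged usage sketch (downloads the public openai/clip-vit-base-patch32 checkpoint; the captions are illustrative):

import requests
import tensorflow as tf
from PIL import Image
from transformers import CLIPProcessor, TFCLIPModel

model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="tf"
)

outputs = model(**inputs, training=False)
# One row per image, one column per caption; softmax over captions.
probs = tf.nn.softmax(outputs.logits_per_image, axis=-1)
print(probs.numpy())  # the "cat" caption should dominate for this image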
"#!/usr/bin/env python\n# coding=utf-8\n# Copyright 2021 The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Fine-tuning a 🤗 Flax Transformers model on token classification tasks (NER, POS, CHUNKS)\"\"\"\nimport json\nimport logging\nimport os\nimport random\nimport sys\nimport time\nfrom dataclasses import asdict, dataclass, field\nfrom enum import Enum\nfrom itertools import chain\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, Optional, Tuple\n\nimport datasets\nimport numpy as np\nfrom datasets import ClassLabel, load_dataset, load_metric\nfrom tqdm import tqdm\n\nimport jax\nimport jax.numpy as jnp\nimport optax\nimport transformers\nfrom flax import struct, traverse_util\nfrom flax.jax_utils import replicate, unreplicate\nfrom flax.training import train_state\nfrom flax.training.common_utils import get_metrics, onehot, shard\nfrom huggingface_hub import Repository\nfrom transformers import (\n AutoConfig,\n AutoTokenizer,\n FlaxAutoModelForTokenClassification,\n HfArgumentParser,\n is_tensorboard_available,\n)\nfrom transformers.utils import check_min_version, get_full_repo_name\nfrom transformers.utils.versions import require_version\n\n\nlogger = logging.getLogger(__name__)\n# Will error if the minimal version of Transformers is not installed. Remove at your own risks.\ncheck_min_version(\"4.20.0.dev0\")\n\nrequire_version(\"datasets>=1.8.0\", \"To fix: pip install -r examples/pytorch/token-classification/requirements.txt\")\n\nArray = Any\nDataset = datasets.arrow_dataset.Dataset\nPRNGKey = Any\n\n\n@dataclass\nclass TrainingArguments:\n output_dir: str = field(\n metadata={\"help\": \"The output directory where the model predictions and checkpoints will be written.\"},\n )\n overwrite_output_dir: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Overwrite the content of the output directory. 
\"\n \"Use this to continue training if output_dir points to a checkpoint directory.\"\n )\n },\n )\n do_train: bool = field(default=False, metadata={\"help\": \"Whether to run training.\"})\n do_eval: bool = field(default=False, metadata={\"help\": \"Whether to run eval on the dev set.\"})\n per_device_train_batch_size: int = field(\n default=8, metadata={\"help\": \"Batch size per GPU/TPU core/CPU for training.\"}\n )\n per_device_eval_batch_size: int = field(\n default=8, metadata={\"help\": \"Batch size per GPU/TPU core/CPU for evaluation.\"}\n )\n learning_rate: float = field(default=5e-5, metadata={\"help\": \"The initial learning rate for AdamW.\"})\n weight_decay: float = field(default=0.0, metadata={\"help\": \"Weight decay for AdamW if we apply some.\"})\n adam_beta1: float = field(default=0.9, metadata={\"help\": \"Beta1 for AdamW optimizer\"})\n adam_beta2: float = field(default=0.999, metadata={\"help\": \"Beta2 for AdamW optimizer\"})\n adam_epsilon: float = field(default=1e-8, metadata={\"help\": \"Epsilon for AdamW optimizer.\"})\n adafactor: bool = field(default=False, metadata={\"help\": \"Whether or not to replace AdamW by Adafactor.\"})\n num_train_epochs: float = field(default=3.0, metadata={\"help\": \"Total number of training epochs to perform.\"})\n warmup_steps: int = field(default=0, metadata={\"help\": \"Linear warmup over warmup_steps.\"})\n logging_steps: int = field(default=500, metadata={\"help\": \"Log every X updates steps.\"})\n save_steps: int = field(default=500, metadata={\"help\": \"Save checkpoint every X updates steps.\"})\n eval_steps: int = field(default=None, metadata={\"help\": \"Run an evaluation every X steps.\"})\n seed: int = field(default=42, metadata={\"help\": \"Random seed that will be set at the beginning of training.\"})\n push_to_hub: bool = field(\n default=False, metadata={\"help\": \"Whether or not to upload the trained model to the model hub after training.\"}\n )\n hub_model_id: str = field(\n default=None, metadata={\"help\": \"The name of the repository to keep in sync with the local `output_dir`.\"}\n )\n hub_token: str = field(default=None, metadata={\"help\": \"The token to use to push to the Model Hub.\"})\n\n def __post_init__(self):\n if self.output_dir is not None:\n self.output_dir = os.path.expanduser(self.output_dir)\n\n def to_dict(self):\n \"\"\"\n Serializes this instance while replace `Enum` by their values (for JSON serialization support). 
It obfuscates\n the token values by removing their value.\n \"\"\"\n d = asdict(self)\n for k, v in d.items():\n if isinstance(v, Enum):\n d[k] = v.value\n if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum):\n d[k] = [x.value for x in v]\n if k.endswith(\"_token\"):\n d[k] = f\"<{k.upper()}>\"\n return d\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where do you want to store the pretrained models downloaded from huggingface.co\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n )\n },\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n task_name: Optional[str] = field(default=\"ner\", metadata={\"help\": \"The name of the task (ner, pos...).\"})\n dataset_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The name of the dataset to use (via the datasets library).\"}\n )\n dataset_config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The configuration name of the dataset to use (via the datasets library).\"}\n )\n train_file: Optional[str] = field(\n default=None, metadata={\"help\": \"The input training data file (a csv or JSON file).\"}\n )\n validation_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input evaluation data file to evaluate on (a csv or JSON file).\"},\n )\n test_file: Optional[str] = field(\n default=None,\n metadata={\"help\": \"An optional input test data file to predict on (a csv or JSON file).\"},\n )\n text_column_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The column name of text to input in the file (a csv or JSON file).\"}\n )\n label_column_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The column name of label to input in the file (a csv or JSON file).\"}\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of processes to use for the preprocessing.\"},\n )\n max_seq_length: int = field(\n default=None,\n metadata={\n \"help\": (\n \"The maximum total input sequence length after tokenization. 
If set, sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n )\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n )\n },\n )\n max_eval_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\"\n )\n },\n )\n max_predict_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"For debugging purposes or quicker training, truncate the number of prediction examples to this \"\n \"value if set.\"\n )\n },\n )\n label_all_tokens: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to put the label for one word on all tokens generated by that word or just on the \"\n \"one (in which case the other tokens will have a padding index).\"\n )\n },\n )\n return_entity_level_metrics: bool = field(\n default=False,\n metadata={\"help\": \"Whether to return all the entity levels during evaluation or just the overall ones.\"},\n )\n\n def __post_init__(self):\n if self.dataset_name is None and self.train_file is None and self.validation_file is None:\n raise ValueError(\"Need either a dataset name or a training/validation file.\")\n else:\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`train_file` should be a csv or a json file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in [\"csv\", \"json\"], \"`validation_file` should be a csv or a json file.\"\n self.task_name = self.task_name.lower()\n\n\ndef create_train_state(\n model: FlaxAutoModelForTokenClassification,\n learning_rate_fn: Callable[[int], float],\n num_labels: int,\n training_args: TrainingArguments,\n) -> train_state.TrainState:\n \"\"\"Create initial training state.\"\"\"\n\n class TrainState(train_state.TrainState):\n \"\"\"Train state with an Optax optimizer.\n\n The two functions below differ depending on whether the task is classification\n or regression.\n\n Args:\n logits_fn: Applied to last layer to obtain the logits.\n loss_fn: Function to compute the loss.\n \"\"\"\n\n logits_fn: Callable = struct.field(pytree_node=False)\n loss_fn: Callable = struct.field(pytree_node=False)\n\n # We use Optax's \"masking\" functionality to not apply weight decay\n # to bias and LayerNorm scale parameters. 
decay_mask_fn returns a\n # mask boolean with the same structure as the parameters.\n # The mask is True for parameters that should be decayed.\n # Note that this mask is specifically adapted for FlaxBERT-like models.\n # For other models, one should correct the layer norm parameter naming\n # accordingly.\n def decay_mask_fn(params):\n flat_params = traverse_util.flatten_dict(params)\n flat_mask = {path: (path[-1] != \"bias\" and path[-2:] != (\"LayerNorm\", \"scale\")) for path in flat_params}\n return traverse_util.unflatten_dict(flat_mask)\n\n tx = optax.adamw(\n learning_rate=learning_rate_fn,\n b1=training_args.adam_beta1,\n b2=training_args.adam_beta2,\n eps=training_args.adam_epsilon,\n weight_decay=training_args.weight_decay,\n mask=decay_mask_fn,\n )\n\n def cross_entropy_loss(logits, labels):\n xentropy = optax.softmax_cross_entropy(logits, onehot(labels, num_classes=num_labels))\n return jnp.mean(xentropy)\n\n return TrainState.create(\n apply_fn=model.__call__,\n params=model.params,\n tx=tx,\n logits_fn=lambda logits: logits.argmax(-1),\n loss_fn=cross_entropy_loss,\n )\n\n\ndef create_learning_rate_fn(\n train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float\n) -> Callable[[int], jnp.array]:\n \"\"\"Returns a linear warmup, linear_decay learning rate function.\"\"\"\n steps_per_epoch = train_ds_size // train_batch_size\n num_train_steps = steps_per_epoch * num_train_epochs\n warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)\n decay_fn = optax.linear_schedule(\n init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps\n )\n schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])\n return schedule_fn\n\n\ndef train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int):\n \"\"\"Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices.\"\"\"\n steps_per_epoch = len(dataset) // batch_size\n perms = jax.random.permutation(rng, len(dataset))\n perms = perms[: steps_per_epoch * batch_size] # Skip incomplete batch.\n perms = perms.reshape((steps_per_epoch, batch_size))\n\n for perm in perms:\n batch = dataset[perm]\n batch = {k: np.array(v) for k, v in batch.items()}\n batch = shard(batch)\n\n yield batch\n\n\ndef eval_data_collator(dataset: Dataset, batch_size: int):\n \"\"\"Returns batches of size `batch_size` from `eval dataset`, sharded over all local devices.\"\"\"\n for i in range(len(dataset) // batch_size):\n batch = dataset[i * batch_size : (i + 1) * batch_size]\n batch = {k: np.array(v) for k, v in batch.items()}\n batch = shard(batch)\n\n yield batch\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n # Make one log on every process with the configuration for debugging.\n logging.basicConfig(\n 
format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n )\n # Setup logging, we only want one process per machine to log things on the screen.\n logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)\n if jax.process_index() == 0:\n datasets.utils.logging.set_verbosity_warning()\n transformers.utils.logging.set_verbosity_info()\n else:\n datasets.utils.logging.set_verbosity_error()\n transformers.utils.logging.set_verbosity_error()\n\n # Handle the repository creation\n if training_args.push_to_hub:\n if training_args.hub_model_id is None:\n repo_name = get_full_repo_name(\n Path(training_args.output_dir).absolute().name, token=training_args.hub_token\n )\n else:\n repo_name = training_args.hub_model_id\n repo = Repository(training_args.output_dir, clone_from=repo_name)\n\n # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)\n # or just provide the name of one of the public datasets for token classification task available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n #\n # For CSV/JSON files, this script will use the column called 'tokens' or the first column if no column called\n # 'tokens' is found. You can easily tweak this behavior (see below).\n #\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if data_args.dataset_name is not None:\n # Downloading and loading a dataset from the hub.\n raw_datasets = load_dataset(\n data_args.dataset_name,\n data_args.dataset_config_name,\n cache_dir=model_args.cache_dir,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n else:\n # Loading the dataset from local csv or json file.\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n if data_args.validation_file is not None:\n data_files[\"validation\"] = data_args.validation_file\n extension = (data_args.train_file if data_args.train_file is not None else data_args.valid_file).split(\".\")[-1]\n raw_datasets = load_dataset(\n extension,\n data_files=data_files,\n cache_dir=model_args.cache_dir,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n # See more about loading any type of standard or custom dataset at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n if raw_datasets[\"train\"] is not None:\n column_names = raw_datasets[\"train\"].column_names\n features = raw_datasets[\"train\"].features\n else:\n column_names = raw_datasets[\"validation\"].column_names\n features = raw_datasets[\"validation\"].features\n\n if data_args.text_column_name is not None:\n text_column_name = data_args.text_column_name\n elif \"tokens\" in column_names:\n text_column_name = \"tokens\"\n else:\n text_column_name = column_names[0]\n\n if data_args.label_column_name is not None:\n label_column_name = data_args.label_column_name\n elif f\"{data_args.task_name}_tags\" in column_names:\n label_column_name = f\"{data_args.task_name}_tags\"\n else:\n label_column_name = column_names[1]\n\n # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the\n # unique labels.\n def get_label_list(labels):\n unique_labels = set()\n for label in labels:\n unique_labels = unique_labels | set(label)\n label_list = list(unique_labels)\n label_list.sort()\n return label_list\n\n if 
isinstance(features[label_column_name].feature, ClassLabel):\n label_list = features[label_column_name].feature.names\n # No need to convert the labels since they are already ints.\n label_to_id = {i: i for i in range(len(label_list))}\n else:\n label_list = get_label_list(raw_datasets[\"train\"][label_column_name])\n label_to_id = {l: i for i, l in enumerate(label_list)}\n num_labels = len(label_list)\n\n # Load pretrained model and tokenizer\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n num_labels=num_labels,\n label2id=label_to_id,\n id2label={i: l for l, i in label_to_id.items()},\n finetuning_task=data_args.task_name,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n tokenizer_name_or_path = model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path\n if config.model_type in {\"gpt2\", \"roberta\"}:\n tokenizer = AutoTokenizer.from_pretrained(\n tokenizer_name_or_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n add_prefix_space=True,\n )\n else:\n tokenizer = AutoTokenizer.from_pretrained(\n tokenizer_name_or_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n model = FlaxAutoModelForTokenClassification.from_pretrained(\n model_args.model_name_or_path,\n config=config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n\n # Preprocessing the datasets\n # Tokenize all texts and align the labels with them.\n def tokenize_and_align_labels(examples):\n tokenized_inputs = tokenizer(\n examples[text_column_name],\n max_length=data_args.max_seq_length,\n padding=\"max_length\",\n truncation=True,\n # We use this argument because the texts in our dataset are lists of words (with a label for each word).\n is_split_into_words=True,\n )\n\n labels = []\n\n for i, label in enumerate(examples[label_column_name]):\n word_ids = tokenized_inputs.word_ids(batch_index=i)\n previous_word_idx = None\n label_ids = []\n for word_idx in word_ids:\n # Special tokens have a word id that is None. 
We set the label to -100 so they are automatically\n # ignored in the loss function.\n if word_idx is None:\n label_ids.append(-100)\n # We set the label for the first token of each word.\n elif word_idx != previous_word_idx:\n label_ids.append(label_to_id[label[word_idx]])\n # For the other tokens in a word, we set the label to either the current label or -100, depending on\n # the label_all_tokens flag.\n else:\n label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100)\n previous_word_idx = word_idx\n\n labels.append(label_ids)\n tokenized_inputs[\"labels\"] = labels\n return tokenized_inputs\n\n processed_raw_datasets = raw_datasets.map(\n tokenize_and_align_labels,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n load_from_cache_file=not data_args.overwrite_cache,\n remove_columns=raw_datasets[\"train\"].column_names,\n desc=\"Running tokenizer on dataset\",\n )\n\n train_dataset = processed_raw_datasets[\"train\"]\n eval_dataset = processed_raw_datasets[\"validation\"]\n\n # Log a few random samples from the training set:\n for index in random.sample(range(len(train_dataset)), 3):\n logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n # Define a summary writer\n has_tensorboard = is_tensorboard_available()\n if has_tensorboard and jax.process_index() == 0:\n try:\n from flax.metrics.tensorboard import SummaryWriter\n\n summary_writer = SummaryWriter(training_args.output_dir)\n summary_writer.hparams({**training_args.to_dict(), **vars(model_args), **vars(data_args)})\n except ImportError as ie:\n has_tensorboard = False\n logger.warning(\n f\"Unable to display metrics through TensorBoard because some packages are not installed: {ie}\"\n )\n else:\n logger.warning(\n \"Unable to display metrics through TensorBoard because the package is not installed: \"\n \"Please run pip install tensorboard to enable.\"\n )\n\n def write_train_metric(summary_writer, train_metrics, train_time, step):\n summary_writer.scalar(\"train_time\", train_time, step)\n\n train_metrics = get_metrics(train_metrics)\n for key, vals in train_metrics.items():\n tag = f\"train_{key}\"\n for i, val in enumerate(vals):\n summary_writer.scalar(tag, val, step - len(vals) + i + 1)\n\n def write_eval_metric(summary_writer, eval_metrics, step):\n for metric_name, value in eval_metrics.items():\n summary_writer.scalar(f\"eval_{metric_name}\", value, step)\n\n num_epochs = int(training_args.num_train_epochs)\n rng = jax.random.PRNGKey(training_args.seed)\n dropout_rngs = jax.random.split(rng, jax.local_device_count())\n\n train_batch_size = training_args.per_device_train_batch_size * jax.local_device_count()\n eval_batch_size = training_args.per_device_eval_batch_size * jax.local_device_count()\n\n learning_rate_fn = create_learning_rate_fn(\n len(train_dataset),\n train_batch_size,\n training_args.num_train_epochs,\n training_args.warmup_steps,\n training_args.learning_rate,\n )\n\n state = create_train_state(model, learning_rate_fn, num_labels=num_labels, training_args=training_args)\n\n # define step functions\n def train_step(\n state: train_state.TrainState, batch: Dict[str, Array], dropout_rng: PRNGKey\n ) -> Tuple[train_state.TrainState, float]:\n \"\"\"Trains model with an optimizer (both in `state`) on `batch`, returning a pair `(new_state, loss)`.\"\"\"\n dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)\n targets = batch.pop(\"labels\")\n\n def loss_fn(params):\n logits = state.apply_fn(**batch, params=params, 
dropout_rng=dropout_rng, train=True)[0]\n loss = state.loss_fn(logits, targets)\n return loss\n\n grad_fn = jax.value_and_grad(loss_fn)\n loss, grad = grad_fn(state.params)\n grad = jax.lax.pmean(grad, \"batch\")\n new_state = state.apply_gradients(grads=grad)\n metrics = jax.lax.pmean({\"loss\": loss, \"learning_rate\": learning_rate_fn(state.step)}, axis_name=\"batch\")\n return new_state, metrics, new_dropout_rng\n\n p_train_step = jax.pmap(train_step, axis_name=\"batch\", donate_argnums=(0,))\n\n def eval_step(state, batch):\n logits = state.apply_fn(**batch, params=state.params, train=False)[0]\n return state.logits_fn(logits)\n\n p_eval_step = jax.pmap(eval_step, axis_name=\"batch\")\n\n metric = load_metric(\"seqeval\")\n\n def get_labels(y_pred, y_true):\n # Transform predictions and references tensors to numpy arrays\n\n # Remove ignored index (special tokens)\n true_predictions = [\n [label_list[p] for (p, l) in zip(pred, gold_label) if l != -100]\n for pred, gold_label in zip(y_pred, y_true)\n ]\n true_labels = [\n [label_list[l] for (p, l) in zip(pred, gold_label) if l != -100]\n for pred, gold_label in zip(y_pred, y_true)\n ]\n return true_predictions, true_labels\n\n def compute_metrics():\n results = metric.compute()\n if data_args.return_entity_level_metrics:\n # Unpack nested dictionaries\n final_results = {}\n for key, value in results.items():\n if isinstance(value, dict):\n for n, v in value.items():\n final_results[f\"{key}_{n}\"] = v\n else:\n final_results[key] = value\n return final_results\n else:\n return {\n \"precision\": results[\"overall_precision\"],\n \"recall\": results[\"overall_recall\"],\n \"f1\": results[\"overall_f1\"],\n \"accuracy\": results[\"overall_accuracy\"],\n }\n\n logger.info(f\"===== Starting training ({num_epochs} epochs) =====\")\n train_time = 0\n\n # make sure weights are replicated on each device\n state = replicate(state)\n\n train_time = 0\n step_per_epoch = len(train_dataset) // train_batch_size\n total_steps = step_per_epoch * num_epochs\n epochs = tqdm(range(num_epochs), desc=f\"Epoch ... (1/{num_epochs})\", position=0)\n for epoch in epochs:\n\n train_start = time.time()\n train_metrics = []\n\n # Create sampling rng\n rng, input_rng = jax.random.split(rng)\n\n # train\n for step, batch in enumerate(\n tqdm(\n train_data_collator(input_rng, train_dataset, train_batch_size),\n total=step_per_epoch,\n desc=\"Training...\",\n position=1,\n )\n ):\n state, train_metric, dropout_rngs = p_train_step(state, batch, dropout_rngs)\n train_metrics.append(train_metric)\n\n cur_step = (epoch * step_per_epoch) + (step + 1)\n\n if cur_step % training_args.logging_steps == 0 and cur_step > 0:\n # Save metrics\n train_metric = unreplicate(train_metric)\n train_time += time.time() - train_start\n if has_tensorboard and jax.process_index() == 0:\n write_train_metric(summary_writer, train_metrics, train_time, cur_step)\n\n epochs.write(\n f\"Step... 
({cur_step}/{total_steps} | Training Loss: {train_metric['loss']}, Learning Rate:\"\n f\" {train_metric['learning_rate']})\"\n )\n\n train_metrics = []\n\n if cur_step % training_args.eval_steps == 0 and cur_step > 0:\n\n eval_metrics = {}\n # evaluate\n for batch in tqdm(\n eval_data_collator(eval_dataset, eval_batch_size),\n total=len(eval_dataset) // eval_batch_size,\n desc=\"Evaluating ...\",\n position=2,\n ):\n labels = batch.pop(\"labels\")\n predictions = p_eval_step(state, batch)\n predictions = np.array([pred for pred in chain(*predictions)])\n labels = np.array([label for label in chain(*labels)])\n labels[np.array(list(chain(*batch[\"attention_mask\"]))) == 0] = -100\n preds, refs = get_labels(predictions, labels)\n metric.add_batch(\n predictions=preds,\n references=refs,\n )\n\n # evaluate also on leftover examples (not divisible by batch_size)\n num_leftover_samples = len(eval_dataset) % eval_batch_size\n\n # make sure leftover batch is evaluated on one device\n if num_leftover_samples > 0 and jax.process_index() == 0:\n # take leftover samples\n batch = eval_dataset[-num_leftover_samples:]\n batch = {k: np.array(v) for k, v in batch.items()}\n\n labels = batch.pop(\"labels\")\n predictions = eval_step(unreplicate(state), batch)\n labels = np.array(labels)\n labels[np.array(batch[\"attention_mask\"]) == 0] = -100\n preds, refs = get_labels(predictions, labels)\n metric.add_batch(\n predictions=preds,\n references=refs,\n )\n\n eval_metrics = compute_metrics()\n\n if data_args.return_entity_level_metrics:\n logger.info(f\"Step... ({cur_step}/{total_steps} | Validation metrics: {eval_metrics}\")\n else:\n logger.info(\n f\"Step... ({cur_step}/{total_steps} | Validation f1: {eval_metrics['f1']}, Validation Acc:\"\n f\" {eval_metrics['accuracy']})\"\n )\n\n if has_tensorboard and jax.process_index() == 0:\n write_eval_metric(summary_writer, eval_metrics, cur_step)\n\n if (cur_step % training_args.save_steps == 0 and cur_step > 0) or (cur_step == total_steps):\n # save checkpoint after each epoch and push checkpoint to the hub\n if jax.process_index() == 0:\n params = jax.device_get(unreplicate(state.params))\n model.save_pretrained(training_args.output_dir, params=params)\n tokenizer.save_pretrained(training_args.output_dir)\n if training_args.push_to_hub:\n repo.push_to_hub(commit_message=f\"Saving weights and logs of step {cur_step}\", blocking=False)\n epochs.desc = f\"Epoch ... 
{epoch + 1}/{num_epochs}\"\n\n # Eval after training\n if training_args.do_eval:\n eval_metrics = {}\n eval_loader = eval_data_collator(eval_dataset, eval_batch_size)\n for batch in tqdm(eval_loader, total=len(eval_dataset) // eval_batch_size, desc=\"Evaluating ...\", position=2):\n labels = batch.pop(\"labels\")\n predictions = p_eval_step(state, batch)\n predictions = np.array([pred for pred in chain(*predictions)])\n labels = np.array([label for label in chain(*labels)])\n labels[np.array(list(chain(*batch[\"attention_mask\"]))) == 0] = -100\n preds, refs = get_labels(predictions, labels)\n metric.add_batch(predictions=preds, references=refs)\n\n # evaluate also on leftover examples (not divisible by batch_size)\n num_leftover_samples = len(eval_dataset) % eval_batch_size\n\n # make sure leftover batch is evaluated on one device\n if num_leftover_samples > 0 and jax.process_index() == 0:\n # take leftover samples\n batch = eval_dataset[-num_leftover_samples:]\n batch = {k: np.array(v) for k, v in batch.items()}\n\n labels = np.array(batch.pop(\"labels\"))\n predictions = eval_step(unreplicate(state), batch)\n labels[np.array(batch[\"attention_mask\"]) == 0] = -100\n preds, refs = get_labels(predictions, labels)\n metric.add_batch(predictions=preds, references=refs)\n\n eval_metrics = compute_metrics()\n\n if jax.process_index() == 0:\n eval_metrics = {f\"eval_{metric_name}\": value for metric_name, value in eval_metrics.items()}\n path = os.path.join(training_args.output_dir, \"eval_results.json\")\n with open(path, \"w\") as f:\n json.dump(eval_metrics, f, indent=4, sort_keys=True)\n\n\nif __name__ == \"__main__\":\n main()\n",
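The `create_learning_rate_fn` helper in the script above joins two optax linear schedules: a warmup from 0 to the peak rate over `num_warmup_steps`, then a linear decay back to 0 over the remaining steps. A small self-contained sketch (illustrative numbers, not the script's defaults) showing the schedule values at a few steps:

import optax

num_train_steps, num_warmup_steps, learning_rate = 100, 10, 5e-5
# Warmup phase: 0 -> peak over the first num_warmup_steps updates.
warmup_fn = optax.linear_schedule(
    init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps
)
# Decay phase: peak -> 0 over the remaining updates.
decay_fn = optax.linear_schedule(
    init_value=learning_rate, end_value=0.0, transition_steps=num_train_steps - num_warmup_steps
)
schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])

for step in (0, 5, 10, 55, 100):
    print(step, float(schedule_fn(step)))
# step 0 -> 0.0, step 10 -> peak 5e-5, step 100 -> 0.0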
"#!/usr/bin/env python3\n# Copyright 2018 CMU and The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Bertology: this script shows how you can explore the internals of the models in the library to:\n - compute the entropy of the head attentions\n - compute the importance of each head\n - prune (remove) the low importance head.\n Some parts of this script are adapted from the code of Michel et al. (http://arxiv.org/abs/1905.10650)\n which is available at https://github.com/pmichel31415/are-16-heads-really-better-than-1\n\"\"\"\nimport argparse\nimport logging\nimport os\nfrom datetime import datetime\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader, SequentialSampler, Subset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm\n\nimport transformers\nfrom transformers import (\n AutoConfig,\n AutoModelForSequenceClassification,\n AutoTokenizer,\n GlueDataset,\n default_data_collator,\n glue_compute_metrics,\n glue_output_modes,\n glue_processors,\n set_seed,\n)\nfrom transformers.trainer_utils import is_main_process\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef entropy(p):\n \"\"\"Compute the entropy of a probability distribution\"\"\"\n plogp = p * torch.log(p)\n plogp[p == 0] = 0\n return -plogp.sum(dim=-1)\n\n\ndef print_2d_tensor(tensor):\n \"\"\"Print a 2D tensor\"\"\"\n logger.info(\"lv, h >\\t\" + \"\\t\".join(f\"{x + 1}\" for x in range(len(tensor))))\n for row in range(len(tensor)):\n if tensor.dtype != torch.long:\n logger.info(f\"layer {row + 1}:\\t\" + \"\\t\".join(f\"{x:.5f}\" for x in tensor[row].cpu().data))\n else:\n logger.info(f\"layer {row + 1}:\\t\" + \"\\t\".join(f\"{x:d}\" for x in tensor[row].cpu().data))\n\n\ndef compute_heads_importance(\n args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False\n):\n \"\"\"This method shows how to compute:\n - head attention entropy\n - head importance scores according to http://arxiv.org/abs/1905.10650\n \"\"\"\n # Prepare our tensors\n n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads\n head_importance = torch.zeros(n_layers, n_heads).to(args.device)\n attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)\n\n if head_mask is None:\n head_mask = torch.ones(n_layers, n_heads).to(args.device)\n\n head_mask.requires_grad_(requires_grad=True)\n # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch\n if actually_pruned:\n head_mask = None\n\n preds = None\n labels = None\n tot_tokens = 0.0\n\n for step, inputs in enumerate(tqdm(eval_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])):\n for k, v in inputs.items():\n inputs[k] = v.to(args.device)\n\n # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)\n outputs = model(**inputs, head_mask=head_mask)\n loss, logits, all_attentions = (\n outputs[0],\n outputs[1],\n 
outputs[-1],\n ) # Loss and logits are the first, attention the last\n loss.backward() # Backpropagate to populate the gradients in the head mask\n\n if compute_entropy:\n for layer, attn in enumerate(all_attentions):\n masked_entropy = entropy(attn.detach()) * inputs[\"attention_mask\"].float().unsqueeze(1)\n attn_entropy[layer] += masked_entropy.sum(-1).sum(0).detach()\n\n if compute_importance:\n head_importance += head_mask.grad.abs().detach()\n\n # Also store our logits/labels if we want to compute metrics afterwards\n if preds is None:\n preds = logits.detach().cpu().numpy()\n labels = inputs[\"labels\"].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n labels = np.append(labels, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n\n tot_tokens += inputs[\"attention_mask\"].float().detach().sum().data\n\n # Normalize\n attn_entropy /= tot_tokens\n head_importance /= tot_tokens\n # Layerwise importance normalization\n if not args.dont_normalize_importance_by_layer:\n exponent = 2\n norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)\n head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20\n\n if not args.dont_normalize_global_importance:\n head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())\n\n # Print/save matrices\n np.save(os.path.join(args.output_dir, \"attn_entropy.npy\"), attn_entropy.detach().cpu().numpy())\n np.save(os.path.join(args.output_dir, \"head_importance.npy\"), head_importance.detach().cpu().numpy())\n\n logger.info(\"Attention entropies\")\n print_2d_tensor(attn_entropy)\n logger.info(\"Head importance scores\")\n print_2d_tensor(head_importance)\n logger.info(\"Head ranked by importance scores\")\n head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)\n head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(\n head_importance.numel(), device=args.device\n )\n head_ranks = head_ranks.view_as(head_importance)\n print_2d_tensor(head_ranks)\n\n return attn_entropy, head_importance, preds, labels\n\n\ndef mask_heads(args, model, eval_dataloader):\n \"\"\"This method shows how to mask head (set some heads to zero), to test the effect on the network,\n based on the head importance scores, as described in Michel et al. 
(http://arxiv.org/abs/1905.10650)\n \"\"\"\n _, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)\n preds = np.argmax(preds, axis=1) if args.output_mode == \"classification\" else np.squeeze(preds)\n original_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]\n logger.info(\"Pruning: original score: %f, threshold: %f\", original_score, original_score * args.masking_threshold)\n\n new_head_mask = torch.ones_like(head_importance)\n num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))\n\n current_score = original_score\n while current_score >= original_score * args.masking_threshold:\n head_mask = new_head_mask.clone() # save current head mask\n # heads from least important to most - keep only not-masked heads\n head_importance[head_mask == 0.0] = float(\"Inf\")\n current_heads_to_mask = head_importance.view(-1).sort()[1]\n\n if len(current_heads_to_mask) <= num_to_mask:\n break\n\n # mask heads\n current_heads_to_mask = current_heads_to_mask[:num_to_mask]\n logger.info(\"Heads to mask: %s\", str(current_heads_to_mask.tolist()))\n new_head_mask = new_head_mask.view(-1)\n new_head_mask[current_heads_to_mask] = 0.0\n new_head_mask = new_head_mask.view_as(head_mask)\n new_head_mask = new_head_mask.clone().detach()\n print_2d_tensor(new_head_mask)\n\n # Compute metric and head importance again\n _, head_importance, preds, labels = compute_heads_importance(\n args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask\n )\n preds = np.argmax(preds, axis=1) if args.output_mode == \"classification\" else np.squeeze(preds)\n current_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]\n logger.info(\n \"Masking: current score: %f, remaining heads %d (%.1f percent)\",\n current_score,\n new_head_mask.sum(),\n new_head_mask.sum() / new_head_mask.numel() * 100,\n )\n\n logger.info(\"Final head mask\")\n print_2d_tensor(head_mask)\n np.save(os.path.join(args.output_dir, \"head_mask.npy\"), head_mask.detach().cpu().numpy())\n\n return head_mask\n\n\ndef prune_heads(args, model, eval_dataloader, head_mask):\n \"\"\"This method shows how to prune heads (remove head weights) based on\n the head importance scores as described in Michel et al. 
(http://arxiv.org/abs/1905.10650)\n \"\"\"\n # Try pruning and test time speedup\n # Pruning is like masking but we actually remove the masked weights\n before_time = datetime.now()\n _, _, preds, labels = compute_heads_importance(\n args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask\n )\n preds = np.argmax(preds, axis=1) if args.output_mode == \"classification\" else np.squeeze(preds)\n score_masking = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]\n original_time = datetime.now() - before_time\n\n original_num_params = sum(p.numel() for p in model.parameters())\n heads_to_prune = dict(\n (layer, (1 - head_mask[layer].long()).nonzero().squeeze().tolist()) for layer in range(len(head_mask))\n )\n\n assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()\n model.prune_heads(heads_to_prune)\n pruned_num_params = sum(p.numel() for p in model.parameters())\n\n before_time = datetime.now()\n _, _, preds, labels = compute_heads_importance(\n args,\n model,\n eval_dataloader,\n compute_entropy=False,\n compute_importance=False,\n head_mask=None,\n actually_pruned=True,\n )\n preds = np.argmax(preds, axis=1) if args.output_mode == \"classification\" else np.squeeze(preds)\n score_pruning = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]\n new_time = datetime.now() - before_time\n\n logger.info(\n \"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)\",\n original_num_params,\n pruned_num_params,\n pruned_num_params / original_num_params * 100,\n )\n logger.info(\"Pruning: score with masking: %f score with pruning: %f\", score_masking, score_pruning)\n logger.info(\"Pruning: speed ratio (original timing / new timing): %f percent\", original_time / new_time * 100)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n # Required parameters\n parser.add_argument(\n \"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. 
Should contain the .tsv files (or other data files) for the task.\",\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pretrained model or model identifier from huggingface.co/models\",\n )\n parser.add_argument(\n \"--task_name\",\n default=None,\n type=str,\n required=True,\n help=\"The name of the task to train selected in the list: \" + \", \".join(glue_processors.keys()),\n )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n\n # Other parameters\n parser.add_argument(\n \"--config_name\",\n default=\"\",\n type=str,\n help=\"Pretrained config name or path if not the same as model_name_or_path\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name_or_path\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=None,\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from huggingface.co\",\n )\n parser.add_argument(\n \"--data_subset\", type=int, default=-1, help=\"If > 0: limit the data to a subset of data_subset instances.\"\n )\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Whether to overwrite data in output directory\"\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n\n parser.add_argument(\n \"--dont_normalize_importance_by_layer\", action=\"store_true\", help=\"Don't normalize importance score by layers\"\n )\n parser.add_argument(\n \"--dont_normalize_global_importance\",\n action=\"store_true\",\n help=\"Don't normalize all importance scores between 0 and 1\",\n )\n\n parser.add_argument(\n \"--try_masking\", action=\"store_true\", help=\"Whether to try to mask heads until a threshold of accuracy is reached.\"\n )\n parser.add_argument(\n \"--masking_threshold\",\n default=0.9,\n type=float,\n help=\"masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).\",\n )\n parser.add_argument(\n \"--masking_amount\", default=0.1, type=float, help=\"Amount of heads to mask at each masking step.\"\n )\n parser.add_argument(\"--metric_name\", default=\"acc\", type=str, help=\"Metric to use for head masking.\")\n\n parser.add_argument(\n \"--max_seq_length\",\n default=128,\n type=int,\n help=(\n \"The maximum total input sequence length after WordPiece tokenization. 
\\n\"\n \"Sequences longer than this will be truncated, sequences shorter padded.\"\n ),\n )\n parser.add_argument(\"--batch_size\", default=1, type=int, help=\"Batch size.\")\n\n parser.add_argument(\"--seed\", type=int, default=42)\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"local_rank for distributed training on gpus\")\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Whether not to use CUDA when available\")\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"Can be used for distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"Can be used for distant debugging.\")\n args = parser.parse_args()\n\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup devices and distributed training\n if args.local_rank == -1 or args.no_cuda:\n args.device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n args.device = torch.device(\"cuda\", args.local_rank)\n args.n_gpu = 1\n torch.distributed.init_process_group(backend=\"nccl\") # Initializes the distributed backend\n\n # Setup logging\n logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n logger.info(\"device: {} n_gpu: {}, distributed: {}\".format(args.device, args.n_gpu, bool(args.local_rank != -1)))\n # Set the verbosity to info of the Transformers logger (on main process only):\n if is_main_process(args.local_rank):\n transformers.utils.logging.set_verbosity_info()\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n\n # Set seeds\n set_seed(args.seed)\n\n # Prepare GLUE task\n args.task_name = args.task_name.lower()\n if args.task_name not in glue_processors:\n raise ValueError(\"Task not found: %s\" % (args.task_name))\n processor = glue_processors[args.task_name]()\n args.output_mode = glue_output_modes[args.task_name]\n label_list = processor.get_labels()\n num_labels = len(label_list)\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n\n config = AutoConfig.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path,\n num_labels=num_labels,\n finetuning_task=args.task_name,\n output_attentions=True,\n cache_dir=args.cache_dir,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n cache_dir=args.cache_dir,\n )\n model = AutoModelForSequenceClassification.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir,\n )\n\n # Distributed and parallel training\n model.to(args.device)\n if args.local_rank != -1:\n model = nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n elif args.n_gpu > 1:\n model = nn.DataParallel(model)\n\n # Print/save training arguments\n os.makedirs(args.output_dir, exist_ok=True)\n torch.save(args, 
os.path.join(args.output_dir, \"run_args.bin\"))\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Prepare dataset for the GLUE task\n eval_dataset = GlueDataset(args, tokenizer=tokenizer, mode=\"dev\")\n if args.data_subset > 0:\n eval_dataset = Subset(eval_dataset, list(range(min(args.data_subset, len(eval_dataset)))))\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n eval_dataloader = DataLoader(\n eval_dataset, sampler=eval_sampler, batch_size=args.batch_size, collate_fn=default_data_collator\n )\n\n # Compute head entropy and importance score\n compute_heads_importance(args, model, eval_dataloader)\n\n # Try head masking (set heads to zero until the score goes under a threshole)\n # and head pruning (remove masked heads and see the effect on the network)\n if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:\n head_mask = mask_heads(args, model, eval_dataloader)\n prune_heads(args, model, eval_dataloader, head_mask)\n\n\nif __name__ == \"__main__\":\n main()\n",
"# coding=utf-8\n# Copyright 2018 Google T5 Authors and HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport copy\nimport tempfile\nimport unittest\n\nfrom transformers import T5Config, is_torch_available\nfrom transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device\nfrom transformers.utils import cached_property\n\nfrom ...generation.test_generation_utils import GenerationTesterMixin\nfrom ...test_configuration_common import ConfigTester\nfrom ...test_modeling_common import ModelTesterMixin, ids_tensor\n\n\nif is_torch_available():\n import torch\n\n from transformers import ByT5Tokenizer, T5EncoderModel, T5ForConditionalGeneration, T5Model, T5Tokenizer\n from transformers.models.t5.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST\n\n\nclass T5ModelTester:\n def __init__(\n self,\n parent,\n vocab_size=99,\n batch_size=13,\n encoder_seq_length=7,\n decoder_seq_length=9,\n # For common tests\n is_training=True,\n use_attention_mask=True,\n use_labels=True,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n d_ff=37,\n relative_attention_num_buckets=8,\n dropout_rate=0.1,\n initializer_factor=0.002,\n eos_token_id=1,\n pad_token_id=0,\n decoder_start_token_id=0,\n scope=None,\n decoder_layers=None,\n ):\n\n self.parent = parent\n self.batch_size = batch_size\n self.encoder_seq_length = encoder_seq_length\n self.decoder_seq_length = decoder_seq_length\n # For common tests\n self.seq_length = self.decoder_seq_length\n self.is_training = is_training\n self.use_attention_mask = use_attention_mask\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.d_ff = d_ff\n self.relative_attention_num_buckets = relative_attention_num_buckets\n self.dropout_rate = dropout_rate\n self.initializer_factor = initializer_factor\n self.eos_token_id = eos_token_id\n self.pad_token_id = pad_token_id\n self.decoder_start_token_id = decoder_start_token_id\n self.scope = None\n self.decoder_layers = decoder_layers\n\n def get_large_model_config(self):\n return T5Config.from_pretrained(\"t5-base\")\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)\n decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)\n\n attention_mask = None\n decoder_attention_mask = None\n if self.use_attention_mask:\n attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)\n decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)\n\n lm_labels = None\n if self.use_labels:\n lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)\n\n config = self.get_config()\n\n return (\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n )\n\n def 
get_pipeline_config(self):\n return T5Config(\n vocab_size=166, # t5 forces 100 extra tokens\n d_model=self.hidden_size,\n d_ff=self.d_ff,\n d_kv=self.hidden_size // self.num_attention_heads,\n num_layers=self.num_hidden_layers,\n num_decoder_layers=self.decoder_layers,\n num_heads=self.num_attention_heads,\n relative_attention_num_buckets=self.relative_attention_num_buckets,\n dropout_rate=self.dropout_rate,\n initializer_factor=self.initializer_factor,\n eos_token_id=self.eos_token_id,\n bos_token_id=self.pad_token_id,\n pad_token_id=self.pad_token_id,\n decoder_start_token_id=self.decoder_start_token_id,\n )\n\n def get_config(self):\n return T5Config(\n vocab_size=self.vocab_size,\n d_model=self.hidden_size,\n d_ff=self.d_ff,\n d_kv=self.hidden_size // self.num_attention_heads,\n num_layers=self.num_hidden_layers,\n num_decoder_layers=self.decoder_layers,\n num_heads=self.num_attention_heads,\n relative_attention_num_buckets=self.relative_attention_num_buckets,\n dropout_rate=self.dropout_rate,\n initializer_factor=self.initializer_factor,\n eos_token_id=self.eos_token_id,\n bos_token_id=self.pad_token_id,\n pad_token_id=self.pad_token_id,\n decoder_start_token_id=self.decoder_start_token_id,\n )\n\n def check_prepare_lm_labels_via_shift_left(\n self,\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n ):\n model = T5Model(config=config)\n model.to(torch_device)\n model.eval()\n\n # make sure that lm_labels are correctly padded from the right\n lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id)\n\n # add causal pad token mask\n triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not()\n lm_labels.masked_fill_(triangular_mask, self.pad_token_id)\n decoder_input_ids = model._shift_right(lm_labels)\n\n for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)):\n # first item\n self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id)\n if i < decoder_input_ids_slice.shape[-1]:\n if i < decoder_input_ids.shape[-1] - 1:\n # items before diagonal\n self.parent.assertListEqual(\n decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist()\n )\n # pad items after diagonal\n if i < decoder_input_ids.shape[-1] - 2:\n self.parent.assertListEqual(\n decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist()\n )\n else:\n # all items after square\n self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist())\n\n def create_and_check_model(\n self,\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n ):\n model = T5Model(config=config)\n model.to(torch_device)\n model.eval()\n result = model(\n input_ids=input_ids,\n decoder_input_ids=decoder_input_ids,\n attention_mask=attention_mask,\n decoder_attention_mask=decoder_attention_mask,\n )\n result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)\n decoder_output = result.last_hidden_state\n decoder_past = result.past_key_values\n encoder_output = result.encoder_last_hidden_state\n\n self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))\n self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))\n # There should be `num_layers` key value embeddings stored in decoder_past\n self.parent.assertEqual(len(decoder_past), config.num_layers)\n # 
There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple\n self.parent.assertEqual(len(decoder_past[0]), 4)\n\n def create_and_check_with_lm_head(\n self,\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n ):\n model = T5ForConditionalGeneration(config=config).to(torch_device).eval()\n outputs = model(\n input_ids=input_ids,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n labels=lm_labels,\n )\n self.parent.assertEqual(len(outputs), 4)\n self.parent.assertEqual(outputs[\"logits\"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size))\n self.parent.assertEqual(outputs[\"loss\"].size(), ())\n\n def create_and_check_decoder_model_past(\n self,\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n ):\n model = T5Model(config=config).get_decoder().to(torch_device).eval()\n # first forward pass\n outputs = model(input_ids, use_cache=True)\n outputs_use_cache_conf = model(input_ids)\n outputs_no_past = model(input_ids, use_cache=False)\n\n self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))\n self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)\n\n output, past_key_values = outputs.to_tuple()\n\n # create hypothetical next token and extend to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n\n # append to next input_ids\n next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n\n output_from_no_past = model(next_input_ids)[\"last_hidden_state\"]\n output_from_past = model(next_tokens, past_key_values=past_key_values)[\"last_hidden_state\"]\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()\n output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()\n output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()\n\n # test that outputs are equal for slice\n self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))\n\n def create_and_check_decoder_model_attention_mask_past(\n self,\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n ):\n model = T5Model(config=config).get_decoder()\n model.to(torch_device)\n model.eval()\n\n # create attention mask\n attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)\n\n half_seq_length = input_ids.shape[-1] // 2\n attn_mask[:, half_seq_length:] = 0\n\n # first forward pass\n output, past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True).to_tuple()\n\n # create hypothetical next token and extend to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)\n\n # change a random masked slice from input_ids\n random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1\n random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)\n input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens\n\n # append to next input_ids and attn_mask\n next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n attn_mask = torch.cat(\n [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],\n dim=1,\n )\n\n # get two different outputs\n output_from_no_past = model(next_input_ids, attention_mask=attn_mask)[\"last_hidden_state\"]\n output_from_past = 
model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[\n \"last_hidden_state\"\n ]\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()\n output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()\n output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()\n\n # test that outputs are equal for slice\n self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))\n\n def create_and_check_decoder_model_past_large_inputs(\n self,\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n ):\n model = T5Model(config=config).get_decoder().to(torch_device).eval()\n # first forward pass\n outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)\n\n output, past_key_values = outputs.to_tuple()\n\n # create hypothetical multiple next tokens and extend to next_input_ids\n next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)\n next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)\n\n # append to next input_ids and attention mask\n next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1)\n\n output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[\"last_hidden_state\"]\n output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[\n \"last_hidden_state\"\n ]\n\n # select random slice\n random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()\n output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()\n output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()\n\n self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])\n\n # test that outputs are equal for slice\n self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))\n\n def create_and_check_generate_with_past_key_values(\n self,\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n ):\n model = T5ForConditionalGeneration(config=config).to(torch_device).eval()\n torch.manual_seed(0)\n output_without_past_cache = model.generate(\n input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False\n )\n torch.manual_seed(0)\n output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True)\n self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache))\n\n def create_and_check_model_fp16_forward(\n self,\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n ):\n model = T5Model(config=config).to(torch_device).half().eval()\n output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)[\"last_hidden_state\"]\n self.parent.assertFalse(torch.isnan(output).any().item())\n\n def create_and_check_encoder_decoder_shared_weights(\n self,\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n ):\n for model_class in [T5Model, T5ForConditionalGeneration]:\n torch.manual_seed(0)\n model = model_class(config=config).to(torch_device).eval()\n # load state dict copies weights but does not tie them\n model.encoder.load_state_dict(model.decoder.state_dict(), strict=False)\n\n torch.manual_seed(0)\n tied_config = copy.deepcopy(config)\n 
tied_config.tie_encoder_decoder = True\n tied_model = model_class(config=tied_config).to(torch_device).eval()\n\n model_result = model(\n input_ids=input_ids,\n decoder_input_ids=decoder_input_ids,\n attention_mask=attention_mask,\n decoder_attention_mask=decoder_attention_mask,\n )\n\n tied_model_result = tied_model(\n input_ids=input_ids,\n decoder_input_ids=decoder_input_ids,\n attention_mask=attention_mask,\n decoder_attention_mask=decoder_attention_mask,\n )\n\n # check that the tied model has fewer parameters\n self.parent.assertLess(\n sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters())\n )\n random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()\n\n # check that outputs are equal\n self.parent.assertTrue(\n torch.allclose(\n model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4\n )\n )\n\n # check that outputs after saving and loading are equal\n with tempfile.TemporaryDirectory() as tmpdirname:\n tied_model.save_pretrained(tmpdirname)\n tied_model = model_class.from_pretrained(tmpdirname)\n tied_model.to(torch_device)\n tied_model.eval()\n\n # check that the tied model has fewer parameters\n self.parent.assertLess(\n sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters())\n )\n random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()\n\n tied_model_result = tied_model(\n input_ids=input_ids,\n decoder_input_ids=decoder_input_ids,\n attention_mask=attention_mask,\n decoder_attention_mask=decoder_attention_mask,\n )\n\n # check that outputs are equal\n self.parent.assertTrue(\n torch.allclose(\n model_result[0][0, :, random_slice_idx],\n tied_model_result[0][0, :, random_slice_idx],\n atol=1e-4,\n )\n )\n\n def check_resize_embeddings_t5_v1_1(\n self,\n config,\n ):\n prev_vocab_size = config.vocab_size\n\n config.tie_word_embeddings = False\n model = T5ForConditionalGeneration(config=config).to(torch_device).eval()\n model.resize_token_embeddings(prev_vocab_size - 10)\n\n self.parent.assertEqual(model.get_input_embeddings().weight.shape[0], prev_vocab_size - 10)\n self.parent.assertEqual(model.get_output_embeddings().weight.shape[0], prev_vocab_size - 10)\n self.parent.assertEqual(model.config.vocab_size, prev_vocab_size - 10)\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n ) = config_and_inputs\n\n inputs_dict = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"decoder_input_ids\": decoder_input_ids,\n \"decoder_attention_mask\": decoder_attention_mask,\n \"use_cache\": False,\n }\n return config, inputs_dict\n\n\n@require_torch\nclass T5ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):\n\n all_model_classes = (T5Model, T5ForConditionalGeneration) if is_torch_available() else ()\n all_generative_model_classes = (T5ForConditionalGeneration,) if is_torch_available() else ()\n all_parallelizable_model_classes = (T5Model, T5ForConditionalGeneration) if is_torch_available() else ()\n fx_compatible = True\n test_pruning = False\n test_resize_embeddings = True\n test_model_parallel = True\n is_encoder_decoder = True\n\n def setUp(self):\n self.model_tester = T5ModelTester(self)\n self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_shift_right(self):\n 
config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs)\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n def test_model_v1_1(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n # check that gated gelu feed forward and different word embeddings work\n config = config_and_inputs[0]\n config.tie_word_embeddings = False\n config.feed_forward_proj = \"gated-gelu\"\n self.model_tester.create_and_check_model(config, *config_and_inputs[1:])\n\n def test_with_lm_head(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_with_lm_head(*config_and_inputs)\n\n def test_decoder_model_past(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)\n\n def test_decoder_model_past_with_attn_mask(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)\n\n def test_decoder_model_past_with_3d_attn_mask(self):\n (\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n ) = self.model_tester.prepare_config_and_inputs()\n\n attention_mask = ids_tensor(\n [self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length],\n vocab_size=2,\n )\n decoder_attention_mask = ids_tensor(\n [self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length],\n vocab_size=2,\n )\n\n self.model_tester.create_and_check_decoder_model_attention_mask_past(\n config,\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n )\n\n def test_decoder_model_past_with_large_inputs(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)\n\n def test_generate_with_past_key_values(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs)\n\n def test_encoder_decoder_shared_weights(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs)\n\n @unittest.skipIf(torch_device == \"cpu\", \"Can't do half precision\")\n def test_model_fp16_forward(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)\n\n def test_v1_1_resize_embeddings(self):\n config = self.model_tester.prepare_config_and_inputs()[0]\n self.model_tester.check_resize_embeddings_t5_v1_1(config)\n\n @slow\n def test_model_from_pretrained(self):\n for model_name in T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:\n model = T5Model.from_pretrained(model_name)\n self.assertIsNotNone(model)\n\n @unittest.skip(\"Test has a segmentation fault on torch 1.8.0\")\n def test_export_to_onnx(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n model = T5Model(config_and_inputs[0]).to(torch_device)\n with tempfile.TemporaryDirectory() as tmpdirname:\n torch.onnx.export(\n model,\n (config_and_inputs[1], config_and_inputs[3], 
config_and_inputs[2]),\n f\"{tmpdirname}/t5_test.onnx\",\n export_params=True,\n opset_version=9,\n input_names=[\"input_ids\", \"decoder_input_ids\"],\n )\n\n def test_generate_with_head_masking(self):\n attention_names = [\"encoder_attentions\", \"decoder_attentions\", \"cross_attentions\"]\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n config = config_and_inputs[0]\n max_length = config_and_inputs[1].shape[-1] + 3\n model = T5ForConditionalGeneration(config).eval()\n model.to(torch_device)\n\n head_masking = {\n \"head_mask\": torch.zeros(config.num_layers, config.num_heads, device=torch_device),\n \"decoder_head_mask\": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),\n \"cross_attn_head_mask\": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),\n }\n\n for attn_name, (name, mask) in zip(attention_names, head_masking.items()):\n head_masks = {name: mask}\n # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified\n if name == \"head_mask\":\n head_masks[\"decoder_head_mask\"] = torch.ones(\n config.num_decoder_layers, config.num_heads, device=torch_device\n )\n\n out = model.generate(\n config_and_inputs[1],\n num_beams=1,\n max_length=max_length,\n output_attentions=True,\n return_dict_in_generate=True,\n **head_masks,\n )\n # We check the state of decoder_attentions and cross_attentions just from the last step\n attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]\n self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)\n\n\nclass T5EncoderOnlyModelTester:\n def __init__(\n self,\n parent,\n vocab_size=99,\n batch_size=13,\n encoder_seq_length=7,\n # For common tests\n use_attention_mask=True,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n d_ff=37,\n relative_attention_num_buckets=8,\n is_training=False,\n dropout_rate=0.1,\n initializer_factor=0.002,\n is_encoder_decoder=False,\n eos_token_id=1,\n pad_token_id=0,\n scope=None,\n ):\n\n self.parent = parent\n self.batch_size = batch_size\n self.encoder_seq_length = encoder_seq_length\n # For common tests\n self.seq_length = self.encoder_seq_length\n self.use_attention_mask = use_attention_mask\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.d_ff = d_ff\n self.relative_attention_num_buckets = relative_attention_num_buckets\n self.dropout_rate = dropout_rate\n self.initializer_factor = initializer_factor\n self.eos_token_id = eos_token_id\n self.pad_token_id = pad_token_id\n self.is_encoder_decoder = is_encoder_decoder\n self.scope = None\n self.is_training = is_training\n\n def get_large_model_config(self):\n return T5Config.from_pretrained(\"t5-base\")\n\n def prepare_config_and_inputs(self):\n input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)\n\n attention_mask = None\n if self.use_attention_mask:\n attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)\n\n config = T5Config(\n vocab_size=self.vocab_size,\n d_model=self.hidden_size,\n d_ff=self.d_ff,\n d_kv=self.hidden_size // self.num_attention_heads,\n num_layers=self.num_hidden_layers,\n num_heads=self.num_attention_heads,\n relative_attention_num_buckets=self.relative_attention_num_buckets,\n dropout_rate=self.dropout_rate,\n initializer_factor=self.initializer_factor,\n eos_token_id=self.eos_token_id,\n 
bos_token_id=self.pad_token_id,\n pad_token_id=self.pad_token_id,\n is_encoder_decoder=self.is_encoder_decoder,\n )\n\n return (\n config,\n input_ids,\n attention_mask,\n )\n\n def create_and_check_model(\n self,\n config,\n input_ids,\n attention_mask,\n ):\n model = T5EncoderModel(config=config)\n model.to(torch_device)\n model.eval()\n result = model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n )\n result = model(input_ids=input_ids)\n encoder_output = result.last_hidden_state\n\n self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))\n\n def create_and_check_model_fp16_forward(\n self,\n config,\n input_ids,\n attention_mask,\n ):\n model = T5EncoderModel(config=config).to(torch_device).half().eval()\n output = model(input_ids, attention_mask=attention_mask)[\"last_hidden_state\"]\n self.parent.assertFalse(torch.isnan(output).any().item())\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (\n config,\n input_ids,\n attention_mask,\n ) = config_and_inputs\n\n inputs_dict = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n }\n return config, inputs_dict\n\n\nclass T5EncoderOnlyModelTest(ModelTesterMixin, unittest.TestCase):\n all_model_classes = (T5EncoderModel,) if is_torch_available() else ()\n test_pruning = False\n test_resize_embeddings = False\n test_model_parallel = True\n all_parallelizable_model_classes = (T5EncoderModel,) if is_torch_available() else ()\n\n def setUp(self):\n self.model_tester = T5EncoderOnlyModelTester(self)\n self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_model(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model(*config_and_inputs)\n\n @unittest.skipIf(torch_device == \"cpu\", \"Can't do half precision\")\n def test_model_fp16_forward(self):\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)\n\n\ndef use_task_specific_params(model, task):\n model.config.update(model.config.task_specific_params[task])\n\n\n@require_torch\n@require_sentencepiece\n@require_tokenizers\nclass T5ModelIntegrationTests(unittest.TestCase):\n @cached_property\n def model(self):\n return T5ForConditionalGeneration.from_pretrained(\"t5-base\").to(torch_device)\n\n @cached_property\n def tokenizer(self):\n return T5Tokenizer.from_pretrained(\"t5-base\")\n\n @slow\n def test_small_generation(self):\n model = T5ForConditionalGeneration.from_pretrained(\"t5-small\").to(torch_device)\n model.config.max_length = 8\n model.config.num_beams = 1\n model.config.do_sample = False\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\n input_ids = tokenizer(\"summarize: Hello there\", return_tensors=\"pt\").input_ids.to(torch_device)\n\n sequences = model.generate(input_ids)\n\n output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0]\n self.assertTrue(output_str == \"Hello there!\")\n\n @slow\n def test_small_integration_test(self):\n \"\"\"\n For comparison run:\n >>> import t5 # pip install t5==0.7.1\n >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary\n\n >>> path_to_mtf_small_t5_checkpoint = '<fill_in>'\n >>> path_to_mtf_small_spm_model_path = '<fill_in>'\n >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, 
batch_size=1, tpu=None)\n >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)\n >>> score = t5_model.score(inputs=[\"Hello there\"], targets=[\"Hi I am\"], vocabulary=vocab)\n \"\"\"\n\n model = T5ForConditionalGeneration.from_pretrained(\"t5-small\").to(torch_device)\n tokenizer = T5Tokenizer.from_pretrained(\"t5-small\")\n\n input_ids = tokenizer(\"Hello there\", return_tensors=\"pt\").input_ids\n labels = tokenizer(\"Hi I am\", return_tensors=\"pt\").input_ids\n\n loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss\n mtf_score = -(labels.shape[-1] * loss.item())\n\n EXPECTED_SCORE = -19.0845\n self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)\n\n @slow\n def test_small_v1_1_integration_test(self):\n \"\"\"\n For comparison run:\n >>> import t5 # pip install t5==0.7.1\n >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary\n\n >>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>'\n >>> path_to_mtf_small_spm_model_path = '<fill_in>'\n >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None)\n >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)\n >>> score = t5_model.score(inputs=[\"Hello there\"], targets=[\"Hi I am\"], vocabulary=vocab)\n \"\"\"\n\n model = T5ForConditionalGeneration.from_pretrained(\"google/t5-v1_1-small\").to(torch_device)\n tokenizer = T5Tokenizer.from_pretrained(\"google/t5-v1_1-small\")\n\n input_ids = tokenizer(\"Hello there\", return_tensors=\"pt\").input_ids\n labels = tokenizer(\"Hi I am\", return_tensors=\"pt\").input_ids\n\n loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss\n mtf_score = -(labels.shape[-1] * loss.item())\n\n EXPECTED_SCORE = -59.0293\n self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)\n\n @slow\n def test_small_byt5_integration_test(self):\n \"\"\"\n For comparison run:\n >>> import t5 # pip install t5==0.9.1\n\n >>> path_to_byt5_small_checkpoint = '<fill_in>'\n >>> t5_model = t5.models.MtfModel(model_dir=path_to_byt5_small_checkpoint, batch_size=1, tpu=None)\n >>> vocab = t5.data.ByteVocabulary()\n >>> score = t5_model.score(inputs=[\"Hello there\"], targets=[\"Hi I am\"], vocabulary=vocab)\n \"\"\"\n\n model = T5ForConditionalGeneration.from_pretrained(\"google/byt5-small\").to(torch_device)\n tokenizer = ByT5Tokenizer.from_pretrained(\"google/byt5-small\")\n\n input_ids = tokenizer(\"Hello there\", return_tensors=\"pt\").input_ids\n labels = tokenizer(\"Hi I am\", return_tensors=\"pt\").input_ids\n\n loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss\n mtf_score = -(labels.shape[-1] * loss.item())\n\n EXPECTED_SCORE = -60.7397\n self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)\n\n @slow\n def test_summarization(self):\n model = self.model\n tok = self.tokenizer\n\n FRANCE_ARTICLE = ( # @noqa\n \"Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings\"\n \" Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane.\"\n ' Marseille prosecutor Brice Robin told CNN that \"so far no videos were used in the crash investigation.\"'\n ' He added, \"A person who has such a video needs to immediately give it to the investigators.\" Robin\\'s'\n \" comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video\"\n \" showing the harrowing final seconds from on board Germanwings Flight 9525 as it 
crashed into the French\"\n \" Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a\"\n \" phone at the wreckage site. The two publications described the supposed video, but did not post it on\"\n \" their websites. The publications said that they watched the video, which was found by a source close to\"\n \" the investigation. \\\"One can hear cries of 'My God' in several languages,\\\" Paris Match reported.\"\n ' \"Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the'\n \" cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the\"\n ' screaming intensifies. Then nothing.\" \"It is a very disturbing scene,\" said Julian Reichelt,'\n \" editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said\"\n \" the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman\"\n \" in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the\"\n ' reports were \"completely wrong\" and \"unwarranted.\" Cell phones have been collected at the site, he said,'\n ' but that they \"hadn\\'t been exploited yet.\" Menichini said he believed the cell phones would need to be'\n \" sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by\"\n \" specialized technicians working hand-in-hand with investigators. But none of the cell phones found so\"\n \" far have been sent to the institute, Menichini said. Asked whether staff involved in the search could\"\n ' have leaked a memory card to the media, Menichini answered with a categorical \"no.\" Reichelt told \"Erin'\n ' Burnett: Outfront\" that he had watched the video and stood by the report, saying Bild and Paris Match'\n ' are \"very confident\" that the clip is real. He noted that investigators only revealed they\\'d recovered'\n ' cell phones from the crash site after Bild and Paris Match published their reports. \"That is something'\n \" we did not know before. ... Overall we can say many things of the investigation weren't revealed by the\"\n ' investigation at the beginning,\" he said. What was mental state of Germanwings co-pilot? German airline'\n \" Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the\"\n \" controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the\"\n ' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a \"previous episode of'\n ' severe depression,\" the airline said Tuesday. Email correspondence between Lubitz and the school'\n \" discovered in an internal investigation, Lufthansa said, included medical documents he submitted in\"\n \" connection with resuming his flight training. The announcement indicates that Lufthansa, the parent\"\n \" company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and\"\n \" ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%\"\n ' fit to fly, described its statement Tuesday as a \"swift and seamless clarification\" and said it was'\n \" sharing the information and documents -- including training and medical records -- with public\"\n \" prosecutors. 
Spohr traveled to the crash site Wednesday, where recovery teams have been working for the\"\n \" past week to recover human remains and plane debris scattered across a steep mountainside. He saw the\"\n \" crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash\"\n \" site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late\"\n \" Tuesday that no visible human remains were left at the site but recovery teams would keep searching.\"\n \" French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all\"\n \" the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested.\"\n \" In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said.\"\n \" Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew\"\n \" on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with\"\n \" the flight school during his training were among several developments as investigators continued to\"\n \" delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa\"\n \" spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his\"\n ' examinations and \"held all the licenses required.\" Earlier, a spokesman for the prosecutor\\'s office in'\n \" Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at\"\n \" some point before his aviation career and underwent psychotherapy before he got his pilot's license.\"\n \" Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the\"\n \" crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to\"\n \" lose his pilot's license, a European government official briefed on the investigation told CNN on\"\n ' Tuesday. While flying was \"a big part of his life,\" the source said, it\\'s only one theory being'\n \" considered. Another source, a law enforcement official briefed on the investigation, also told CNN that\"\n \" authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would\"\n \" not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had\"\n \" seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded\"\n \" he had psychological issues, the European government official said. But no matter what details emerge\"\n \" about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic\"\n ' psychologist. \"Psychology can explain why somebody would turn rage inward on themselves about the fact'\n \" that maybe they weren't going to keep doing their job and they're upset about that and so they're\"\n ' suicidal,\" he said. \"But there is no mental illness that explains why somebody then feels entitled to'\n \" also take that rage and turn it outward on 149 other people who had nothing to do with the person's\"\n ' problems.\" Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight'\n \" 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura\"\n \" Smith-Spark wrote from London. 
CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine\"\n \" Amiel and Anna-Maja Rappard contributed to this report.\"\n )\n SHORTER_ARTICLE = (\n \"(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on\"\n \" Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The\"\n \" formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based.\"\n \" The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its\"\n ' jurisdiction over alleged crimes committed \"in the occupied Palestinian territory, including East'\n ' Jerusalem, since June 13, 2014.\" Later that month, the ICC opened a preliminary examination into the'\n \" situation in Palestinian territories, paving the way for possible war crimes investigations against\"\n \" Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and\"\n \" the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the\"\n \" body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a\"\n ' move toward greater justice. \"As Palestine formally becomes a State Party to the Rome Statute today, the'\n ' world is also a step closer to ending a long era of impunity and injustice,\" he said, according to an'\n ' ICC news release. \"Indeed, today brings us closer to our shared goals of justice and peace.\" Judge'\n \" Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the\"\n ' Palestinians. \"As the Rome Statute today enters into force for the State of Palestine, Palestine'\n \" acquires all the rights as well as responsibilities that come with being a State Party to the Statute.\"\n ' These are substantive commitments, which cannot be taken lightly,\" she said. Rights group Human Rights'\n ' Watch welcomed the development. \"Governments seeking to penalize Palestine for joining the ICC should'\n \" immediately end their pressure, and countries that support universal acceptance of the court's treaty\"\n ' should speak out to welcome its membership,\" said Balkees Jarrah, international justice counsel for the'\n \" group. \\\"What's objectionable is the attempts to undermine international justice, not Palestine's\"\n ' decision to join a treaty to which over 100 countries around the world are members.\" In January, when'\n \" the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an\"\n ' outrage, saying the court was overstepping its boundaries. The United States also said it \"strongly\"'\n \" disagreed with the court's decision. \\\"As we have said repeatedly, we do not believe that Palestine is a\"\n ' state and therefore we do not believe that it is eligible to join the ICC,\" the State Department said in'\n ' a statement. It urged the warring sides to resolve their differences through direct negotiations. \"We'\n ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,\"'\n \" it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the\"\n ' territories as \"Palestine.\" While a preliminary examination is not a formal investigation, it allows the'\n \" court to review evidence and determine whether to investigate suspects on both sides. 
Prosecutor Fatou\"\n ' Bensouda said her office would \"conduct its analysis in full independence and impartiality.\" The war'\n \" between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry\"\n \" will include alleged war crimes committed since June. The International Criminal Court was set up in\"\n \" 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder\"\n \" and Faith Karimi contributed to this report.\"\n )\n IRAN_ARTICLE = (\n \"(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran\"\n \" in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively\"\n \" block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger.\"\n \" Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli\"\n \" Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a\"\n \" letter to the Iranian leadership warning them away from a deal. The debate that has already begun since\"\n \" the announcement of the new framework will likely result in more heat than light. It will not be helped\"\n \" by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: .\"\n \" The most misleading assertion, despite universal rejection by experts, is that the negotiations'\"\n \" objective at the outset was the total elimination of any nuclear program in Iran. That is the position\"\n \" of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it\"\n \" had been, there would have been no Iranian team at the negotiating table. Rather, the objective has\"\n \" always been to structure an agreement or series of agreements so that Iran could not covertly develop a\"\n \" nuclear arsenal before the United States and its allies could respond. The new framework has exceeded\"\n \" expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by\"\n \" two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another\"\n \" dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite\"\n \" sharp accusations by some in the United States and its allies, Iran denies having such a program, and\"\n \" U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's\"\n \" continued cooperation with International Atomic Energy Agency inspections is further evidence on this\"\n \" point, and we'll know even more about Iran's program in the coming months and years because of the deal.\"\n \" In fact, the inspections provisions that are part of this agreement are designed to protect against any\"\n \" covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that\"\n \" the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter\"\n \" warning that a deal might be killed by Congress or a future president). This of course is not the case.\"\n \" The talks were between Iran and the five permanent members of the U.N. Security Council (United States,\"\n \" United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. 
While the United States has\"\n \" played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement\"\n \" reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran\"\n \" and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement\"\n \" contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the\"\n \" case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased\"\n \" or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes\"\n \" Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear\"\n \" sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going\"\n \" forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such\"\n \" a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the\"\n ' agreement should be a formal treaty requiring the Senate to \"advise and consent.\" But the issue is not'\n \" suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New\"\n \" START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement\"\n \" with Iran will not be so balanced. The restrictions and obligations in the final framework agreement\"\n \" will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove\"\n \" most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally\"\n \" some insist that any agreement must address Iranian missile programs, human rights violations or support\"\n \" for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are\"\n \" unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in\"\n \" the negotiations would be a poison pill. This agreement should be judged on its merits and on how it\"\n \" affects the security of our negotiating partners and allies, including Israel. Those judgments should be\"\n \" fact-based, not based on questionable assertions or dubious assumptions.\"\n )\n ARTICLE_SUBWAY = (\n \"New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A\"\n \" year later, she got married again in Westchester County, but to a different man and without divorcing\"\n \" her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos\"\n ' declared \"I do\" five more times, sometimes only within two weeks of each other. In 2010, she married'\n \" once more, this time in the Bronx. In an application for a marriage license, she stated it was her\"\n ' \"first and only\" marriage. Barrientos, now 39, is facing two criminal counts of \"offering a false'\n ' instrument for filing in the first degree,\" referring to her false statements on the 2010 marriage'\n \" license application, according to court documents. Prosecutors said the marriages were part of an\"\n \" immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to\"\n \" her attorney, Christopher Wright, who declined to comment further. 
After leaving court, Barrientos was\"\n \" arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New\"\n \" York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total,\"\n \" Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All\"\n \" occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be\"\n \" married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors\"\n \" said the immigration scam involved some of her husbands, who filed for permanent residence status\"\n \" shortly after the marriages. Any divorces happened only after such filings were approved. It was\"\n \" unclear whether any of the men will be prosecuted. The case was referred to the Bronx District\"\n \" Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's\"\n ' Investigation Division. Seven of the men are from so-called \"red-flagged\" countries, including Egypt,'\n \" Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his\"\n \" native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces\"\n \" up to four years in prison. Her next court appearance is scheduled for May 18.\"\n )\n\n expected_summaries = [\n 'prosecutor: \"so far no videos were used in the crash investigation\" two magazines claim to have found a'\n \" cell phone video of the final seconds . \\\"one can hear cries of 'My God' in several languages,\\\" one\"\n \" magazine says .\",\n \"the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a\"\n \" preliminary examination into the situation in the occupied Palestinian territory . as members of the\"\n \" court, Palestinians may be subject to counter-charges as well .\",\n \"the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller:\"\n \" the debate that has already begun since the announcement of the new framework will likely result in more\"\n \" heat than light . the deal would reduce Iran's low-enriched uranium stockpile, cut centrifuges and\"\n \" implement a rigorous inspection regime .\",\n \"prosecutors say the marriages were part of an immigration scam . 
if convicted, barrientos faces two\"\n ' criminal counts of \"offering a false instrument for filing in the first degree\" she has been married 10'\n \" times, with nine of her marriages occurring between 1999 and 2002 .\",\n ]\n\n use_task_specific_params(model, \"summarization\")\n\n dct = tok(\n [model.config.prefix + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]],\n padding=\"max_length\",\n truncation=True,\n return_tensors=\"pt\",\n ).to(torch_device)\n self.assertEqual(512, dct[\"input_ids\"].shape[1])\n\n hypotheses_batch = model.generate(\n **dct,\n num_beams=4,\n length_penalty=2.0,\n max_length=142,\n min_length=56,\n no_repeat_ngram_size=3,\n do_sample=False,\n early_stopping=True,\n )\n\n decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False)\n self.assertListEqual(\n expected_summaries,\n decoded,\n )\n\n @slow\n def test_translation_en_to_de(self):\n model = self.model\n tok = self.tokenizer\n use_task_specific_params(model, \"translation_en_to_de\")\n\n en_text = '\"Luigi often said to me that he never wanted the brothers to end up in court\", she wrote.'\n expected_translation = (\n '\"Luigi sagte mir oft, dass er nie wollte, dass die Brüder am Gericht sitzen\", schrieb sie.'\n )\n\n input_ids = tok.encode(model.config.prefix + en_text, return_tensors=\"pt\")\n input_ids = input_ids.to(torch_device)\n output = model.generate(input_ids)\n translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)\n self.assertEqual(translation, expected_translation)\n\n @slow\n def test_translation_en_to_fr(self):\n model = self.model # t5-base\n tok = self.tokenizer\n use_task_specific_params(model, \"translation_en_to_fr\")\n\n en_text = (\n ' This image section from an infrared recording by the Spitzer telescope shows a \"family portrait\" of'\n \" countless generations of stars: the oldest stars are seen as blue dots. 
\"\n )\n\n input_ids = tok.encode(model.config.prefix + en_text, return_tensors=\"pt\")\n input_ids = input_ids.to(torch_device)\n\n output = model.generate(\n input_ids=input_ids,\n num_beams=4,\n length_penalty=2.0,\n max_length=100,\n no_repeat_ngram_size=3,\n do_sample=False,\n early_stopping=True,\n )\n translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)\n new_truncated_translation = (\n \"Cette section d'images provenant de l'enregistrement infrarouge effectué par le télescope Spitzer montre \"\n \"un \"\n \"« portrait familial » de générations innombrables d’étoiles : les plus anciennes sont observées \"\n \"sous forme \"\n \"de points bleus.\"\n )\n\n self.assertEqual(translation, new_truncated_translation)\n\n @slow\n def test_translation_en_to_ro(self):\n model = self.model\n tok = self.tokenizer\n use_task_specific_params(model, \"translation_en_to_ro\")\n en_text = \"Taco Bell said it plans to add 2,000 locations in the US by 2022.\"\n expected_translation = \"Taco Bell a declarat că intenţionează să adauge 2 000 de locaţii în SUA până în 2022.\"\n\n inputs = tok(model.config.prefix + en_text, return_tensors=\"pt\").to(torch_device)\n output = model.generate(**inputs)\n translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)\n self.assertEqual(translation, expected_translation)\n\n\n@require_torch\nclass TestAsymmetricT5(unittest.TestCase):\n def build_model_and_check_forward_pass(self, **kwargs):\n tester = T5ModelTester(self, **kwargs)\n config, *inputs = tester.prepare_config_and_inputs()\n (\n input_ids,\n decoder_input_ids,\n attention_mask,\n decoder_attention_mask,\n lm_labels,\n ) = inputs\n model = T5ForConditionalGeneration(config=config).to(torch_device).eval()\n outputs = model(\n input_ids=input_ids,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n labels=lm_labels,\n )\n # outputs = model(*inputs)\n assert len(outputs) == 4\n assert outputs[\"logits\"].size() == (tester.batch_size, tester.decoder_seq_length, tester.vocab_size)\n assert outputs[\"loss\"].size() == ()\n return model\n\n def test_small_decoder(self):\n # num_hidden_layers is passed to T5Config as num_layers\n model = self.build_model_and_check_forward_pass(decoder_layers=1, num_hidden_layers=2)\n assert len(model.encoder.block) == 2\n assert len(model.decoder.block) == 1\n\n def test_defaulting_to_symmetry(self):\n # num_hidden_layers is passed to T5Config as num_layers\n model = self.build_model_and_check_forward_pass(num_hidden_layers=2)\n assert len(model.decoder.block) == len(model.encoder.block) == 2\n",
"# coding=utf-8\n# Copyright 2020 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom transformers import BigBirdTokenizer, BigBirdTokenizerFast\nfrom transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow\nfrom transformers.utils import cached_property\n\nfrom ...test_tokenization_common import TokenizerTesterMixin\n\n\nSPIECE_UNDERLINE = \"▁\"\n\nSAMPLE_VOCAB = get_tests_dir(\"fixtures/test_sentencepiece.model\")\n\n\n@require_sentencepiece\n@require_tokenizers\nclass BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):\n\n tokenizer_class = BigBirdTokenizer\n rust_tokenizer_class = BigBirdTokenizerFast\n test_rust_tokenizer = True\n test_sentencepiece = True\n\n def setUp(self):\n super().setUp()\n\n tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)\n tokenizer.save_pretrained(self.tmpdirname)\n\n def test_convert_token_and_id(self):\n \"\"\"Test ``_convert_token_to_id`` and ``_convert_id_to_token``.\"\"\"\n token = \"<s>\"\n token_id = 1\n\n self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)\n self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)\n\n def test_get_vocab(self):\n vocab_keys = list(self.get_tokenizer().get_vocab().keys())\n\n self.assertEqual(vocab_keys[0], \"<unk>\")\n self.assertEqual(vocab_keys[1], \"<s>\")\n self.assertEqual(vocab_keys[-1], \"[MASK]\")\n self.assertEqual(len(vocab_keys), 1_004)\n\n def test_vocab_size(self):\n self.assertEqual(self.get_tokenizer().vocab_size, 1_000)\n\n def test_rust_and_python_full_tokenizers(self):\n if not self.test_rust_tokenizer:\n return\n\n tokenizer = self.get_tokenizer()\n rust_tokenizer = self.get_rust_tokenizer()\n\n sequence = \"I was born in 92000, and this is falsé.\"\n\n tokens = tokenizer.tokenize(sequence)\n rust_tokens = rust_tokenizer.tokenize(sequence)\n self.assertListEqual(tokens, rust_tokens)\n\n ids = tokenizer.encode(sequence, add_special_tokens=False)\n rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)\n self.assertListEqual(ids, rust_ids)\n\n rust_tokenizer = self.get_rust_tokenizer()\n ids = tokenizer.encode(sequence)\n rust_ids = rust_tokenizer.encode(sequence)\n self.assertListEqual(ids, rust_ids)\n\n def test_full_tokenizer(self):\n tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)\n\n tokens = tokenizer.tokenize(\"This is a test\")\n self.assertListEqual(tokens, [\"▁This\", \"▁is\", \"▁a\", \"▁t\", \"est\"])\n\n self.assertListEqual(\n tokenizer.convert_tokens_to_ids(tokens),\n [285, 46, 10, 170, 382],\n )\n\n tokens = tokenizer.tokenize(\"I was born in 92000, and this is falsé.\")\n self.assertListEqual(\n tokens,\n [\n SPIECE_UNDERLINE + \"I\",\n SPIECE_UNDERLINE + \"was\",\n SPIECE_UNDERLINE + \"b\",\n \"or\",\n \"n\",\n SPIECE_UNDERLINE + \"in\",\n SPIECE_UNDERLINE + \"\",\n \"9\",\n \"2\",\n \"0\",\n \"0\",\n \"0\",\n \",\",\n SPIECE_UNDERLINE + \"and\",\n 
SPIECE_UNDERLINE + \"this\",\n SPIECE_UNDERLINE + \"is\",\n SPIECE_UNDERLINE + \"f\",\n \"al\",\n \"s\",\n \"é\",\n \".\",\n ],\n )\n ids = tokenizer.convert_tokens_to_ids(tokens)\n self.assertListEqual(\n ids,\n [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],\n )\n\n back_tokens = tokenizer.convert_ids_to_tokens(ids)\n self.assertListEqual(\n back_tokens,\n [\n SPIECE_UNDERLINE + \"I\",\n SPIECE_UNDERLINE + \"was\",\n SPIECE_UNDERLINE + \"b\",\n \"or\",\n \"n\",\n SPIECE_UNDERLINE + \"in\",\n SPIECE_UNDERLINE + \"\",\n \"<unk>\",\n \"2\",\n \"0\",\n \"0\",\n \"0\",\n \",\",\n SPIECE_UNDERLINE + \"and\",\n SPIECE_UNDERLINE + \"this\",\n SPIECE_UNDERLINE + \"is\",\n SPIECE_UNDERLINE + \"f\",\n \"al\",\n \"s\",\n \"<unk>\",\n \".\",\n ],\n )\n\n @cached_property\n def big_tokenizer(self):\n return BigBirdTokenizer.from_pretrained(\"google/bigbird-roberta-base\")\n\n @slow\n def test_tokenization_base_easy_symbols(self):\n symbols = \"Hello World!\"\n original_tokenizer_encodings = [65, 18536, 2260, 101, 66]\n\n self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))\n\n @slow\n def test_tokenization_base_hard_symbols(self):\n symbols = (\n 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will'\n \" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth\"\n )\n # fmt: off\n original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231\n # fmt: on\n self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))\n\n @require_torch\n @slow\n def test_torch_encode_plus_sent_to_model(self):\n import torch\n\n from transformers import BigBirdConfig, BigBirdModel\n\n # Build sequence\n first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]\n sequence = \" \".join(first_ten_tokens)\n encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors=\"pt\", return_token_type_ids=False)\n batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(\n [sequence + \" \" + sequence], return_tensors=\"pt\", return_token_type_ids=False\n )\n\n config = BigBirdConfig(attention_type=\"original_full\")\n model = BigBirdModel(config)\n\n assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size\n\n with torch.no_grad():\n model(**encoded_sequence)\n model(**batch_encoded_sequence)\n\n @slow\n def test_special_tokens(self):\n \"\"\"\n To reproduce:\n\n $ wget https://github.com/google-research/bigbird/blob/master/bigbird/vocab/gpt2.model?raw=true\n $ mv gpt2.model?raw=true gpt2.model\n\n ```\n import tensorflow_text as tft\n import tensorflow as tf\n\n vocab_model_file = \"./gpt2.model\"\n tokenizer = tft.SentencepieceTokenizer(model=tf.io.gfile.GFile(vocab_model_file, \"rb\").read()))\n ids = tokenizer.tokenize(\"Paris is the [MASK].\")\n ids = tf.concat([tf.constant([65]), ids, tf.constant([66])], axis=0)\n detokenized = tokenizer.detokenize(ids) # should give [CLS] Paris is the [MASK].[SEP]\n \"\"\"\n tokenizer = BigBirdTokenizer.from_pretrained(\"google/bigbird-roberta-base\")\n decoded_text = tokenizer.decode(tokenizer(\"Paris is the [MASK].\").input_ids)\n\n self.assertTrue(decoded_text == \"[CLS] Paris is 
the[MASK].[SEP]\")\n\n @slow\n def test_tokenizer_integration(self):\n # fmt: off\n expected_encoding = {'input_ids': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501\n # fmt: on\n\n self.tokenizer_integration_test_util(\n expected_encoding=expected_encoding,\n model_name=\"google/bigbird-roberta-base\",\n revision=\"215c99f1600e06f83acce68422f2035b2b5c3510\",\n )\n",
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team, and the\n# Lxmert Authors.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" TF 2.0 LXMERT model.\"\"\"\n\nimport warnings\nfrom dataclasses import dataclass\nfrom typing import Dict, Optional, Tuple\n\nimport tensorflow as tf\n\nfrom transformers.tf_utils import stable_softmax\n\nfrom ...activations_tf import get_tf_activation\nfrom ...modeling_tf_utils import TFPreTrainedModel, get_initializer, keras_serializable, shape_list, unpack_inputs\nfrom ...utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n logging,\n replace_return_docstrings,\n)\nfrom .configuration_lxmert import LxmertConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"unc-nlp/lxmert-base-uncased\"\n_CONFIG_FOR_DOC = \"LxmertConfig\"\n_TOKENIZER_FOR_DOC = \"LxmertTokenizer\"\n\nTF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"unc-nlp/lxmert-base-uncased\",\n]\n\n\n@dataclass\nclass TFLxmertModelOutput(ModelOutput):\n \"\"\"\n Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language,\n visual, and, cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the \"relation-ship\"\n encoder\")\n\n\n Args:\n language_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the language encoder.\n vision_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the visual encoder.\n pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):\n Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed\n by a Linear layer and a Tanh activation function. The Linear\n language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in\n the self-attention heads.\n vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in\n the self-attention heads.\n cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in\n the self-attention heads.\n \"\"\"\n\n language_output: Optional[tf.Tensor] = None\n vision_output: Optional[tf.Tensor] = None\n pooled_output: Optional[tf.Tensor] = None\n language_hidden_states: Optional[Tuple[tf.Tensor]] = None\n vision_hidden_states: Optional[Tuple[tf.Tensor]] = None\n language_attentions: Optional[Tuple[tf.Tensor]] = None\n vision_attentions: Optional[Tuple[tf.Tensor]] = None\n cross_encoder_attentions: Optional[Tuple[tf.Tensor]] = None\n\n\n@dataclass\nclass TFLxmertForPreTrainingOutput(ModelOutput):\n \"\"\"\n Output type of [`LxmertForPreTraining`].\n\n Args:\n loss (*optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`):\n Total loss as the sum of the masked language modeling loss and the next sequence prediction\n (classification) loss.\n prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n cross_relationship_score: (`tf.Tensor` of shape `(batch_size, 2)`):\n Prediction scores of the textual matching objective (classification) head (scores of True/False\n continuation before SoftMax).\n question_answering_score: (`tf.Tensor` of shape `(batch_size, n_qa_answers)`):\n Prediction scores of question answering objective (classification).\n language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in\n the self-attention heads.\n vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in\n the self-attention heads.\n cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in\n the self-attention heads.\n\n \"\"\"\n\n loss: Optional[tf.Tensor] = None\n prediction_logits: Optional[tf.Tensor] = None\n cross_relationship_score: Optional[tf.Tensor] = None\n question_answering_score: Optional[tf.Tensor] = None\n language_hidden_states: Optional[Tuple[tf.Tensor]] = None\n vision_hidden_states: Optional[Tuple[tf.Tensor]] = None\n language_attentions: Optional[Tuple[tf.Tensor]] = None\n vision_attentions: Optional[Tuple[tf.Tensor]] = None\n cross_encoder_attentions: Optional[Tuple[tf.Tensor]] = None\n\n\nclass TFLxmertVisualFeatureEncoder(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n # Object feature encoding\n self.visn_fc = tf.keras.layers.Dense(\n config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"visn_fc\",\n )\n self.visn_layer_norm = tf.keras.layers.LayerNormalization(\n epsilon=config.layer_norm_eps, name=\"visn_layer_norm\"\n )\n\n # Box position encoding\n self.box_fc = tf.keras.layers.Dense(\n config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"box_fc\",\n )\n self.box_layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"box_layer_norm\")\n\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n\n def call(self, visn_input, training=False):\n feats, boxes = visn_input\n\n x = self.visn_fc(feats)\n x = self.visn_layer_norm(x)\n y = self.box_fc(boxes)\n y = self.box_layer_norm(y)\n output = (x + y) / 2\n\n output = self.dropout(output, training=training)\n return output\n\n\nclass TFLxmertEmbeddings(tf.keras.layers.Layer):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.vocab_size = config.vocab_size\n self.type_vocab_size = config.type_vocab_size\n self.hidden_size = config.hidden_size\n self.max_position_embeddings = config.max_position_embeddings\n self.initializer_range = config.initializer_range\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)\n\n def build(self, input_shape):\n with tf.name_scope(\"word_embeddings\"):\n self.weight = self.add_weight(\n name=\"weight\",\n shape=[self.vocab_size, self.hidden_size],\n initializer=get_initializer(initializer_range=self.initializer_range),\n )\n\n with tf.name_scope(\"token_type_embeddings\"):\n self.token_type_embeddings = self.add_weight(\n name=\"embeddings\",\n shape=[self.type_vocab_size, self.hidden_size],\n initializer=get_initializer(initializer_range=self.initializer_range),\n )\n\n with tf.name_scope(\"position_embeddings\"):\n self.position_embeddings = self.add_weight(\n name=\"embeddings\",\n shape=[self.max_position_embeddings, self.hidden_size],\n initializer=get_initializer(initializer_range=self.initializer_range),\n )\n\n super().build(input_shape)\n\n def call(self, input_ids=None, token_type_ids=None, 
inputs_embeds=None, training=False):\n \"\"\"\n Applies embedding based on inputs tensor.\n\n Returns:\n final_embeddings (`tf.Tensor`): output embedding tensor.\n \"\"\"\n assert not (input_ids is None and inputs_embeds is None)\n\n if input_ids is not None:\n inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n\n input_shape = shape_list(inputs_embeds)[:-1]\n\n if token_type_ids is None:\n token_type_ids = tf.fill(dims=input_shape, value=0)\n\n position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)\n position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n final_embeddings = inputs_embeds + position_embeds + token_type_embeds\n final_embeddings = self.LayerNorm(inputs=final_embeddings)\n final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n\n return final_embeddings\n\n\nclass TFLxmertAttention(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n assert config.hidden_size % config.num_attention_heads == 0\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"query\",\n )\n self.key = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"key\",\n )\n self.value = tf.keras.layers.Dense(\n self.all_head_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"value\",\n )\n\n self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x, batch_size):\n # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]\n x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n\n def call(self, hidden_states, context, attention_mask, output_attentions, training=False):\n batch_size = shape_list(hidden_states)[0]\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(context)\n mixed_value_layer = self.value(context)\n\n query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)\n key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)\n value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = tf.matmul(\n query_layer, key_layer, transpose_b=True\n ) # (batch size, num_heads, seq_len_q, seq_len_k)\n dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores\n attention_scores = attention_scores / tf.math.sqrt(dk)\n\n if attention_mask is not None:\n # Apply the attention mask (precomputed for all layers in TFLxmertModel call() function)\n attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores 
to probabilities.\n attention_probs = stable_softmax(attention_scores, axis=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs, training=training)\n context_layer = tf.matmul(attention_probs, value_layer)\n\n context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])\n context_layer = tf.reshape(\n context_layer, (batch_size, -1, self.all_head_size)\n ) # (batch_size, seq_len_q, all_head_size)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n return outputs\n\n\nclass TFLxmertIntermediate(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(\n config.intermediate_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"dense\",\n )\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = get_tf_activation(config.hidden_act)\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def call(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\nclass TFLxmertOutput(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(\n config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"dense\",\n )\n\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n\n def call(self, hidden_states, input_tensor, training=False):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states, training)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass TFLxmertAttentionOutput(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(\n config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"dense\",\n )\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)\n\n def call(self, hidden_states, input_tensor, training=False):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states, training=training)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass TFLxmertSelfAttentionLayer(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.self = TFLxmertAttention(config, name=\"self\")\n self.attention_output = TFLxmertAttentionOutput(config, name=\"output\")\n\n def call(self, input_tensor, attention_mask, output_attentions, training=False):\n # Self attention attends to itself, thus keys and queries are the same (input_tensor).\n self_output = self.self(input_tensor, input_tensor, attention_mask, output_attentions)\n if output_attentions:\n attention_probs = self_output[1]\n attention_output = self.attention_output(self_output[0], input_tensor)\n return (attention_output, attention_probs) if output_attentions else (attention_output,)\n\n\nclass TFLxmertCrossAttentionLayer(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.att = 
TFLxmertAttention(config, name=\"att\")\n self.attention_output = TFLxmertAttentionOutput(config, name=\"output\")\n\n def call(\n self,\n input_tensor,\n ctx_tensor,\n ctx_att_mask,\n output_attentions=False,\n training=False,\n ):\n output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions, training=training)\n if output_attentions:\n attention_probs = output[1]\n attention_output = self.attention_output(output[0], input_tensor, training=training)\n outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)\n return outputs\n\n\nclass TFLxmertLayer(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.attention = TFLxmertSelfAttentionLayer(config, name=\"attention\")\n self.intermediate = TFLxmertIntermediate(config, name=\"intermediate\")\n self.transformer_output = TFLxmertOutput(config, name=\"output\")\n\n def call(self, hidden_states, attention_mask, output_attentions, training=False):\n attention_outputs = self.attention(hidden_states, attention_mask, output_attentions, training=training)\n attention_output = attention_outputs[0]\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.transformer_output(intermediate_output, attention_output, training=training)\n outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them\n return outputs\n\n\nclass TFLxmertXLayer(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.visual_attention = TFLxmertCrossAttentionLayer(config, name=\"visual_attention\")\n\n # Self-attention Layers\n self.lang_self_att = TFLxmertSelfAttentionLayer(config, name=\"lang_self_att\")\n self.visn_self_att = TFLxmertSelfAttentionLayer(config, name=\"visn_self_att\")\n\n # Intermediate and Output Layers (FFNs)\n self.lang_inter = TFLxmertIntermediate(config, name=\"lang_inter\")\n self.lang_output = TFLxmertOutput(config, name=\"lang_output\")\n self.visn_inter = TFLxmertIntermediate(config, name=\"visn_inter\")\n self.visn_output = TFLxmertOutput(config, name=\"visn_output\")\n\n def cross_att(\n self,\n lang_input,\n lang_attention_mask,\n visn_input,\n visn_attention_mask,\n output_attentions,\n training=False,\n ):\n # Cross Attention\n\n # Keras saving and loading model *does not work* with the same inputs for two layers.\n lang_attention_lang_input = tf.identity(lang_input)\n visn_attention_lang_input = tf.identity(lang_input)\n lang_attention_visn_input = tf.identity(visn_input)\n visn_attention_visn_input = tf.identity(visn_input)\n\n lang_att_output = self.visual_attention(\n lang_attention_lang_input,\n lang_attention_visn_input,\n visn_attention_mask,\n output_attentions=output_attentions,\n training=training,\n )\n visn_att_output = self.visual_attention(\n visn_attention_visn_input,\n visn_attention_lang_input,\n lang_attention_mask,\n output_attentions=output_attentions,\n training=training,\n )\n return lang_att_output, visn_att_output\n\n def self_att(\n self,\n lang_input,\n lang_attention_mask,\n visn_input,\n visn_attention_mask,\n training=False,\n ):\n # Self Attention\n output_attentions = False\n lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions, training=training)\n visn_att_output = self.visn_self_att(visn_input, visn_attention_mask, output_attentions, training=training)\n return lang_att_output[0], visn_att_output[0]\n\n def output_fc(self, lang_input, visn_input, training=False):\n # FC layers\n 
lang_inter_output = self.lang_inter(lang_input)\n visn_inter_output = self.visn_inter(visn_input)\n\n # Layer output\n lang_output = self.lang_output(lang_inter_output, lang_input, training)\n visn_output = self.visn_output(visn_inter_output, visn_input, training)\n return lang_output, visn_output\n\n def call(\n self,\n lang_feats,\n lang_attention_mask,\n visn_feats,\n visn_attention_mask,\n output_attentions,\n training=False,\n ):\n lang_att_output = lang_feats\n visn_att_output = visn_feats\n\n lang_att_output, visn_att_output = self.cross_att(\n lang_att_output,\n lang_attention_mask,\n visn_att_output,\n visn_attention_mask,\n output_attentions,\n training=training,\n )\n attention_probs = lang_att_output[1:]\n lang_att_output, visn_att_output = self.self_att(\n lang_att_output[0],\n lang_attention_mask,\n visn_att_output[0],\n visn_attention_mask,\n training=training,\n )\n lang_output, visn_output = self.output_fc(lang_att_output, visn_att_output, training=training)\n\n return (lang_output, visn_output, attention_probs[0]) if output_attentions else (lang_output, visn_output)\n\n\nclass TFLxmertEncoder(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.visn_fc = TFLxmertVisualFeatureEncoder(config, name=\"visn_fc\")\n\n # Number of layers\n self.num_l_layers = config.l_layers\n self.num_x_layers = config.x_layers\n self.num_r_layers = config.r_layers\n\n # Layers\n # Using self.layer instead of self.l_layer to support loading BERT weights.\n self.layer = [TFLxmertLayer(config, name=f\"layer_._{i}\") for i in range(self.num_l_layers)]\n self.x_layers = [TFLxmertXLayer(config, name=f\"x_layers_._{i}\") for i in range(self.num_x_layers)]\n self.r_layers = [TFLxmertLayer(config, name=f\"r_layers_._{i}\") for i in range(self.num_r_layers)]\n self.config = config\n\n def call(\n self,\n lang_feats=None,\n lang_attention_mask=None,\n visual_feats=None,\n visual_pos=None,\n visual_attention_mask=None,\n output_attentions=None,\n training=False,\n ):\n vision_hidden_states = ()\n language_hidden_states = ()\n vision_attentions = () if output_attentions or self.config.output_attentions else None\n language_attentions = () if output_attentions or self.config.output_attentions else None\n cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None\n\n visual_feats = self.visn_fc([visual_feats, visual_pos], training=training)\n\n # Run language layers\n for layer_module in self.layer:\n l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions, training=training)\n lang_feats = l_outputs[0]\n language_hidden_states = language_hidden_states + (lang_feats,)\n if language_attentions is not None:\n language_attentions = language_attentions + (l_outputs[1],)\n\n # Run relational layers\n for layer_module in self.r_layers:\n v_outputs = layer_module(\n visual_feats,\n visual_attention_mask,\n output_attentions,\n training=training,\n )\n visual_feats = v_outputs[0]\n vision_hidden_states = vision_hidden_states + (visual_feats,)\n if vision_attentions is not None:\n vision_attentions = vision_attentions + (v_outputs[1],)\n\n # Run cross-modality layers\n for layer_module in self.x_layers:\n x_outputs = layer_module(\n lang_feats,\n lang_attention_mask,\n visual_feats,\n visual_attention_mask,\n output_attentions,\n training=training,\n )\n lang_feats, visual_feats = x_outputs[:2]\n vision_hidden_states = vision_hidden_states + (visual_feats,)\n language_hidden_states = language_hidden_states + 
(lang_feats,)\n if cross_encoder_attentions is not None:\n cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],)\n\n visual_encoder_outputs = (\n vision_hidden_states,\n vision_attentions if output_attentions else None,\n )\n lang_encoder_outputs = (\n language_hidden_states,\n language_attentions if output_attentions else None,\n )\n\n return (\n visual_encoder_outputs,\n lang_encoder_outputs,\n cross_encoder_attentions if output_attentions else None,\n )\n\n\n@keras_serializable\nclass TFLxmertMainLayer(tf.keras.layers.Layer):\n config_class = LxmertConfig\n\n @property\n def dummy_inputs(self):\n \"\"\"\n Dummy inputs to build the network.\n\n Returns:\n tf.Tensor with dummy inputs\n \"\"\"\n batch_size = 2\n num_visual_features = 10\n input_ids = tf.constant([[3, 5, 6], [2, 3, 4]])\n visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))\n visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))\n\n return {\n \"input_ids\": input_ids,\n \"visual_feats\": visual_feats,\n \"visual_pos\": visual_pos,\n }\n\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n\n self.config = config\n self.num_l_layers = config.l_layers\n self.num_x_layers = config.x_layers\n self.num_r_layers = config.r_layers\n self.initializer_range = config.initializer_range\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.return_dict = config.use_return_dict\n self.embeddings = TFLxmertEmbeddings(config, name=\"embeddings\")\n self.encoder = TFLxmertEncoder(config, name=\"encoder\")\n self.pooler = TFLxmertPooler(config, name=\"pooler\")\n self.config = config\n\n def get_input_embeddings(self):\n return self.embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.weight = value\n self.embeddings.vocab_size = shape_list(value)[0]\n\n def _prune_heads(self, heads_to_prune):\n raise NotImplementedError\n\n @unpack_inputs\n def call(\n self,\n input_ids=None,\n visual_feats=None,\n visual_pos=None,\n attention_mask=None,\n visual_attention_mask=None,\n token_type_ids=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n ):\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = shape_list(input_ids)\n elif inputs_embeds is not None:\n input_shape = shape_list(inputs_embeds)[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n if visual_pos is None or visual_feats is None:\n raise ValueError(\"visual_feats and visual_pos cannot be `None` in LXMERT's `call` method.\")\n\n if attention_mask is None:\n attention_mask = tf.fill(input_shape, 1)\n\n if token_type_ids is None:\n token_type_ids = tf.fill(input_shape, 0)\n\n # Positional Word Embeddings\n embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds, training)\n\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))\n\n # Since attention_mask is 1.0 
for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n\n extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)\n one_cst = tf.constant(1.0, dtype=embedding_output.dtype)\n ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)\n extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)\n\n if visual_attention_mask is not None:\n # Broadcast the 2D visual mask to [batch_size, 1, 1, num_visual_features] so it can be added to the scores.\n extended_visual_attention_mask = tf.expand_dims(tf.expand_dims(visual_attention_mask, axis=1), axis=1)\n\n extended_visual_attention_mask = tf.cast(extended_visual_attention_mask, dtype=embedding_output.dtype)\n extended_visual_attention_mask = tf.multiply(\n tf.subtract(one_cst, extended_visual_attention_mask), ten_thousand_cst\n )\n else:\n extended_visual_attention_mask = None\n\n # Run Lxmert encoder\n encoder_outputs = self.encoder(\n embedding_output,\n extended_attention_mask,\n visual_feats,\n visual_pos,\n extended_visual_attention_mask,\n output_attentions,\n training,\n )\n visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2]\n vision_hidden_states = visual_encoder_outputs[0]\n language_hidden_states = lang_encoder_outputs[0]\n\n all_attentions = ()\n if output_attentions:\n language_attentions = lang_encoder_outputs[1]\n vision_attentions = visual_encoder_outputs[1]\n cross_encoder_attentions = encoder_outputs[2]\n all_attentions = (\n language_attentions,\n vision_attentions,\n cross_encoder_attentions,\n )\n\n hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else ()\n\n visual_output = vision_hidden_states[-1]\n lang_output = language_hidden_states[-1]\n pooled_output = self.pooler(lang_output)\n\n if not return_dict:\n return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions\n\n return TFLxmertModelOutput(\n pooled_output=pooled_output,\n language_output=lang_output,\n vision_output=visual_output,\n language_hidden_states=language_hidden_states if output_hidden_states else None,\n vision_hidden_states=vision_hidden_states if output_hidden_states else None,\n language_attentions=language_attentions if output_attentions else None,\n vision_attentions=vision_attentions if output_attentions else None,\n cross_encoder_attentions=cross_encoder_attentions if output_attentions else None,\n )\n\n\nclass TFLxmertPreTrainedModel(TFPreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = LxmertConfig\n base_model_prefix = \"lxmert\"\n\n @property\n def dummy_inputs(self) -> Dict[str, tf.Tensor]:\n return getattr(self, self.base_model_prefix).dummy_inputs\n\n @tf.function(\n input_signature=[\n {\n \"input_ids\": tf.TensorSpec((None, None), tf.int32, name=\"input_ids\"),\n \"attention_mask\": tf.TensorSpec((None, None), tf.int32, name=\"attention_mask\"),\n \"visual_feats\": tf.TensorSpec((None, None, None), tf.float32, name=\"visual_feats\"),\n \"visual_pos\": tf.TensorSpec((None, None, None), tf.float32, name=\"visual_pos\"),\n \"visual_attention_mask\": tf.TensorSpec((None, None), tf.int32, 
name=\"visual_attention_mask\"),\n \"token_type_ids\": tf.TensorSpec((None, None), tf.int32, name=\"token_type_ids\"),\n }\n ]\n )\n def serving(self, inputs):\n output = self.call(inputs)\n\n return self.serving_output(output)\n\n\nLXMERT_START_DOCSTRING = r\"\"\"\n\n The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from\n Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. It's a vision and language transformer\n model, pre-trained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual\n genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss\n for question answering attribute prediction, and object tag prediction.\n\n This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n <Tip>\n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the\n tensors in the first argument of the model call function: `model(inputs)`.\n\n If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the\n first positional argument :\n\n - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)`\n - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:\n `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`\n - a dictionary with one or several input Tensors associated to the input names given in the docstring:\n `model({\"input_ids\": input_ids, \"token_type_ids\": token_type_ids})`\n\n </Tip>\n\n Parameters:\n config ([`LxmertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nLXMERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`LxmertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and\n [`PreTrainedTokenizer.encode`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n visual_feats: (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):\n This input represents visual features. They ROI pooled object features from bounding boxes using a\n faster-RCNN model)\n\n These are currently not provided by the transformers library.\n visual_pos: (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):\n This input represents spacial features corresponding to their relative (via index) visual features. The\n pre-trained LXMERT model expects these spacial features to be normalized bounding boxes on a scale of 0 to\n 1.\n\n These are currently not provided by the transformers library.\n attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n visual_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attention tensors of all attention layers. See `attentions` under returned\n tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the\n config will be used instead.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
This argument can be used in\n eager mode, in graph mode the value will always be set to True.\n training (`bool`, *optional*, defaults to `False`):\n Whether or not to use the model in training mode (some modules like dropout modules have different\n behaviors between training and evaluation).\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.\",\n LXMERT_START_DOCSTRING,\n)\nclass TFLxmertModel(TFLxmertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n self.lxmert = TFLxmertMainLayer(config, name=\"lxmert\")\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=TFLxmertModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def call(\n self,\n input_ids=None,\n visual_feats=None,\n visual_pos=None,\n attention_mask=None,\n visual_attention_mask=None,\n token_type_ids=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n ):\n outputs = self.lxmert(\n input_ids,\n visual_feats,\n visual_pos,\n attention_mask,\n visual_attention_mask,\n token_type_ids,\n inputs_embeds,\n output_attentions,\n output_hidden_states,\n return_dict,\n training,\n )\n\n return outputs\n\n def serving_output(self, output):\n l_hs = tf.convert_to_tensor(output.language_hidden_states) if self.config.output_hidden_states else None\n v_hs = tf.convert_to_tensor(output.vision_hidden_states) if self.config.output_hidden_states else None\n l_attns = tf.convert_to_tensor(output.language_attentions) if self.config.output_attentions else None\n v_attns = tf.convert_to_tensor(output.vision_attentions) if self.config.output_attentions else None\n c_enc_attns = tf.convert_to_tensor(output.cross_encoder_attentions) if self.config.output_attentions else None\n\n return TFLxmertModelOutput(\n pooled_output=output.pooled_output,\n language_output=output.language_output,\n vision_output=output.vision_output,\n language_hidden_states=l_hs,\n vision_hidden_states=v_hs,\n language_attentions=l_attns,\n vision_attentions=v_attns,\n cross_encoder_attentions=c_enc_attns,\n )\n\n\nclass TFLxmertPooler(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.dense = tf.keras.layers.Dense(\n config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n activation=\"tanh\",\n name=\"dense\",\n )\n\n def call(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n return pooled_output\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert\nclass TFLxmertPredictionHeadTransform(tf.keras.layers.Layer):\n def __init__(self, config: LxmertConfig, **kwargs):\n super().__init__(**kwargs)\n\n self.dense = tf.keras.layers.Dense(\n units=config.hidden_size,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"dense\",\n )\n\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = get_tf_activation(config.hidden_act)\n else:\n self.transform_act_fn = config.hidden_act\n\n self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"LayerNorm\")\n\n def call(self, 
hidden_states: tf.Tensor) -> tf.Tensor:\n hidden_states = self.dense(inputs=hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(inputs=hidden_states)\n\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert\nclass TFLxmertLMPredictionHead(tf.keras.layers.Layer):\n def __init__(self, config: LxmertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):\n super().__init__(**kwargs)\n\n self.vocab_size = config.vocab_size\n self.hidden_size = config.hidden_size\n\n self.transform = TFLxmertPredictionHeadTransform(config, name=\"transform\")\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.input_embeddings = input_embeddings\n\n def build(self, input_shape: tf.TensorShape):\n self.bias = self.add_weight(shape=(self.vocab_size,), initializer=\"zeros\", trainable=True, name=\"bias\")\n\n super().build(input_shape)\n\n def get_output_embeddings(self) -> tf.keras.layers.Layer:\n return self.input_embeddings\n\n def set_output_embeddings(self, value: tf.Variable):\n self.input_embeddings.weight = value\n self.input_embeddings.vocab_size = shape_list(value)[0]\n\n def get_bias(self) -> Dict[str, tf.Variable]:\n return {\"bias\": self.bias}\n\n def set_bias(self, value: tf.Variable):\n self.bias = value[\"bias\"]\n self.vocab_size = shape_list(value[\"bias\"])[0]\n\n def call(self, hidden_states: tf.Tensor) -> tf.Tensor:\n hidden_states = self.transform(hidden_states=hidden_states)\n seq_length = shape_list(hidden_states)[1]\n hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])\n hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)\n hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])\n hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)\n\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert\nclass TFLxmertMLMHead(tf.keras.layers.Layer):\n def __init__(self, config: LxmertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):\n super().__init__(**kwargs)\n\n self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name=\"predictions\")\n\n def call(self, sequence_output: tf.Tensor) -> tf.Tensor:\n prediction_scores = self.predictions(hidden_states=sequence_output)\n\n return prediction_scores\n\n\nclass TFLxmertPreTrainingHeads(tf.keras.layers.Layer):\n def __init__(self, config, input_embeddings, **kwargs):\n super().__init__(**kwargs)\n self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name=\"predictions\")\n\n self.seq_relationship = tf.keras.layers.Dense(\n 2,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"seq_relationship\",\n )\n\n def call(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass TFLxmertVisualAnswerHead(tf.keras.layers.Layer):\n def __init__(self, config, num_labels, **kwargs):\n super().__init__(**kwargs)\n hid_dim = config.hidden_size\n self.dense = tf.keras.layers.Dense(\n hid_dim * 2,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"logit_fc_._0\",\n )\n self.activation = get_tf_activation(\"gelu\")\n self.layer_norm = 
tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=\"logit_fc_._2\")\n self.dense_1 = tf.keras.layers.Dense(\n num_labels,\n kernel_initializer=get_initializer(config.initializer_range),\n name=\"logit_fc_._3\",\n )\n\n def call(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.activation(hidden_states)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.dense_1(hidden_states)\n\n return hidden_states\n\n\nclass TFLxmertVisualObjHead(tf.keras.layers.Layer):\n def __init__(self, config, **kwargs):\n super().__init__(**kwargs)\n self.transform = TFLxmertPredictionHeadTransform(config, name=\"transform\")\n\n # Decide the use of visual losses\n visual_losses = {}\n if config.visual_obj_loss:\n visual_losses[\"obj\"] = {\"shape\": (-1,), \"num\": config.num_object_labels}\n if config.visual_attr_loss:\n visual_losses[\"attr\"] = {\"shape\": (-1,), \"num\": config.num_attr_labels}\n if config.visual_feat_loss:\n visual_losses[\"feat\"] = {\"shape\": (-1, 2048), \"num\": config.visual_feat_dim}\n self.visual_losses = visual_losses\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder_dict = {\n key: tf.keras.layers.Dense(\n self.visual_losses[key][\"num\"],\n kernel_initializer=get_initializer(config.initializer_range),\n name=f\"decoder_dict.{key}\",\n )\n for key in self.visual_losses\n }\n\n def call(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n output = {}\n for key in self.visual_losses:\n output[key] = self.decoder_dict[key](hidden_states)\n return output\n\n\n@add_start_docstrings(\"\"\"Lxmert Model with a `language modeling` head on top.\"\"\", LXMERT_START_DOCSTRING)\nclass TFLxmertForPreTraining(TFLxmertPreTrainedModel):\n def __init__(self, config, *inputs, **kwargs):\n super().__init__(config, *inputs, **kwargs)\n\n self.config = config\n self.num_qa_labels = config.num_qa_labels\n self.visual_loss_normalizer = config.visual_loss_normalizer\n\n # Use of pretraining tasks\n self.task_mask_lm = config.task_mask_lm\n self.task_obj_predict = config.task_obj_predict\n self.task_matched = config.task_matched\n self.task_qa = config.task_qa\n\n # Lxmert backbone\n self.lxmert = TFLxmertMainLayer(config, name=\"lxmert\")\n\n # Pre-training heads\n self.cls = TFLxmertPreTrainingHeads(config, self.lxmert.embeddings, name=\"cls\")\n if self.task_obj_predict:\n self.obj_predict_head = TFLxmertVisualObjHead(config, name=\"obj_predict_head\")\n if self.task_qa:\n self.answer_head = TFLxmertVisualAnswerHead(config, self.num_qa_labels, name=\"answer_head\")\n\n # Loss functions\n self.loss_fcts = {\n \"l2\": tf.keras.losses.Huber(delta=1.0, name=\"huber_loss\"),\n \"visn_ce\": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n \"ce\": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n }\n\n visual_losses = {}\n if config.visual_obj_loss:\n visual_losses[\"obj\"] = {\n \"shape\": (-1,),\n \"num\": config.num_object_labels,\n \"loss\": \"visn_ce\",\n }\n if config.visual_attr_loss:\n visual_losses[\"attr\"] = {\n \"shape\": (-1,),\n \"num\": config.num_attr_labels,\n \"loss\": \"visn_ce\",\n }\n if config.visual_feat_loss:\n visual_losses[\"feat\"] = {\n \"shape\": (-1, config.visual_feat_dim),\n \"num\": config.visual_feat_dim,\n \"loss\": \"l2\",\n }\n self.visual_losses = visual_losses\n\n @property\n def dummy_inputs(self):\n \"\"\"\n Dummy inputs to build the network.\n\n Returns:\n 
tf.Tensor with dummy inputs\n \"\"\"\n batch_size = 2\n num_visual_features = 10\n input_ids = tf.constant([[3, 5, 6], [2, 3, 4]])\n visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim))\n visual_pos = tf.random.uniform((batch_size, num_visual_features, 4))\n\n if self.config.task_obj_predict:\n obj_labels = {}\n if self.config.visual_attr_loss and self.config.task_obj_predict:\n obj_labels[\"attr\"] = (\n tf.ones([batch_size, num_visual_features]),\n tf.ones([batch_size, num_visual_features]),\n )\n if self.config.visual_feat_loss and self.config.task_obj_predict:\n obj_labels[\"feat\"] = (\n tf.ones([batch_size, num_visual_features, self.config.visual_feat_dim]),\n tf.ones([batch_size, num_visual_features]),\n )\n if self.config.visual_obj_loss and self.config.task_obj_predict:\n obj_labels[\"obj\"] = (\n tf.ones([batch_size, num_visual_features]),\n tf.ones([batch_size, num_visual_features]),\n )\n\n return {\n **{\n \"input_ids\": input_ids,\n \"visual_feats\": visual_feats,\n \"visual_pos\": visual_pos,\n },\n **({\"obj_labels\": obj_labels} if self.config.task_obj_predict else {}),\n }\n\n def get_lm_head(self):\n return self.cls.predictions\n\n def get_prefix_bias_name(self):\n warnings.warn(\"The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.\", FutureWarning)\n return self.name + \"/\" + self.cls.name + \"/\" + self.cls.predictions.name\n\n @unpack_inputs\n @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=TFLxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)\n def call(\n self,\n input_ids=None,\n visual_feats=None,\n visual_pos=None,\n attention_mask=None,\n visual_attention_mask=None,\n token_type_ids=None,\n inputs_embeds=None,\n masked_lm_labels=None,\n obj_labels=None,\n matched_label=None,\n ans=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n training=False,\n ):\n r\"\"\"\n masked_lm_labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,\n config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the\n loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n obj_labels: (`Dict[Str: Tuple[tf.Tensor, tf.Tensor]]`, *optional*, defaults to `None`):\n each key is named after each one of the visual losses and each element of the tuple is of the shape\n `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for each the label id and\n the label score respectively\n matched_label (`tf.Tensor` of shape `(batch_size,)`, *optional*):\n Labels for computing whether or not the text input matches the image (classification) loss. 
Input\n should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`:\n\n - 0 indicates that the sentence does not match the image,\n - 1 indicates that the sentence does match the image.\n ans (`tf.Tensor` of shape `(batch_size,)`, *optional*, defaults to `None`):\n a one-hot representation of the correct answer\n\n Returns:\n \"\"\"\n\n lxmert_output = self.lxmert(\n input_ids,\n visual_feats,\n visual_pos,\n attention_mask,\n visual_attention_mask,\n token_type_ids,\n inputs_embeds,\n output_attentions,\n output_hidden_states,\n return_dict,\n training,\n )\n\n lang_output, visual_output, pooled_output = (\n lxmert_output[0],\n lxmert_output[1],\n lxmert_output[2],\n )\n lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)\n if self.task_qa:\n answer_score = self.answer_head(pooled_output)\n else:\n answer_score = pooled_output[0][0]\n\n total_loss = (\n None\n if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None)\n else tf.constant(0.0)\n )\n losses = ()\n if masked_lm_labels is not None and self.task_mask_lm:\n masked_lm_loss = self.loss_fcts[\"ce\"](\n tf.reshape(masked_lm_labels, [-1]),\n tf.reshape(lang_prediction_scores, [-1, self.config.vocab_size]),\n )\n total_loss += masked_lm_loss\n losses += (masked_lm_loss,)\n if matched_label is not None and self.task_matched:\n matched_loss = self.loss_fcts[\"ce\"](\n tf.reshape(matched_label, [-1]),\n tf.reshape(cross_relationship_score, [-1, 2]),\n )\n total_loss += matched_loss\n losses += (matched_loss,)\n if obj_labels is not None and self.task_obj_predict:\n total_visn_loss = 0.0\n visn_prediction_scores_dict = self.obj_predict_head(visual_output)\n for key, key_info in self.visual_losses.items():\n label, mask_conf = obj_labels[key]\n output_dim = key_info[\"num\"]\n loss_fct_name = key_info[\"loss\"]\n label_shape = key_info[\"shape\"]\n weight = self.visual_loss_normalizer\n visn_loss_fct = self.loss_fcts[loss_fct_name]\n visn_prediction_scores = visn_prediction_scores_dict[key]\n visn_loss = visn_loss_fct(\n tf.reshape(label, label_shape),\n tf.reshape(visn_prediction_scores, [-1, output_dim]),\n )\n\n if visn_loss.ndim > 1: # Regression Losses\n visn_loss = tf.reduce_mean(visn_loss)\n visn_loss = tf.reduce_mean(visn_loss * tf.cast(tf.reshape(mask_conf, [-1]), visn_loss.dtype)) * weight\n total_visn_loss += visn_loss\n losses += (visn_loss,)\n total_loss += total_visn_loss\n if ans is not None and self.task_qa:\n answer_loss = self.loss_fcts[\"ce\"](\n tf.reshape(ans, [-1]), tf.reshape(answer_score, [-1, self.num_qa_labels])\n )\n # exclude \"*2\" here to match the effect of QA losses.\n # Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs. 
(Used 10 instead of 6 in EMNLP paper)\n # Now : (loss *1) for 12 epochs\n #\n # * 2 # Multiply by 2 because > half of the data will not have label\n total_loss += answer_loss\n losses += (answer_loss,)\n # return total_loss, tf.stack(losses)[tf.new_axis, ...], answer_score.detach()\n\n if not return_dict:\n output = (\n lang_prediction_scores,\n cross_relationship_score,\n answer_score,\n ) + lxmert_output[3:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return TFLxmertForPreTrainingOutput(\n loss=total_loss,\n prediction_logits=lang_prediction_scores,\n cross_relationship_score=cross_relationship_score,\n question_answering_score=answer_score,\n language_hidden_states=lxmert_output.language_hidden_states,\n vision_hidden_states=lxmert_output.vision_hidden_states,\n language_attentions=lxmert_output.language_attentions,\n vision_attentions=lxmert_output.vision_attentions,\n cross_encoder_attentions=lxmert_output.cross_encoder_attentions,\n )\n\n def serving_output(self, output):\n l_hs = tf.convert_to_tensor(output.language_hidden_states) if self.config.output_hidden_states else None\n v_hs = tf.convert_to_tensor(output.vision_hidden_states) if self.config.output_hidden_states else None\n l_attns = tf.convert_to_tensor(output.language_attentions) if self.config.output_attentions else None\n v_attns = tf.convert_to_tensor(output.vision_attentions) if self.config.output_attentions else None\n c_enc_attns = tf.convert_to_tensor(output.cross_encoder_attentions) if self.config.output_attentions else None\n\n return TFLxmertForPreTrainingOutput(\n prediction_logits=output.prediction_logits,\n cross_relationship_score=output.cross_relationship_score,\n question_answering_score=output.question_answering_score,\n language_hidden_states=l_hs,\n vision_hidden_states=v_hs,\n language_attentions=l_attns,\n vision_attentions=v_attns,\n cross_encoder_attentions=c_enc_attns,\n )\n",
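A minimal usage sketch for the TF LXMERT model defined above (a sketch under assumptions: it uses the public `unc-nlp/lxmert-base-uncased` checkpoint and substitutes random stand-in tensors for the Faster R-CNN region features, which, as the docstring notes, the transformers library does not provide):

import tensorflow as tf
from transformers import LxmertTokenizer, TFLxmertModel

tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
model = TFLxmertModel.from_pretrained("unc-nlp/lxmert-base-uncased")

inputs = tokenizer("Is the cat on the mat?", return_tensors="tf")
# Stand-in visual inputs: 36 regions with 2048-dim features and
# normalized (x1, y1, x2, y2) box coordinates in [0, 1].
visual_feats = tf.random.uniform((1, 36, 2048))
visual_pos = tf.random.uniform((1, 36, 4))

outputs = model(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    visual_feats=visual_feats,
    visual_pos=visual_pos,
)
# language_output: (1, seq_len, 768); vision_output: (1, 36, 768)
print(outputs.language_output.shape, outputs.vision_output.shape)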
"# coding=utf-8\n# Copyright 2021 The HuggingFace Team The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch RemBERT model.\"\"\"\n\n\nimport math\nimport os\nfrom typing import Optional, Tuple, Union\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nfrom ...activations import ACT2FN\nfrom ...modeling_outputs import (\n BaseModelOutputWithPastAndCrossAttentions,\n BaseModelOutputWithPoolingAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n MaskedLMOutput,\n MultipleChoiceModelOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n TokenClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer\nfrom ...utils import (\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n logging,\n replace_return_docstrings,\n)\nfrom .configuration_rembert import RemBertConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"RemBertConfig\"\n_TOKENIZER_FOR_DOC = \"RemBertTokenizer\"\n_CHECKPOINT_FOR_DOC = \"google/rembert\"\n\nREMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"google/rembert\",\n # See all RemBERT models at https://huggingface.co/models?filter=rembert\n]\n\n\ndef load_tf_weights_in_rembert(model, config, tf_checkpoint_path):\n \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n try:\n import re\n\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n tf_path = os.path.abspath(tf_checkpoint_path)\n logger.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n # Load weights from TF model\n init_vars = tf.train.list_variables(tf_path)\n names = []\n arrays = []\n for name, shape in init_vars:\n # Checkpoint is 12Gb, save memory by not loading useless variables\n # Output embedding and cls are reset at classification time\n if any(deny in name for deny in (\"adam_v\", \"adam_m\", \"output_embedding\", \"cls\")):\n # logger.info(\"Skipping loading of %s\", name)\n continue\n logger.info(f\"Loading TF weight {name} with shape {shape}\")\n array = tf.train.load_variable(tf_path, name)\n names.append(name)\n arrays.append(array)\n\n for name, array in zip(names, arrays):\n # Replace prefix with right one\n name = name.replace(\"bert/\", \"rembert/\")\n # The pooler is a linear layer\n # name = name.replace(\"pooler/dense\", \"pooler\")\n\n name = name.split(\"/\")\n # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n # which are not required for using pretrained model\n if any(\n n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n for n in name\n ):\n logger.info(f\"Skipping {'/'.join(name)}\")\n continue\n pointer = model\n for m_name in name:\n if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n scope_names = re.split(r\"_(\\d+)\", m_name)\n else:\n scope_names = [m_name]\n if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n pointer = getattr(pointer, \"bias\")\n elif scope_names[0] == \"output_weights\":\n pointer = getattr(pointer, \"weight\")\n elif scope_names[0] == \"squad\":\n pointer = getattr(pointer, \"classifier\")\n else:\n try:\n pointer = getattr(pointer, scope_names[0])\n except AttributeError:\n logger.info(\"Skipping {}\".format(\"/\".join(name)))\n continue\n if len(scope_names) >= 2:\n num = int(scope_names[1])\n pointer = pointer[num]\n if m_name[-11:] == \"_embeddings\":\n pointer = getattr(pointer, \"weight\")\n elif m_name == \"kernel\":\n array = np.transpose(array)\n try:\n if pointer.shape != array.shape:\n raise ValueError(f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\")\n except AssertionError as e:\n e.args += (pointer.shape, array.shape)\n raise\n logger.info(f\"Initialize PyTorch weight {name}\")\n pointer.data = torch.from_numpy(array)\n return model\n\n\nclass RemBertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(\n config.vocab_size, config.input_embedding_size, padding_idx=config.pad_token_id\n )\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.input_embedding_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.input_embedding_size)\n\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = nn.LayerNorm(config.input_embedding_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n # position_ids (1, len position emb) is contiguous in memory and exported when serialized\n self.register_buffer(\"position_ids\", 
torch.arange(config.max_position_embeddings).expand((1, -1)))\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n past_key_values_length: int = 0,\n ) -> torch.Tensor:\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n\n if position_ids is None:\n position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]\n\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + token_type_embeddings\n position_embeddings = self.position_embeddings(position_ids)\n embeddings += position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->RemBert\nclass RemBertPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass RemBertSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, \"embedding_size\"):\n raise ValueError(\n f\"The hidden size ({config.hidden_size}) is not a multiple of the number of attention \"\n f\"heads ({config.num_attention_heads})\"\n )\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n self.is_decoder = config.is_decoder\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n past_key_value: Tuple[Tuple[torch.FloatTensor]] = None,\n output_attentions: bool = False,\n ) -> Tuple:\n mixed_query_layer = self.query(hidden_states)\n\n # If this is instantiated as a cross-attention module, the keys\n # and values come from an encoder; the attention mask needs to be\n # such that the encoder's padding tokens are not attended to.\n is_cross_attention = encoder_hidden_states is not None\n\n if is_cross_attention and past_key_value is not 
None:\n # reuse k,v, cross_attentions\n key_layer = past_key_value[0]\n value_layer = past_key_value[1]\n attention_mask = encoder_attention_mask\n elif is_cross_attention:\n key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))\n value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))\n attention_mask = encoder_attention_mask\n elif past_key_value is not None:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n key_layer = torch.cat([past_key_value[0], key_layer], dim=2)\n value_layer = torch.cat([past_key_value[1], value_layer], dim=2)\n else:\n key_layer = self.transpose_for_scores(self.key(hidden_states))\n value_layer = self.transpose_for_scores(self.value(hidden_states))\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_layer, value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n if attention_mask is not None:\n # Apply the attention mask (precomputed for all layers in RemBertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.functional.softmax(attention_scores, dim=-1)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n context_layer = torch.matmul(attention_probs, value_layer)\n\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)\n\n if self.is_decoder:\n outputs = outputs + (past_key_value,)\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->RemBert\nclass RemBertSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass RemBertAttention(nn.Module):\n def __init__(self, config):\n 
super().__init__()\n self.self = RemBertSelfAttention(config)\n self.output = RemBertSelfOutput(config)\n self.pruned_heads = set()\n\n # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads\n )\n\n # Prune linear layers\n self.self.query = prune_linear_layer(self.self.query, index)\n self.self.key = prune_linear_layer(self.self.key, index)\n self.self.value = prune_linear_layer(self.self.value, index)\n self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)\n\n # Update hyper params and store pruned heads\n self.self.num_attention_heads = self.self.num_attention_heads - len(heads)\n self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n # Copied from transformers.models.bert.modeling_bert.BertAttention.forward\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n output_attentions: Optional[bool] = False,\n ) -> Tuple[torch.Tensor]:\n self_outputs = self.self(\n hidden_states,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n attention_output = self.output(self_outputs[0], hidden_states)\n outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them\n return outputs\n\n\n# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->RemBert\nclass RemBertIntermediate(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->RemBert\nclass RemBertOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass RemBertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n self.attention = RemBertAttention(config)\n self.is_decoder = config.is_decoder\n self.add_cross_attention = config.add_cross_attention\n if self.add_cross_attention:\n if not self.is_decoder:\n raise ValueError(f\"{self} should be used as a decoder model if cross attention is added\")\n self.crossattention = 
RemBertAttention(config)\n self.intermediate = RemBertIntermediate(config)\n self.output = RemBertOutput(config)\n\n # Copied from transformers.models.bert.modeling_bert.BertLayer.forward\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n output_attentions: Optional[bool] = False,\n ) -> Tuple[torch.Tensor]:\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n self_attention_outputs = self.attention(\n hidden_states,\n attention_mask,\n head_mask,\n output_attentions=output_attentions,\n past_key_value=self_attn_past_key_value,\n )\n attention_output = self_attention_outputs[0]\n\n # if decoder, the last output is tuple of self-attn cache\n if self.is_decoder:\n outputs = self_attention_outputs[1:-1]\n present_key_value = self_attention_outputs[-1]\n else:\n outputs = self_attention_outputs[1:] # add self attentions if we output attention weights\n\n cross_attn_present_key_value = None\n if self.is_decoder and encoder_hidden_states is not None:\n if not hasattr(self, \"crossattention\"):\n raise ValueError(\n f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers\"\n \" by setting `config.add_cross_attention=True`\"\n )\n\n # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n cross_attention_outputs = self.crossattention(\n attention_output,\n attention_mask,\n head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n cross_attn_past_key_value,\n output_attentions,\n )\n attention_output = cross_attention_outputs[0]\n outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights\n\n # add cross-attn cache to positions 3,4 of present_key_value tuple\n cross_attn_present_key_value = cross_attention_outputs[-1]\n present_key_value = present_key_value + cross_attn_present_key_value\n\n layer_output = apply_chunking_to_forward(\n self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output\n )\n outputs = (layer_output,) + outputs\n\n # if decoder, return the attn key/values as the last output\n if self.is_decoder:\n outputs = outputs + (present_key_value,)\n\n return outputs\n\n # Copied from transformers.models.bert.modeling_bert.BertLayer.feed_forward_chunk\n def feed_forward_chunk(self, attention_output):\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output\n\n\nclass RemBertEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.embedding_hidden_mapping_in = nn.Linear(config.input_embedding_size, config.hidden_size)\n self.layer = nn.ModuleList([RemBertLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: 
Optional[torch.FloatTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n use_cache: Optional[bool] = None,\n output_attentions: bool = False,\n output_hidden_states: bool = False,\n return_dict: bool = True,\n ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:\n\n hidden_states = self.embedding_hidden_mapping_in(hidden_states)\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n\n next_decoder_cache = () if use_cache else None\n for i, layer_module in enumerate(self.layer):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_head_mask = head_mask[i] if head_mask is not None else None\n past_key_value = past_key_values[i] if past_key_values is not None else None\n\n if self.gradient_checkpointing and self.training:\n\n if use_cache:\n logger.warning(\n \"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, past_key_value, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer_module),\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n layer_outputs = layer_module(\n hidden_states,\n attention_mask,\n layer_head_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n past_key_value,\n output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache += (layer_outputs[-1],)\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n hidden_states,\n next_decoder_cache,\n all_hidden_states,\n all_self_attentions,\n all_cross_attentions,\n ]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_decoder_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->RemBert\nclass RemBertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = config.hidden_act\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass RemBertLMPredictionHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.output_embedding_size)\n self.decoder = nn.Linear(config.output_embedding_size, config.vocab_size)\n self.activation = ACT2FN[config.hidden_act]\n self.LayerNorm = 
nn.LayerNorm(config.output_embedding_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n hidden_states = self.dense(hidden_states)\n hidden_states = self.activation(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->RemBert\nclass RemBertOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = RemBertLMPredictionHead(config)\n\n def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass RemBertPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = RemBertConfig\n load_tf_weights = load_tf_weights_in_rembert\n base_model_prefix = \"rembert\"\n supports_gradient_checkpointing = True\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, RemBertEncoder):\n module.gradient_checkpointing = value\n\n\nREMBERT_START_DOCSTRING = r\"\"\"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RemBertConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nREMBERT_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`RemBertTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare RemBERT Model transformer outputting raw hidden-states without any specific head on top.\",\n REMBERT_START_DOCSTRING,\n)\nclass RemBertModel(RemBertPreTrainedModel):\n \"\"\"\n\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in [Attention is\n all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set\n to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and\n `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.\n \"\"\"\n\n def __init__(self, config, add_pooling_layer=True):\n super().__init__(config)\n self.config = config\n\n self.embeddings = RemBertEmbeddings(config)\n self.encoder = RemBertEncoder(config)\n\n self.pooler = RemBertPooler(config) if add_pooling_layer else None\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"rembert\",\n output_type=BaseModelOutputWithPastAndCrossAttentions,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.LongTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:\n r\"\"\"\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. 
Can be used to speed up decoding.\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if self.config.is_decoder:\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n else:\n use_cache = False\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n batch_size, seq_length = input_shape\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicates we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n embedding_output = self.embeddings(\n input_ids=input_ids,\n position_ids=position_ids,\n token_type_ids=token_type_ids,\n inputs_embeds=inputs_embeds,\n past_key_values_length=past_key_values_length,\n )\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n 
past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPoolingAndCrossAttentions(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n past_key_values=encoder_outputs.past_key_values,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n cross_attentions=encoder_outputs.cross_attentions,\n )\n\n\n@add_start_docstrings(\"\"\"RemBERT Model with a `language modeling` head on top.\"\"\", REMBERT_START_DOCSTRING)\nclass RemBertForMaskedLM(RemBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n if config.is_decoder:\n logger.warning(\n \"If you want to use `RemBertForMaskedLM` make sure `config.is_decoder=False` for \"\n \"bi-directional self-attention.\"\n )\n\n self.rembert = RemBertModel(config, add_pooling_layer=False)\n self.cls = RemBertOnlyMLMHead(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"rembert\",\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.LongTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, MaskedLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ...,\n config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the\n loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.rembert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=prediction_scores,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n effective_batch_size = input_shape[0]\n\n # add a dummy token\n assert self.config.pad_token_id is not None, \"The PAD token should be defined for generation\"\n attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)\n dummy_token = torch.full(\n (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device\n )\n input_ids = torch.cat([input_ids, dummy_token], dim=1)\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask}\n\n\n@add_start_docstrings(\n \"\"\"RemBERT Model with a `language modeling` head on top for CLM fine-tuning.\"\"\", REMBERT_START_DOCSTRING\n)\nclass RemBertForCausalLM(RemBertPreTrainedModel):\n\n _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n if not config.is_decoder:\n logger.warning(\"If you want to use `RemBertForCausalLM` as a standalone, add `is_decoder=True.`\")\n\n self.rembert = RemBertModel(config, add_pooling_layer=False)\n self.cls = RemBertOnlyMLMHead(config)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.cls.predictions.decoder\n\n def set_output_embeddings(self, new_embeddings):\n self.cls.predictions.decoder = new_embeddings\n\n @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.LongTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n labels: 
Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:\n r\"\"\"\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in\n `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are\n ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import RemBertTokenizer, RemBertForCausalLM, RemBertConfig\n >>> import torch\n\n >>> tokenizer = RemBertTokenizer.from_pretrained(\"google/rembert\")\n >>> config = RemBertConfig.from_pretrained(\"google/rembert\")\n >>> config.is_decoder = True\n >>> model = RemBertForCausalLM.from_pretrained(\"google/rembert\", config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.rembert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n prediction_scores = self.cls(sequence_output)\n\n lm_loss = None\n if labels is not None:\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = 
loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=lm_loss,\n logits=prediction_scores,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):\n input_shape = input_ids.shape\n\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # cut decoder_input_ids if past is used\n if past is not None:\n input_ids = input_ids[:, -1:]\n\n return {\"input_ids\": input_ids, \"attention_mask\": attention_mask, \"past_key_values\": past}\n\n def _reorder_cache(self, past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n )\n return reordered_past\n\n\n@add_start_docstrings(\n \"\"\"\n RemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n \"\"\",\n REMBERT_START_DOCSTRING,\n)\nclass RemBertForSequenceClassification(RemBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n self.rembert = RemBertModel(config)\n self.dropout = nn.Dropout(config.classifier_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"rembert\",\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: torch.FloatTensor = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, SequenceClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.rembert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n RemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n \"\"\",\n REMBERT_START_DOCSTRING,\n)\nclass RemBertForMultipleChoice(RemBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.rembert = RemBertModel(config)\n self.dropout = nn.Dropout(config.classifier_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, 1)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format(\"batch_size, num_choices, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"rembert\",\n output_type=MultipleChoiceModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: torch.FloatTensor = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, MultipleChoiceModelOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,\n num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See\n `input_ids` above)\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]\n\n input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None\n attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None\n token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None\n position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None\n inputs_embeds = (\n inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))\n if inputs_embeds is not None\n else None\n )\n\n outputs = self.rembert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n reshaped_logits = logits.view(-1, num_choices)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(reshaped_logits, labels)\n\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return MultipleChoiceModelOutput(\n loss=loss,\n logits=reshaped_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n RemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for\n Named-Entity-Recognition (NER) tasks.\n \"\"\",\n REMBERT_START_DOCSTRING,\n)\nclass RemBertForTokenClassification(RemBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.rembert = RemBertModel(config, add_pooling_layer=False)\n self.dropout = nn.Dropout(config.classifier_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"rembert\",\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: torch.FloatTensor = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, TokenClassifierOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the token classification loss. 
Indices should be in `[0, ..., config.num_labels - 1]`.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.rembert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n RemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n REMBERT_START_DOCSTRING,\n)\nclass RemBertForQuestionAnswering(RemBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.num_labels = config.num_labels\n\n self.rembert = RemBertModel(config, add_pooling_layer=False)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n @add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"rembert\",\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: torch.FloatTensor = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n start_positions: Optional[torch.LongTensor] = None,\n end_positions: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, QuestionAnsweringModelOutput]:\n r\"\"\"\n start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). 
Position outside of the sequence\n are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.rembert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (start_logits, end_logits) + outputs[2:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n"
] | [
[
"torch.nn.functional.softmax",
"torch.nn.init.uniform_",
"torch.nn.functional.dropout",
"torch.zeros",
"torch.cat",
"torch.FloatTensor",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"numpy.arange",
"torch.backends.cudnn.flags",
"torch.tensor",
"torch.bmm",
"torch.arange",
"torch.nn.GroupNorm",
"numpy.zeros",
"torch.ones_like",
"torch.nn.init.constant_",
"numpy.put_along_axis",
"torch.nn.ModuleList",
"torch.nn.Linear",
"torch.bernoulli",
"numpy.random.rand",
"torch.log",
"torch.nn.Conv1d",
"torch.stack",
"numpy.array",
"torch.nn.functional.ctc_loss",
"torch.nn.functional.log_softmax",
"torch.nn.utils.weight_norm",
"torch.nn.LayerNorm",
"numpy.ones",
"numpy.broadcast_to",
"numpy.random.uniform",
"torch.nn.init.kaiming_normal_"
],
[
"torch.nn.Dropout",
"torch.nn.functional.softmax",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.cat",
"torch.zeros",
"torch.nn.functional.cross_entropy",
"torch.sum",
"torch.zeros_like",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.matmul",
"torch.arange",
"torch.gather",
"torch.cumsum"
],
[
"tensorflow.keras.models.load_model",
"tensorflow.TensorShape",
"tensorflow.constant",
"tensorflow.keras.Input",
"tensorflow.ones_like",
"tensorflow.debugging.assert_near"
],
[
"numpy.array"
],
[
"torch.ones",
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"torch.utils.data.distributed.DistributedSampler",
"torch.zeros",
"numpy.squeeze",
"torch.utils.data.SequentialSampler",
"torch.utils.data.DataLoader",
"torch.nn.DataParallel",
"numpy.argmax",
"torch.log",
"torch.pow",
"torch.cuda.is_available",
"torch.device",
"torch.cuda.device_count",
"torch.ones_like",
"torch.nn.parallel.DistributedDataParallel"
],
[
"torch.all",
"torch.onnx.export",
"torch.ones",
"torch.isnan",
"torch.cat",
"torch.zeros",
"torch.manual_seed",
"torch.allclose"
],
[
"torch.no_grad"
],
[
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.convert_to_tensor",
"tensorflow.cast",
"tensorflow.subtract",
"tensorflow.gather",
"tensorflow.name_scope",
"tensorflow.matmul",
"tensorflow.fill",
"tensorflow.random.uniform",
"tensorflow.identity",
"tensorflow.keras.losses.Huber",
"tensorflow.nn.bias_add",
"tensorflow.math.sqrt",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.keras.layers.Dropout",
"tensorflow.TensorSpec"
],
[
"torch.nn.functional.softmax",
"torch.cat",
"torch.zeros",
"torch.nn.Embedding",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.from_numpy",
"torch.arange",
"tensorflow.train.list_variables",
"torch.full",
"tensorflow.train.load_variable",
"torch.nn.Linear",
"numpy.transpose",
"torch.nn.LayerNorm",
"torch.nn.Tanh",
"torch.matmul",
"torch.nn.MSELoss"
]
] |
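The `RemBertForCausalLM.forward` above trains with the standard next-token shift: the score at position t is compared against the label at position t + 1. A minimal, self-contained PyTorch sketch of just that shift-and-flatten step (the toy batch/sequence/vocab sizes are made up, not RemBERT's real dimensions):

```python
import torch
from torch.nn import CrossEntropyLoss

# Hypothetical toy dimensions, not the real RemBERT config.
batch_size, seq_len, vocab_size = 2, 5, 11
prediction_scores = torch.randn(batch_size, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch_size, seq_len))

# Next-token prediction: drop the last score and the first label so that
# position t's logits are scored against the token at position t + 1.
shifted_scores = prediction_scores[:, :-1, :].contiguous()
shifted_labels = labels[:, 1:].contiguous()

loss_fct = CrossEntropyLoss()  # label entries equal to -100 would be ignored, as in the model
lm_loss = loss_fct(shifted_scores.view(-1, vocab_size), shifted_labels.view(-1))
print(lm_loss.item())
```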
sashuIya/ssd.pytorch | [
"fe7d8722414fef4cce32f67422c896ef0c45d6bc"
] | [
"layers/box_utils.py"
] | [
"import torch\n\n\ndef point_form(boxes):\n \"\"\" Convert prior_boxes to (xmin, ymin, xmax, ymax)\n representation for comparison to point form ground truth data.\n Args:\n boxes: (tensor) center-size default boxes from priorbox layers.\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin\n boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax\n\n\ndef center_size(boxes):\n \"\"\" Convert prior_boxes to (cx, cy, w, h)\n representation for comparison to center-size form ground truth data.\n Args:\n boxes: (tensor) point_form boxes\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2], 1) # w, h\n\n\ndef intersect(box_a, box_b):\n \"\"\" We resize both tensors to [A,B,2] without new malloc:\n [A,2] -> [A,1,2] -> [A,B,2]\n [B,2] -> [1,B,2] -> [A,B,2]\n Then we compute the area of intersect between box_a and box_b.\n Args:\n box_a: (tensor) bounding boxes, Shape: [A,4].\n box_b: (tensor) bounding boxes, Shape: [B,4].\n Return:\n (tensor) intersection area, Shape: [A,B].\n \"\"\"\n A = box_a.size(0)\n B = box_b.size(0)\n max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),\n box_b[:, 2:].unsqueeze(0).expand(A, B, 2))\n min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),\n box_b[:, :2].unsqueeze(0).expand(A, B, 2))\n inter = torch.clamp((max_xy - min_xy), min=0)\n return inter[:, :, 0] * inter[:, :, 1]\n\n\ndef jaccard(box_a, box_b):\n \"\"\"Compute the jaccard overlap of two sets of boxes. The jaccard overlap\n is simply the intersection over union of two boxes. Here we operate on\n ground truth boxes and default boxes.\n E.g.:\n A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n Args:\n box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]\n box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]\n Return:\n jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]\n \"\"\"\n inter = intersect(box_a, box_b)\n area_a = ((box_a[:, 2]-box_a[:, 0]) *\n (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]\n area_b = ((box_b[:, 2]-box_b[:, 0]) *\n (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]\n union = area_a + area_b - inter\n return inter / union # [A,B]\n\n\ndef match(threshold, truths, priors, variances, labels, loc_t, conf_t, idx):\n \"\"\"Match each prior box with the ground truth box of the highest jaccard\n overlap, encode the bounding boxes, then return the matched indices\n corresponding to both confidence and location preds.\n Args:\n threshold: (float) The overlap threshold used when mathing boxes.\n truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].\n priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].\n variances: (tensor) Variances corresponding to each prior coord,\n Shape: [num_priors, 4].\n labels: (tensor) All the class labels for the image, Shape: [num_obj].\n loc_t: (tensor) Tensor to be filled w/ endcoded location targets.\n conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.\n idx: (int) current batch index\n Return:\n The matched indices corresponding to 1)location and 2)confidence preds.\n \"\"\"\n # jaccard index\n overlaps = jaccard(\n truths,\n point_form(priors)\n )\n # (Bipartite Matching)\n # [1,num_objects] best prior for each ground truth\n best_prior_overlap, best_prior_idx = overlaps.max(1)\n # [1,num_priors] best 
ground truth for each prior\n best_truth_overlap, best_truth_idx = overlaps.max(0)\n best_truth_idx.squeeze_(0)\n best_truth_overlap.squeeze_(0)\n best_prior_idx.squeeze_(1)\n best_prior_overlap.squeeze_(1)\n best_truth_overlap.index_fill_(0, best_prior_idx, 2) # ensure best prior\n # TODO refactor: index best_prior_idx with long tensor\n # ensure every gt matches with its prior of max overlap\n for j in range(best_prior_idx.size(0)):\n best_truth_idx[best_prior_idx[j]] = j\n matches = truths[best_truth_idx] # Shape: [num_priors,4]\n conf = labels[best_truth_idx] + 1 # Shape: [num_priors]\n conf[best_truth_overlap < threshold] = 0 # label as background\n loc = encode(matches, priors, variances)\n loc_t[idx] = loc # [num_priors,4] encoded offsets to learn\n conf_t[idx] = conf # [num_priors] top class label for each prior\n\n\ndef encode(matched, priors, variances):\n \"\"\"Encode the variances from the priorbox layers into the ground truth boxes\n we have matched (based on jaccard overlap) with the prior boxes.\n Args:\n matched: (tensor) Coords of ground truth for each prior in point-form\n Shape: [num_priors, 4].\n priors: (tensor) Prior boxes in center-offset form\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n encoded boxes (tensor), Shape: [num_priors, 4]\n \"\"\"\n\n # dist b/t match center and prior's center\n g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]\n # encode variance\n g_cxcy /= (variances[0] * priors[:, 2:])\n # match wh / prior wh\n g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]\n g_wh = torch.log(g_wh) / variances[1]\n # return target for smooth_l1_loss\n return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]\n\n\n# Adapted from https://github.com/Hakuyume/chainer-ssd\ndef decode(loc, priors, variances):\n \"\"\"Decode locations from predictions using priors to undo\n the encoding we did for offset regression at train time.\n Args:\n loc (tensor): location predictions for loc layers,\n Shape: [num_priors,4]\n priors (tensor): Prior boxes in center-offset form.\n Shape: [num_priors,4].\n variances: (list[float]) Variances of priorboxes\n Return:\n decoded bounding box predictions\n \"\"\"\n\n boxes = torch.cat((\n priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],\n priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)\n boxes[:, :2] -= boxes[:, 2:] / 2\n boxes[:, 2:] += boxes[:, :2]\n return boxes\n\n\ndef log_sum_exp(x):\n \"\"\"Utility function for computing log_sum_exp while determining\n This will be used to determine unaveraged confidence loss across\n all examples in a batch.\n Args:\n x (Variable(tensor)): conf_preds from conf layers\n \"\"\"\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x-x_max), 1)) + x_max\n\n\n# Original author: Francisco Massa:\n# https://github.com/fmassa/object-detection.torch\n# Ported to PyTorch by Max deGroot (02/01/2017)\ndef nms(boxes, scores, overlap=0.5, top_k=200):\n \"\"\"Apply non-maximum suppression at test time to avoid detecting too many\n overlapping bounding boxes for a given object.\n Args:\n boxes: (tensor) The location preds for the img, Shape: [num_priors,4].\n scores: (tensor) The class predscores for the img, Shape:[num_priors].\n overlap: (float) The overlap thresh for suppressing unnecessary boxes.\n top_k: (int) The Maximum number of box preds to consider.\n Return:\n The indices of the kept boxes with respect to num_priors.\n \"\"\"\n\n keep = scores.new(scores.size(0)).zero_().long()\n if boxes.numel() == 0:\n return keep\n x1 
= boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n area = torch.mul(x2 - x1, y2 - y1)\n v, idx = scores.sort(0) # sort in ascending order\n # I = I[v >= 0.01]\n idx = idx[-top_k:] # indices of the top-k largest vals\n xx1 = boxes.new()\n yy1 = boxes.new()\n xx2 = boxes.new()\n yy2 = boxes.new()\n w = boxes.new()\n h = boxes.new()\n\n # keep = torch.Tensor()\n count = 0\n while idx.numel() > 0:\n i = idx[-1] # index of current largest val\n # keep.append(i)\n keep[count] = i\n count += 1\n if idx.size(0) == 1:\n break\n idx = idx[:-1] # remove kept element from view\n # load bboxes of next highest vals\n torch.index_select(x1, 0, idx, out=xx1)\n torch.index_select(y1, 0, idx, out=yy1)\n torch.index_select(x2, 0, idx, out=xx2)\n torch.index_select(y2, 0, idx, out=yy2)\n # store element-wise max with next highest score\n xx1 = torch.clamp(xx1, min=x1[i])\n yy1 = torch.clamp(yy1, min=y1[i])\n xx2 = torch.clamp(xx2, max=x2[i])\n yy2 = torch.clamp(yy2, max=y2[i])\n w.resize_as_(xx2)\n h.resize_as_(yy2)\n w = xx2 - xx1\n h = yy2 - yy1\n # check sizes of xx1 and xx2.. after each iteration\n w = torch.clamp(w, min=0.0)\n h = torch.clamp(h, min=0.0)\n inter = w*h\n # IoU = i / (area(a) + area(b) - i)\n rem_areas = torch.index_select(area, 0, idx) # load remaining areas)\n union = (rem_areas - inter) + area[i]\n IoU = inter/union # store result in iou\n # keep only elements with an IoU <= overlap\n idx = idx[IoU.le(overlap)]\n return keep, count\n"
] | [
[
"torch.cat",
"torch.exp",
"torch.mul",
"torch.log",
"torch.clamp",
"torch.index_select"
]
] |
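The `intersect`/`jaccard` pair above computes all pairwise IoUs by expanding an [A,4] and a [B,4] tensor to a shared [A,B,2] shape. A small standalone sketch of the same broadcasting trick, with one overlap value checked by hand (the boxes are made-up fixtures):

```python
import torch

# Two made-up sets of point-form boxes (xmin, ymin, xmax, ymax).
box_a = torch.tensor([[0.0, 0.0, 2.0, 2.0]])   # area 4
box_b = torch.tensor([[1.0, 1.0, 3.0, 3.0],    # overlaps box_a in a 1x1 square
                      [4.0, 4.0, 5.0, 5.0]])   # disjoint from box_a

A, B = box_a.size(0), box_b.size(0)
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
                   box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
                   box_b[:, :2].unsqueeze(0).expand(A, B, 2))
inter = torch.clamp(max_xy - min_xy, min=0).prod(dim=2)  # [A, B] intersection areas

area_a = ((box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])).unsqueeze(1)
area_b = ((box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])).unsqueeze(0)
iou = inter / (area_a + area_b - inter)
print(iou)  # tensor([[0.1429, 0.0000]]) -> 1 / (4 + 4 - 1) = 1/7 for the first pair
```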
meet-seth/Coursera-Deep-Learning | [
"195fad43e99de5efe6491817ad2b79e12665cc2a",
"6fbf9d406468c825ffa1ff2e177dbfd43084bace"
] | [
"Natural Language Processing with Attention Models/Week 4 - Chatbot/w4_unittest.py",
"Custom Models, Layers, and Loss Functions with TensorFlow/Week 4 - Custom Models/utils.py"
] | [
"import numpy as np\nimport trax\n#from trax import layers as tl\n#from trax.fastmath import numpy as fastnp\n#from trax.supervised import training\n\n# UNIT TEST for UNQ_C1\ndef test_get_conversation(target):\n\n data = {'file1.json': {'log':[{'text': 'hi'},\n {'text': 'hello'},\n {'text': 'nice'}]},\n 'file2.json':{'log':[{'text': 'a b'}, \n {'text': ''}, \n {'text': 'good '}, \n {'text': 'no?'}]}}\n \n res1 = target('file1.json', data)\n res2 = target('file2.json', data)\n \n expected1 = ' Person 1: hi Person 2: hello Person 1: nice'\n expected2 = ' Person 1: a b Person 2: Person 1: good Person 2: no?'\n\n success = 0\n fails = 0\n \n try:\n assert res1 == expected1\n success += 1\n except ValueError:\n print('Error in test 1 \\nResult : ', res1, 'x \\nExpected: ', expected1)\n fails += 1\n try:\n assert res2 == expected2\n success += 1\n except:\n print('Error in test 2 \\nResult : ', res2, ' \\nExpected: ', expected2)\n fails += 1\n \n if fails == 0:\n print(\"\\033[92m All tests passed\")\n else:\n print('\\033[92m', success,\" Tests passed\")\n print('\\033[91m', fails, \" Tests failed\")\n\n\n# UNIT TEST for UNQ_C2\ndef test_reversible_layer_forward(target):\n f1 = lambda x: x + 2\n g1 = lambda x: x * 3\n \n f2 = lambda x: x + 1\n g2 = lambda x: x * 2\n \n input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n expected1 = np.array([8, 10, 12, 14, 29, 36, 43, 50])\n \n input_vector2 = np.array([1] * 128)\n expected2 = np.array([3] * 64 + [7] * 64)\n \n success = 0\n fails = 0\n try:\n res = target(input_vector1, f1, g1)\n assert isinstance(res, np.ndarray)\n success += 1\n except:\n print('Wrong type! Output is not of type np.ndarray')\n fails += 1\n try:\n res = target(input_vector1, f1, g1)\n assert np.allclose(res, expected1)\n success += 1\n except ValueError:\n print('Error in test 1 \\nResult : ', res, 'x \\nExpected: ', expected1)\n fails += 1\n try:\n res = target(input_vector2, f2, g2)\n assert np.allclose(res, expected2)\n success += 1\n except:\n print('Error in test 2 \\nResult : ', res, ' \\nExpected: ', expected2)\n fails += 1\n \n if fails == 0:\n print(\"\\033[92m All tests passed\")\n else:\n print('\\033[92m', success,\" Tests passed\")\n print('\\033[91m', fails, \" Tests failed\")\n\n\n# UNIT TEST for UNQ_C3\ndef test_reversible_layer_reverse(target):\n \n f1 = lambda x: x + 2\n g1 = lambda x: x * 3\n \n f2 = lambda x: x + 1\n g2 = lambda x: x * 2\n \n input_vector1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n expected1 = np.array([-3, 0, 3, 6, 2, 0, -2, -4])\n \n input_vector2 = np.array([1] * 128)\n expected2 = np.array([1] * 64 + [-1] * 64)\n \n success = 0\n fails = 0\n try:\n res = target(input_vector1, f1, g1)\n assert isinstance(res, np.ndarray)\n success += 1\n except:\n print('Wrong type! 
Output is not of type np.ndarray')\n fails += 1\n try:\n res = target(input_vector1, f1, g1)\n assert np.allclose(res, expected1)\n success += 1\n except ValueError:\n print('Error in test 1 \\nResult : ', res, 'x \\nExpected: ', expected1)\n fails += 1\n try:\n res = target(input_vector2, f2, g2)\n assert np.allclose(res, expected2)\n success += 1\n except:\n print('Error in test 2 \\nResult : ', res, ' \\nExpected: ', expected2)\n fails += 1\n \n if fails == 0:\n print(\"\\033[92m All tests passed\")\n else:\n print('\\033[92m', success,\" Tests passed\")\n print('\\033[91m', fails, \" Tests failed\")\n \n\n# UNIT TEST for UNQ_C4\ndef test_ReformerLM(target):\n test_cases = [\n {\n \"name\":\"layer_len_check\",\n \"expected\":11,\n \"error\":\"We found {} layers in your model. It should be 11.\\nCheck the LSTM stack before the dense layer\"\n },\n {\n \"name\":\"simple_test_check\",\n \"expected\":\"Serial[ShiftRight(1)Embedding_train_512DropoutPositionalEncodingDup_out2ReversibleSerial_in2_out2[ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNorm]SelfAttention]ReversibleSwap_in2_out2ReversibleHalfResidualV2_in2_out2[Serial[LayerNormDense_2048DropoutFastGeluDense_512Dropout]]ReversibleSwap_in2_out2]Concatenate_in2LayerNormDropoutDense_trainLogSoftmax]\",\n \"error\":\"The ReformerLM is not defined properly.\"\n }\n ]\n temp_model = target('train')\n \n success = 0\n fails = 0\n \n for test_case in test_cases:\n try:\n if test_case['name'] == \"simple_test_check\":\n assert test_case[\"expected\"] == str(temp_model).replace(' ', '').replace('\\n','')\n success += 1\n if test_case['name'] == \"layer_len_check\":\n if test_case[\"expected\"] == len(temp_model.sublayers):\n success += 1\n else:\n print(test_case[\"error\"].format(len(temp_model.sublayers))) \n fails += 1\n except:\n print(test_case['error'])\n fails += 1\n \n if fails == 0:\n print(\"\\033[92m All tests passed\")\n else:\n print('\\033[92m', success,\" Tests passed\")\n print('\\033[91m', fails, \" Tests failed\")\n\n\n# UNIT TEST for UNQ_C5\ndef test_tasks(train_task, eval_task):\n target = train_task\n success = 0\n fails = 0\n \n # Test the labeled data parameter for train_task\n try:\n strlabel = str(target._labeled_data)\n assert (\"generator\" in strlabel) and (\"add_loss_weights\" in strlabel)\n success += 1\n except:\n fails += 1\n print(\"Wrong labeled data parameter in train_task\")\n \n # Test the cross entropy loss data parameter\n try:\n strlabel = str(target._loss_layer)\n assert(strlabel == \"CrossEntropyLoss_in3\")\n success += 1\n except:\n fails += 1\n print(\"Wrong loss functions. 
CrossEntropyLoss_in3 was expected\")\n \n # Test the optimizer parameter\n try:\n assert(isinstance(target.optimizer, trax.optimizers.adam.Adam))\n success += 1\n except:\n fails += 1\n print(\"Wrong optimizer\")\n \n # Test the schedule parameter\n try:\n assert(isinstance(target._lr_schedule,trax.supervised.lr_schedules._BodyAndTail))\n success += 1\n except:\n fails += 1\n print(\"Wrong learning rate schedule type\")\n \n # Test the _n_steps_per_checkpoint parameter\n try:\n assert(target._n_steps_per_checkpoint==10)\n success += 1\n except:\n fails += 1\n print(\"Wrong checkpoint step frequency\")\n \n target = eval_task\n # Test the labeled data parameter for eval_task\n try:\n strlabel = str(target._labeled_data)\n assert (\"generator\" in strlabel) and (\"add_loss_weights\" in strlabel)\n success += 1\n except:\n fails += 1\n print(\"Wrong labeled data parameter in eval_task\")\n \n # Test the metrics in eval_task \n try:\n strlabel = str(target._metrics).replace(' ', '')\n assert(strlabel == \"[CrossEntropyLoss_in3,Accuracy_in3]\")\n success += 1\n except:\n fails += 1\n print(f\"Wrong metrics. found {strlabel} but expected [CrossEntropyLoss_in3,Accuracy_in3]\")\n \n \n if fails == 0:\n print(\"\\033[92m All tests passed\")\n else:\n print('\\033[92m', success,\" Tests passed\")\n print('\\033[91m', fails, \" Tests failed\")\n \n\n",
"import tensorflow as tf\nfrom tensorflow.keras import layers\n\ndef test_loop(test_cases):\n \n success = 0\n fails = 0\n \n for test_case in test_cases:\n try:\n assert test_case[\"result\"] == test_case[\"expected\"]\n success += 1\n \n except:\n fails += 1\n print(f'{test_case[\"name\"]}: {test_case[\"error_message\"]}\\nExpected: {test_case[\"expected\"]}\\nResult: {test_case[\"result\"]}\\n')\n\n if fails == 0:\n print(\"\\033[92m All public tests passed\")\n\n else:\n print('\\033[92m', success,\" Tests passed\")\n print('\\033[91m', fails, \" Tests failed\")\n raise Exception(test_case[\"error_message\"])\n\n \ndef test_block_class(Block):\n \n filters = 64\n kernel_size = 3\n padding = 'same'\n pool_size = 3\n repetitions = 2\n test_block = Block(filters, kernel_size, repetitions, pool_size)\n test_block(tf.random.uniform(shape=[2, 3, 4, 5]))\n\n vars_test_block = vars(test_block)\n \n test_cases = [\n {\n \"name\": \"max_pool_type_check\",\n \"result\": type(test_block.max_pool),\n \"expected\": layers.MaxPooling2D,\n \"error_message\": f'Incorrect layer type for self.maxpool'\n },\n {\n \"name\": \"max_pool_size_check\",\n \"result\": vars_test_block['max_pool'].pool_size,\n \"expected\": (pool_size, pool_size),\n \"error_message\": f'max pool size incorrect. check parameters.'\n },\n {\n \"name\": \"max_pool_size_check\",\n \"result\": vars_test_block['max_pool'].strides,\n \"expected\": (2,2),\n \"error_message\": f'max pool strides incorrect. check parameters.'\n },\n {\n \"name\": \"conv2D_0_type_check\",\n \"result\": type(vars_test_block['conv2D_0']),\n \"expected\": layers.Conv2D,\n \"error_message\": f'Incorrect layer type for block_0'\n },\n {\n \"name\": \"conv2D_1_type_check\",\n \"result\": type(vars_test_block['conv2D_1']),\n \"expected\": layers.Conv2D,\n \"error_message\": f'Incorrect layer type for block_0'\n },\n {\n \"name\": \"conv2D_0_filters_check\",\n \"result\": vars_test_block['conv2D_0'].filters,\n \"expected\": filters,\n \"error_message\": f'Incorrect filters for Conv2D layer. Please check parameters.'\n },\n {\n \"name\": \"conv2D_0_kernel_size_check\",\n \"result\": vars_test_block['conv2D_0'].kernel_size,\n \"expected\": (kernel_size, kernel_size),\n \"error_message\": f'Incorrect kernel_size for Conv2D layer. Please check parameters.'\n },\n {\n \"name\": \"conv2D_0_activation_check\",\n \"result\": vars_test_block['conv2D_0'].activation,\n \"expected\": tf.keras.activations.relu,\n \"error_message\": f'Incorrect activation for Conv2D layer. Please check parameters.'\n },\n {\n \"name\": \"conv2D_0_padding_check\",\n \"result\": vars_test_block['conv2D_0'].padding,\n \"expected\": padding,\n \"error_message\": f'Incorrect padding for Conv2D layer. Please check parameters.'\n },\n \n ]\n \n test_loop(test_cases)\n \ndef test_myvgg_class(MyVGG, Block):\n test_vgg = MyVGG(num_classes=2)\n test_vgg_layers = test_vgg.layers\n\n def get_block_params(block):\n return (block.filters, block.kernel_size, block.repetitions)\n \n test_cases = [\n {\n \"name\": \"block_a_type_check\",\n \"result\": type(test_vgg.block_a),\n \"expected\": Block,\n \"error_message\": \"self.block_a has an incorrect type. Please check declaration.\"\n },\n {\n \"name\": \"block_b_type_check\",\n \"result\": type(test_vgg.block_b),\n \"expected\": Block,\n \"error_message\": \"self.block_b has an incorrect type. 
Please check declaration.\"\n },\n {\n \"name\": \"block_c_type_check\",\n \"result\": type(test_vgg.block_c),\n \"expected\": Block,\n \"error_message\": \"self.block_c has an incorrect type. Please check declaration.\"\n },\n {\n \"name\": \"block_d_type_check\",\n \"result\": type(test_vgg.block_d),\n \"expected\": Block,\n \"error_message\": \"self.block_d has an incorrect type. Please check declaration.\"\n },\n {\n \"name\": \"block_e_type_check\",\n \"result\": type(test_vgg.block_e),\n \"expected\": Block,\n \"error_message\": \"self.block_e has an incorrect type. Please check declaration.\"\n },\n {\n \"name\": \"block_a_param_check\",\n \"result\": get_block_params(test_vgg.block_a),\n \"expected\": (64, 3, 2),\n \"error_message\": \"self.block_a has incorrect parameters. Please check hints in the code comments.\"\n },\n {\n \"name\": \"block_b_param_check\",\n \"result\": get_block_params(test_vgg.block_b),\n \"expected\": (128, 3, 2),\n \"error_message\": \"self.block_b has incorrect parameters. Please check hints in the code comments.\"\n },\n {\n \"name\": \"block_c_param_check\",\n \"result\": get_block_params(test_vgg.block_c),\n \"expected\": (256, 3, 3),\n \"error_message\": \"self.block_c has incorrect parameters. Please check hints in the code comments.\"\n },\n {\n \"name\": \"block_d_param_check\",\n \"result\": get_block_params(test_vgg.block_d),\n \"expected\": (512, 3, 3),\n \"error_message\": \"self.block_d has incorrect parameters. Please check hints in the code comments.\"\n },\n {\n \"name\": \"block_e_param_check\",\n \"result\": get_block_params(test_vgg.block_e),\n \"expected\": (512, 3, 3),\n \"error_message\": \"self.block_e has incorrect parameters. Please check hints in the code comments.\"\n },\n {\n \"name\": \"flatten_type_check\",\n \"result\": type(test_vgg.flatten),\n \"expected\": layers.Flatten,\n \"error_message\": \"self.flatten has an incorrect type. Please check declaration.\"\n },\n {\n \"name\": \"fc_type_check\",\n \"result\": type(test_vgg.fc),\n \"expected\": layers.Dense,\n \"error_message\": \"self.fc has an incorrect type. Please check declaration.\"\n },\n {\n \"name\": \"fc_units_check\",\n \"result\": test_vgg.fc.units,\n \"expected\": 256,\n \"error_message\": \"self.fc has an incorrect number of units. Please check declaration.\"\n },\n {\n \"name\": \"fc_activation_check\",\n \"result\": test_vgg.fc.activation,\n \"expected\": tf.keras.activations.relu,\n \"error_message\": \"self.fc has an incorrect activation. Please check declaration.\"\n },\n {\n \"name\": \"classifier_type_check\",\n \"result\": type(test_vgg.classifier),\n \"expected\": layers.Dense,\n \"error_message\": \"self.classifier has an incorrect type. Please check declaration.\"\n },\n {\n \"name\": \"fc_units_check\",\n \"result\": test_vgg.classifier.units,\n \"expected\": 2,\n \"error_message\": \"self.classifier has an incorrect number of units. Please check declaration.\"\n },\n {\n \"name\": \"fc_activation_check\",\n \"result\": test_vgg.classifier.activation,\n \"expected\": tf.keras.activations.softmax,\n \"error_message\": \"self.classifier has an incorrect activation. Please check declaration.\"\n },\n {\n \"name\": \"layer_0_check\",\n \"result\": type(test_vgg_layers[0]),\n \"expected\": Block,\n \"error_message\": \"Layer 0 of myVGG is incorrect. Please check its call() method.\"\n },\n {\n \"name\": \"layer_1_check\",\n \"result\": type(test_vgg_layers[1]),\n \"expected\": Block,\n \"error_message\": \"Layer 1 of myVGG is incorrect. 
Please check its call() method.\"\n },\n {\n \"name\": \"layer_2_check\",\n \"result\": type(test_vgg_layers[2]),\n \"expected\": Block,\n \"error_message\": \"Layer 2 of myVGG is incorrect. Please check its call() method.\"\n },\n {\n \"name\": \"layer_3_check\",\n \"result\": type(test_vgg_layers[3]),\n \"expected\": Block,\n \"error_message\": \"Layer 3 of myVGG is incorrect. Please check its call() method.\"\n },\n {\n \"name\": \"layer_4_check\",\n \"result\": type(test_vgg_layers[4]),\n \"expected\": Block,\n \"error_message\": \"Layer 4 of myVGG is incorrect. Please check its call() method.\"\n },\n {\n \"name\": \"layer_5_check\",\n \"result\": type(test_vgg_layers[5]),\n \"expected\": layers.Flatten,\n \"error_message\": \"Layer 5 of myVGG is incorrect. Please check its call() method.\"\n },\n {\n \"name\": \"layer_6_check\",\n \"result\": type(test_vgg_layers[6]),\n \"expected\": layers.Dense,\n \"error_message\": \"Layer 6 of myVGG is incorrect. Please check its call() method.\"\n },\n {\n \"name\": \"layer_7_check\",\n \"result\": type(test_vgg_layers[7]),\n \"expected\": layers.Dense,\n \"error_message\": \"Layer 7 of myVGG is incorrect. Please check its call() method.\"\n },\n \n ]\n \n test_loop(test_cases)\n "
] | [
[
"numpy.array",
"numpy.allclose"
],
[
"tensorflow.random.uniform"
]
] |
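The `test_reversible_layer_forward`/`test_reversible_layer_reverse` fixtures above pin down the usual reversible-residual convention: split the input in half, then `y1 = x1 + f(x2)` and `y2 = x2 + g(y1)`. A NumPy sketch, assuming that split-in-half convention, that reproduces `expected1` from the forward test and then round-trips the input back without storing activations:

```python
import numpy as np

def reversible_forward(x, f, g):
    # Split the vector in half: x -> (x1, x2), then
    # y1 = x1 + f(x2), y2 = x2 + g(y1), output = concat(y1, y2).
    x1, x2 = np.split(x, 2)
    y1 = x1 + f(x2)
    y2 = x2 + g(y1)
    return np.concatenate([y1, y2])

def reversible_reverse(y, f, g):
    # Invert in the opposite order: x2 = y2 - g(y1), then x1 = y1 - f(x2).
    y1, y2 = np.split(y, 2)
    x2 = y2 - g(y1)
    x1 = y1 - f(x2)
    return np.concatenate([x1, x2])

f = lambda x: x + 2
g = lambda x: x * 3
x = np.array([1, 2, 3, 4, 5, 6, 7, 8])

y = reversible_forward(x, f, g)
print(y)                            # [ 8 10 12 14 29 36 43 50], matching expected1 above
print(reversible_reverse(y, f, g))  # recovers [1 2 3 4 5 6 7 8]
```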
PeterDomanski/agents | [
"63c1c76f16f2068a637b26282c34a8825583e73e",
"1c4f2a0dd0abf3795a221dfc8a1771cff0e6ebb9",
"1c4f2a0dd0abf3795a221dfc8a1771cff0e6ebb9"
] | [
"tf_agents/bandits/agents/neural_linucb_agent_test.py",
"tf_agents/agents/sac/examples/v2/train_eval.py",
"tf_agents/policies/categorical_q_policy_test.py"
] | [
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.bandits.agents.neural_linucb_agent.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom tf_agents.bandits.agents import neural_linucb_agent\nfrom tf_agents.bandits.agents import utils as bandit_utils\nfrom tf_agents.bandits.drivers import driver_utils\nfrom tf_agents.bandits.policies import policy_utilities\nfrom tf_agents.networks import network\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.trajectories import time_step\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import # TF internal\n\n\ntfd = tfp.distributions\n\n\nclass DummyNet(network.Network):\n\n def __init__(self, observation_spec, encoding_dim=10):\n super(DummyNet, self).__init__(\n observation_spec, state_spec=(), name='DummyNet')\n context_dim = observation_spec.shape[0]\n self._layers.append(\n tf.keras.layers.Dense(\n encoding_dim,\n kernel_initializer=tf.compat.v1.initializers.constant(\n np.ones([context_dim, encoding_dim])),\n bias_initializer=tf.compat.v1.initializers.constant(\n np.zeros([encoding_dim]))))\n\n def call(self, inputs, step_type=None, network_state=()):\n del step_type\n inputs = tf.cast(inputs, tf.float32)\n for layer in self.layers:\n inputs = layer(inputs)\n return inputs, network_state\n\n\ndef test_cases():\n return parameterized.named_parameters(\n {\n 'testcase_name': '_batch1_contextdim10',\n 'batch_size': 1,\n 'context_dim': 10,\n }, {\n 'testcase_name': '_batch4_contextdim5',\n 'batch_size': 4,\n 'context_dim': 5,\n })\n\n\ndef _get_initial_and_final_steps(batch_size, context_dim):\n observation = np.array(range(batch_size * context_dim)).reshape(\n [batch_size, context_dim])\n reward = np.random.uniform(0.0, 1.0, [batch_size])\n initial_step = time_step.TimeStep(\n tf.constant(\n time_step.StepType.FIRST, dtype=tf.int32, shape=[batch_size],\n name='step_type'),\n tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n tf.constant(observation, dtype=tf.float32,\n shape=[batch_size, context_dim], name='observation'))\n final_step = time_step.TimeStep(\n tf.constant(\n time_step.StepType.LAST, dtype=tf.int32, shape=[batch_size],\n name='step_type'),\n tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n tf.constant(observation + 100.0, dtype=tf.float32,\n shape=[batch_size, context_dim], name='observation'))\n return initial_step, final_step\n\n\ndef _get_initial_and_final_steps_with_action_mask(batch_size,\n context_dim,\n num_actions=None):\n observation = 
np.array(range(batch_size * context_dim)).reshape(\n [batch_size, context_dim])\n observation = tf.constant(observation, dtype=tf.float32)\n mask = 1 - tf.eye(batch_size, num_columns=num_actions, dtype=tf.int32)\n reward = np.random.uniform(0.0, 1.0, [batch_size])\n initial_step = time_step.TimeStep(\n tf.constant(\n time_step.StepType.FIRST,\n dtype=tf.int32,\n shape=[batch_size],\n name='step_type'),\n tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n (observation, mask))\n final_step = time_step.TimeStep(\n tf.constant(\n time_step.StepType.LAST,\n dtype=tf.int32,\n shape=[batch_size],\n name='step_type'),\n tf.constant(reward, dtype=tf.float32, shape=[batch_size], name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n (observation + 100.0, mask))\n return initial_step, final_step\n\n\ndef _get_action_step(action):\n return policy_step.PolicyStep(\n action=tf.convert_to_tensor(action),\n info=policy_utilities.PolicyInfo())\n\n\ndef _get_experience(initial_step, action_step, final_step):\n single_experience = driver_utils.trajectory_for_bandit(\n initial_step, action_step, final_step)\n # Adds a 'time' dimension.\n return tf.nest.map_structure(\n lambda x: tf.expand_dims(tf.convert_to_tensor(x), 1),\n single_experience)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass NeuralLinUCBAgentTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(NeuralLinUCBAgentTest, self).setUp()\n tf.compat.v1.enable_resource_variables()\n\n @test_cases()\n def testInitializeAgentNumTrainSteps0(self, batch_size, context_dim):\n num_actions = 5\n observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)\n time_step_spec = time_step.time_step_spec(observation_spec)\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)\n\n encoder = DummyNet(observation_spec)\n agent = neural_linucb_agent.NeuralLinUCBAgent(\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n encoding_network=encoder,\n encoding_network_num_train_steps=0,\n encoding_dim=10,\n optimizer=None)\n self.evaluate(agent.initialize())\n\n @test_cases()\n def testInitializeAgentNumTrainSteps10(self, batch_size, context_dim):\n num_actions = 5\n observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)\n time_step_spec = time_step.time_step_spec(observation_spec)\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)\n\n encoder = DummyNet(observation_spec)\n agent = neural_linucb_agent.NeuralLinUCBAgent(\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n encoding_network=encoder,\n encoding_network_num_train_steps=10,\n encoding_dim=10,\n optimizer=None)\n self.evaluate(agent.initialize())\n\n @test_cases()\n def testNeuralLinUCBUpdateNumTrainSteps0(self, batch_size=1, context_dim=10):\n \"\"\"Check NeuralLinUCBAgent updates when behaving like LinUCB.\"\"\"\n\n # Construct a `Trajectory` for the given action, observation, reward.\n num_actions = 5\n initial_step, final_step = _get_initial_and_final_steps(\n batch_size, context_dim)\n action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)\n action_step = _get_action_step(action)\n experience = _get_experience(initial_step, action_step, final_step)\n\n # Construct an agent and perform the update.\n observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)\n 
time_step_spec = time_step.time_step_spec(observation_spec)\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)\n encoder = DummyNet(observation_spec)\n encoding_dim = 10\n agent = neural_linucb_agent.NeuralLinUCBAgent(\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n encoding_network=encoder,\n encoding_network_num_train_steps=0,\n encoding_dim=encoding_dim,\n optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=1e-2))\n\n loss_info = agent.train(experience)\n self.evaluate(agent.initialize())\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(loss_info)\n final_a = self.evaluate(agent.cov_matrix)\n final_b = self.evaluate(agent.data_vector)\n\n # Compute the expected updated estimates.\n observations_list = tf.dynamic_partition(\n data=tf.reshape(tf.cast(experience.observation, tf.float64),\n [batch_size, context_dim]),\n partitions=tf.convert_to_tensor(action),\n num_partitions=num_actions)\n rewards_list = tf.dynamic_partition(\n data=tf.reshape(tf.cast(experience.reward, tf.float64), [batch_size]),\n partitions=tf.convert_to_tensor(action),\n num_partitions=num_actions)\n expected_a_updated_list = []\n expected_b_updated_list = []\n for _, (observations_for_arm, rewards_for_arm) in enumerate(zip(\n observations_list, rewards_list)):\n\n encoded_observations_for_arm, _ = encoder(observations_for_arm)\n encoded_observations_for_arm = tf.cast(\n encoded_observations_for_arm, dtype=tf.float64)\n\n num_samples_for_arm_current = tf.cast(\n tf.shape(rewards_for_arm)[0], tf.float64)\n num_samples_for_arm_total = num_samples_for_arm_current\n\n # pylint: disable=cell-var-from-loop\n def true_fn():\n a_new = tf.matmul(\n encoded_observations_for_arm,\n encoded_observations_for_arm,\n transpose_a=True)\n b_new = bandit_utils.sum_reward_weighted_observations(\n rewards_for_arm, encoded_observations_for_arm)\n return a_new, b_new\n def false_fn():\n return (tf.zeros([encoding_dim, encoding_dim], dtype=tf.float64),\n tf.zeros([encoding_dim], dtype=tf.float64))\n a_new, b_new = tf.cond(\n tf.squeeze(num_samples_for_arm_total) > 0,\n true_fn,\n false_fn)\n\n expected_a_updated_list.append(self.evaluate(a_new))\n expected_b_updated_list.append(self.evaluate(b_new))\n\n # Check that the actual updated estimates match the expectations.\n self.assertAllClose(expected_a_updated_list, final_a)\n self.assertAllClose(expected_b_updated_list, final_b)\n\n @test_cases()\n def testNeuralLinUCBUpdateNumTrainSteps10(self, batch_size=1, context_dim=10):\n \"\"\"Check NeuralLinUCBAgent updates when behaving like eps-greedy.\"\"\"\n\n # Construct a `Trajectory` for the given action, observation, reward.\n num_actions = 5\n initial_step, final_step = _get_initial_and_final_steps(\n batch_size, context_dim)\n action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)\n action_step = _get_action_step(action)\n experience = _get_experience(initial_step, action_step, final_step)\n\n # Construct an agent and perform the update.\n observation_spec = tensor_spec.TensorSpec([context_dim], tf.float32)\n time_step_spec = time_step.time_step_spec(observation_spec)\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)\n encoder = DummyNet(observation_spec)\n encoding_dim = 10\n variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(\n num_actions, encoding_dim)\n agent = neural_linucb_agent.NeuralLinUCBAgent(\n 
time_step_spec=time_step_spec,\n action_spec=action_spec,\n encoding_network=encoder,\n encoding_network_num_train_steps=10,\n encoding_dim=encoding_dim,\n variable_collection=variable_collection,\n optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001))\n\n loss_info, _ = agent.train(experience)\n self.evaluate(agent.initialize())\n self.evaluate(tf.compat.v1.global_variables_initializer())\n loss_value = self.evaluate(loss_info)\n self.assertGreater(loss_value, 0.0)\n\n @test_cases()\n def testNeuralLinUCBUpdateNumTrainSteps10MaskedActions(\n self, batch_size=1, context_dim=10):\n \"\"\"Check updates when behaving like eps-greedy and using masked actions.\"\"\"\n\n # Construct a `Trajectory` for the given action, observation, reward.\n num_actions = 5\n initial_step, final_step = _get_initial_and_final_steps_with_action_mask(\n batch_size, context_dim, num_actions)\n action = np.random.randint(num_actions, size=batch_size, dtype=np.int32)\n action_step = _get_action_step(action)\n experience = _get_experience(initial_step, action_step, final_step)\n\n # Construct an agent and perform the update.\n observation_spec = (tensor_spec.TensorSpec([context_dim], tf.float32),\n tensor_spec.TensorSpec([num_actions], tf.int32))\n time_step_spec = time_step.time_step_spec(observation_spec)\n action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=num_actions - 1)\n encoder = DummyNet(observation_spec[0])\n encoding_dim = 10\n agent = neural_linucb_agent.NeuralLinUCBAgent(\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n encoding_network=encoder,\n encoding_network_num_train_steps=10,\n encoding_dim=encoding_dim,\n optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001),\n observation_and_action_constraint_splitter=lambda x: (x[0], x[1]))\n\n loss_info, _ = agent.train(experience)\n self.evaluate(agent.initialize())\n self.evaluate(tf.compat.v1.global_variables_initializer())\n loss_value = self.evaluate(loss_info)\n self.assertGreater(loss_value, 0.0)\n\n def testInitializeRestoreVariableCollection(self):\n if not tf.executing_eagerly():\n self.skipTest('Test only works in eager mode.')\n num_actions = 5\n encoding_dim = 7\n variable_collection = neural_linucb_agent.NeuralLinUCBVariableCollection(\n num_actions=num_actions, encoding_dim=encoding_dim)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(variable_collection.num_samples_list)\n checkpoint = tf.train.Checkpoint(variable_collection=variable_collection)\n checkpoint_dir = self.get_temp_dir()\n checkpoint_prefix = os.path.join(checkpoint_dir, 'checkpoint')\n checkpoint.save(file_prefix=checkpoint_prefix)\n\n variable_collection.actions_from_reward_layer.assign(False)\n\n latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\n checkpoint_load_status = checkpoint.restore(latest_checkpoint)\n self.evaluate(checkpoint_load_status.initialize_or_restore())\n self.assertEqual(\n self.evaluate(variable_collection.actions_from_reward_layer), True)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Train and Eval SAC.\n\nTo run:\n\n```bash\ntensorboard --logdir $HOME/tmp/sac/gym/HalfCheetah-v2/ --port 2223 &\n\npython tf_agents/agents/sac/examples/v2/train_eval.py \\\n --root_dir=$HOME/tmp/sac/gym/HalfCheetah-v2/ \\\n --alsologtostderr\n```\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport time\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport gin\nimport tensorflow as tf\n\nfrom tf_agents.agents.ddpg import critic_network\nfrom tf_agents.agents.sac import sac_agent\nfrom tf_agents.drivers import dynamic_step_driver\nfrom tf_agents.environments import parallel_py_environment\nfrom tf_agents.environments import suite_mujoco\nfrom tf_agents.environments import tf_py_environment\nfrom tf_agents.eval import metric_utils\nfrom tf_agents.metrics import tf_metrics\nfrom tf_agents.networks import actor_distribution_network\nfrom tf_agents.networks import normal_projection_network\nfrom tf_agents.policies import greedy_policy\nfrom tf_agents.policies import random_tf_policy\nfrom tf_agents.replay_buffers import tf_uniform_replay_buffer\nfrom tf_agents.utils import common\n\nflags.DEFINE_string('root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),\n 'Root directory for writing logs/summaries/checkpoints.')\nflags.DEFINE_multi_string('gin_file', None, 'Path to the trainer config files.')\nflags.DEFINE_multi_string('gin_param', None, 'Gin binding to pass through.')\n\nFLAGS = flags.FLAGS\n\n\[email protected]\ndef normal_projection_net(action_spec,\n init_action_stddev=0.35,\n init_means_output_factor=0.1):\n del init_action_stddev\n return normal_projection_network.NormalProjectionNetwork(\n action_spec,\n mean_transform=None,\n state_dependent_std=True,\n init_means_output_factor=init_means_output_factor,\n std_transform=sac_agent.std_clip_transform,\n scale_distribution=True)\n\n\n_DEFAULT_REWARD_SCALE = 0\n\n\[email protected]\ndef train_eval(\n root_dir,\n env_name='HalfCheetah-v2',\n eval_env_name=None,\n env_load_fn=suite_mujoco.load,\n num_iterations=1000000,\n actor_fc_layers=(256, 256),\n critic_obs_fc_layers=None,\n critic_action_fc_layers=None,\n critic_joint_fc_layers=(256, 256),\n num_parallel_environments=1,\n # Params for collect\n initial_collect_steps=10000,\n collect_steps_per_iteration=1,\n replay_buffer_capacity=1000000,\n # Params for target update\n target_update_tau=0.005,\n target_update_period=1,\n # Params for train\n train_steps_per_iteration=1,\n batch_size=256,\n actor_learning_rate=3e-4,\n critic_learning_rate=3e-4,\n alpha_learning_rate=3e-4,\n td_errors_loss_fn=tf.compat.v1.losses.mean_squared_error,\n gamma=0.99,\n reward_scale_factor=_DEFAULT_REWARD_SCALE,\n gradient_clipping=None,\n use_tf_functions=True,\n # Params for eval\n num_eval_episodes=30,\n eval_interval=10000,\n # Params for summaries and logging\n 
train_checkpoint_interval=10000,\n policy_checkpoint_interval=5000,\n rb_checkpoint_interval=50000,\n log_interval=1000,\n summary_interval=1000,\n summaries_flush_secs=10,\n debug_summaries=False,\n summarize_grads_and_vars=False,\n eval_metrics_callback=None):\n \"\"\"A simple train and eval for SAC on Mujoco.\n\n All hyperparameters come from the original SAC paper\n (https://arxiv.org/pdf/1801.01290.pdf).\n \"\"\"\n\n if reward_scale_factor == _DEFAULT_REWARD_SCALE:\n # Use value recommended by https://arxiv.org/abs/1801.01290\n if env_name.startswith('Humanoid'):\n reward_scale_factor = 20.0\n else:\n reward_scale_factor = 5.0\n\n root_dir = os.path.expanduser(root_dir)\n\n summary_writer = tf.compat.v2.summary.create_file_writer(\n root_dir, flush_millis=summaries_flush_secs * 1000)\n summary_writer.set_as_default()\n\n eval_metrics = [\n tf_metrics.AverageReturnMetric(buffer_size=num_eval_episodes),\n tf_metrics.AverageEpisodeLengthMetric(buffer_size=num_eval_episodes)\n ]\n\n global_step = tf.compat.v1.train.get_or_create_global_step()\n with tf.compat.v2.summary.record_if(\n lambda: tf.math.equal(global_step % summary_interval, 0)):\n # create training environment\n if num_parallel_environments == 1:\n py_env = env_load_fn(env_name)\n else:\n py_env = parallel_py_environment.ParallelPyEnvironment(\n [lambda: env_load_fn(env_name)] * num_parallel_environments)\n tf_env = tf_py_environment.TFPyEnvironment(py_env)\n # create evaluation environment\n eval_env_name = eval_env_name or env_name\n eval_py_env = env_load_fn(eval_env_name)\n eval_tf_env = tf_py_environment.TFPyEnvironment(eval_py_env)\n\n time_step_spec = tf_env.time_step_spec()\n observation_spec = time_step_spec.observation\n action_spec = tf_env.action_spec()\n\n actor_net = actor_distribution_network.ActorDistributionNetwork(\n observation_spec,\n action_spec,\n fc_layer_params=actor_fc_layers,\n continuous_projection_net=normal_projection_net)\n critic_net = critic_network.CriticNetwork(\n (observation_spec, action_spec),\n observation_fc_layer_params=critic_obs_fc_layers,\n action_fc_layer_params=critic_action_fc_layers,\n joint_fc_layer_params=critic_joint_fc_layers)\n\n tf_agent = sac_agent.SacAgent(\n time_step_spec,\n action_spec,\n actor_network=actor_net,\n critic_network=critic_net,\n actor_optimizer=tf.compat.v1.train.AdamOptimizer(\n learning_rate=actor_learning_rate),\n critic_optimizer=tf.compat.v1.train.AdamOptimizer(\n learning_rate=critic_learning_rate),\n alpha_optimizer=tf.compat.v1.train.AdamOptimizer(\n learning_rate=alpha_learning_rate),\n target_update_tau=target_update_tau,\n target_update_period=target_update_period,\n td_errors_loss_fn=td_errors_loss_fn,\n gamma=gamma,\n reward_scale_factor=reward_scale_factor,\n gradient_clipping=gradient_clipping,\n debug_summaries=debug_summaries,\n summarize_grads_and_vars=summarize_grads_and_vars,\n train_step_counter=global_step)\n tf_agent.initialize()\n\n # Make the replay buffer.\n replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(\n data_spec=tf_agent.collect_data_spec,\n batch_size=num_parallel_environments,\n max_length=replay_buffer_capacity)\n replay_observer = [replay_buffer.add_batch]\n\n env_steps = tf_metrics.EnvironmentSteps(prefix='Train')\n average_return = tf_metrics.AverageReturnMetric(\n prefix='Train',\n buffer_size=num_eval_episodes,\n batch_size=tf_env.batch_size)\n train_metrics = [\n tf_metrics.NumberOfEpisodes(prefix='Train'),\n env_steps,\n average_return,\n tf_metrics.AverageEpisodeLengthMetric(\n prefix='Train',\n 
buffer_size=num_eval_episodes,\n batch_size=tf_env.batch_size),\n ]\n\n eval_policy = greedy_policy.GreedyPolicy(tf_agent.policy)\n initial_collect_policy = random_tf_policy.RandomTFPolicy(\n tf_env.time_step_spec(), tf_env.action_spec())\n collect_policy = tf_agent.collect_policy\n\n train_checkpointer = common.Checkpointer(\n ckpt_dir=os.path.join(root_dir, 'train'),\n agent=tf_agent,\n global_step=global_step,\n metrics=metric_utils.MetricsGroup(train_metrics, 'train_metrics'))\n policy_checkpointer = common.Checkpointer(\n ckpt_dir=os.path.join(root_dir, 'policy'),\n policy=eval_policy,\n global_step=global_step)\n rb_checkpointer = common.Checkpointer(\n ckpt_dir=os.path.join(root_dir, 'replay_buffer'),\n max_to_keep=1,\n replay_buffer=replay_buffer)\n\n train_checkpointer.initialize_or_restore()\n rb_checkpointer.initialize_or_restore()\n\n initial_collect_driver = dynamic_step_driver.DynamicStepDriver(\n tf_env,\n initial_collect_policy,\n observers=replay_observer + train_metrics,\n num_steps=initial_collect_steps)\n\n collect_driver = dynamic_step_driver.DynamicStepDriver(\n tf_env,\n collect_policy,\n observers=replay_observer + train_metrics,\n num_steps=collect_steps_per_iteration)\n\n if use_tf_functions:\n initial_collect_driver.run = common.function(initial_collect_driver.run)\n collect_driver.run = common.function(collect_driver.run)\n tf_agent.train = common.function(tf_agent.train)\n\n # Collect initial replay data.\n if env_steps.result() == 0 or replay_buffer.num_frames() == 0:\n logging.info(\n 'Initializing replay buffer by collecting experience for %d steps'\n 'with a random policy.', initial_collect_steps)\n initial_collect_driver.run()\n\n results = metric_utils.eager_compute(\n eval_metrics,\n eval_tf_env,\n eval_policy,\n num_episodes=num_eval_episodes,\n train_step=env_steps.result(),\n summary_writer=summary_writer,\n summary_prefix='Eval',\n )\n if eval_metrics_callback is not None:\n eval_metrics_callback(results, env_steps.result())\n metric_utils.log_metrics(eval_metrics)\n\n time_step = None\n policy_state = collect_policy.get_initial_state(tf_env.batch_size)\n\n time_acc = 0\n env_steps_before = env_steps.result().numpy()\n\n # Dataset generates trajectories with shape [Bx2x...]\n dataset = replay_buffer.as_dataset(\n num_parallel_calls=3, sample_batch_size=batch_size,\n num_steps=2).prefetch(3)\n iterator = iter(dataset)\n\n def train_step():\n experience, _ = next(iterator)\n return tf_agent.train(experience)\n\n if use_tf_functions:\n train_step = common.function(train_step)\n\n for _ in range(num_iterations):\n start_time = time.time()\n time_step, policy_state = collect_driver.run(\n time_step=time_step,\n policy_state=policy_state,\n )\n for _ in range(train_steps_per_iteration):\n train_step()\n time_acc += time.time() - start_time\n\n if global_step.numpy() % log_interval == 0:\n logging.info('env steps = %d, average return = %f', env_steps.result(),\n average_return.result())\n env_steps_per_sec = (env_steps.result().numpy() -\n env_steps_before) / time_acc\n logging.info('%.3f env steps/sec', env_steps_per_sec)\n tf.compat.v2.summary.scalar(\n name='env_steps_per_sec',\n data=env_steps_per_sec,\n step=env_steps.result())\n time_acc = 0\n env_steps_before = env_steps.result().numpy()\n\n for train_metric in train_metrics:\n train_metric.tf_summaries(train_step=env_steps.result())\n\n if global_step.numpy() % eval_interval == 0:\n results = metric_utils.eager_compute(\n eval_metrics,\n eval_tf_env,\n eval_policy,\n 
num_episodes=num_eval_episodes,\n train_step=env_steps.result(),\n summary_writer=summary_writer,\n summary_prefix='Eval',\n )\n if eval_metrics_callback is not None:\n eval_metrics_callback(results, env_steps.result())\n metric_utils.log_metrics(eval_metrics)\n\n global_step_val = global_step.numpy()\n if global_step_val % train_checkpoint_interval == 0:\n train_checkpointer.save(global_step=global_step_val)\n\n if global_step_val % policy_checkpoint_interval == 0:\n policy_checkpointer.save(global_step=global_step_val)\n\n if global_step_val % rb_checkpoint_interval == 0:\n rb_checkpointer.save(global_step=global_step_val)\n\n\ndef main(_):\n tf.compat.v1.enable_v2_behavior()\n logging.set_verbosity(logging.INFO)\n gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param)\n train_eval(FLAGS.root_dir)\n\n\nif __name__ == '__main__':\n flags.mark_flag_as_required('root_dir')\n app.run(main)\n",
"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for learning.reinforcement_learning.policies.categorical_q_policy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom absl import flags\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tf_agents.networks import categorical_q_network\nfrom tf_agents.networks import network\nfrom tf_agents.policies import categorical_q_policy\nfrom tf_agents.policies import policy_saver\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.utils import test_utils\n\n\nclass DummyCategoricalNet(network.Network):\n\n def __init__(self,\n input_tensor_spec,\n num_atoms=51,\n num_actions=2,\n name=None):\n self._num_atoms = num_atoms\n self._num_actions = num_actions\n super(DummyCategoricalNet, self).__init__(\n input_tensor_spec=input_tensor_spec,\n state_spec=(),\n name=name)\n\n # In CategoricalDQN we are dealing with a distribution over Q-values, which\n # are represented as num_atoms bins, ranging from min_q_value to\n # max_q_value. In order to replicate the setup in the non-categorical\n # network (namely, [[2, 1], [1, 1]]), we use the following \"logits\":\n # [[0, 1, ..., num_atoms-1, num_atoms, 1, ..., 1],\n # [1, ......................................, 1]]\n # The important bit is that the first half of the first list (which\n # corresponds to the logits for the first action) place more weight on the\n # higher q_values than on the lower ones, thereby resulting in a higher\n # value for the first action.\n weights_initializer = np.array([\n np.concatenate((np.arange(num_atoms), np.ones(num_atoms))),\n np.concatenate((np.ones(num_atoms), np.ones(num_atoms)))])\n kernel_initializer = tf.compat.v1.initializers.constant(\n weights_initializer, verify_shape=True)\n bias_initializer = tf.compat.v1.initializers.ones()\n\n # Store custom layers that can be serialized through the Checkpointable API.\n self._dummy_layers = []\n self._dummy_layers.append(\n tf.keras.layers.Dense(\n num_actions * num_atoms,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer))\n\n @property\n def num_atoms(self):\n return self._num_atoms\n\n def call(self, inputs, step_type=None, network_state=()):\n del step_type\n inputs = tf.cast(inputs, tf.float32)\n for layer in self._dummy_layers:\n inputs = layer(inputs)\n logits = tf.reshape(inputs, [-1, self._num_actions, self._num_atoms])\n return logits, network_state\n\n\nclass CategoricalQPolicyTest(test_utils.TestCase):\n\n def setUp(self):\n super(CategoricalQPolicyTest, self).setUp()\n self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)\n self._time_step_spec = ts.time_step_spec(self._obs_spec)\n self._action_spec = tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)\n self._min_q_value = -10\n self._max_q_value = 10\n self._q_network = DummyCategoricalNet(\n 
input_tensor_spec=self._obs_spec,\n num_atoms=3,\n num_actions=2)\n\n def testBuild(self):\n policy = categorical_q_policy.CategoricalQPolicy(\n self._time_step_spec, self._action_spec, self._q_network,\n self._min_q_value, self._max_q_value)\n\n self.assertEqual(policy.time_step_spec, self._time_step_spec)\n self.assertEqual(policy.action_spec, self._action_spec)\n\n # There should be two variables in our network for the fc_layer we specified\n # (one kernel and one bias).\n self.assertLen(policy.variables(), 2)\n\n def testMultipleActionsRaiseError(self):\n with self.assertRaisesRegexp(\n TypeError, '.*action_spec must be a BoundedTensorSpec.*'):\n # Replace the action_spec for this test.\n action_spec = [tensor_spec.BoundedTensorSpec([1], tf.int32, 0, 1)] * 2\n q_network = categorical_q_network.CategoricalQNetwork(\n input_tensor_spec=self._obs_spec,\n action_spec=action_spec,\n num_atoms=3,\n fc_layer_params=[4])\n categorical_q_policy.CategoricalQPolicy(\n self._time_step_spec, action_spec, q_network,\n self._min_q_value, self._max_q_value)\n\n def testAction(self):\n policy = categorical_q_policy.CategoricalQPolicy(\n self._time_step_spec, self._action_spec, self._q_network,\n self._min_q_value, self._max_q_value)\n\n observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n time_step = ts.restart(observations)\n actions, _, _ = policy.action(time_step)\n self.assertEqual(actions.shape.as_list(), [2])\n self.assertEqual(actions.dtype, tf.int32)\n # Initialize all variables\n self.evaluate(tf.compat.v1.global_variables_initializer())\n actions = self.evaluate(actions)\n\n # actions should be a list of two elements; e.g., [0, 1]\n self.assertLen(actions, 2)\n\n for action in actions:\n self.assertGreaterEqual(action, self._action_spec.minimum)\n self.assertLessEqual(action, self._action_spec.maximum)\n\n def testSample(self):\n policy = categorical_q_policy.CategoricalQPolicy(\n self._time_step_spec, self._action_spec, self._q_network,\n self._min_q_value, self._max_q_value)\n\n observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n time_step = ts.restart(observations)\n actions = policy.action(time_step).action\n self.assertEqual(actions.shape.as_list(), [2])\n self.assertEqual(actions.dtype, tf.int32)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n actions = self.evaluate(actions)\n\n # actions should be a list of two elements; e.g., [0, 1]\n self.assertLen(actions, 2)\n\n for action in actions:\n self.assertGreaterEqual(action, self._action_spec.minimum)\n self.assertLessEqual(action, self._action_spec.maximum)\n\n def testUpdate(self):\n policy = categorical_q_policy.CategoricalQPolicy(\n self._time_step_spec, self._action_spec, self._q_network,\n self._min_q_value, self._max_q_value)\n\n new_policy = categorical_q_policy.CategoricalQPolicy(\n self._time_step_spec, self._action_spec, self._q_network,\n self._min_q_value, self._max_q_value)\n\n observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n time_step = ts.restart(observations)\n\n # There should be two variables in our networks for the fc_layer we\n # specified (one kernel and one bias).\n self.assertLen(policy.variables(), 2)\n self.assertLen(new_policy.variables(), 2)\n\n actions, _, _ = policy.action(time_step)\n new_actions, _, _ = new_policy.action(time_step)\n\n self.assertEqual(actions.shape, new_actions.shape)\n self.assertEqual(actions.dtype, new_actions.dtype)\n\n self.evaluate(tf.compat.v1.global_variables_initializer())\n actions = self.evaluate(actions)\n\n # actions 
should be a list of two elements; e.g., [0, 1]\n self.assertLen(actions, 2)\n\n for action in actions:\n self.assertGreaterEqual(action, self._action_spec.minimum)\n self.assertLessEqual(action, self._action_spec.maximum)\n\n self.assertEqual(self.evaluate(new_policy.update(policy)), None)\n new_actions = self.evaluate(new_actions)\n\n # new_actions should also be a list of two elements; e.g., [0, 1]\n self.assertLen(new_actions, 2)\n\n for action in new_actions:\n self.assertGreaterEqual(action, self._action_spec.minimum)\n self.assertLessEqual(action, self._action_spec.maximum)\n\n def testMasking(self):\n batch_size = 1000\n num_state_dims = 5\n num_actions = 8\n observations = tf.random.uniform([batch_size, num_state_dims])\n time_step = ts.restart(observations, batch_size=batch_size)\n input_tensor_spec = tensor_spec.TensorSpec([num_state_dims], tf.float32)\n action_spec = tensor_spec.BoundedTensorSpec(\n [1], tf.int32, 0, num_actions - 1)\n\n # We create a fixed mask here for testing purposes. Normally the mask would\n # be part of the observation.\n mask = [0, 1, 0, 1, 0, 0, 1, 0]\n np_mask = np.array(mask)\n tf_mask = tf.constant([mask for _ in range(batch_size)])\n q_network = categorical_q_network.CategoricalQNetwork(\n input_tensor_spec=input_tensor_spec,\n action_spec=action_spec,\n num_atoms=3,\n fc_layer_params=[4])\n policy = categorical_q_policy.CategoricalQPolicy(\n self._time_step_spec, action_spec, q_network,\n self._min_q_value, self._max_q_value,\n observation_and_action_constraint_splitter=(\n lambda observation: (observation, tf_mask)))\n\n self.evaluate(tf.compat.v1.global_variables_initializer())\n\n # Sample from the policy 1000 times, and ensure that actions considered\n # invalid according to the mask are never chosen.\n action_step = policy.action(time_step)\n action = self.evaluate(action_step.action)\n self.assertEqual(action.shape, (batch_size,))\n self.assertAllEqual(np_mask[action], np.ones([batch_size]))\n\n def testSaver(self):\n policy = categorical_q_policy.CategoricalQPolicy(\n self._time_step_spec, self._action_spec, self._q_network,\n self._min_q_value, self._max_q_value)\n\n saver = policy_saver.PolicySaver(policy)\n\n self.evaluate(tf.compat.v1.global_variables_initializer())\n self.evaluate(tf.compat.v1.local_variables_initializer())\n\n save_path = os.path.join(flags.FLAGS.test_tmpdir,\n 'saved_categorical_q_policy')\n saver.save(save_path)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.zeros",
"tensorflow.cast",
"numpy.random.randint",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.test.main",
"tensorflow.squeeze",
"numpy.zeros",
"tensorflow.matmul",
"tensorflow.executing_eagerly",
"tensorflow.shape",
"tensorflow.train.Checkpoint",
"tensorflow.constant",
"tensorflow.train.latest_checkpoint",
"tensorflow.eye",
"tensorflow.compat.v1.global_variables_initializer",
"numpy.ones",
"tensorflow.compat.v1.enable_resource_variables",
"numpy.random.uniform"
],
[
"tensorflow.compat.v1.enable_v2_behavior",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v2.summary.create_file_writer",
"tensorflow.compat.v1.train.get_or_create_global_step",
"tensorflow.math.equal"
],
[
"tensorflow.constant",
"tensorflow.compat.v1.local_variables_initializer",
"tensorflow.keras.layers.Dense",
"numpy.arange",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.test.main",
"tensorflow.random.uniform",
"tensorflow.compat.v1.global_variables_initializer",
"numpy.ones",
"tensorflow.compat.v1.initializers.constant",
"numpy.array",
"tensorflow.compat.v1.initializers.ones"
]
] |
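The NeuralLinUCB tests in the row above reconstruct the per-arm update by hand: for each arm, a covariance estimate A = XᵀX (via `tf.matmul(..., transpose_a=True)`) and a reward-weighted data vector b = Xᵀr (via `bandit_utils.sum_reward_weighted_observations`). A minimal NumPy sketch of those statistics — the helper name here is ours, not the tf_agents API:

```python
import numpy as np

def linucb_arm_statistics(encoded_obs, rewards):
    """Per-arm LinUCB sufficient statistics: A = X^T X and b = X^T r."""
    a = encoded_obs.T @ encoded_obs  # (d, d), matches tf.matmul(x, x, transpose_a=True)
    b = encoded_obs.T @ rewards      # (d,), the sum of reward-weighted observations
    return a, b

x = np.random.randn(4, 10)  # 4 samples routed to one arm, encoding_dim = 10
r = np.random.rand(4)
a, b = linucb_arm_statistics(x, r)
assert a.shape == (10, 10) and b.shape == (10,)
```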
marcelkotze007/mk007---ML-Python-library | [
"307e51762fc821588206440daa2c18a6128f4aec",
"307e51762fc821588206440daa2c18a6128f4aec",
"307e51762fc821588206440daa2c18a6128f4aec"
] | [
"util.py",
"Basics Tools/Exercises/Exercise9.py",
"Neural Network/Softmax.py"
] | [
"# https://deeplearningcourses.com/c/data-science-supervised-machine-learning-in-python\n# https://www.udemy.com/data-science-supervised-machine-learning-in-python\nfrom __future__ import print_function, division\nfrom builtins import range, input\n# Note: you may need to update your version of future\n# sudo pip install -U future\n\n\nimport numpy as np\nimport pandas as pd\n\ndef get_data(limit=None):\n print(\"Reading in and transforming data...\")\n df = pd.read_csv('train.csv')\n data = df.values\n np.random.shuffle(data)\n X = data[:, 1:]\n print(X[4000])\n X = data[:, 1:] / 255.0 # data is from 0..255\n print(X[4000])\n Y = data[:, 0]\n if limit is not None:\n X, Y = X[:limit], Y[:limit]\n return X, Y\n\ndef get_xor():\n X = np.zeros((200, 2))\n X[:50] = np.random.random((50, 2)) / 2 + 0.5 # (0.5-1, 0.5-1)\n X[50:100] = np.random.random((50, 2)) / 2 # (0-0.5, 0-0.5)\n X[100:150] = np.random.random((50, 2)) / 2 + np.array([[0, 0.5]]) # (0-0.5, 0.5-1)\n X[150:] = np.random.random((50, 2)) / 2 + np.array([[0.5, 0]]) # (0.5-1, 0-0.5)\n Y = np.array([0]*100 + [1]*100)\n return X, Y\n\ndef get_donut():\n N = 200\n R_inner = 5\n R_outer = 10\n\n # distance from origin is radius + random normal\n # angle theta is uniformly distributed between (0, 2pi)\n R1 = np.random.randn(N//2) + R_inner\n theta = 2*np.pi*np.random.random(N//2)\n X_inner = np.concatenate([[R1 * np.cos(theta)], [R1 * np.sin(theta)]]).T\n\n R2 = np.random.randn(N//2) + R_outer\n theta = 2*np.pi*np.random.random(N//2)\n X_outer = np.concatenate([[R2 * np.cos(theta)], [R2 * np.sin(theta)]]).T\n\n X = np.concatenate([ X_inner, X_outer ])\n Y = np.array([0]*(N//2) + [1]*(N//2))\n return X, Y\n\nget_data()",
"# https://deeplearningcourses.com/c/deep-learning-prerequisites-the-numpy-stack-in-python\n# https://www.udemy.com/deep-learning-prerequisites-the-numpy-stack-in-python\n\nfrom __future__ import print_function, division\nfrom future.utils import iteritems\nfrom builtins import range, input\n# Note: you may need to update your version of future\n# sudo pip install -U future\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nfrom Exercise8 import get_spiral\n\n# get the data\nX, Y = get_spiral()\n\n# combine the data into one array\n# data to be concatenated must have same # of dimensions\n# e.g. N x D and N x 1\n# not N x D and N\ndata = np.concatenate((X, np.expand_dims(Y, 1)), axis=1)\n\ndf = pd.DataFrame(data)\ndf.columns = ['x1', 'x2', 'y']\ndf.to_csv('mydata.csv', index=False)",
"import numpy as np \n\ndef softmax_single_array(N = 5):\n \"\"\"\n Returns the probability of each of the output nodes\n \"\"\"\n #Final output of a neural network\n a = np.random.randn(5)\n # These values represent the output of the output neurons\n #start by exponetiating the values:\n exp_a = np.exp(a)\n prob_answer_sin = exp_a / exp_a.sum()\n\n return prob_answer_sin\n\ndef softmax_matrix(N = 100, D = 5):\n \"\"\"\n Returns the probability of each of the output nodes for a matrix of output \n \"\"\"\n #100 samples in 5 classes\n a = np.random.randn(N, D)\n \n exp_a = np.exp(a)\n \n #Use this to sum along the rows, the keepdims is so (100,5) (100,) can be added\n prob_answer_mat = exp_a / exp_a.sum(axis = 1, keepdims = True)\n\n #shows that each row sums to 1\n print(prob_answer_mat.sum(axis = 1))\n"
] | [
[
"pandas.read_csv",
"numpy.random.random",
"numpy.cos",
"numpy.random.shuffle",
"numpy.sin",
"numpy.concatenate",
"numpy.random.randn",
"numpy.array",
"numpy.zeros"
],
[
"numpy.expand_dims",
"pandas.DataFrame"
],
[
"numpy.exp",
"numpy.random.randn"
]
] |
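`Softmax.py` above exponentiates raw logits directly, which can overflow for large activations. A small sketch of the standard numerically stable variant, which subtracts the row maximum before exponentiating (the probabilities are unchanged, since softmax is shift-invariant):

```python
import numpy as np

def stable_softmax(a):
    """Row-wise softmax that subtracts the max first to avoid overflow for large logits."""
    a = np.asarray(a, dtype=float)
    shifted = a - a.max(axis=-1, keepdims=True)
    exp_a = np.exp(shifted)
    return exp_a / exp_a.sum(axis=-1, keepdims=True)

p = stable_softmax(np.random.randn(100, 5))
assert np.allclose(p.sum(axis=1), 1.0)  # every row is a probability distribution
```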
bem4solvation/pbj | [
"4fa9c111596359192539787ae241a79d4316b15b"
] | [
"pbj/electrostatics/pb_formulation/formulations/direct_external.py"
] | [
"import numpy as np\nimport bempp.api\nimport os\nfrom bempp.api.operators.boundary import sparse, laplace, modified_helmholtz\nfrom .common import calculate_potential_one_surface\n\ninvert_potential = True\n\n\ndef verify_parameters(self):\n return True\n\n\ndef lhs(self):\n dirichl_space = self.dirichl_space\n neumann_space = self.neumann_space\n ep_in = self.ep_in\n ep_out = self.ep_ex\n kappa = self.kappa\n operator_assembler = self.operator_assembler\n\n identity = sparse.identity(dirichl_space, dirichl_space, dirichl_space)\n slp_in = laplace.single_layer(\n neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler\n )\n dlp_in = laplace.double_layer(\n dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler\n )\n slp_out = modified_helmholtz.single_layer(\n neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler\n )\n dlp_out = modified_helmholtz.double_layer(\n dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler\n )\n\n A = bempp.api.BlockedOperator(2, 2)\n\n A[0, 0] = 0.5 * identity - dlp_out\n A[0, 1] = slp_out\n A[1, 0] = 0.5 * identity + dlp_in\n A[1, 1] = -(ep_out / ep_in) * slp_in\n\n self.matrices[\"A\"] = A\n\n\ndef rhs(self):\n dirichl_space = self.dirichl_space\n neumann_space = self.neumann_space\n q = self.q\n x_q = self.x_q\n ep_in = self.ep_in\n rhs_constructor = self.rhs_constructor\n\n if rhs_constructor == \"fmm\":\n\n @bempp.api.callable(vectorized=True)\n def fmm_green_func(x, n, domain_index, result):\n import exafmm.laplace as _laplace\n\n sources = _laplace.init_sources(x_q, q)\n targets = _laplace.init_targets(x.T)\n fmm = _laplace.LaplaceFmm(p=10, ncrit=500, filename=\".rhs.tmp\")\n tree = _laplace.setup(sources, targets, fmm)\n values = _laplace.evaluate(tree, fmm)\n os.remove(\".rhs.tmp\")\n result[:] = values[:, 0] / ep_in\n\n @bempp.api.real_callable\n def zero(x, n, domain_index, result):\n result[0] = 0\n\n rhs_1 = bempp.api.GridFunction(neumann_space, fun=zero)\n rhs_2 = bempp.api.GridFunction(dirichl_space, fun=fmm_green_func)\n\n else:\n\n @bempp.api.real_callable\n def charges_fun(x, n, domain_index, result):\n nrm = np.sqrt(\n (x[0] - x_q[:, 0]) ** 2\n + (x[1] - x_q[:, 1]) ** 2\n + (x[2] - x_q[:, 2]) ** 2\n )\n aux = np.sum(q / nrm)\n result[0] = aux / (4 * np.pi * ep_in)\n\n @bempp.api.real_callable\n def zero(x, n, domain_index, result):\n result[0] = 0\n\n rhs_1 = bempp.api.GridFunction(neumann_space, fun=zero)\n rhs_2 = bempp.api.GridFunction(dirichl_space, fun=charges_fun)\n\n self.rhs[\"rhs_1\"], self.rhs[\"rhs_2\"] = rhs_1, rhs_2\n\ndef calculate_potential(self, rerun_all):\n calculate_potential_one_surface(self, rerun_all)\n\n"
] | [
[
"numpy.sum",
"numpy.sqrt"
]
] |
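The `charges_fun` callable in the row above evaluates the Coulomb potential of the point charges at each surface point. Stripped of the bempp machinery, the same computation in plain NumPy looks like this (toy inputs, illustrative only):

```python
import numpy as np

def coulomb_potential(x, x_q, q, ep_in):
    """Potential at point x due to point charges q located at x_q (shape (n, 3))."""
    nrm = np.sqrt(((x - x_q) ** 2).sum(axis=1))  # distance from x to every charge
    return (q / nrm).sum() / (4 * np.pi * ep_in)

x_q = np.random.rand(5, 3)  # five charge positions in the unit cube
q = np.random.rand(5)       # five charge magnitudes
print(coulomb_potential(np.array([2.0, 0.0, 0.0]), x_q, q, ep_in=4.0))
```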
luxinglong/ViZDoom-SL | [
"fbc54c401b1ca320e9e804f2c97fdedc5d0c534d"
] | [
"doom/test.py"
] | [
"import sys\r\nimport argparse\r\nimport numpy as np\r\n\r\nfrom actions import ActionBuilder\r\nfrom game import Game\r\n\r\n# use_continuous speed action_combinations crouch freelook\r\n\r\nFALSY_STRINGS = {'off', 'false', '0'}\r\nTRUTHY_STRINGS = {'on', 'true', '1'}\r\n\r\ndef bool_flag(string):\r\n \"\"\"\r\n Parse boolean arguments from the command line.\r\n \"\"\"\r\n if string.lower() in FALSY_STRINGS:\r\n return False\r\n elif string.lower() in TRUTHY_STRINGS:\r\n return True\r\n else:\r\n raise argparse.ArgumentTypeError(\"invalid value for a boolean flag. \"\r\n \"use 0 or 1\")\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description='LUBAN runner')\r\n parser.add_argument(\"--use_continuous\", type=bool_flag, default=False,\r\n help=\"weather use continuous actions\")\r\n # Available actions\r\n # combination of actions the agent is allowed to do.\r\n # this is for non-continuous mode only, and is ignored in continuous mode\r\n parser.add_argument(\"--action_combinations\", type=str,\r\n default='move_fb+turn_lr+move_lr+attack',\r\n help=\"Allowed combinations of actions\")\r\n # freelook: allow the agent to look up and down\r\n parser.add_argument(\"--freelook\", type=bool_flag, default=False,\r\n help=\"Enable freelook (look up / look down)\")\r\n parser.add_argument(\"--human_player\", type=bool_flag, default=False,\r\n help=\"DoomGame mode\")\r\n\r\n # speed and crouch buttons: in non-continuous mode, the network can not\r\n # have control on these buttons, and they must be set to always 'on' or\r\n # 'off'. In continuous mode, the network can manually control crouch and\r\n # speed.\r\n parser.add_argument(\"--speed\", type=str, default='off',\r\n help=\"Crouch: on / off / manual\")\r\n parser.add_argument(\"--crouch\", type=str, default='off',\r\n help=\"Crouch: on / off / manual\")\r\n\r\n # for process_buffers\r\n parser.add_argument(\"--height\", type=int, default=60,\r\n help=\"Image height\")\r\n parser.add_argument(\"--width\", type=int, default=108,\r\n help=\"Image width\")\r\n parser.add_argument(\"--gray\", type=bool_flag, default=False,\r\n help=\"Use grayscale\")\r\n parser.add_argument(\"--use_screen_buffer\", type=bool_flag, default=True,\r\n help=\"Use the screen buffer\")\r\n parser.add_argument(\"--use_depth_buffer\", type=bool_flag, default=False,\r\n help=\"Use the depth buffer\")\r\n parser.add_argument(\"--labels_mapping\", type=str, default='',\r\n help=\"Map labels to different feature maps\")\r\n parser.add_argument(\"--dump_freq\", type=int, default=0,\r\n help=\"Dump every X iterations (0 to disable)\")\r\n # for observe_state\r\n parser.add_argument(\"--hist_size\", type=int, default=4,\r\n help=\"History size\")\r\n\r\n params, unparsed = parser.parse_known_args(sys.argv)\r\n print(sys.argv)\r\n params.game_variables = [('health', 101), ('sel_ammo', 301)]\r\n print(params)\r\n\r\n action_builder = ActionBuilder(params)\r\n print(action_builder.n_actions)\r\n print(action_builder.available_actions)\r\n\r\n game = Game(\r\n scenario='full_deathmatch',\r\n action_builder=action_builder,\r\n score_variable='USER2',\r\n freedoom=True,\r\n screen_resolution='RES_800X450',\r\n use_screen_buffer=True,\r\n use_depth_buffer=True,\r\n labels_mapping=\"\",\r\n game_features=\"target,enemy\",\r\n mode=('SPECTATOR' if params.human_player else 'PLAYER'),\r\n render_hud=True,\r\n render_crosshair=True,\r\n render_weapon=True,\r\n freelook=params.freelook,\r\n visible=0,\r\n n_bots=10,\r\n use_scripted_marines=True\r\n )\r\n\r\n game.start(map_id = 
2)\r\n\r\n game.init_bots_health(100)\r\n\r\n episodes = 100000\r\n\r\n last_states = []\r\n\r\n for _ in range(episodes):\r\n if game.is_player_dead():\r\n game.respawn_player()\r\n game.observe_state(params, last_states)\r\n action = np.random.randint(0, 29)\r\n game.make_action(action, frame_skip=1, sleep=None)\r\n game.close()\r\n\t\r\nif __name__ == '__main__':\r\n main()\r\n"
] | [
[
"numpy.random.randint"
]
] |
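`bool_flag` above is a common argparse pattern for tolerant boolean flags. A self-contained check of how it behaves when wired into a parser (re-declared here so the snippet runs on its own):

```python
import argparse

# Re-declared from test.py above so this snippet is standalone.
FALSY_STRINGS = {'off', 'false', '0'}
TRUTHY_STRINGS = {'on', 'true', '1'}

def bool_flag(string):
    if string.lower() in FALSY_STRINGS:
        return False
    elif string.lower() in TRUTHY_STRINGS:
        return True
    raise argparse.ArgumentTypeError("invalid value for a boolean flag. use 0 or 1")

parser = argparse.ArgumentParser()
parser.add_argument("--freelook", type=bool_flag, default=False)
assert parser.parse_args(["--freelook", "On"]).freelook is True   # case-insensitive
assert parser.parse_args(["--freelook", "0"]).freelook is False
```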
Mohamed-Abdulaty/UDACITY-CarND-P2-Advanced-Lane-Lines | [
"e5d5fdff45c523a4f17635897b9de4b2e50d273d"
] | [
"src/Calibration.py"
] | [
"import os\nimport cv2\nimport numpy as np\n\n\nclass Calibration:\n def __init__(\n self,\n source_images_directory,\n destination_image_sub_directory,\n chessboard_shape,\n logger\n ):\n self.source_images_directory = source_images_directory\n self.destination_image_sub_directory= destination_image_sub_directory\n self.cornered_output_images = str(self.destination_image_sub_directory+'/Cornered')\n self.undistorted_output_images = str(self.destination_image_sub_directory+'/Undistorted')\n self.chessboard_x, self.chessboard_y= chessboard_shape\n self.logger = logger\n self.name_list_of_boards = os.listdir(self.source_images_directory)\n self.number_of_boards = len(self.name_list_of_boards)\n self.image_size = None\n self.object_points = []\n self.image_points = []\n self.camera_matrix, self.distortion_coefficient = \\\n self.__calculate_calibration_parameters()\n\n \n\n def get_calibration_parameters(self):\n return self.camera_matrix, self.distortion_coefficient\n\n def __calculate_calibration_parameters(self):\n object_points = np.zeros((self.chessboard_x*self.chessboard_y, 3), np.float32)\n object_points[:, :2] = np.mgrid[0:self.chessboard_x, 0:self.chessboard_y].T.reshape(-1, 2)\n \n for img_name in self.name_list_of_boards:\n # Read the image\n image_path = '{}/{}'.format(str(self.source_images_directory), str(img_name))\n image_obj = cv2.imread(image_path)\n # Gray it\n gray_image = cv2.cvtColor(image_obj, cv2.COLOR_BGR2GRAY)\n self.image_size = gray_image.shape[::-1]\n\n # Find its corners\n ret, corners = cv2.findChessboardCorners(gray_image, (self.chessboard_x, self.chessboard_y), None)\n\n if ret:\n self.object_points.append(object_points)\n self.image_points.append(corners)\n\n # save image with corners\n image = cv2.drawChessboardCorners(\\\n image_obj, \\\n (self.chessboard_y, self.chessboard_x), \\\n corners, \\\n ret)\n # Saved image with corners\n self.logger.save_image(str(self.cornered_output_images), img_name, image)\n else:\n self.logger.log_error('Can not find all needed corners in {}'.format(str(img_name)))\n \n # Calibrate the camera\n calibration_parameters = \\\n cv2.calibrateCamera(self.object_points, \\\n self.image_points, \\\n self.image_size, \\\n None, None)\n\n # save corrected images\n self.__save_undistorted_images(calibration_parameters[1], calibration_parameters[2])\n\n # return onlt camera_matrix, and dis_coef\n return calibration_parameters[1], calibration_parameters[2]\n \n\n def __save_undistorted_images(self, camera_matrix, distortion_coef):\n cornered_images_list = os.listdir(str('./results/'+self.cornered_output_images))\n \n for cornered_img in cornered_images_list:\n image_path = '{}/{}'.format(str('./results/'+self.cornered_output_images), str(cornered_img))\n image_obj = cv2.imread(image_path)\n\n self.logger.save_image( \\\n str(self.undistorted_output_images), \\\n cornered_img, \n cv2.undistort(image_obj, camera_matrix, distortion_coef, None, camera_matrix))"
] | [
[
"numpy.zeros"
]
] |
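The `np.mgrid` expression in `Calibration.__calculate_calibration_parameters` builds the 3D object-point grid for the chessboard, one `(x, y, 0)` row per inner corner. A standalone sketch with an illustrative 9x6 board showing the layout:

```python
import numpy as np

nx, ny = 9, 6  # illustrative inner-corner counts; the class takes these from chessboard_shape
objp = np.zeros((nx * ny, 3), np.float32)
objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
print(objp[:3])  # [[0. 0. 0.] [1. 0. 0.] [2. 0. 0.]] -- x varies fastest, z stays 0
```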
barslmn/dove | [
"df6344286633422219c0e93e15d4327f9d082041"
] | [
"dove/utils/bed.py"
] | [
"# -*- coding: utf-8 -*-\n__author__ = 'bars'\n\nfrom io import StringIO\nimport pandas as pd\nfrom collections import defaultdict\n\n\nclass Bed:\n \"\"\"description\"\"\"\n\n def __init__(self, bed_file, mode='file'):\n self.bed_file = bed_file\n self.mode = mode\n\n def get_header(self):\n lines_to_skip = 0\n header = defaultdict(list)\n if self.mode == 'str':\n for line in self.bed_file.split('\\n'):\n if line.startswith('track'):\n header['track'].append(line)\n lines_to_skip += 1\n elif line.startswith('browser'):\n header['browser'].append(line)\n lines_to_skip += 1\n else:\n break\n else:\n with open(self.bed_file) as f:\n lines = f.read().splitlines()\n for line in lines:\n if line.startswith('track'):\n header['track'].append(line)\n lines_to_skip += 1\n elif line.startswith('browser'):\n header['browser'].append(line)\n lines_to_skip += 1\n else:\n break\n return lines_to_skip, header\n\n def from_file(self):\n lines_to_skip, header = self.get_header()\n df_bed = pd.read_csv(\n self.bed_file,\n sep='\\t',\n usecols=[0, 1, 2],\n names=['CHR', 'START', 'END'],\n dtype={'START': int, 'END': int},\n skiprows=lines_to_skip\n )\n return df_bed\n\n def from_string(self):\n lines_to_skip, header = self.get_header()\n df_bed = pd.read_csv(\n StringIO(self.bed_file),\n sep='\\t',\n usecols=[0, 1, 2],\n names=['CHR', 'START', 'END'],\n dtype={'START': int, 'END': int},\n skiprows=lines_to_skip\n )\n return df_bed\n"
] | [
[
"pandas.read_csv"
]
] |
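`Bed.from_string` above boils down to skipping `track`/`browser` header lines and reading the first three tab-separated columns with pandas. A self-contained equivalent on a tiny inline BED string:

```python
from io import StringIO
import pandas as pd

bed_text = "track name=demo\nchr1\t100\t200\nchr2\t300\t400\n"
df = pd.read_csv(StringIO(bed_text), sep="\t", usecols=[0, 1, 2],
                 names=["CHR", "START", "END"], skiprows=1)  # skip the single track line
print(df)  # two rows: chr1 100 200, chr2 300 400
```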
junzhezhang/cmr | [
"f0b2ded813535493f124852ce64b26efa761a35c"
] | [
"nnutils/dibr_kaolin.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport numpy as np\nimport scipy.misc\nimport tqdm\nimport cv2\n\nimport torch\n\nfrom nnutils import geom_utils\n\n# from kaolin.graphics.dib_renderer.rasterizer import linear_rasterizer\n# from kaolin.graphics.dib_renderer.utils import datanormalize\n# from kaolin.graphics.dib_renderer.renderer.phongrender import PhongRender\nfrom kaolin.graphics.dib_renderer.renderer.texrender import TexRender\nfrom kaolin.graphics.dib_renderer.utils.perspective import lookatnp, perspectiveprojectionnp\n\nfrom kaolin.graphics.dib_renderer.utils.mesh import loadobj, face2pfmtx, loadobjtex, savemesh\n\n\ndef quaternion_to_matrix(quaternions):\n \"\"\"\n Convert rotations given as quaternions to rotation matrices.\n Args:\n quaternions: quaternions with real part first,\n as tensor of shape (..., 4).\n Returns:\n Rotation matrices as tensor of shape (..., 3, 3).\n \"\"\"\n r, i, j, k = torch.unbind(quaternions, -1)\n two_s = 2.0 / (quaternions * quaternions).sum(-1)\n\n o = torch.stack(\n (\n 1 - two_s * (j * j + k * k),\n two_s * (i * j - k * r),\n two_s * (i * k + j * r),\n two_s * (i * j + k * r),\n 1 - two_s * (i * i + k * k),\n two_s * (j * k - i * r),\n two_s * (i * k - j * r),\n two_s * (j * k + i * r),\n 1 - two_s * (i * i + j * j),\n ),\n -1,\n )\n return o.reshape(quaternions.shape[:-1] + (3, 3))\n\nclass NeuralRenderer(torch.nn.Module):\n \"\"\"\n replace NeuralRenderer from nmr.py with the kaolin's\n \"\"\"\n # 512 --> 256 TODO\n def __init__(self, img_size=256,uv_sampler=None):\n self.img_size = img_size\n super(NeuralRenderer, self).__init__()\n self.renderer = TexRender(height=img_size,width=img_size)\n # self.renderer = NeuralMeshRenderer(image_size=img_size, camera_mode='look_at',perspective=False,viewing_angle=30,light_intensity_ambient=0.8)\n self.offset_z = 5.\n self.proj_fn = geom_utils.orthographic_proj_withz\n if uv_sampler is not None:\n self.uv_sampler = uv_sampler.clone()\n else:\n print('no uv sampler')\n print('DIB-R...')\n \n def ambient_light_only(self):\n # Make light only ambient.\n # self.renderer.light_intensity_ambient = 1\n # self.renderer.light_intensity_directional = 0\n print(\"TODO: ambient_light_only\")\n pass\n \n def set_bgcolor(self, color):\n # self.renderer.background_color = color\n print(\"TODO: set_bgcolor\")\n pass\n\n def project_points(self, verts, cams):\n proj = self.proj_fn(verts, cams)\n return proj[:, :, :2]\n \n def forward(self, vertices, faces, cams, textures=None):\n ### TODO save mesh\n if textures is not None:\n v_np = vertices[0].detach().cpu().numpy()\n f_np = faces[0].detach().cpu().numpy()\n file_name = 'vis/bird.obj'\n try:\n savemesh(v_np, f_np, file_name)\n except:\n import pdb; pdb.set_trace()\n # ours = False\n ours = True\n if ours:\n translation = cams[:,:3]\n quant = cams[:,-4:]\n tfcamviewmtx_bx3x3 = quaternion_to_matrix(quant)\n tfcamshift_bx3 = - translation\n\n # camfovy = 45 / 180.0 * np.pi\n camfovy = 90 / 180.0 * np.pi\n camprojmtx = perspectiveprojectionnp(camfovy, 1.0 * 1.0 / 1.0)\n tfcamproj_3x1 = torch.from_numpy(camprojmtx).cuda()\n\n tfcameras = [tfcamviewmtx_bx3x3,\n tfcamshift_bx3,\n tfcamproj_3x1]\n else:\n tfcameras = self.get_sample_cams(bs=vertices.shape[0])\n # import pdb; pdb.set_trace()\n print('1:',tfcameras[0].shape)\n print('2:',tfcameras[1].shape)\n print('3:',tfcameras[2].shape)\n \n \n if textures is None:\n tex_flag = False\n # shape = [vertices.shape[0], 1280, 6,6,6,3]\n # textures = 
torch.ones(vertices.shape[0], 1280, 6,6,6,3).cuda()*256\n textures = torch.ones(vertices.shape[0],3,self.img_size,self.img_size).cuda()\n else:\n tex_flag = True\n \n # # TODO try with convmesh output\n imfile = '/mnt/lustre/zhangjunzhe/tm/convmesh/output/pretrained_cub_512x512_class/mesh_0.png'\n # textures_np = cv2.imread(imfile)[:, :, ::-1].astype(np.float32) / 255.0\n textures_np = cv2.imread(imfile)[:, :, ::-1].astype(np.float32) \n dim = (self.img_size, self.img_size)\n resized = cv2.resize(textures_np, dim, interpolation = cv2.INTER_AREA)\n textures = torch.from_numpy(resized).cuda().unsqueeze(0)\n textures = textures.permute([0, 3, 1, 2])\n # print('tex shape:', textures.shape)\n # # import pdb; pdb.set_trace()\n # textures = torch.ones(vertices.shape[0],3,self.img_size,self.img_size).cuda()\n\n # print(texture)\n # renderer.set_smooth(pfmtx) # TODO for phong renderer\n tfp_bxpx3 = vertices\n tff_fx3 = faces[0] # TODO to verify if fixed topology within a batch\n # tff_fx3 = tff_fx3.type(int64)\n tff_fx3 = tff_fx3.type(torch.long)\n points = [tfp_bxpx3, tff_fx3]\n uvs = self.uv_sampler\n # TODO texture to clone?\n # TODOL ft_fx3\n # ft_fx3??? TODO\n #only keep rgb, no alpha and depth\n print('uv shape:',uvs.shape)\n imgs = self.renderer(points=points,\n cameras=tfcameras,\n uv_bxpx2 = uvs,\n texture_bx3xthxtw=textures,\n ft_fx3=None)[0]\n if tex_flag:\n for i, img in enumerate(imgs):\n img = img.detach().cpu().numpy()\n\n cv2.imwrite('./vis/lam'+str(i)+'.jpg',img*255)\n print('saved img')\n print('!!!imgs:',imgs.shape)\n \n imgs = imgs.permute([0,3,1,2])\n print('new shape:',imgs.shape)\n # print(' cam:',cams) \n return imgs\n\n def get_sample_cams(self,bs):\n ##########################################################\n # campos = np.array([0, 0, 1.5], dtype=np.float32) # where camera it is\n # campos = np.array([0, 0, 4], dtype=np.float32)\n # campos = np.array([0, 4, 0], dtype=np.float32)\n campos = np.array([4, 0, 0], dtype=np.float32)\n \n camcenter = np.array([0, 0, 0], dtype=np.float32) # where camra is looking at\n \n # camup = np.array([-1, 1, 0], dtype=np.float32) # y axis of camera view\n # camup = np.array([-1, 0, 1], dtype=np.float32)\n # camup = np.array([0, -1, 1], dtype=np.float32)\n # camup = np.array([0, 1, -1], dtype=np.float32)\n # camup = np.array([1, -1, 0], dtype=np.float32)\n # camup = np.array([1, 0, -1], dtype=np.float32)\n # camup = np.array([1, 1, 0], dtype=np.float32)\n # camup = np.array([-1, 0, -1], dtype=np.float32)\n camup = np.array([1, 0, 1], dtype=np.float32)\n \n camviewmtx, camviewshift = lookatnp(campos.reshape(3, 1), camcenter.reshape(3, 1), camup.reshape(3, 1))\n camviewshift = -np.dot(camviewmtx.transpose(), camviewshift)\n\n camfovy = 45 / 180.0 * np.pi\n camprojmtx = perspectiveprojectionnp(camfovy, 1.0 * 1.0 / 1.0)\n\n #####################################################\n # tfp_px3 = torch.from_numpy(p)\n # tfp_px3.requires_grad = True\n\n # tff_fx3 = torch.from_numpy(f)\n\n # tfuv_tx2 = torch.from_numpy(uv)\n # tfuv_tx2.requires_grad = True\n # tfft_fx3 = torch.from_numpy(ft)\n\n # tftex_thxtwx3 = torch.from_numpy(np.ascontiguousarray(texturenp))\n # tftex_thxtwx3.requires_grad = True\n\n tfcamviewmtx = torch.from_numpy(camviewmtx)\n tfcamshift = torch.from_numpy(camviewshift)\n tfcamproj = torch.from_numpy(camprojmtx)\n\n ##########################################################\n # tfp_1xpx3 = torch.unsqueeze(tfp_px3, dim=0)\n # tfuv_1xtx2 = torch.unsqueeze(tfuv_tx2, dim=0)\n # tftex_1xthxtwx3 = torch.unsqueeze(tftex_thxtwx3, 
dim=0)\n\n tfcamviewmtx_1x3x3 = torch.unsqueeze(tfcamviewmtx, dim=0)\n tfcamshift_1x3 = tfcamshift.view(-1, 3)\n tfcamproj_3x1 = tfcamproj\n\n # bs = 4\n # tfp_bxpx3 = tfp_1xpx3.repeat([bs, 1, 1])\n # tfuv_bxtx2 = tfuv_1xtx2.repeat([bs, 1, 1])\n # tftex_bxthxtwx3 = tftex_1xthxtwx3.repeat([bs, 1, 1, 1])\n\n tfcamviewmtx_bx3x3 = tfcamviewmtx_1x3x3.repeat([bs, 1, 1])\n tfcamshift_bx3 = tfcamshift_1x3.repeat([bs, 1]) \n\n tfcameras = [tfcamviewmtx_bx3x3.cuda(),\n tfcamshift_bx3.cuda(),\n tfcamproj_3x1.cuda()]\n return tfcameras\n\n # def compute_uvsampler(self,verts_t, faces_t, tex_size=2):\n # \"\"\"\n # NOTE: copied from utils/mesh.py\n # tex_size texture resolution per face default = 6\n # TODO : merge with backbone\n\n # For this mesh, pre-computes the UV coordinates for\n # F x T x T points.\n # Returns F x T x T x 2\n # \"\"\"\n # verts = verts_t[0].clone().detach().cpu().numpy()\n # faces = faces_t[0].clone().detach().cpu().numpy()\n # # import pdb; pdb.set_trace()\n # alpha = np.arange(tex_size, dtype=np.float) / (tex_size-1)\n # beta = np.arange(tex_size, dtype=np.float) / (tex_size-1)\n # import itertools\n # # Barycentric coordinate values\n # coords = np.stack([p for p in itertools.product(*[alpha, beta])])\n # vs = verts[faces]\n # # Compute alpha, beta (this is the same order as NMR)\n # v2 = vs[:, 2]\n # v0v2 = vs[:, 0] - vs[:, 2]\n # v1v2 = vs[:, 1] - vs[:, 2] \n # # F x 3 x T*2\n # samples = np.dstack([v0v2, v1v2]).dot(coords.T) + v2.reshape(-1, 3, 1) \n # # F x T*2 x 3 points on the sphere \n # samples = np.transpose(samples, (0, 2, 1))\n\n # # Now convert these to uv.\n # uv = get_spherical_coords(samples.reshape(-1, 3))\n # # uv = uv.reshape(-1, len(coords), 2)\n\n # uv = uv.reshape(-1, tex_size, tex_size, 2)\n # return uv"
] | [
[
"torch.ones",
"torch.from_numpy",
"torch.unsqueeze",
"torch.unbind",
"torch.stack",
"numpy.array"
]
] |
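A quick sanity check for the `quaternion_to_matrix` conversion in the row above: the identity quaternion (r=1, i=j=k=0) must map to the 3x3 identity rotation. The function is re-declared here with the same formula so the snippet is self-contained:

```python
import torch

def quat_to_mat(qu):
    # Same formula as quaternion_to_matrix above (real part first).
    r, i, j, k = torch.unbind(qu, -1)
    two_s = 2.0 / (qu * qu).sum(-1)
    o = torch.stack((
        1 - two_s * (j * j + k * k), two_s * (i * j - k * r), two_s * (i * k + j * r),
        two_s * (i * j + k * r), 1 - two_s * (i * i + k * k), two_s * (j * k - i * r),
        two_s * (i * k - j * r), two_s * (j * k + i * r), 1 - two_s * (i * i + j * j),
    ), -1)
    return o.reshape(qu.shape[:-1] + (3, 3))

identity = quat_to_mat(torch.tensor([1.0, 0.0, 0.0, 0.0]))
assert torch.allclose(identity, torch.eye(3))
```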
horizon-blue/beanmachine-1 | [
"b13e4e3e28ffb860947eb8046863b0cabb581222",
"b13e4e3e28ffb860947eb8046863b0cabb581222",
"b13e4e3e28ffb860947eb8046863b0cabb581222",
"b13e4e3e28ffb860947eb8046863b0cabb581222"
] | [
"src/beanmachine/ppl/inference/proposer/nmc/single_site_half_space_nmc_proposer.py",
"src/beanmachine/ppl/compiler/tests/fix_vectorized_models_test.py",
"src/beanmachine/graph/tests/graph_test.py",
"src/beanmachine/ppl/world/variable.py"
] | [
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nfrom typing import Tuple\n\nimport torch\nimport torch.distributions as dist\nfrom beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (\n SingleSiteAncestralProposer,\n)\nfrom beanmachine.ppl.legacy.inference.proposer.newtonian_monte_carlo_utils import (\n is_valid,\n hessian_of_log_prob,\n)\nfrom beanmachine.ppl.model.rv_identifier import RVIdentifier\nfrom beanmachine.ppl.utils import tensorops\nfrom beanmachine.ppl.world import World\n\n\nLOGGER = logging.getLogger(\"beanmachine\")\n\n\nclass SingleSiteHalfSpaceNMCProposer(SingleSiteAncestralProposer):\n \"\"\"\n Single-Site Half Space Newtonian Monte Carlo Proposers.\n See sec. 3.2 of [1]\n\n [1] Arora, Nim, et al. `Newtonian Monte Carlo: single-site MCMC meets second-order gradient methods`\n \"\"\"\n\n def __init__(self, node: RVIdentifier):\n super().__init__(node)\n self._proposal_distribution = None\n\n def compute_alpha_beta(\n self, world: World\n ) -> Tuple[bool, torch.Tensor, torch.Tensor]:\n \"\"\"\n Computes alpha and beta of the Gamma proposal given the node.\n alpha = 1 - hessian_diag * x^2\n beta = -1 * x * hessian_diag - first_grad\n \"\"\"\n node_val = world[self.node]\n first_gradient, hessian_diag = hessian_of_log_prob(\n world, self.node, node_val, tensorops.halfspace_gradients\n )\n if not is_valid(first_gradient) or not is_valid(hessian_diag):\n LOGGER.warning(\n \"Gradient or Hessian is invalid at node {n}.\\n\".format(n=str(self.node))\n )\n return False, torch.tensor(0.0), torch.tensor(0.0)\n\n node_val_reshaped = node_val.reshape(-1)\n predicted_alpha = (\n 1 - hessian_diag * (node_val_reshaped * node_val_reshaped)\n ).t()\n predicted_beta = -1 * node_val_reshaped * hessian_diag - first_gradient\n condition = (predicted_alpha > 0) & (predicted_beta > 0)\n predicted_alpha = torch.where(\n condition, predicted_alpha, torch.tensor(1.0).to(dtype=predicted_beta.dtype)\n )\n node_var = world.get_variable(self.node)\n mean = (\n node_var.distribution.mean.reshape(-1)\n if is_valid(node_var.distribution.mean)\n else torch.ones_like(predicted_beta)\n )\n predicted_beta = torch.where(condition, predicted_beta, mean)\n predicted_alpha = predicted_alpha.reshape(node_val.shape)\n predicted_beta = predicted_beta.reshape(node_val.shape)\n return True, predicted_alpha, predicted_beta\n\n def get_proposal_distribution(self, world: World) -> dist.Distribution:\n \"\"\"\n Returns the proposal distribution of the node.\n\n Args:\n world: the world in which we're proposing a new value for node.\n Returns:\n The proposal distribution.\n \"\"\"\n # if the number of variables in the world is 1 and proposal distribution\n # has already been computed, we can use the old proposal distribution\n # and skip re-computing the gradient, since there are no other variable\n # in the world that may change the gradient and the old one is still\n # correct.\n if self._proposal_distribution is not None and len(world.latent_nodes) == 1:\n return self._proposal_distribution\n\n is_valid, alpha, beta = self.compute_alpha_beta(world)\n if not is_valid:\n LOGGER.warning(\n \"Node {n} has invalid proposal solution. 
\".format(n=self.node)\n + \"Proposer falls back to SingleSiteAncestralProposer.\\n\"\n )\n return super().get_proposal_distribution(world)\n\n self._proposal_distribution = dist.Gamma(alpha, beta)\n return self._proposal_distribution\n",
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport unittest\n\nimport beanmachine.ppl as bm\nfrom beanmachine.ppl.inference import BMGInference\nfrom torch import tensor\nfrom torch.distributions import Bernoulli, Beta, Normal, Uniform, HalfCauchy, StudentT\n\n\[email protected]_variable\ndef beta(n):\n return Beta(2.0, 2.0)\n\n\[email protected]_variable\ndef flip_beta():\n return Bernoulli(tensor([beta(0), beta(1)]))\n\n\[email protected]_variable\ndef beta_2_2():\n return Beta(2.0, tensor([3.0, 4.0]))\n\n\[email protected]_variable\ndef flip_beta_2_2():\n return Bernoulli(beta_2_2())\n\n\[email protected]_variable\ndef uniform_2_2():\n return Uniform(0.0, tensor([1.0, 1.0]))\n\n\[email protected]_variable\ndef flip_uniform_2_2():\n return Bernoulli(uniform_2_2())\n\n\[email protected]_variable\ndef flip_logits():\n return Bernoulli(logits=tensor([beta(0), beta(1)]))\n\n\[email protected]_variable\ndef flip_const():\n return Bernoulli(tensor([0.25, 0.75]))\n\n\[email protected]_variable\ndef flip_const_4():\n return Bernoulli(tensor([0.25, 0.75, 0.5, 0.5]))\n\n\[email protected]_variable\ndef flip_const_2_3():\n return Bernoulli(tensor([[0.25, 0.75, 0.5], [0.125, 0.875, 0.625]]))\n\n\[email protected]_variable\ndef normal_2_3():\n mus = flip_const_2_3() # 2 x 3 tensor of 0 or 1\n sigmas = tensor([2.0, 3.0, 4.0])\n\n return Normal(mus, sigmas)\n\n\[email protected]_variable\ndef hc_3():\n return HalfCauchy(tensor([1.0, 2.0, 3.0]))\n\n\[email protected]_variable\ndef studentt_2_3():\n return StudentT(hc_3(), normal_2_3(), hc_3())\n\n\[email protected]\ndef operators():\n # Note that we do NOT devectorize the multiplication; it gets\n # turned into a MatrixScale.\n return ((beta_2_2() + tensor([[5.0, 6.0], [7.0, 8.0]])) * 10.0).exp()\n\n\nclass FixVectorizedModelsTest(unittest.TestCase):\n def test_fix_vectorized_models_1(self) -> None:\n self.maxDiff = None\n observations = {flip_beta(): tensor([0.0, 1.0])}\n queries = [flip_beta(), flip_const()]\n\n observed = BMGInference().to_dot(queries, observations, after_transform=False)\n\n # The model before the rewrite:\n\n expected = \"\"\"\ndigraph \"graph\" {\n N00[label=2.0];\n N01[label=Beta];\n N02[label=Sample];\n N03[label=Sample];\n N04[label=Tensor];\n N05[label=Bernoulli];\n N06[label=Sample];\n N07[label=\"Observation tensor([0., 1.])\"];\n N08[label=Query];\n N09[label=\"[0.25,0.75]\"];\n N10[label=Bernoulli];\n N11[label=Sample];\n N12[label=Query];\n N00 -> N01;\n N00 -> N01;\n N01 -> N02;\n N01 -> N03;\n N02 -> N04;\n N03 -> N04;\n N04 -> N05;\n N05 -> N06;\n N06 -> N07;\n N06 -> N08;\n N09 -> N10;\n N10 -> N11;\n N11 -> N12;\n}\n\"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n # After:\n\n observed = BMGInference().to_dot(queries, observations, after_transform=True)\n expected = \"\"\"\ndigraph \"graph\" {\n N00[label=2.0];\n N01[label=Beta];\n N02[label=Sample];\n N03[label=Sample];\n N04[label=2];\n N05[label=1];\n N06[label=Bernoulli];\n N07[label=Sample];\n N08[label=Bernoulli];\n N09[label=Sample];\n N10[label=ToMatrix];\n N11[label=Query];\n N12[label=0.25];\n N13[label=Bernoulli];\n N14[label=Sample];\n N15[label=0.75];\n N16[label=Bernoulli];\n N17[label=Sample];\n N18[label=ToMatrix];\n N19[label=Query];\n N20[label=\"Observation False\"];\n N21[label=\"Observation True\"];\n N00 -> N01;\n N00 -> N01;\n N01 -> N02;\n N01 -> N03;\n N02 -> N06;\n N03 -> N08;\n N04 -> 
N10;\n N04 -> N18;\n N05 -> N10;\n N05 -> N18;\n N06 -> N07;\n N07 -> N10;\n N07 -> N20;\n N08 -> N09;\n N09 -> N10;\n N09 -> N21;\n N10 -> N11;\n N12 -> N13;\n N13 -> N14;\n N14 -> N18;\n N15 -> N16;\n N16 -> N17;\n N17 -> N18;\n N18 -> N19;\n}\n\"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n def test_fix_vectorized_models_2(self) -> None:\n self.maxDiff = None\n observations = {flip_const_4(): tensor([0.0, 1.0, 0.0, 1.0])}\n queries = [flip_const_4()]\n\n observed = BMGInference().to_dot(queries, observations, after_transform=False)\n\n # The model before the rewrite:\n\n expected = \"\"\"\ndigraph \"graph\" {\n N0[label=\"[0.25,0.75,0.5,0.5]\"];\n N1[label=Bernoulli];\n N2[label=Sample];\n N3[label=\"Observation tensor([0., 1., 0., 1.])\"];\n N4[label=Query];\n N0 -> N1;\n N1 -> N2;\n N2 -> N3;\n N2 -> N4;\n}\n\"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n # After:\n\n # Note that due to the order in which we do the rewriting we\n # end up with a not-deduplicated Bernoulli(0.5) node here, which\n # is slightly unfortunate but probably not worth fixing right now.\n\n observed = BMGInference().to_dot(queries, observations, after_transform=True)\n expected = \"\"\"\ndigraph \"graph\" {\n N00[label=4];\n N01[label=1];\n N02[label=0.25];\n N03[label=Bernoulli];\n N04[label=Sample];\n N05[label=0.75];\n N06[label=Bernoulli];\n N07[label=Sample];\n N08[label=0.5];\n N09[label=Bernoulli];\n N10[label=Sample];\n N11[label=Bernoulli];\n N12[label=Sample];\n N13[label=ToMatrix];\n N14[label=Query];\n N15[label=\"Observation False\"];\n N16[label=\"Observation True\"];\n N17[label=\"Observation False\"];\n N18[label=\"Observation True\"];\n N00 -> N13;\n N01 -> N13;\n N02 -> N03;\n N03 -> N04;\n N04 -> N13;\n N04 -> N15;\n N05 -> N06;\n N06 -> N07;\n N07 -> N13;\n N07 -> N16;\n N08 -> N09;\n N08 -> N11;\n N09 -> N10;\n N10 -> N13;\n N10 -> N17;\n N11 -> N12;\n N12 -> N13;\n N12 -> N18;\n N13 -> N14;\n}\n\"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n def test_fix_vectorized_models_3(self) -> None:\n self.maxDiff = None\n observations = {flip_const_2_3(): tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])}\n queries = [flip_const_2_3()]\n\n observed = BMGInference().to_dot(queries, observations, after_transform=False)\n\n # The model before the rewrite:\n\n expected = \"\"\"\ndigraph \"graph\" {\n N0[label=\"[[0.25,0.75,0.5],\\\\\\\\n[0.125,0.875,0.625]]\"];\n N1[label=Bernoulli];\n N2[label=Sample];\n N3[label=\"Observation tensor([[0., 0., 0.],\\\\n [1., 1., 1.]])\"];\n N4[label=Query];\n N0 -> N1;\n N1 -> N2;\n N2 -> N3;\n N2 -> N4;\n}\n \"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n # After:\n\n observed = BMGInference().to_dot(queries, observations, after_transform=True)\n expected = \"\"\"\ndigraph \"graph\" {\n N00[label=3];\n N01[label=2];\n N02[label=0.25];\n N03[label=Bernoulli];\n N04[label=Sample];\n N05[label=0.75];\n N06[label=Bernoulli];\n N07[label=Sample];\n N08[label=0.5];\n N09[label=Bernoulli];\n N10[label=Sample];\n N11[label=0.125];\n N12[label=Bernoulli];\n N13[label=Sample];\n N14[label=0.875];\n N15[label=Bernoulli];\n N16[label=Sample];\n N17[label=0.625];\n N18[label=Bernoulli];\n N19[label=Sample];\n N20[label=ToMatrix];\n N21[label=Query];\n N22[label=\"Observation False\"];\n N23[label=\"Observation False\"];\n N24[label=\"Observation False\"];\n N25[label=\"Observation True\"];\n N26[label=\"Observation True\"];\n N27[label=\"Observation True\"];\n N00 -> N20;\n N01 -> N20;\n N02 -> N03;\n N03 -> N04;\n 
N04 -> N20;\n N04 -> N22;\n N05 -> N06;\n N06 -> N07;\n N07 -> N20;\n N07 -> N23;\n N08 -> N09;\n N09 -> N10;\n N10 -> N20;\n N10 -> N24;\n N11 -> N12;\n N12 -> N13;\n N13 -> N20;\n N13 -> N25;\n N14 -> N15;\n N15 -> N16;\n N16 -> N20;\n N16 -> N26;\n N17 -> N18;\n N18 -> N19;\n N19 -> N20;\n N19 -> N27;\n N20 -> N21;\n}\n \"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n def test_fix_vectorized_models_4(self) -> None:\n\n # Demonstrate we can also do devectorizations on logits-style Bernoullis.\n # (A logits Bernoulli with a beta prior is a likely mistake in a real model,\n # but it is a convenient test case.)\n\n self.maxDiff = None\n observations = {}\n queries = [flip_logits()]\n\n observed = BMGInference().to_dot(queries, observations, after_transform=False)\n\n # The model before the rewrite:\n\n expected = \"\"\"\ndigraph \"graph\" {\n N0[label=2.0];\n N1[label=Beta];\n N2[label=Sample];\n N3[label=Sample];\n N4[label=Tensor];\n N5[label=\"Bernoulli(logits)\"];\n N6[label=Sample];\n N7[label=Query];\n N0 -> N1;\n N0 -> N1;\n N1 -> N2;\n N1 -> N3;\n N2 -> N4;\n N3 -> N4;\n N4 -> N5;\n N5 -> N6;\n N6 -> N7;\n}\n \"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n # After:\n\n observed = BMGInference().to_dot(queries, observations, after_transform=True)\n expected = \"\"\"\ndigraph \"graph\" {\n N00[label=2.0];\n N01[label=Beta];\n N02[label=Sample];\n N03[label=Sample];\n N04[label=2];\n N05[label=1];\n N06[label=ToReal];\n N07[label=\"Bernoulli(logits)\"];\n N08[label=Sample];\n N09[label=ToReal];\n N10[label=\"Bernoulli(logits)\"];\n N11[label=Sample];\n N12[label=ToMatrix];\n N13[label=Query];\n N00 -> N01;\n N00 -> N01;\n N01 -> N02;\n N01 -> N03;\n N02 -> N06;\n N03 -> N09;\n N04 -> N12;\n N05 -> N12;\n N06 -> N07;\n N07 -> N08;\n N08 -> N12;\n N09 -> N10;\n N10 -> N11;\n N11 -> N12;\n N12 -> N13;\n}\n \"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n def test_fix_vectorized_models_5(self) -> None:\n self.maxDiff = None\n observations = {}\n queries = [studentt_2_3()]\n\n observed = BMGInference().to_dot(queries, observations, after_transform=False)\n\n # The model before the rewrite. 
Note that we have a size[3] stochastic input and\n # a size[2, 3] stochastic input to the StudentT, and we broadcast the three\n # HalfCauchy samples correctly\n\n expected = \"\"\"\ndigraph \"graph\" {\n N00[label=\"[1.0,2.0,3.0]\"];\n N01[label=HalfCauchy];\n N02[label=Sample];\n N03[label=\"[[0.25,0.75,0.5],\\\\\\\\n[0.125,0.875,0.625]]\"];\n N04[label=Bernoulli];\n N05[label=Sample];\n N06[label=\"[2.0,3.0,4.0]\"];\n N07[label=Normal];\n N08[label=Sample];\n N09[label=StudentT];\n N10[label=Sample];\n N11[label=Query];\n N00 -> N01;\n N01 -> N02;\n N02 -> N09;\n N02 -> N09;\n N03 -> N04;\n N04 -> N05;\n N05 -> N07;\n N06 -> N07;\n N07 -> N08;\n N08 -> N09;\n N09 -> N10;\n N10 -> N11;\n}\n\"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n # After:\n\n observed = BMGInference().to_dot(queries, observations, after_transform=True)\n expected = \"\"\"\ndigraph \"graph\" {\n N00[label=3];\n N01[label=2];\n N02[label=1.0];\n N03[label=HalfCauchy];\n N04[label=Sample];\n N05[label=0.25];\n N06[label=Bernoulli];\n N07[label=Sample];\n N08[label=ToReal];\n N09[label=2.0];\n N10[label=Normal];\n N11[label=Sample];\n N12[label=StudentT];\n N13[label=Sample];\n N14[label=HalfCauchy];\n N15[label=Sample];\n N16[label=0.75];\n N17[label=Bernoulli];\n N18[label=Sample];\n N19[label=ToReal];\n N20[label=3.0];\n N21[label=Normal];\n N22[label=Sample];\n N23[label=StudentT];\n N24[label=Sample];\n N25[label=HalfCauchy];\n N26[label=Sample];\n N27[label=0.5];\n N28[label=Bernoulli];\n N29[label=Sample];\n N30[label=ToReal];\n N31[label=4.0];\n N32[label=Normal];\n N33[label=Sample];\n N34[label=StudentT];\n N35[label=Sample];\n N36[label=0.125];\n N37[label=Bernoulli];\n N38[label=Sample];\n N39[label=ToReal];\n N40[label=Normal];\n N41[label=Sample];\n N42[label=StudentT];\n N43[label=Sample];\n N44[label=0.875];\n N45[label=Bernoulli];\n N46[label=Sample];\n N47[label=ToReal];\n N48[label=Normal];\n N49[label=Sample];\n N50[label=StudentT];\n N51[label=Sample];\n N52[label=0.625];\n N53[label=Bernoulli];\n N54[label=Sample];\n N55[label=ToReal];\n N56[label=Normal];\n N57[label=Sample];\n N58[label=StudentT];\n N59[label=Sample];\n N60[label=ToMatrix];\n N61[label=Query];\n N00 -> N60;\n N01 -> N60;\n N02 -> N03;\n N03 -> N04;\n N04 -> N12;\n N04 -> N12;\n N04 -> N42;\n N04 -> N42;\n N05 -> N06;\n N06 -> N07;\n N07 -> N08;\n N08 -> N10;\n N09 -> N10;\n N09 -> N14;\n N09 -> N40;\n N10 -> N11;\n N11 -> N12;\n N12 -> N13;\n N13 -> N60;\n N14 -> N15;\n N15 -> N23;\n N15 -> N23;\n N15 -> N50;\n N15 -> N50;\n N16 -> N17;\n N17 -> N18;\n N18 -> N19;\n N19 -> N21;\n N20 -> N21;\n N20 -> N25;\n N20 -> N48;\n N21 -> N22;\n N22 -> N23;\n N23 -> N24;\n N24 -> N60;\n N25 -> N26;\n N26 -> N34;\n N26 -> N34;\n N26 -> N58;\n N26 -> N58;\n N27 -> N28;\n N28 -> N29;\n N29 -> N30;\n N30 -> N32;\n N31 -> N32;\n N31 -> N56;\n N32 -> N33;\n N33 -> N34;\n N34 -> N35;\n N35 -> N60;\n N36 -> N37;\n N37 -> N38;\n N38 -> N39;\n N39 -> N40;\n N40 -> N41;\n N41 -> N42;\n N42 -> N43;\n N43 -> N60;\n N44 -> N45;\n N45 -> N46;\n N46 -> N47;\n N47 -> N48;\n N48 -> N49;\n N49 -> N50;\n N50 -> N51;\n N51 -> N60;\n N52 -> N53;\n N53 -> N54;\n N54 -> N55;\n N55 -> N56;\n N56 -> N57;\n N57 -> N58;\n N58 -> N59;\n N59 -> N60;\n N60 -> N61;\n}\n\"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n def test_fix_vectorized_models_6(self) -> None:\n self.maxDiff = None\n observations = {}\n queries = [flip_beta_2_2(), flip_uniform_2_2()]\n\n observed = BMGInference().to_dot(queries, observations, 
after_transform=False)\n\n # The model before the rewrite: notice that here torch automatically\n # broadcast the 2.0 to [2.0, 2.0] for us when the node was accumulated,\n # and similarly for 0.0.\n\n expected = \"\"\"\ndigraph \"graph\" {\n N00[label=\"[2.0,2.0]\"];\n N01[label=\"[3.0,4.0]\"];\n N02[label=Beta];\n N03[label=Sample];\n N04[label=Bernoulli];\n N05[label=Sample];\n N06[label=Query];\n N07[label=\"[0.0,0.0]\"];\n N08[label=\"[1.0,1.0]\"];\n N09[label=Uniform];\n N10[label=Sample];\n N11[label=Bernoulli];\n N12[label=Sample];\n N13[label=Query];\n N00 -> N02;\n N01 -> N02;\n N02 -> N03;\n N03 -> N04;\n N04 -> N05;\n N05 -> N06;\n N07 -> N09;\n N08 -> N09;\n N09 -> N10;\n N10 -> N11;\n N11 -> N12;\n N12 -> N13;\n}\n\"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n # After: notice that we correctly generate two samples from a Flat distribution\n # here.\n\n observed = BMGInference().to_dot(queries, observations, after_transform=True)\n expected = \"\"\"\ndigraph \"graph\" {\n N00[label=2];\n N01[label=1];\n N02[label=2.0];\n N03[label=3.0];\n N04[label=Beta];\n N05[label=Sample];\n N06[label=Bernoulli];\n N07[label=Sample];\n N08[label=4.0];\n N09[label=Beta];\n N10[label=Sample];\n N11[label=Bernoulli];\n N12[label=Sample];\n N13[label=ToMatrix];\n N14[label=Query];\n N15[label=Flat];\n N16[label=Sample];\n N17[label=Bernoulli];\n N18[label=Sample];\n N19[label=Sample];\n N20[label=Bernoulli];\n N21[label=Sample];\n N22[label=ToMatrix];\n N23[label=Query];\n N00 -> N13;\n N00 -> N22;\n N01 -> N13;\n N01 -> N22;\n N02 -> N04;\n N02 -> N09;\n N03 -> N04;\n N04 -> N05;\n N05 -> N06;\n N06 -> N07;\n N07 -> N13;\n N08 -> N09;\n N09 -> N10;\n N10 -> N11;\n N11 -> N12;\n N12 -> N13;\n N13 -> N14;\n N15 -> N16;\n N15 -> N19;\n N16 -> N17;\n N17 -> N18;\n N18 -> N22;\n N19 -> N20;\n N20 -> N21;\n N21 -> N22;\n N22 -> N23;\n}\n\n\"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n def test_fix_vectorized_models_7(self) -> None:\n self.maxDiff = None\n observations = {}\n queries = [operators()]\n\n observed = BMGInference().to_dot(queries, observations, after_transform=False)\n\n # The model before the rewrite:\n\n expected = \"\"\"\ndigraph \"graph\" {\n N0[label=\"[2.0,2.0]\"];\n N1[label=\"[3.0,4.0]\"];\n N2[label=Beta];\n N3[label=Sample];\n N4[label=\"[[5.0,6.0],\\\\\\\\n[7.0,8.0]]\"];\n N5[label=\"+\"];\n N6[label=10.0];\n N7[label=\"*\"];\n N8[label=Exp];\n N9[label=Query];\n N0 -> N2;\n N1 -> N2;\n N2 -> N3;\n N3 -> N5;\n N4 -> N5;\n N5 -> N7;\n N6 -> N7;\n N7 -> N8;\n N8 -> N9;\n}\n\"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n # After:\n\n observed = BMGInference().to_dot(queries, observations, after_transform=True)\n expected = \"\"\"\ndigraph \"graph\" {\n N00[label=2];\n N01[label=10.0];\n N02[label=2.0];\n N03[label=3.0];\n N04[label=Beta];\n N05[label=Sample];\n N06[label=ToPosReal];\n N07[label=5.0];\n N08[label=\"+\"];\n N09[label=4.0];\n N10[label=Beta];\n N11[label=Sample];\n N12[label=ToPosReal];\n N13[label=6.0];\n N14[label=\"+\"];\n N15[label=7.0];\n N16[label=\"+\"];\n N17[label=8.0];\n N18[label=\"+\"];\n N19[label=ToMatrix];\n N20[label=MatrixScale];\n N21[label=0];\n N22[label=ColumnIndex];\n N23[label=index];\n N24[label=Exp];\n N25[label=1];\n N26[label=index];\n N27[label=Exp];\n N28[label=ColumnIndex];\n N29[label=index];\n N30[label=Exp];\n N31[label=index];\n N32[label=Exp];\n N33[label=ToMatrix];\n N34[label=Query];\n N00 -> N19;\n N00 -> N19;\n N00 -> N33;\n N00 -> N33;\n N01 -> N20;\n N02 -> N04;\n N02 -> 
N10;\n N03 -> N04;\n N04 -> N05;\n N05 -> N06;\n N06 -> N08;\n N06 -> N16;\n N07 -> N08;\n N08 -> N19;\n N09 -> N10;\n N10 -> N11;\n N11 -> N12;\n N12 -> N14;\n N12 -> N18;\n N13 -> N14;\n N14 -> N19;\n N15 -> N16;\n N16 -> N19;\n N17 -> N18;\n N18 -> N19;\n N19 -> N20;\n N20 -> N22;\n N20 -> N28;\n N21 -> N22;\n N21 -> N23;\n N21 -> N29;\n N22 -> N23;\n N22 -> N26;\n N23 -> N24;\n N24 -> N33;\n N25 -> N26;\n N25 -> N28;\n N25 -> N31;\n N26 -> N27;\n N27 -> N33;\n N28 -> N29;\n N28 -> N31;\n N29 -> N30;\n N30 -> N33;\n N31 -> N32;\n N32 -> N33;\n N33 -> N34;\n}\n\"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n",
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport unittest\n\nimport numpy as np\nfrom beanmachine import graph\n\n\nclass TestBayesNet(unittest.TestCase):\n def test_simple_dep(self):\n g = graph.Graph()\n c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2]))\n d1 = g.add_distribution(\n graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1]\n )\n g.add_operator(graph.OperatorType.SAMPLE, [d1])\n\n def test_tabular(self):\n g = graph.Graph()\n c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2]))\n\n # negative test\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, []\n )\n self.assertTrue(\"must be COL_SIMPLEX\" in str(cm.exception))\n\n g = graph.Graph()\n c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2]))\n var1 = g.add_operator(\n graph.OperatorType.SAMPLE,\n [\n g.add_distribution(\n graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1]\n )\n ],\n )\n var2 = g.add_operator(\n graph.OperatorType.SAMPLE,\n [\n g.add_distribution(\n graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1]\n )\n ],\n )\n\n # since the following has two parents it must have a tabular dist with\n # 3 dimensions in the tensor\n with self.assertRaises(ValueError) as cm:\n g.add_operator(\n graph.OperatorType.SAMPLE,\n [\n g.add_distribution(\n graph.DistributionType.TABULAR,\n graph.AtomicType.BOOLEAN,\n [c1, var1, var2],\n )\n ],\n )\n self.assertTrue(\"expected 4 dims got 1\" in str(cm.exception))\n\n c2 = g.add_constant_col_simplex_matrix(np.array([[0.6, 0.99], [0.4, 0.01]]))\n g.add_distribution(\n graph.DistributionType.TABULAR,\n graph.AtomicType.BOOLEAN,\n [c2, g.add_constant_bool(True)],\n )\n\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.TABULAR,\n graph.AtomicType.BOOLEAN,\n [c2, g.add_constant_natural(1)],\n )\n self.assertTrue(\"only supports boolean parents\" in str(cm.exception))\n\n c3 = g.add_constant_real_matrix(np.array([1.1, -0.1]))\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c3]\n )\n self.assertTrue(\"must be COL_SIMPLEX\" in str(cm.exception))\n\n c4 = g.add_constant_col_simplex_matrix(np.array([0.6, 0.3, 0.1]))\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c4]\n )\n self.assertTrue(\"must have two rows\" in str(cm.exception))\n\n def test_bernoulli(self):\n g = graph.Graph()\n c1 = g.add_constant_probability(1.0)\n c2 = g.add_constant_probability(0.8)\n\n # negative tests on number of parents\n # 0 parents not allowed\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, []\n )\n self.assertTrue(\n \"Bernoulli distribution must have exactly one parent\" in str(cm.exception)\n )\n # 2 parents not allowed\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c1, c2]\n )\n self.assertTrue(\n \"Bernoulli distribution must have exactly one parent\" in str(cm.exception)\n )\n\n # 1 parent is OK\n d1 = g.add_distribution(\n graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c1]\n )\n\n # negative test on type of parent\n c3 = g.add_constant_natural(1)\n with 
self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c3]\n )\n self.assertTrue(\"must be a probability\" in str(cm.exception))\n\n # negative test on value of parent\n with self.assertRaises(ValueError) as cm:\n g.add_constant_probability(1.1)\n self.assertTrue(\"must be between 0 and 1\" in str(cm.exception))\n\n v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])\n g.query(v1)\n samples = g.infer(1)\n self.assertEqual(type(samples[0][0]), bool)\n self.assertTrue(samples[0][0])\n means = g.infer_mean(1)\n self.assertEqual(len(means), 1, \"exactly one node queried\")\n\n def test_beta(self):\n g = graph.Graph()\n c1 = g.add_constant_pos_real(1.1)\n c2 = g.add_constant_pos_real(5.0)\n # negative tests on number of parents\n # 0 parents not allowed\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, []\n )\n self.assertTrue(\n \"Beta distribution must have exactly two parents\" in str(cm.exception)\n )\n # 1 parent not allowed\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [c1]\n )\n self.assertTrue(\n \"Beta distribution must have exactly two parents\" in str(cm.exception)\n )\n # negative test on type of parent\n c3 = g.add_constant_bool(True)\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [c3, c3]\n )\n self.assertTrue(\"must be positive real-valued\" in str(cm.exception))\n # negative test on sample type\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.BETA, graph.AtomicType.REAL, [c1, c2]\n )\n self.assertTrue(\"Beta produces probability samples\" in str(cm.exception))\n # 2 real-valued parents with probability sample type are OK\n d1 = g.add_distribution(\n graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [c1, c2]\n )\n # now let's draw some samples from the Beta distribution\n v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])\n g.query(v1)\n samples = g.infer(1, graph.InferenceType.REJECTION)\n self.assertEqual(type(samples[0][0]), float)\n self.assertTrue(samples[0][0] > 0 and samples[0][0] < 1)\n means = g.infer_mean(10000, graph.InferenceType.REJECTION)\n self.assertAlmostEqual(means[0], 1.1 / (1.1 + 5.0), 2, \"beta mean\")\n\n def test_binomial(self):\n g = graph.Graph()\n c1 = g.add_constant_natural(10)\n c2 = g.add_constant_probability(0.55)\n d1 = g.add_distribution(\n graph.DistributionType.BINOMIAL, graph.AtomicType.NATURAL, [c1, c2]\n )\n v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])\n g.query(v1)\n samples = g.infer(1, graph.InferenceType.REJECTION)\n self.assertEqual(type(samples[0][0]), int)\n self.assertTrue(samples[0][0] <= 10)\n means = g.infer_mean(10000, graph.InferenceType.REJECTION)\n self.assertTrue(means[0] > 5 and means[0] < 6)\n\n def test_categorical(self):\n g = graph.Graph()\n simplex = [0.5, 0.25, 0.125, 0.125]\n c1 = g.add_constant_col_simplex_matrix(np.array(simplex))\n # Negative test: Number of parents must be exactly one:\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, []\n )\n self.assertTrue(\n \"Categorical distribution must have exactly one parent\" in str(cm.exception)\n )\n\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [c1, 
c1]\n )\n self.assertEqual(\n \"Categorical distribution must have exactly one parent\", str(cm.exception)\n )\n\n # Negative test: parent must be simplex:\n c3 = g.add_constant_natural(1)\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [c3]\n )\n self.assertEqual(\n \"Categorical parent must be a one-column simplex\", str(cm.exception)\n )\n\n # Negative test: type must be natural\n with self.assertRaises(ValueError) as cm:\n g.add_distribution(\n graph.DistributionType.CATEGORICAL, graph.AtomicType.REAL, [c1]\n )\n self.assertEqual(\n \"Categorical produces natural valued samples\", str(cm.exception)\n )\n\n # Positive test:\n d1 = g.add_distribution(\n graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [c1]\n )\n\n v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])\n g.query(v1)\n num_samples = 10000\n # TODO: We use rejection sampling in this test because at present NMC\n # does not support inference over naturals. If inference over discrete\n # variables is important for BMG, we should create a Uniform Proposer\n # similar to how it's done in Bean Machine proper.\n samples = g.infer(\n num_samples=num_samples,\n algorithm=graph.InferenceType.REJECTION,\n seed=123,\n n_chains=1,\n )[0]\n\n # The distribution of the samples should closely match the simplex used to\n # generate them.\n\n histogram = [0, 0, 0, 0]\n for sample in samples:\n histogram[sample[0]] += 1\n\n self.assertAlmostEqual(simplex[0], histogram[0] / num_samples, delta=0.01)\n self.assertAlmostEqual(simplex[1], histogram[1] / num_samples, delta=0.01)\n self.assertAlmostEqual(simplex[2], histogram[2] / num_samples, delta=0.01)\n self.assertAlmostEqual(simplex[3], histogram[3] / num_samples, delta=0.01)\n\n def _create_graph(self):\n g = graph.Graph()\n c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2]))\n c2 = g.add_constant_col_simplex_matrix(np.array([[0.6, 0.99], [0.4, 0.01]]))\n c3 = g.add_constant_col_simplex_matrix(\n np.transpose(np.array([[1, 0], [0.2, 0.8], [0.1, 0.9], [0.01, 0.99]]))\n )\n Rain = g.add_operator(\n graph.OperatorType.SAMPLE,\n [\n g.add_distribution(\n graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1]\n )\n ],\n )\n Sprinkler = g.add_operator(\n graph.OperatorType.SAMPLE,\n [\n g.add_distribution(\n graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c2, Rain]\n )\n ],\n )\n GrassWet = g.add_operator(\n graph.OperatorType.SAMPLE,\n [\n g.add_distribution(\n graph.DistributionType.TABULAR,\n graph.AtomicType.BOOLEAN,\n [c3, Sprinkler, Rain],\n )\n ],\n )\n return g, Rain, Sprinkler, GrassWet\n\n def test_query(self):\n g, Rain, Sprinkler, GrassWet = self._create_graph()\n g.query(Rain)\n g.query(Sprinkler)\n g.query(GrassWet)\n g.infer(1)\n\n p = g.add_constant_probability(0.8)\n b = g.add_distribution(\n graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [p]\n )\n # Querying a constant is weird but allowed\n g.query(p)\n # But querying a distribution directly rather than a sample is\n # illegal:\n with self.assertRaises(ValueError) as cm:\n g.query(b)\n self.assertEqual(\n f\"Query of node_id {b} expected a node of type 1 or 3 but is 2\",\n str(cm.exception),\n )\n\n def test_to_dot(self):\n self.maxDiff = None\n g, Rain, Sprinkler, GrassWet = self._create_graph()\n g.query(Rain)\n g.query(Sprinkler)\n g.query(GrassWet)\n g.observe(GrassWet, True)\n observed = g.to_dot()\n expected = \"\"\"\ndigraph \"graph\" {\n N0[label=\"simplex\"];\n N1[label=\"simplex\"];\n 
N2[label=\"simplex\"];\n N3[label=\"Tabular\"];\n N4[label=\"~\"];\n N5[label=\"Tabular\"];\n N6[label=\"~\"];\n N7[label=\"Tabular\"];\n N8[label=\"~\"];\n N0 -> N3;\n N1 -> N5;\n N2 -> N7;\n N3 -> N4;\n N4 -> N5;\n N4 -> N7;\n N5 -> N6;\n N6 -> N7;\n N7 -> N8;\n O0[label=\"Observation\"];\n N8 -> O0;\n Q0[label=\"Query\"];\n N4 -> Q0;\n Q1[label=\"Query\"];\n N6 -> Q1;\n Q2[label=\"Query\"];\n N8 -> Q2;\n}\"\"\"\n self.assertEqual(expected.strip(), observed.strip())\n\n def test_observe(self):\n g, Rain, Sprinkler, GrassWet = self._create_graph()\n g.observe(GrassWet, True)\n with self.assertRaises(ValueError) as cm:\n g.observe(GrassWet, True)\n self.assertTrue(\"duplicate observe for node\" in str(cm.exception))\n\n g = graph.Graph()\n c1 = g.add_constant_probability(1.0)\n c2 = g.add_constant_probability(0.5)\n o1 = g.add_operator(graph.OperatorType.MULTIPLY, [c1, c2])\n d1 = g.add_distribution(\n graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [o1]\n )\n o2 = g.add_operator(graph.OperatorType.SAMPLE, [d1])\n with self.assertRaises(ValueError) as cm:\n g.observe(o1, True)\n self.assertTrue(\n \"only SAMPLE and IID_SAMPLE nodes may be observed\" in str(cm.exception)\n )\n g.observe(o2, True) # ok to observe this node\n with self.assertRaises(ValueError) as cm:\n g.observe(o2, False)\n self.assertTrue(\"duplicate observe\" in str(cm.exception))\n g.remove_observations()\n g.observe(o2, False)\n\n def test_inference(self):\n g, Rain, Sprinkler, GrassWet = self._create_graph()\n g.observe(GrassWet, True)\n qr = g.query(Rain)\n g.query(GrassWet)\n # Querying the same node twice is idempotent.\n self.assertEqual(g.query(Rain), qr)\n samples = g.infer(1)\n self.assertTrue(len(samples) == 1)\n # since we have observed grass wet is true the query should be true\n self.assertEqual(type(samples[0][1]), bool)\n self.assertTrue(samples[0][1])\n # test parallel inference\n samples_all = g.infer(num_samples=1, n_chains=2)\n self.assertTrue(len(samples_all) == 2)\n self.assertTrue(len(samples_all[0]) == 1)\n self.assertTrue(len(samples_all[1]) == 1)\n self.assertEqual(samples[0][0], samples_all[0][0][0])\n self.assertEqual(samples[0][1], samples_all[0][0][1])\n self.assertEqual(type(samples_all[1][0][0]), bool)\n self.assertEqual(type(samples_all[1][0][1]), bool)\n self.assertTrue(samples_all[1][0][1])\n\n def test_infer_mean(self):\n g = graph.Graph()\n c1 = g.add_constant_probability(1.0)\n op1 = g.add_operator(graph.OperatorType.MULTIPLY, [c1, c1])\n d1 = g.add_distribution(\n graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [op1]\n )\n op2 = g.add_operator(graph.OperatorType.SAMPLE, [d1])\n g.query(op1)\n g.query(op2)\n means = g.infer_mean(100)\n self.assertAlmostEqual(means[0], 1.0)\n self.assertAlmostEqual(means[1], 1.0)\n # test parallel inference\n means_all = g.infer_mean(num_samples=100, n_chains=2)\n self.assertTrue(len(means_all) == 2)\n self.assertAlmostEqual(means_all[0][0], 1.0)\n self.assertAlmostEqual(means_all[0][1], 1.0)\n self.assertAlmostEqual(means_all[1][0], 1.0)\n self.assertAlmostEqual(means_all[1][1], 1.0)\n\n def test_neg_real(self):\n g = graph.Graph()\n with self.assertRaises(ValueError) as cm:\n g.add_constant_neg_real(1.25)\n self.assertTrue(\"neg_real must be <=0\" in str(cm.exception))\n neg1 = g.add_constant_neg_real(-1.25)\n expected = \"\"\"\n Node 0 type 1 parents [ ] children [ ] negative real -1.25\n \"\"\"\n self.assertEqual(g.to_string().strip(), expected.strip())\n add_negs = g.add_operator(graph.OperatorType.ADD, [neg1, neg1])\n 
g.query(add_negs)\n means = g.infer_mean(10)\n self.assertAlmostEqual(means[0], -2.5)\n samples = g.infer(10)\n self.assertAlmostEqual(samples[0][0], -2.5)\n\n def test_get_log_prob(self):\n g, Rain, Sprinkler, GrassWet = self._create_graph()\n g.observe(GrassWet, True)\n g.query(Rain)\n g.query(GrassWet)\n conf = graph.InferConfig()\n conf.keep_log_prob = True\n g.infer(\n num_samples=10,\n algorithm=graph.InferenceType.GIBBS,\n seed=123,\n n_chains=2,\n infer_config=conf,\n )\n log_probs = g.get_log_prob()\n self.assertEqual(len(log_probs), 2)\n self.assertEqual(len(log_probs[0]), 10)\n",
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom __future__ import annotations\n\nimport dataclasses\nfrom typing import Set\n\nimport torch\nimport torch.distributions as dist\nfrom beanmachine.ppl.model.rv_identifier import RVIdentifier\nfrom torch.distributions.utils import lazy_property\n\n\[email protected]\nclass Variable:\n \"\"\"\n Primitive used for maintaining metadata of random variables. Usually used\n in conjunction with `World` during inference.\n\n Attributes:\n value (torch.Tensor): Sampled value of random variable\n distribution (torch.distributions.Distribution): Distribution random variable was sampled from\n parents (set): Set containing the RVIdentifiers of the parents of the random variable\n children (set): Set containing the RVIdentifiers of the children of the random variable\n \"\"\"\n\n value: torch.Tensor\n distribution: dist.Distribution\n parents: Set[RVIdentifier] = dataclasses.field(default_factory=set)\n children: Set[RVIdentifier] = dataclasses.field(default_factory=set)\n\n @lazy_property\n def log_prob(self) -> torch.Tensor:\n \"\"\"\n Returns\n The logprob of the `value` of the value given the distribution.\n \"\"\"\n try:\n return self.distribution.log_prob(self.value)\n # Numerical errors in Cholesky factorization are handled upstream\n # in respective proposers or in `Sampler.send`.\n # TODO: Change to torch.linalg.LinAlgError when in release.\n except (RuntimeError, ValueError) as e:\n err_msg = str(e)\n if isinstance(e, RuntimeError) and (\n \"singular U\" in err_msg or \"input is not positive-definite\" in err_msg\n ):\n raise e\n dtype = (\n self.value.dtype\n if torch.is_floating_point(self.value)\n else torch.float32\n )\n return torch.tensor(float(\"-inf\"), device=self.value.device, dtype=dtype)\n\n def replace(self, **changes) -> Variable:\n \"\"\"Return a new Variable object with fields replaced by the changes\"\"\"\n return dataclasses.replace(self, **changes)\n"
] | [
[
"torch.distributions.Gamma",
"torch.ones_like",
"torch.where",
"torch.tensor"
],
[
"torch.tensor",
"torch.distributions.Normal",
"torch.distributions.Beta"
],
[
"numpy.array"
],
[
"torch.is_floating_point"
]
] |
dhimmel/pandas | [
"776fed3ab63d74ddef6e5af1a702b10c2a30bbb6",
"776fed3ab63d74ddef6e5af1a702b10c2a30bbb6",
"776fed3ab63d74ddef6e5af1a702b10c2a30bbb6"
] | [
"pandas/tests/frame/test_analytics.py",
"pandas/tests/extension/base/ops.py",
"pandas/core/ops.py"
] | [
"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport warnings\nfrom datetime import timedelta\nimport operator\nimport pytest\n\nfrom string import ascii_lowercase\nfrom numpy import nan\nfrom numpy.random import randn\nimport numpy as np\n\nfrom pandas.compat import lrange, PY35\nfrom pandas import (compat, isna, notna, DataFrame, Series,\n MultiIndex, date_range, Timestamp, Categorical,\n _np_version_under1p12,\n to_datetime, to_timedelta)\nimport pandas as pd\nimport pandas.core.nanops as nanops\nimport pandas.core.algorithms as algorithms\n\nimport pandas.util.testing as tm\nimport pandas.util._test_decorators as td\nfrom pandas.tests.frame.common import TestData\n\n\nclass TestDataFrameAnalytics(TestData):\n\n # ---------------------------------------------------------------------=\n # Correlation and covariance\n\n @td.skip_if_no_scipy\n def test_corr_pearson(self):\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('pearson')\n\n @td.skip_if_no_scipy\n def test_corr_kendall(self):\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('kendall')\n\n @td.skip_if_no_scipy\n def test_corr_spearman(self):\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('spearman')\n\n def _check_method(self, method='pearson', check_minp=False):\n if not check_minp:\n correls = self.frame.corr(method=method)\n exp = self.frame['A'].corr(self.frame['C'], method=method)\n tm.assert_almost_equal(correls['A']['C'], exp)\n else:\n result = self.frame.corr(min_periods=len(self.frame) - 8)\n expected = self.frame.corr()\n expected.loc['A', 'B'] = expected.loc['B', 'A'] = nan\n tm.assert_frame_equal(result, expected)\n\n @td.skip_if_no_scipy\n def test_corr_non_numeric(self):\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n # exclude non-numeric types\n result = self.mixed_frame.corr()\n expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].corr()\n tm.assert_frame_equal(result, expected)\n\n @td.skip_if_no_scipy\n @pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])\n def test_corr_nooverlap(self, meth):\n # nothing in common\n df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],\n 'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],\n 'C': [np.nan, np.nan, np.nan, np.nan,\n np.nan, np.nan]})\n rs = df.corr(meth)\n assert isna(rs.loc['A', 'B'])\n assert isna(rs.loc['B', 'A'])\n assert rs.loc['A', 'A'] == 1\n assert rs.loc['B', 'B'] == 1\n assert isna(rs.loc['C', 'C'])\n\n @td.skip_if_no_scipy\n @pytest.mark.parametrize('meth', ['pearson', 'spearman'])\n def test_corr_constant(self, meth):\n # constant --> all NA\n\n df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],\n 'B': [np.nan, np.nan, np.nan, 1, 1, 1]})\n rs = df.corr(meth)\n assert isna(rs.values).all()\n\n def test_corr_int(self):\n # dtypes other than float64 #1761\n df3 = DataFrame({\"a\": [1, 2, 3, 4], \"b\": [1, 2, 3, 4]})\n\n df3.cov()\n df3.corr()\n\n @td.skip_if_no_scipy\n def test_corr_int_and_boolean(self):\n # when dtypes of pandas series are different\n # then ndarray will have dtype=object,\n # so it need to be properly handled\n df = DataFrame({\"a\": [True, False], \"b\": [1, 0]})\n\n expected = DataFrame(np.ones((2, 2)), index=[\n 'a', 'b'], columns=['a', 'b'])\n for meth in ['pearson', 'kendall', 'spearman']:\n\n # RuntimeWarning\n with warnings.catch_warnings(record=True):\n result = df.corr(meth)\n tm.assert_frame_equal(result, expected)\n\n def test_corr_cov_independent_index_column(self):\n # 
GH 14617\n df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),\n columns=list(\"abcd\"))\n for method in ['cov', 'corr']:\n result = getattr(df, method)()\n assert result.index is not result.columns\n assert result.index.equals(result.columns)\n\n def test_cov(self):\n # min_periods no NAs (corner case)\n expected = self.frame.cov()\n result = self.frame.cov(min_periods=len(self.frame))\n\n tm.assert_frame_equal(expected, result)\n\n result = self.frame.cov(min_periods=len(self.frame) + 1)\n assert isna(result.values).all()\n\n # with NAs\n frame = self.frame.copy()\n frame['A'][:5] = nan\n frame['B'][5:10] = nan\n result = self.frame.cov(min_periods=len(self.frame) - 8)\n expected = self.frame.cov()\n expected.loc['A', 'B'] = np.nan\n expected.loc['B', 'A'] = np.nan\n\n # regular\n self.frame['A'][:5] = nan\n self.frame['B'][:10] = nan\n cov = self.frame.cov()\n\n tm.assert_almost_equal(cov['A']['C'],\n self.frame['A'].cov(self.frame['C']))\n\n # exclude non-numeric types\n result = self.mixed_frame.cov()\n expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].cov()\n tm.assert_frame_equal(result, expected)\n\n # Single column frame\n df = DataFrame(np.linspace(0.0, 1.0, 10))\n result = df.cov()\n expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),\n index=df.columns, columns=df.columns)\n tm.assert_frame_equal(result, expected)\n df.loc[0] = np.nan\n result = df.cov()\n expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),\n index=df.columns, columns=df.columns)\n tm.assert_frame_equal(result, expected)\n\n def test_corrwith(self):\n a = self.tsframe\n noise = Series(randn(len(a)), index=a.index)\n\n b = self.tsframe.add(noise, axis=0)\n\n # make sure order does not matter\n b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])\n del b['B']\n\n colcorr = a.corrwith(b, axis=0)\n tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))\n\n rowcorr = a.corrwith(b, axis=1)\n tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))\n\n dropped = a.corrwith(b, axis=0, drop=True)\n tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))\n assert 'B' not in dropped\n\n dropped = a.corrwith(b, axis=1, drop=True)\n assert a.index[-1] not in dropped.index\n\n # non time-series data\n index = ['a', 'b', 'c', 'd', 'e']\n columns = ['one', 'two', 'three', 'four']\n df1 = DataFrame(randn(5, 4), index=index, columns=columns)\n df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)\n correls = df1.corrwith(df2, axis=1)\n for row in index[:4]:\n tm.assert_almost_equal(correls[row],\n df1.loc[row].corr(df2.loc[row]))\n\n def test_corrwith_with_objects(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame()\n cols = ['A', 'B', 'C', 'D']\n\n df1['obj'] = 'foo'\n df2['obj'] = 'bar'\n\n result = df1.corrwith(df2)\n expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])\n tm.assert_series_equal(result, expected)\n\n result = df1.corrwith(df2, axis=1)\n expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)\n tm.assert_series_equal(result, expected)\n\n def test_corrwith_series(self):\n result = self.tsframe.corrwith(self.tsframe['A'])\n expected = self.tsframe.apply(self.tsframe['A'].corr)\n\n tm.assert_series_equal(result, expected)\n\n def test_corrwith_matches_corrcoef(self):\n df1 = DataFrame(np.arange(10000), columns=['a'])\n df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])\n c1 = df1.corrwith(df2)['a']\n c2 = np.corrcoef(df1['a'], df2['a'])[0][1]\n\n tm.assert_almost_equal(c1, c2)\n assert c1 < 1\n\n def 
test_corrwith_mixed_dtypes(self):\n # GH 18570\n df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],\n 'c': ['a', 'b', 'c', 'd']})\n s = pd.Series([0, 6, 7, 3])\n result = df.corrwith(s)\n corrs = [df['a'].corr(s), df['b'].corr(s)]\n expected = pd.Series(data=corrs, index=['a', 'b'])\n tm.assert_series_equal(result, expected)\n\n def test_bool_describe_in_mixed_frame(self):\n df = DataFrame({\n 'string_data': ['a', 'b', 'c', 'd', 'e'],\n 'bool_data': [True, True, False, False, False],\n 'int_data': [10, 20, 30, 40, 50],\n })\n\n # Integer data are included in .describe() output,\n # Boolean and string data are not.\n result = df.describe()\n expected = DataFrame({'int_data': [5, 30, df.int_data.std(),\n 10, 20, 30, 40, 50]},\n index=['count', 'mean', 'std', 'min', '25%',\n '50%', '75%', 'max'])\n tm.assert_frame_equal(result, expected)\n\n # Top value is a boolean value that is False\n result = df.describe(include=['bool'])\n\n expected = DataFrame({'bool_data': [5, 2, False, 3]},\n index=['count', 'unique', 'top', 'freq'])\n tm.assert_frame_equal(result, expected)\n\n def test_describe_bool_frame(self):\n # GH 13891\n df = pd.DataFrame({\n 'bool_data_1': [False, False, True, True],\n 'bool_data_2': [False, True, True, True]\n })\n result = df.describe()\n expected = DataFrame({'bool_data_1': [4, 2, True, 2],\n 'bool_data_2': [4, 2, True, 3]},\n index=['count', 'unique', 'top', 'freq'])\n tm.assert_frame_equal(result, expected)\n\n df = pd.DataFrame({\n 'bool_data': [False, False, True, True, False],\n 'int_data': [0, 1, 2, 3, 4]\n })\n result = df.describe()\n expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,\n 2, 3, 4]},\n index=['count', 'mean', 'std', 'min', '25%',\n '50%', '75%', 'max'])\n tm.assert_frame_equal(result, expected)\n\n df = pd.DataFrame({\n 'bool_data': [False, False, True, True],\n 'str_data': ['a', 'b', 'c', 'a']\n })\n result = df.describe()\n expected = DataFrame({'bool_data': [4, 2, True, 2],\n 'str_data': [4, 3, 'a', 2]},\n index=['count', 'unique', 'top', 'freq'])\n tm.assert_frame_equal(result, expected)\n\n def test_describe_categorical(self):\n df = DataFrame({'value': np.random.randint(0, 10000, 100)})\n labels = [\"{0} - {1}\".format(i, i + 499) for i in range(0, 10000, 500)]\n cat_labels = Categorical(labels, labels)\n\n df = df.sort_values(by=['value'], ascending=True)\n df['value_group'] = pd.cut(df.value, range(0, 10500, 500),\n right=False, labels=cat_labels)\n cat = df\n\n # Categoricals should not show up together with numerical columns\n result = cat.describe()\n assert len(result.columns) == 1\n\n # In a frame, describe() for the cat should be the same as for string\n # arrays (count, unique, top, freq)\n\n cat = Categorical([\"a\", \"b\", \"b\", \"b\"], categories=['a', 'b', 'c'],\n ordered=True)\n s = Series(cat)\n result = s.describe()\n expected = Series([4, 2, \"b\", 3],\n index=['count', 'unique', 'top', 'freq'])\n tm.assert_series_equal(result, expected)\n\n cat = Series(Categorical([\"a\", \"b\", \"c\", \"c\"]))\n df3 = DataFrame({\"cat\": cat, \"s\": [\"a\", \"b\", \"c\", \"c\"]})\n res = df3.describe()\n tm.assert_numpy_array_equal(res[\"cat\"].values, res[\"s\"].values)\n\n def test_describe_categorical_columns(self):\n # GH 11558\n columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],\n ordered=True, name='XXX')\n df = DataFrame({'int1': [10, 20, 30, 40, 50],\n 'int2': [10, 20, 30, 40, 50],\n 'obj': ['A', 0, None, 'X', 1]},\n columns=columns)\n result = df.describe()\n\n exp_columns = pd.CategoricalIndex(['int1', 
'int2'],\n categories=['int1', 'int2', 'obj'],\n ordered=True, name='XXX')\n expected = DataFrame({'int1': [5, 30, df.int1.std(),\n 10, 20, 30, 40, 50],\n 'int2': [5, 30, df.int2.std(),\n 10, 20, 30, 40, 50]},\n index=['count', 'mean', 'std', 'min', '25%',\n '50%', '75%', 'max'],\n columns=exp_columns)\n tm.assert_frame_equal(result, expected)\n tm.assert_categorical_equal(result.columns.values,\n expected.columns.values)\n\n def test_describe_datetime_columns(self):\n columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],\n freq='MS', tz='US/Eastern', name='XXX')\n df = DataFrame({0: [10, 20, 30, 40, 50],\n 1: [10, 20, 30, 40, 50],\n 2: ['A', 0, None, 'X', 1]})\n df.columns = columns\n result = df.describe()\n\n exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],\n freq='MS', tz='US/Eastern', name='XXX')\n expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),\n 10, 20, 30, 40, 50],\n 1: [5, 30, df.iloc[:, 1].std(),\n 10, 20, 30, 40, 50]},\n index=['count', 'mean', 'std', 'min', '25%',\n '50%', '75%', 'max'])\n expected.columns = exp_columns\n tm.assert_frame_equal(result, expected)\n assert result.columns.freq == 'MS'\n assert result.columns.tz == expected.columns.tz\n\n def test_describe_timedelta_values(self):\n # GH 6145\n t1 = pd.timedelta_range('1 days', freq='D', periods=5)\n t2 = pd.timedelta_range('1 hours', freq='H', periods=5)\n df = pd.DataFrame({'t1': t1, 't2': t2})\n\n expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),\n df.iloc[:, 0].std(),\n pd.Timedelta('1 days'),\n pd.Timedelta('2 days'),\n pd.Timedelta('3 days'),\n pd.Timedelta('4 days'),\n pd.Timedelta('5 days')],\n 't2': [5, pd.Timedelta('3 hours'),\n df.iloc[:, 1].std(),\n pd.Timedelta('1 hours'),\n pd.Timedelta('2 hours'),\n pd.Timedelta('3 hours'),\n pd.Timedelta('4 hours'),\n pd.Timedelta('5 hours')]},\n index=['count', 'mean', 'std', 'min', '25%',\n '50%', '75%', 'max'])\n\n res = df.describe()\n tm.assert_frame_equal(res, expected)\n\n exp_repr = (\" t1 t2\\n\"\n \"count 5 5\\n\"\n \"mean 3 days 00:00:00 0 days 03:00:00\\n\"\n \"std 1 days 13:56:50.394919 0 days 01:34:52.099788\\n\"\n \"min 1 days 00:00:00 0 days 01:00:00\\n\"\n \"25% 2 days 00:00:00 0 days 02:00:00\\n\"\n \"50% 3 days 00:00:00 0 days 03:00:00\\n\"\n \"75% 4 days 00:00:00 0 days 04:00:00\\n\"\n \"max 5 days 00:00:00 0 days 05:00:00\")\n assert repr(res) == exp_repr\n\n def test_describe_tz_values(self, tz_naive_fixture):\n # GH 21332\n tz = tz_naive_fixture\n s1 = Series(range(5))\n start = Timestamp(2018, 1, 1)\n end = Timestamp(2018, 1, 5)\n s2 = Series(date_range(start, end, tz=tz))\n df = pd.DataFrame({'s1': s1, 's2': s2})\n\n expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,\n 2, 1.581139, 0, 1, 2, 3, 4],\n 's2': [5, 5, s2.value_counts().index[0], 1,\n start.tz_localize(tz),\n end.tz_localize(tz), np.nan, np.nan,\n np.nan, np.nan, np.nan, np.nan, np.nan]},\n index=['count', 'unique', 'top', 'freq', 'first',\n 'last', 'mean', 'std', 'min', '25%', '50%',\n '75%', 'max']\n )\n res = df.describe(include='all')\n tm.assert_frame_equal(res, expected)\n\n def test_reduce_mixed_frame(self):\n # GH 6806\n df = DataFrame({\n 'bool_data': [True, True, False, False, False],\n 'int_data': [10, 20, 30, 40, 50],\n 'string_data': ['a', 'b', 'c', 'd', 'e'],\n })\n df.reindex(columns=['bool_data', 'int_data', 'string_data'])\n test = df.sum(axis=0)\n tm.assert_numpy_array_equal(test.values,\n np.array([2, 150, 'abcde'], dtype=object))\n tm.assert_series_equal(test, df.T.sum(axis=1))\n\n def 
test_count(self):\n f = lambda s: notna(s).sum()\n self._check_stat_op('count', f,\n has_skipna=False,\n has_numeric_only=True,\n check_dtype=False,\n check_dates=True)\n\n # corner case\n frame = DataFrame()\n ct1 = frame.count(1)\n assert isinstance(ct1, Series)\n\n ct2 = frame.count(0)\n assert isinstance(ct2, Series)\n\n # GH #423\n df = DataFrame(index=lrange(10))\n result = df.count(1)\n expected = Series(0, index=df.index)\n tm.assert_series_equal(result, expected)\n\n df = DataFrame(columns=lrange(10))\n result = df.count(0)\n expected = Series(0, index=df.columns)\n tm.assert_series_equal(result, expected)\n\n df = DataFrame()\n result = df.count()\n expected = Series(0, index=[])\n tm.assert_series_equal(result, expected)\n\n def test_nunique(self):\n f = lambda s: len(algorithms.unique1d(s.dropna()))\n self._check_stat_op('nunique', f, has_skipna=False,\n check_dtype=False, check_dates=True)\n\n df = DataFrame({'A': [1, 1, 1],\n 'B': [1, 2, 3],\n 'C': [1, np.nan, 3]})\n tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))\n tm.assert_series_equal(df.nunique(dropna=False),\n Series({'A': 1, 'B': 3, 'C': 3}))\n tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))\n tm.assert_series_equal(df.nunique(axis=1, dropna=False),\n Series({0: 1, 1: 3, 2: 2}))\n\n def test_sum(self):\n self._check_stat_op('sum', np.sum, has_numeric_only=True,\n skipna_alternative=np.nansum)\n\n # mixed types (with upcasting happening)\n self._check_stat_op('sum', np.sum,\n frame=self.mixed_float.astype('float32'),\n has_numeric_only=True, check_dtype=False,\n check_less_precise=True)\n\n @pytest.mark.parametrize(\n \"method\", ['sum', 'mean', 'prod', 'var',\n 'std', 'skew', 'min', 'max'])\n def test_stat_operators_attempt_obj_array(self, method):\n # GH #676\n data = {\n 'a': [-0.00049987540199591344, -0.0016467257772919831,\n 0.00067695870775883013],\n 'b': [-0, -0, 0.0],\n 'c': [0.00031111847529610595, 0.0014902627951905339,\n -0.00094099200035979691]\n }\n df1 = DataFrame(data, index=['foo', 'bar', 'baz'],\n dtype='O')\n\n df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],\n 2: [np.nan, 4]}, dtype=object)\n\n for df in [df1, df2]:\n assert df.values.dtype == np.object_\n result = getattr(df, method)(1)\n expected = getattr(df.astype('f8'), method)(1)\n\n if method in ['sum', 'prod']:\n tm.assert_series_equal(result, expected)\n\n def test_mean(self):\n self._check_stat_op('mean', np.mean, check_dates=True)\n\n def test_product(self):\n self._check_stat_op('product', np.prod)\n\n def test_median(self):\n def wrapper(x):\n if isna(x).any():\n return np.nan\n return np.median(x)\n\n self._check_stat_op('median', wrapper, check_dates=True)\n\n def test_min(self):\n with warnings.catch_warnings(record=True):\n self._check_stat_op('min', np.min, check_dates=True)\n self._check_stat_op('min', np.min, frame=self.intframe)\n\n def test_cummin(self):\n self.tsframe.loc[5:10, 0] = nan\n self.tsframe.loc[10:15, 1] = nan\n self.tsframe.loc[15:, 2] = nan\n\n # axis = 0\n cummin = self.tsframe.cummin()\n expected = self.tsframe.apply(Series.cummin)\n tm.assert_frame_equal(cummin, expected)\n\n # axis = 1\n cummin = self.tsframe.cummin(axis=1)\n expected = self.tsframe.apply(Series.cummin, axis=1)\n tm.assert_frame_equal(cummin, expected)\n\n # it works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cummin() # noqa\n\n # fix issue\n cummin_xs = self.tsframe.cummin(axis=1)\n assert np.shape(cummin_xs) == np.shape(self.tsframe)\n\n def test_cummax(self):\n 
self.tsframe.loc[5:10, 0] = nan\n self.tsframe.loc[10:15, 1] = nan\n self.tsframe.loc[15:, 2] = nan\n\n # axis = 0\n cummax = self.tsframe.cummax()\n expected = self.tsframe.apply(Series.cummax)\n tm.assert_frame_equal(cummax, expected)\n\n # axis = 1\n cummax = self.tsframe.cummax(axis=1)\n expected = self.tsframe.apply(Series.cummax, axis=1)\n tm.assert_frame_equal(cummax, expected)\n\n # it works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cummax() # noqa\n\n # fix issue\n cummax_xs = self.tsframe.cummax(axis=1)\n assert np.shape(cummax_xs) == np.shape(self.tsframe)\n\n def test_max(self):\n with warnings.catch_warnings(record=True):\n self._check_stat_op('max', np.max, check_dates=True)\n self._check_stat_op('max', np.max, frame=self.intframe)\n\n def test_mad(self):\n f = lambda x: np.abs(x - x.mean()).mean()\n self._check_stat_op('mad', f)\n\n def test_var_std(self):\n alt = lambda x: np.var(x, ddof=1)\n self._check_stat_op('var', alt)\n\n alt = lambda x: np.std(x, ddof=1)\n self._check_stat_op('std', alt)\n\n result = self.tsframe.std(ddof=4)\n expected = self.tsframe.apply(lambda x: x.std(ddof=4))\n tm.assert_almost_equal(result, expected)\n\n result = self.tsframe.var(ddof=4)\n expected = self.tsframe.apply(lambda x: x.var(ddof=4))\n tm.assert_almost_equal(result, expected)\n\n arr = np.repeat(np.random.random((1, 1000)), 1000, 0)\n result = nanops.nanvar(arr, axis=0)\n assert not (result < 0).any()\n\n with pd.option_context('use_bottleneck', False):\n result = nanops.nanvar(arr, axis=0)\n assert not (result < 0).any()\n\n @pytest.mark.parametrize(\n \"meth\", ['sem', 'var', 'std'])\n def test_numeric_only_flag(self, meth):\n # GH #9201\n df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])\n # set one entry to a number in str format\n df1.loc[0, 'foo'] = '100'\n\n df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])\n # set one entry to a non-number str\n df2.loc[0, 'foo'] = 'a'\n\n result = getattr(df1, meth)(axis=1, numeric_only=True)\n expected = getattr(df1[['bar', 'baz']], meth)(axis=1)\n tm.assert_series_equal(expected, result)\n\n result = getattr(df2, meth)(axis=1, numeric_only=True)\n expected = getattr(df2[['bar', 'baz']], meth)(axis=1)\n tm.assert_series_equal(expected, result)\n\n # df1 has all numbers, df2 has a letter inside\n pytest.raises(TypeError, lambda: getattr(df1, meth)(\n axis=1, numeric_only=False))\n pytest.raises(TypeError, lambda: getattr(df2, meth)(\n axis=1, numeric_only=False))\n\n @pytest.mark.parametrize('op', ['mean', 'std', 'var',\n 'skew', 'kurt', 'sem'])\n def test_mixed_ops(self, op):\n # GH 16116\n df = DataFrame({'int': [1, 2, 3, 4],\n 'float': [1., 2., 3., 4.],\n 'str': ['a', 'b', 'c', 'd']})\n\n result = getattr(df, op)()\n assert len(result) == 2\n\n with pd.option_context('use_bottleneck', False):\n result = getattr(df, op)()\n assert len(result) == 2\n\n def test_cumsum(self):\n self.tsframe.loc[5:10, 0] = nan\n self.tsframe.loc[10:15, 1] = nan\n self.tsframe.loc[15:, 2] = nan\n\n # axis = 0\n cumsum = self.tsframe.cumsum()\n expected = self.tsframe.apply(Series.cumsum)\n tm.assert_frame_equal(cumsum, expected)\n\n # axis = 1\n cumsum = self.tsframe.cumsum(axis=1)\n expected = self.tsframe.apply(Series.cumsum, axis=1)\n tm.assert_frame_equal(cumsum, expected)\n\n # works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cumsum() # noqa\n\n # fix issue\n cumsum_xs = self.tsframe.cumsum(axis=1)\n assert np.shape(cumsum_xs) == 
np.shape(self.tsframe)\n\n def test_cumprod(self):\n self.tsframe.loc[5:10, 0] = nan\n self.tsframe.loc[10:15, 1] = nan\n self.tsframe.loc[15:, 2] = nan\n\n # axis = 0\n cumprod = self.tsframe.cumprod()\n expected = self.tsframe.apply(Series.cumprod)\n tm.assert_frame_equal(cumprod, expected)\n\n # axis = 1\n cumprod = self.tsframe.cumprod(axis=1)\n expected = self.tsframe.apply(Series.cumprod, axis=1)\n tm.assert_frame_equal(cumprod, expected)\n\n # fix issue\n cumprod_xs = self.tsframe.cumprod(axis=1)\n assert np.shape(cumprod_xs) == np.shape(self.tsframe)\n\n # ints\n df = self.tsframe.fillna(0).astype(int)\n df.cumprod(0)\n df.cumprod(1)\n\n # ints32\n df = self.tsframe.fillna(0).astype(np.int32)\n df.cumprod(0)\n df.cumprod(1)\n\n def test_sem(self):\n alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))\n self._check_stat_op('sem', alt)\n\n result = self.tsframe.sem(ddof=4)\n expected = self.tsframe.apply(\n lambda x: x.std(ddof=4) / np.sqrt(len(x)))\n tm.assert_almost_equal(result, expected)\n\n arr = np.repeat(np.random.random((1, 1000)), 1000, 0)\n result = nanops.nansem(arr, axis=0)\n assert not (result < 0).any()\n\n with pd.option_context('use_bottleneck', False):\n result = nanops.nansem(arr, axis=0)\n assert not (result < 0).any()\n\n @td.skip_if_no_scipy\n def test_skew(self):\n from scipy.stats import skew\n\n def alt(x):\n if len(x) < 3:\n return np.nan\n return skew(x, bias=False)\n\n self._check_stat_op('skew', alt)\n\n @td.skip_if_no_scipy\n def test_kurt(self):\n from scipy.stats import kurtosis\n\n def alt(x):\n if len(x) < 4:\n return np.nan\n return kurtosis(x, bias=False)\n\n self._check_stat_op('kurt', alt)\n\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n df = DataFrame(np.random.randn(6, 3), index=index)\n\n kurt = df.kurt()\n kurt2 = df.kurt(level=0).xs('bar')\n tm.assert_series_equal(kurt, kurt2, check_names=False)\n assert kurt.name is None\n assert kurt2.name == 'bar'\n\n def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,\n has_numeric_only=False, check_dtype=True,\n check_dates=False, check_less_precise=False,\n skipna_alternative=None):\n if frame is None:\n frame = self.frame\n # set some NAs\n frame.loc[5:10] = np.nan\n frame.loc[15:20, -2:] = np.nan\n\n f = getattr(frame, name)\n\n if check_dates:\n df = DataFrame({'b': date_range('1/1/2001', periods=2)})\n _f = getattr(df, name)\n result = _f()\n assert isinstance(result, Series)\n\n df['a'] = lrange(len(df))\n result = getattr(df, name)()\n assert isinstance(result, Series)\n assert len(result)\n\n if has_skipna:\n def wrapper(x):\n return alternative(x.values)\n\n skipna_wrapper = tm._make_skipna_wrapper(alternative,\n skipna_alternative)\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, skipna=False)\n tm.assert_series_equal(result0, frame.apply(wrapper),\n check_dtype=check_dtype,\n check_less_precise=check_less_precise)\n # HACK: win32\n tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),\n check_dtype=False,\n check_less_precise=check_less_precise)\n else:\n skipna_wrapper = alternative\n wrapper = alternative\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n tm.assert_series_equal(result0, frame.apply(skipna_wrapper),\n check_dtype=check_dtype,\n check_less_precise=check_less_precise)\n if name in ['sum', 'prod']:\n exp = frame.apply(skipna_wrapper, axis=1)\n tm.assert_series_equal(result1, exp, check_dtype=False,\n check_less_precise=check_less_precise)\n\n # 
check dtypes\n if check_dtype:\n lcd_dtype = frame.values.dtype\n assert lcd_dtype == result0.dtype\n assert lcd_dtype == result1.dtype\n\n # result = f(axis=1)\n # comp = frame.apply(alternative, axis=1).reindex(result.index)\n # assert_series_equal(result, comp)\n\n # bad axis\n tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)\n # make sure works on mixed-type frame\n getattr(self.mixed_frame, name)(axis=0)\n getattr(self.mixed_frame, name)(axis=1)\n\n if has_numeric_only:\n getattr(self.mixed_frame, name)(axis=0, numeric_only=True)\n getattr(self.mixed_frame, name)(axis=1, numeric_only=True)\n getattr(self.frame, name)(axis=0, numeric_only=False)\n getattr(self.frame, name)(axis=1, numeric_only=False)\n\n # all NA case\n if has_skipna:\n all_na = self.frame * np.NaN\n r0 = getattr(all_na, name)(axis=0)\n r1 = getattr(all_na, name)(axis=1)\n if name in ['sum', 'prod']:\n unit = int(name == 'prod')\n expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)\n tm.assert_series_equal(r0, expected)\n expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)\n tm.assert_series_equal(r1, expected)\n\n @pytest.mark.parametrize(\"dropna, expected\", [\n (True, {'A': [12],\n 'B': [10.0],\n 'C': [1.0],\n 'D': ['a'],\n 'E': Categorical(['a'], categories=['a']),\n 'F': to_datetime(['2000-1-2']),\n 'G': to_timedelta(['1 days'])}),\n (False, {'A': [12],\n 'B': [10.0],\n 'C': [np.nan],\n 'D': np.array([np.nan], dtype=object),\n 'E': Categorical([np.nan], categories=['a']),\n 'F': [pd.NaT],\n 'G': to_timedelta([pd.NaT])}),\n (True, {'H': [8, 9, np.nan, np.nan],\n 'I': [8, 9, np.nan, np.nan],\n 'J': [1, np.nan, np.nan, np.nan],\n 'K': Categorical(['a', np.nan, np.nan, np.nan],\n categories=['a']),\n 'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),\n 'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),\n 'N': [0, 1, 2, 3]}),\n (False, {'H': [8, 9, np.nan, np.nan],\n 'I': [8, 9, np.nan, np.nan],\n 'J': [1, np.nan, np.nan, np.nan],\n 'K': Categorical([np.nan, 'a', np.nan, np.nan],\n categories=['a']),\n 'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),\n 'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),\n 'N': [0, 1, 2, 3]})\n ])\n def test_mode_dropna(self, dropna, expected):\n\n df = DataFrame({\"A\": [12, 12, 19, 11],\n \"B\": [10, 10, np.nan, 3],\n \"C\": [1, np.nan, np.nan, np.nan],\n \"D\": [np.nan, np.nan, 'a', np.nan],\n \"E\": Categorical([np.nan, np.nan, 'a', np.nan]),\n \"F\": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),\n \"G\": to_timedelta(['1 days', 'nan', 'nan', 'nan']),\n \"H\": [8, 8, 9, 9],\n \"I\": [9, 9, 8, 8],\n \"J\": [1, 1, np.nan, np.nan],\n \"K\": Categorical(['a', np.nan, 'a', np.nan]),\n \"L\": to_datetime(['2000-1-2', '2000-1-2',\n 'NaT', 'NaT']),\n \"M\": to_timedelta(['1 days', 'nan',\n '1 days', 'nan']),\n \"N\": np.arange(4, dtype='int64')})\n\n result = df[sorted(list(expected.keys()))].mode(dropna=dropna)\n expected = DataFrame(expected)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.skipif(not compat.PY3, reason=\"only PY3\")\n def test_mode_sortwarning(self):\n # Check for the warning that is raised when the mode\n # results cannot be sorted\n\n df = DataFrame({\"A\": [np.nan, np.nan, 'a', 'a']})\n expected = DataFrame({'A': ['a', np.nan]})\n\n with tm.assert_produces_warning(UserWarning, check_stacklevel=False):\n result = df.mode(dropna=False)\n result = result.sort_values(by='A').reset_index(drop=True)\n\n tm.assert_frame_equal(result, expected)\n\n def test_operators_timedelta64(self):\n from datetime import timedelta\n df = 
DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),\n B=date_range('2012-1-2', periods=3, freq='D'),\n C=Timestamp('20120101') -\n timedelta(minutes=5, seconds=5)))\n\n diffs = DataFrame(dict(A=df['A'] - df['C'],\n B=df['A'] - df['B']))\n\n # min\n result = diffs.min()\n assert result[0] == diffs.loc[0, 'A']\n assert result[1] == diffs.loc[0, 'B']\n\n result = diffs.min(axis=1)\n assert (result == diffs.loc[0, 'B']).all()\n\n # max\n result = diffs.max()\n assert result[0] == diffs.loc[2, 'A']\n assert result[1] == diffs.loc[2, 'B']\n\n result = diffs.max(axis=1)\n assert (result == diffs['A']).all()\n\n # abs\n result = diffs.abs()\n result2 = abs(diffs)\n expected = DataFrame(dict(A=df['A'] - df['C'],\n B=df['B'] - df['A']))\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected)\n\n # mixed frame\n mixed = diffs.copy()\n mixed['C'] = 'foo'\n mixed['D'] = 1\n mixed['E'] = 1.\n mixed['F'] = Timestamp('20130101')\n\n # results in an object array\n from pandas.core.tools.timedeltas import (\n _coerce_scalar_to_timedelta_type as _coerce)\n\n result = mixed.min()\n expected = Series([_coerce(timedelta(seconds=5 * 60 + 5)),\n _coerce(timedelta(days=-1)),\n 'foo', 1, 1.0,\n Timestamp('20130101')],\n index=mixed.columns)\n tm.assert_series_equal(result, expected)\n\n # excludes numeric\n result = mixed.min(axis=1)\n expected = Series([1, 1, 1.], index=[0, 1, 2])\n tm.assert_series_equal(result, expected)\n\n # works when only those columns are selected\n result = mixed[['A', 'B']].min(1)\n expected = Series([timedelta(days=-1)] * 3)\n tm.assert_series_equal(result, expected)\n\n result = mixed[['A', 'B']].min()\n expected = Series([timedelta(seconds=5 * 60 + 5),\n timedelta(days=-1)], index=['A', 'B'])\n tm.assert_series_equal(result, expected)\n\n # GH 3106\n df = DataFrame({'time': date_range('20130102', periods=5),\n 'time2': date_range('20130105', periods=5)})\n df['off1'] = df['time2'] - df['time']\n assert df['off1'].dtype == 'timedelta64[ns]'\n\n df['off2'] = df['time'] - df['time2']\n df._consolidate_inplace()\n assert df['off1'].dtype == 'timedelta64[ns]'\n assert df['off2'].dtype == 'timedelta64[ns]'\n\n def test_sum_corner(self):\n axis0 = self.empty.sum(0)\n axis1 = self.empty.sum(1)\n assert isinstance(axis0, Series)\n assert isinstance(axis1, Series)\n assert len(axis0) == 0\n assert len(axis1) == 0\n\n @pytest.mark.parametrize('method, unit', [\n ('sum', 0),\n ('prod', 1),\n ])\n def test_sum_prod_nanops(self, method, unit):\n idx = ['a', 'b', 'c']\n df = pd.DataFrame({\"a\": [unit, unit],\n \"b\": [unit, np.nan],\n \"c\": [np.nan, np.nan]})\n # The default\n result = getattr(df, method)\n expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')\n\n # min_count=1\n result = getattr(df, method)(min_count=1)\n expected = pd.Series([unit, unit, np.nan], index=idx)\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = getattr(df, method)(min_count=0)\n expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')\n tm.assert_series_equal(result, expected)\n\n result = getattr(df.iloc[1:], method)(min_count=1)\n expected = pd.Series([unit, np.nan, np.nan], index=idx)\n tm.assert_series_equal(result, expected)\n\n # min_count > 1\n df = pd.DataFrame({\"A\": [unit] * 10, \"B\": [unit] * 5 + [np.nan] * 5})\n result = getattr(df, method)(min_count=5)\n expected = pd.Series(result, index=['A', 'B'])\n tm.assert_series_equal(result, expected)\n\n result = getattr(df, method)(min_count=6)\n expected = 
pd.Series(result, index=['A', 'B'])\n tm.assert_series_equal(result, expected)\n\n def test_sum_nanops_timedelta(self):\n # prod isn't defined on timedeltas\n idx = ['a', 'b', 'c']\n df = pd.DataFrame({\"a\": [0, 0],\n \"b\": [0, np.nan],\n \"c\": [np.nan, np.nan]})\n\n df2 = df.apply(pd.to_timedelta)\n\n # 0 by default\n result = df2.sum()\n expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)\n tm.assert_series_equal(result, expected)\n\n # min_count=0\n result = df2.sum(min_count=0)\n tm.assert_series_equal(result, expected)\n\n # min_count=1\n result = df2.sum(min_count=1)\n expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)\n tm.assert_series_equal(result, expected)\n\n def test_sum_object(self):\n values = self.frame.values.astype(int)\n frame = DataFrame(values, index=self.frame.index,\n columns=self.frame.columns)\n deltas = frame * timedelta(1)\n deltas.sum()\n\n def test_sum_bool(self):\n # ensure this works, bug report\n bools = np.isnan(self.frame)\n bools.sum(1)\n bools.sum(0)\n\n def test_mean_corner(self):\n # unit test when have object data\n the_mean = self.mixed_frame.mean(axis=0)\n the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)\n tm.assert_index_equal(the_sum.index, the_mean.index)\n assert len(the_mean.index) < len(self.mixed_frame.columns)\n\n # xs sum mixed type, just want to know it works...\n the_mean = self.mixed_frame.mean(axis=1)\n the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)\n tm.assert_index_equal(the_sum.index, the_mean.index)\n\n # take mean of boolean column\n self.frame['bool'] = self.frame['A'] > 0\n means = self.frame.mean(0)\n assert means['bool'] == self.frame['bool'].values.mean()\n\n def test_stats_mixed_type(self):\n # don't blow up\n self.mixed_frame.std(1)\n self.mixed_frame.var(1)\n self.mixed_frame.mean(1)\n self.mixed_frame.skew(1)\n\n def test_median_corner(self):\n def wrapper(x):\n if isna(x).any():\n return np.nan\n return np.median(x)\n\n self._check_stat_op('median', wrapper, frame=self.intframe,\n check_dtype=False, check_dates=True)\n\n # Miscellanea\n\n def test_count_objects(self):\n dm = DataFrame(self.mixed_frame._series)\n df = DataFrame(self.mixed_frame._series)\n\n tm.assert_series_equal(dm.count(), df.count())\n tm.assert_series_equal(dm.count(1), df.count(1))\n\n def test_cumsum_corner(self):\n dm = DataFrame(np.arange(20).reshape(4, 5),\n index=lrange(4), columns=lrange(5))\n # ?(wesm)\n result = dm.cumsum() # noqa\n\n def test_sum_bools(self):\n df = DataFrame(index=lrange(1), columns=lrange(10))\n bools = isna(df)\n assert bools.sum(axis=1)[0] == 10\n\n # Index of max / min\n\n def test_idxmin(self):\n frame = self.frame\n frame.loc[5:10] = np.nan\n frame.loc[15:20, -2:] = np.nan\n for skipna in [True, False]:\n for axis in [0, 1]:\n for df in [frame, self.intframe]:\n result = df.idxmin(axis=axis, skipna=skipna)\n expected = df.apply(Series.idxmin, axis=axis,\n skipna=skipna)\n tm.assert_series_equal(result, expected)\n\n pytest.raises(ValueError, frame.idxmin, axis=2)\n\n def test_idxmax(self):\n frame = self.frame\n frame.loc[5:10] = np.nan\n frame.loc[15:20, -2:] = np.nan\n for skipna in [True, False]:\n for axis in [0, 1]:\n for df in [frame, self.intframe]:\n result = df.idxmax(axis=axis, skipna=skipna)\n expected = df.apply(Series.idxmax, axis=axis,\n skipna=skipna)\n tm.assert_series_equal(result, expected)\n\n pytest.raises(ValueError, frame.idxmax, axis=2)\n\n # ----------------------------------------------------------------------\n # Logical reductions\n\n def 
test_any_all(self):\n self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)\n self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)\n\n def test_any_all_extra(self):\n df = DataFrame({\n 'A': [True, False, False],\n 'B': [True, True, False],\n 'C': [True, True, True],\n }, index=['a', 'b', 'c'])\n result = df[['A', 'B']].any(1)\n expected = Series([True, True, False], index=['a', 'b', 'c'])\n tm.assert_series_equal(result, expected)\n\n result = df[['A', 'B']].any(1, bool_only=True)\n tm.assert_series_equal(result, expected)\n\n result = df.all(1)\n expected = Series([True, False, False], index=['a', 'b', 'c'])\n tm.assert_series_equal(result, expected)\n\n result = df.all(1, bool_only=True)\n tm.assert_series_equal(result, expected)\n\n # Axis is None\n result = df.all(axis=None).item()\n assert result is False\n\n result = df.any(axis=None).item()\n assert result is True\n\n result = df[['C']].all(axis=None).item()\n assert result is True\n\n # skip pathological failure cases\n # class CantNonzero(object):\n\n # def __nonzero__(self):\n # raise ValueError\n\n # df[4] = CantNonzero()\n\n # it works!\n # df.any(1)\n # df.all(1)\n # df.any(1, bool_only=True)\n # df.all(1, bool_only=True)\n\n # df[4][4] = np.nan\n # df.any(1)\n # df.all(1)\n # df.any(1, bool_only=True)\n # df.all(1, bool_only=True)\n\n @pytest.mark.parametrize('func, data, expected', [\n (np.any, {}, False),\n (np.all, {}, True),\n (np.any, {'A': []}, False),\n (np.all, {'A': []}, True),\n (np.any, {'A': [False, False]}, False),\n (np.all, {'A': [False, False]}, False),\n (np.any, {'A': [True, False]}, True),\n (np.all, {'A': [True, False]}, False),\n (np.any, {'A': [True, True]}, True),\n (np.all, {'A': [True, True]}, True),\n\n (np.any, {'A': [False], 'B': [False]}, False),\n (np.all, {'A': [False], 'B': [False]}, False),\n\n (np.any, {'A': [False, False], 'B': [False, True]}, True),\n (np.all, {'A': [False, False], 'B': [False, True]}, False),\n\n # other types\n (np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),\n (np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),\n (np.all, {'A': pd.Series([0, 1], dtype=int)}, False),\n (np.any, {'A': pd.Series([0, 1], dtype=int)}, True),\n pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,\n marks=[td.skip_if_np_lt_115]),\n pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,\n marks=[td.skip_if_np_lt_115]),\n (np.all, {'A': pd.Series([0, 1], dtype='category')}, False),\n (np.any, {'A': pd.Series([0, 1], dtype='category')}, True),\n (np.all, {'A': pd.Series([1, 2], dtype='category')}, True),\n (np.any, {'A': pd.Series([1, 2], dtype='category')}, True),\n\n # # Mix\n # GH-21484\n # (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),\n # 'B': pd.Series([10, 20], dtype='m8[ns]')}, True),\n ])\n def test_any_all_np_func(self, func, data, expected):\n # 
https://github.com/pandas-dev/pandas/issues/19976\n data = DataFrame(data)\n result = func(data)\n assert isinstance(result, np.bool_)\n assert result.item() is expected\n\n # method version\n result = getattr(DataFrame(data), func.__name__)(axis=None)\n assert isinstance(result, np.bool_)\n assert result.item() is expected\n\n def test_any_all_object(self):\n # https://github.com/pandas-dev/pandas/issues/19976\n result = np.all(DataFrame(columns=['a', 'b'])).item()\n assert result is True\n\n result = np.any(DataFrame(columns=['a', 'b'])).item()\n assert result is False\n\n @pytest.mark.parametrize('method', ['any', 'all'])\n def test_any_all_level_axis_none_raises(self, method):\n df = DataFrame(\n {\"A\": 1},\n index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],\n names=['out', 'in'])\n )\n xpr = \"Must specify 'axis' when aggregating by level.\"\n with tm.assert_raises_regex(ValueError, xpr):\n getattr(df, method)(axis=None, level='out')\n\n def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,\n has_bool_only=False):\n if frame is None:\n frame = self.frame > 0\n # set some NAs\n frame = DataFrame(frame.values.astype(object), frame.index,\n frame.columns)\n frame.loc[5:10] = np.nan\n frame.loc[15:20, -2:] = np.nan\n\n f = getattr(frame, name)\n\n if has_skipna:\n def skipna_wrapper(x):\n nona = x.dropna().values\n return alternative(nona)\n\n def wrapper(x):\n return alternative(x.values)\n\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, skipna=False)\n tm.assert_series_equal(result0, frame.apply(wrapper))\n tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),\n check_dtype=False) # HACK: win32\n else:\n skipna_wrapper = alternative\n wrapper = alternative\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n tm.assert_series_equal(result0, frame.apply(skipna_wrapper))\n tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),\n check_dtype=False)\n\n # result = f(axis=1)\n # comp = frame.apply(alternative, axis=1).reindex(result.index)\n # assert_series_equal(result, comp)\n\n # bad axis\n pytest.raises(ValueError, f, axis=2)\n\n # make sure works on mixed-type frame\n mixed = self.mixed_frame\n mixed['_bool_'] = np.random.randn(len(mixed)) > 0\n getattr(mixed, name)(axis=0)\n getattr(mixed, name)(axis=1)\n\n class NonzeroFail(object):\n\n def __nonzero__(self):\n raise ValueError\n\n mixed['_nonzero_fail_'] = NonzeroFail()\n\n if has_bool_only:\n getattr(mixed, name)(axis=0, bool_only=True)\n getattr(mixed, name)(axis=1, bool_only=True)\n getattr(frame, name)(axis=0, bool_only=False)\n getattr(frame, name)(axis=1, bool_only=False)\n\n # all NA case\n if has_skipna:\n all_na = frame * np.NaN\n r0 = getattr(all_na, name)(axis=0)\n r1 = getattr(all_na, name)(axis=1)\n if name == 'any':\n assert not r0.any()\n assert not r1.any()\n else:\n assert r0.all()\n assert r1.all()\n\n # ----------------------------------------------------------------------\n # Isin\n\n def test_isin(self):\n # GH #4211\n df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],\n 'ids2': ['a', 'n', 'c', 'n']},\n index=['foo', 'bar', 'baz', 'qux'])\n other = ['a', 'b', 'c']\n\n result = df.isin(other)\n expected = DataFrame([df.loc[s].isin(other) for s in df.index])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"empty\", [[], Series(), np.array([])])\n def test_isin_empty(self, empty):\n # see gh-16991\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n expected = DataFrame(False, df.index, df.columns)\n\n result = 
df.isin(empty)\n tm.assert_frame_equal(result, expected)\n\n def test_isin_dict(self):\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n d = {'A': ['a']}\n\n expected = DataFrame(False, df.index, df.columns)\n expected.loc[0, 'A'] = True\n\n result = df.isin(d)\n tm.assert_frame_equal(result, expected)\n\n # non unique columns\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n df.columns = ['A', 'A']\n expected = DataFrame(False, df.index, df.columns)\n expected.loc[0, 'A'] = True\n result = df.isin(d)\n tm.assert_frame_equal(result, expected)\n\n def test_isin_with_string_scalar(self):\n # GH4763\n df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],\n 'ids2': ['a', 'n', 'c', 'n']},\n index=['foo', 'bar', 'baz', 'qux'])\n with pytest.raises(TypeError):\n df.isin('a')\n\n with pytest.raises(TypeError):\n df.isin('aaa')\n\n def test_isin_df(self):\n df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})\n df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})\n expected = DataFrame(False, df1.index, df1.columns)\n result = df1.isin(df2)\n expected['A'].loc[[1, 3]] = True\n expected['B'].loc[[0, 2]] = True\n tm.assert_frame_equal(result, expected)\n\n # partial overlapping columns\n df2.columns = ['A', 'C']\n result = df1.isin(df2)\n expected['B'] = False\n tm.assert_frame_equal(result, expected)\n\n def test_isin_tuples(self):\n # GH16394\n df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})\n df['C'] = list(zip(df['A'], df['B']))\n result = df['C'].isin([(1, 'a')])\n tm.assert_series_equal(result,\n Series([True, False, False], name=\"C\"))\n\n def test_isin_df_dupe_values(self):\n df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})\n # just cols duped\n df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],\n columns=['B', 'B'])\n with pytest.raises(ValueError):\n df1.isin(df2)\n\n # just index duped\n df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],\n columns=['A', 'B'], index=[0, 0, 1, 1])\n with pytest.raises(ValueError):\n df1.isin(df2)\n\n # cols and index:\n df2.columns = ['B', 'B']\n with pytest.raises(ValueError):\n df1.isin(df2)\n\n def test_isin_dupe_self(self):\n other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})\n df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])\n result = df.isin(other)\n expected = DataFrame(False, index=df.index, columns=df.columns)\n expected.loc[0] = True\n expected.iloc[1, 1] = True\n tm.assert_frame_equal(result, expected)\n\n def test_isin_against_series(self):\n df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},\n index=['a', 'b', 'c', 'd'])\n s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])\n expected = DataFrame(False, index=df.index, columns=df.columns)\n expected['A'].loc['a'] = True\n expected.loc['d'] = True\n result = df.isin(s)\n tm.assert_frame_equal(result, expected)\n\n def test_isin_multiIndex(self):\n idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),\n (0, 'b', 'bar'), (0, 'b', 'baz'),\n (2, 'a', 'foo'), (2, 'a', 'bar'),\n (2, 'c', 'bar'), (2, 'c', 'baz'),\n (1, 'b', 'foo'), (1, 'b', 'bar'),\n (1, 'c', 'bar'), (1, 'c', 'baz')])\n df1 = DataFrame({'A': np.ones(12),\n 'B': np.zeros(12)}, index=idx)\n df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n 'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})\n # against regular index\n expected = DataFrame(False, index=df1.index, columns=df1.columns)\n result = df1.isin(df2)\n tm.assert_frame_equal(result, expected)\n\n df2.index = idx\n expected = 
df2.values.astype(np.bool)\n expected[:, 1] = ~expected[:, 1]\n expected = DataFrame(expected, columns=['A', 'B'], index=idx)\n\n result = df1.isin(df2)\n tm.assert_frame_equal(result, expected)\n\n def test_isin_empty_datetimelike(self):\n # GH 15473\n df1_ts = DataFrame({'date':\n pd.to_datetime(['2014-01-01', '2014-01-02'])})\n df1_td = DataFrame({'date':\n [pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})\n df2 = DataFrame({'date': []})\n df3 = DataFrame()\n\n expected = DataFrame({'date': [False, False]})\n\n result = df1_ts.isin(df2)\n tm.assert_frame_equal(result, expected)\n result = df1_ts.isin(df3)\n tm.assert_frame_equal(result, expected)\n\n result = df1_td.isin(df2)\n tm.assert_frame_equal(result, expected)\n result = df1_td.isin(df3)\n tm.assert_frame_equal(result, expected)\n\n # Rounding\n def test_round(self):\n # GH 2665\n\n # Test that rounding an empty DataFrame does nothing\n df = DataFrame()\n tm.assert_frame_equal(df, df.round())\n\n # Here's the test frame we'll be working with\n df = DataFrame({'col1': [1.123, 2.123, 3.123],\n 'col2': [1.234, 2.234, 3.234]})\n\n # Default round to integer (i.e. decimals=0)\n expected_rounded = DataFrame(\n {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})\n tm.assert_frame_equal(df.round(), expected_rounded)\n\n # Round with an integer\n decimals = 2\n expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],\n 'col2': [1.23, 2.23, 3.23]})\n tm.assert_frame_equal(df.round(decimals), expected_rounded)\n\n # This should also work with np.round (since np.round dispatches to\n # df.round)\n tm.assert_frame_equal(np.round(df, decimals), expected_rounded)\n\n # Round with a list\n round_list = [1, 2]\n with pytest.raises(TypeError):\n df.round(round_list)\n\n # Round with a dictionary\n expected_rounded = DataFrame(\n {'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})\n round_dict = {'col1': 1, 'col2': 2}\n tm.assert_frame_equal(df.round(round_dict), expected_rounded)\n\n # Incomplete dict\n expected_partially_rounded = DataFrame(\n {'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})\n partial_round_dict = {'col2': 1}\n tm.assert_frame_equal(df.round(partial_round_dict),\n expected_partially_rounded)\n\n # Dict with unknown elements\n wrong_round_dict = {'col3': 2, 'col2': 1}\n tm.assert_frame_equal(df.round(wrong_round_dict),\n expected_partially_rounded)\n\n # float input to `decimals`\n non_int_round_dict = {'col1': 1, 'col2': 0.5}\n with pytest.raises(TypeError):\n df.round(non_int_round_dict)\n\n # String input\n non_int_round_dict = {'col1': 1, 'col2': 'foo'}\n with pytest.raises(TypeError):\n df.round(non_int_round_dict)\n\n non_int_round_Series = Series(non_int_round_dict)\n with pytest.raises(TypeError):\n df.round(non_int_round_Series)\n\n # List input\n non_int_round_dict = {'col1': 1, 'col2': [1, 2]}\n with pytest.raises(TypeError):\n df.round(non_int_round_dict)\n\n non_int_round_Series = Series(non_int_round_dict)\n with pytest.raises(TypeError):\n df.round(non_int_round_Series)\n\n # Non integer Series inputs\n non_int_round_Series = Series(non_int_round_dict)\n with pytest.raises(TypeError):\n df.round(non_int_round_Series)\n\n non_int_round_Series = Series(non_int_round_dict)\n with pytest.raises(TypeError):\n df.round(non_int_round_Series)\n\n # Negative numbers\n negative_round_dict = {'col1': -1, 'col2': -2}\n big_df = df * 100\n expected_neg_rounded = DataFrame(\n {'col1': [110., 210, 310], 'col2': [100., 200, 300]})\n tm.assert_frame_equal(big_df.round(negative_round_dict),\n expected_neg_rounded)\n\n # nan in 
Series round\n nan_round_Series = Series({'col1': nan, 'col2': 1})\n\n # TODO(wesm): unused?\n expected_nan_round = DataFrame({ # noqa\n 'col1': [1.123, 2.123, 3.123],\n 'col2': [1.2, 2.2, 3.2]})\n\n with pytest.raises(TypeError):\n df.round(nan_round_Series)\n\n # Make sure this doesn't break existing Series.round\n tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])\n\n # named columns\n # GH 11986\n decimals = 2\n expected_rounded = DataFrame(\n {'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})\n df.columns.name = \"cols\"\n expected_rounded.columns.name = \"cols\"\n tm.assert_frame_equal(df.round(decimals), expected_rounded)\n\n # interaction of named columns & series\n tm.assert_series_equal(df['col1'].round(decimals),\n expected_rounded['col1'])\n tm.assert_series_equal(df.round(decimals)['col1'],\n expected_rounded['col1'])\n\n def test_numpy_round(self):\n # See gh-12600\n df = DataFrame([[1.53, 1.36], [0.06, 7.01]])\n out = np.round(df, decimals=0)\n expected = DataFrame([[2., 1.], [0., 7.]])\n tm.assert_frame_equal(out, expected)\n\n msg = \"the 'out' parameter is not supported\"\n with tm.assert_raises_regex(ValueError, msg):\n np.round(df, decimals=0, out=df)\n\n def test_round_mixed_type(self):\n # GH11885\n df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],\n 'col2': ['1', 'a', 'c', 'f'],\n 'col3': date_range('20111111', periods=4)})\n round_0 = DataFrame({'col1': [1., 2., 3., 4.],\n 'col2': ['1', 'a', 'c', 'f'],\n 'col3': date_range('20111111', periods=4)})\n tm.assert_frame_equal(df.round(), round_0)\n tm.assert_frame_equal(df.round(1), df)\n tm.assert_frame_equal(df.round({'col1': 1}), df)\n tm.assert_frame_equal(df.round({'col1': 0}), round_0)\n tm.assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)\n tm.assert_frame_equal(df.round({'col3': 1}), df)\n\n def test_round_issue(self):\n # GH11611\n\n df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],\n index=['first', 'second', 'third'])\n\n dfs = pd.concat((df, df), axis=1)\n rounded = dfs.round()\n tm.assert_index_equal(rounded.index, dfs.index)\n\n decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])\n pytest.raises(ValueError, df.round, decimals)\n\n def test_built_in_round(self):\n if not compat.PY3:\n pytest.skip(\"build in round cannot be overridden \"\n \"prior to Python 3\")\n\n # GH11763\n # Here's the test frame we'll be working with\n df = DataFrame(\n {'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})\n\n # Default round to integer (i.e. 
decimals=0)\n expected_rounded = DataFrame(\n {'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})\n tm.assert_frame_equal(round(df), expected_rounded)\n\n def test_pct_change(self):\n # GH 11150\n pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(\n 0, 40, 10)]).astype(np.float64)\n pnl.iat[1, 0] = np.nan\n pnl.iat[1, 1] = np.nan\n pnl.iat[2, 3] = 60\n\n for axis in range(2):\n expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(\n axis=axis) - 1\n result = pnl.pct_change(axis=axis, fill_method='pad')\n\n tm.assert_frame_equal(result, expected)\n\n # Clip\n\n def test_clip(self):\n median = self.frame.median().median()\n original = self.frame.copy()\n\n capped = self.frame.clip_upper(median)\n assert not (capped.values > median).any()\n\n floored = self.frame.clip_lower(median)\n assert not (floored.values < median).any()\n\n double = self.frame.clip(upper=median, lower=median)\n assert not (double.values != median).any()\n\n # Verify that self.frame was not changed inplace\n assert (self.frame.values == original.values).all()\n\n def test_inplace_clip(self):\n # GH #15388\n median = self.frame.median().median()\n frame_copy = self.frame.copy()\n\n frame_copy.clip_upper(median, inplace=True)\n assert not (frame_copy.values > median).any()\n frame_copy = self.frame.copy()\n\n frame_copy.clip_lower(median, inplace=True)\n assert not (frame_copy.values < median).any()\n frame_copy = self.frame.copy()\n\n frame_copy.clip(upper=median, lower=median, inplace=True)\n assert not (frame_copy.values != median).any()\n\n def test_dataframe_clip(self):\n # GH #2747\n df = DataFrame(np.random.randn(1000, 2))\n\n for lb, ub in [(-1, 1), (1, -1)]:\n clipped_df = df.clip(lb, ub)\n\n lb, ub = min(lb, ub), max(ub, lb)\n lb_mask = df.values <= lb\n ub_mask = df.values >= ub\n mask = ~lb_mask & ~ub_mask\n assert (clipped_df.values[lb_mask] == lb).all()\n assert (clipped_df.values[ub_mask] == ub).all()\n assert (clipped_df.values[mask] == df.values[mask]).all()\n\n def test_clip_mixed_numeric(self):\n # TODO(jreback)\n # clip on mixed integer or floats\n # with integer clippers coerces to float\n df = DataFrame({'A': [1, 2, 3],\n 'B': [1., np.nan, 3.]})\n result = df.clip(1, 2)\n expected = DataFrame({'A': [1, 2, 2.],\n 'B': [1., np.nan, 2.]})\n tm.assert_frame_equal(result, expected, check_like=True)\n\n @pytest.mark.parametrize(\"inplace\", [True, False])\n def test_clip_against_series(self, inplace):\n # GH #6966\n\n df = DataFrame(np.random.randn(1000, 2))\n lb = Series(np.random.randn(1000))\n ub = lb + 1\n\n original = df.copy()\n clipped_df = df.clip(lb, ub, axis=0, inplace=inplace)\n\n if inplace:\n clipped_df = df\n\n for i in range(2):\n lb_mask = original.iloc[:, i] <= lb\n ub_mask = original.iloc[:, i] >= ub\n mask = ~lb_mask & ~ub_mask\n\n result = clipped_df.loc[lb_mask, i]\n tm.assert_series_equal(result, lb[lb_mask], check_names=False)\n assert result.name == i\n\n result = clipped_df.loc[ub_mask, i]\n tm.assert_series_equal(result, ub[ub_mask], check_names=False)\n assert result.name == i\n\n tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])\n\n @pytest.mark.parametrize(\"inplace\", [True, False])\n @pytest.mark.parametrize(\"lower\", [[2, 3, 4], np.asarray([2, 3, 4])])\n @pytest.mark.parametrize(\"axis,res\", [\n (0, [[2., 2., 3.], [4., 5., 6.], [7., 7., 7.]]),\n (1, [[2., 3., 4.], [4., 5., 6.], [5., 6., 7.]])\n ])\n def test_clip_against_list_like(self, inplace, lower, axis, res):\n # GH #15390\n original = self.simple.copy(deep=True)\n\n result = 
original.clip(lower=lower, upper=[5, 6, 7],\n                               axis=axis, inplace=inplace)\n\n        expected = pd.DataFrame(res,\n                                columns=original.columns,\n                                index=original.index)\n        if inplace:\n            result = original\n        tm.assert_frame_equal(result, expected, check_exact=True)\n\n    @pytest.mark.parametrize(\"axis\", [0, 1, None])\n    def test_clip_against_frame(self, axis):\n        df = DataFrame(np.random.randn(1000, 2))\n        lb = DataFrame(np.random.randn(1000, 2))\n        ub = lb + 1\n\n        clipped_df = df.clip(lb, ub, axis=axis)\n\n        lb_mask = df <= lb\n        ub_mask = df >= ub\n        mask = ~lb_mask & ~ub_mask\n\n        tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])\n        tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])\n        tm.assert_frame_equal(clipped_df[mask], df[mask])\n\n    def test_clip_with_na_args(self):\n        \"\"\"Should process np.nan argument as None \"\"\"\n        # GH # 17276\n        tm.assert_frame_equal(self.frame.clip(np.nan), self.frame)\n        tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan),\n                              self.frame)\n\n        # GH #19992\n        df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6],\n                        'col_2': [7, 8, 9]})\n\n        result = df.clip(lower=[4, 5, np.nan], axis=0)\n        expected = DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan],\n                              'col_2': [7, 8, np.nan]})\n        tm.assert_frame_equal(result, expected)\n\n        result = df.clip(lower=[4, 5, np.nan], axis=1)\n        expected = DataFrame({'col_0': [4, 4, 4], 'col_1': [5, 5, 6],\n                              'col_2': [np.nan, np.nan, np.nan]})\n        tm.assert_frame_equal(result, expected)\n\n    # Matrix-like\n    def test_dot(self):\n        a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],\n                      columns=['p', 'q', 'r', 's'])\n        b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],\n                      columns=['one', 'two'])\n\n        result = a.dot(b)\n        expected = DataFrame(np.dot(a.values, b.values),\n                             index=['a', 'b', 'c'],\n                             columns=['one', 'two'])\n        # Check alignment: b1 has a reversed index, so dot must realign it\n        b1 = b.reindex(index=reversed(b.index))\n        result = a.dot(b1)\n        tm.assert_frame_equal(result, expected)\n\n        # Check series argument\n        result = a.dot(b['one'])\n        tm.assert_series_equal(result, expected['one'], check_names=False)\n        assert result.name is None\n\n        result = a.dot(b1['one'])\n        tm.assert_series_equal(result, expected['one'], check_names=False)\n        assert result.name is None\n\n        # can pass correct-length arrays\n        row = a.iloc[0].values\n\n        result = a.dot(row)\n        exp = a.dot(a.iloc[0])\n        tm.assert_series_equal(result, exp)\n\n        with tm.assert_raises_regex(ValueError,\n                                    'Dot product shape mismatch'):\n            a.dot(row[:-1])\n\n        a = np.random.rand(1, 5)\n        b = np.random.rand(5, 1)\n        A = DataFrame(a)\n\n        # TODO(wesm): unused\n        B = DataFrame(b)  # noqa\n\n        # it works\n        result = A.dot(b)\n\n        # unaligned\n        df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))\n        df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])\n\n        with tm.assert_raises_regex(ValueError, 'aligned'):\n            df.dot(df2)\n\n    @pytest.mark.skipif(not PY35,\n                        reason='matmul supported for Python>=3.5')\n    @pytest.mark.xfail(\n        _np_version_under1p12,\n        reason=\"unpredictable return types under numpy < 1.12\")\n    def test_matmul(self):\n        # matmul test is for GH #10259\n        a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],\n                      columns=['p', 'q', 'r', 's'])\n        b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],\n                      columns=['one', 'two'])\n\n        # DataFrame @ DataFrame\n        result = operator.matmul(a, b)\n        expected = DataFrame(np.dot(a.values, b.values),\n                             index=['a', 'b', 'c'],\n                             columns=['one', 'two'])\n        tm.assert_frame_equal(result, expected)\n\n        # DataFrame @ Series\n        result = operator.matmul(a, b.one)\n        
expected = Series(np.dot(a.values, b.one.values),\n index=['a', 'b', 'c'])\n tm.assert_series_equal(result, expected)\n\n # np.array @ DataFrame\n result = operator.matmul(a.values, b)\n expected = np.dot(a.values, b.values)\n tm.assert_almost_equal(result, expected)\n\n # nested list @ DataFrame (__rmatmul__)\n result = operator.matmul(a.values.tolist(), b)\n expected = DataFrame(np.dot(a.values, b.values),\n index=['a', 'b', 'c'],\n columns=['one', 'two'])\n tm.assert_almost_equal(result.values, expected.values)\n\n # mixed dtype DataFrame @ DataFrame\n a['q'] = a.q.round().astype(int)\n result = operator.matmul(a, b)\n expected = DataFrame(np.dot(a.values, b.values),\n index=['a', 'b', 'c'],\n columns=['one', 'two'])\n tm.assert_frame_equal(result, expected)\n\n # different dtypes DataFrame @ DataFrame\n a = a.astype(int)\n result = operator.matmul(a, b)\n expected = DataFrame(np.dot(a.values, b.values),\n index=['a', 'b', 'c'],\n columns=['one', 'two'])\n tm.assert_frame_equal(result, expected)\n\n # unaligned\n df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))\n df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])\n\n with tm.assert_raises_regex(ValueError, 'aligned'):\n operator.matmul(df, df2)\n\n\[email protected]\ndef df_duplicates():\n return pd.DataFrame({'a': [1, 2, 3, 4, 4],\n 'b': [1, 1, 1, 1, 1],\n 'c': [0, 1, 2, 5, 4]},\n index=[0, 0, 1, 1, 1])\n\n\[email protected]\ndef df_strings():\n return pd.DataFrame({'a': np.random.permutation(10),\n 'b': list(ascii_lowercase[:10]),\n 'c': np.random.permutation(10).astype('float64')})\n\n\[email protected]\ndef df_main_dtypes():\n return pd.DataFrame(\n {'group': [1, 1, 2],\n 'int': [1, 2, 3],\n 'float': [4., 5., 6.],\n 'string': list('abc'),\n 'category_string': pd.Series(list('abc')).astype('category'),\n 'category_int': [7, 8, 9],\n 'datetime': pd.date_range('20130101', periods=3),\n 'datetimetz': pd.date_range('20130101',\n periods=3,\n tz='US/Eastern'),\n 'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},\n columns=['group', 'int', 'float', 'string',\n 'category_string', 'category_int',\n 'datetime', 'datetimetz',\n 'timedelta'])\n\n\nclass TestNLargestNSmallest(object):\n\n dtype_error_msg_template = (\"Column {column!r} has dtype {dtype}, cannot \"\n \"use method {method!r} with this dtype\")\n\n # ----------------------------------------------------------------------\n # Top / bottom\n @pytest.mark.parametrize('order', [\n ['a'],\n ['c'],\n ['a', 'b'],\n ['a', 'c'],\n ['b', 'a'],\n ['b', 'c'],\n ['a', 'b', 'c'],\n ['c', 'a', 'b'],\n ['c', 'b', 'a'],\n ['b', 'c', 'a'],\n ['b', 'a', 'c'],\n\n # dups!\n ['b', 'c', 'c']])\n @pytest.mark.parametrize('n', range(1, 11))\n def test_n(self, df_strings, nselect_method, n, order):\n # GH10393\n df = df_strings\n if 'b' in order:\n\n error_msg = self.dtype_error_msg_template.format(\n column='b', method=nselect_method, dtype='object')\n with tm.assert_raises_regex(TypeError, error_msg):\n getattr(df, nselect_method)(n, order)\n else:\n ascending = nselect_method == 'nsmallest'\n result = getattr(df, nselect_method)(n, order)\n expected = df.sort_values(order, ascending=ascending).head(n)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('columns', [\n ('group', 'category_string'), ('group', 'string')])\n def test_n_error(self, df_main_dtypes, nselect_method, columns):\n df = df_main_dtypes\n col = columns[1]\n error_msg = self.dtype_error_msg_template.format(\n column=col, method=nselect_method, dtype=df[col].dtype)\n # escape 
some characters that may be in the repr\n error_msg = (error_msg.replace('(', '\\\\(').replace(\")\", \"\\\\)\")\n .replace(\"[\", \"\\\\[\").replace(\"]\", \"\\\\]\"))\n with tm.assert_raises_regex(TypeError, error_msg):\n getattr(df, nselect_method)(2, columns)\n\n def test_n_all_dtypes(self, df_main_dtypes):\n df = df_main_dtypes\n df.nsmallest(2, list(set(df) - {'category_string', 'string'}))\n df.nlargest(2, list(set(df) - {'category_string', 'string'}))\n\n def test_n_identical_values(self):\n # GH15297\n df = pd.DataFrame({'a': [1] * 5, 'b': [1, 2, 3, 4, 5]})\n\n result = df.nlargest(3, 'a')\n expected = pd.DataFrame(\n {'a': [1] * 3, 'b': [1, 2, 3]}, index=[0, 1, 2]\n )\n tm.assert_frame_equal(result, expected)\n\n result = df.nsmallest(3, 'a')\n expected = pd.DataFrame({'a': [1] * 3, 'b': [1, 2, 3]})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize('order', [\n ['a', 'b', 'c'],\n ['c', 'b', 'a'],\n ['a'],\n ['b'],\n ['a', 'b'],\n ['c', 'b']])\n @pytest.mark.parametrize('n', range(1, 6))\n def test_n_duplicate_index(self, df_duplicates, n, order):\n # GH 13412\n\n df = df_duplicates\n result = df.nsmallest(n, order)\n expected = df.sort_values(order).head(n)\n tm.assert_frame_equal(result, expected)\n\n result = df.nlargest(n, order)\n expected = df.sort_values(order, ascending=False).head(n)\n tm.assert_frame_equal(result, expected)\n\n def test_duplicate_keep_all_ties(self):\n # see gh-16818\n df = pd.DataFrame({'a': [5, 4, 4, 2, 3, 3, 3, 3],\n 'b': [10, 9, 8, 7, 5, 50, 10, 20]})\n result = df.nlargest(4, 'a', keep='all')\n expected = pd.DataFrame({'a': {0: 5, 1: 4, 2: 4, 4: 3,\n 5: 3, 6: 3, 7: 3},\n 'b': {0: 10, 1: 9, 2: 8, 4: 5,\n 5: 50, 6: 10, 7: 20}})\n tm.assert_frame_equal(result, expected)\n\n result = df.nsmallest(2, 'a', keep='all')\n expected = pd.DataFrame({'a': {3: 2, 4: 3, 5: 3, 6: 3, 7: 3},\n 'b': {3: 7, 4: 5, 5: 50, 6: 10, 7: 20}})\n tm.assert_frame_equal(result, expected)\n\n def test_series_broadcasting(self):\n # smoke test for numpy warnings\n # GH 16378, GH 16306\n df = DataFrame([1.0, 1.0, 1.0])\n df_nan = DataFrame({'A': [np.nan, 2.0, np.nan]})\n s = Series([1, 1, 1])\n s_nan = Series([np.nan, np.nan, 1])\n\n with tm.assert_produces_warning(None):\n df_nan.clip_lower(s, axis=0)\n for op in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:\n getattr(df, op)(s_nan, axis=0)\n\n def test_series_nat_conversion(self):\n # GH 18521\n # Check rank does not mutate DataFrame\n df = DataFrame(np.random.randn(10, 3), dtype='float64')\n expected = df.copy()\n df.rank()\n result = df\n tm.assert_frame_equal(result, expected)\n",
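The test module above leans heavily on the `min_count` keyword that `DataFrame.sum` and `DataFrame.prod` accept (see `test_sum_prod_nanops`). A minimal standalone sketch of those semantics, assuming pandas 0.22 or later, where `min_count` was introduced; the frame below is illustrative and not taken from the tests:

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [0, 0], "b": [0, np.nan], "c": [np.nan, np.nan]})

# Default min_count=0: an all-NA column reduces to the identity element
# of the operation (0 for sum, 1 for prod).
print(df.sum())             # a: 0.0, b: 0.0, c: 0.0
# min_count=1 demands at least one non-NA value, so column "c" becomes NaN.
print(df.sum(min_count=1))  # a: 0.0, b: 0.0, c: NaN
# min_count=2 also rules out the partially missing column "b".
print(df.sum(min_count=2))  # a: 0.0, b: NaN, c: NaN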
"import pytest\n\nimport operator\n\nimport pandas as pd\nfrom pandas.core import ops\nfrom .base import BaseExtensionTests\n\n\nclass BaseOpsUtil(BaseExtensionTests):\n\n def get_op_from_name(self, op_name):\n short_opname = op_name.strip('_')\n try:\n op = getattr(operator, short_opname)\n except AttributeError:\n # Assume it is the reverse operator\n rop = getattr(operator, short_opname[1:])\n op = lambda x, y: rop(y, x)\n\n return op\n\n def check_opname(self, s, op_name, other, exc=NotImplementedError):\n op = self.get_op_from_name(op_name)\n\n self._check_op(s, op, other, exc)\n\n def _check_op(self, s, op, other, exc=NotImplementedError):\n if exc is None:\n result = op(s, other)\n expected = s.combine(other, op)\n self.assert_series_equal(result, expected)\n else:\n with pytest.raises(exc):\n op(s, other)\n\n def _check_divmod_op(self, s, op, other, exc=NotImplementedError):\n # divmod has multiple return values, so check separatly\n if exc is None:\n result_div, result_mod = op(s, other)\n if op is divmod:\n expected_div, expected_mod = s // other, s % other\n else:\n expected_div, expected_mod = other // s, other % s\n self.assert_series_equal(result_div, expected_div)\n self.assert_series_equal(result_mod, expected_mod)\n else:\n with pytest.raises(exc):\n divmod(s, other)\n\n\nclass BaseArithmeticOpsTests(BaseOpsUtil):\n \"\"\"Various Series and DataFrame arithmetic ops methods.\"\"\"\n\n def test_arith_series_with_scalar(self, data, all_arithmetic_operators):\n # series & scalar\n op_name = all_arithmetic_operators\n s = pd.Series(data)\n self.check_opname(s, op_name, s.iloc[0], exc=TypeError)\n\n @pytest.mark.xfail(run=False, reason=\"_reduce needs implementation\")\n def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):\n # frame & scalar\n op_name = all_arithmetic_operators\n df = pd.DataFrame({'A': data})\n self.check_opname(df, op_name, data[0], exc=TypeError)\n\n def test_arith_series_with_array(self, data, all_arithmetic_operators):\n # ndarray & other series\n op_name = all_arithmetic_operators\n s = pd.Series(data)\n self.check_opname(s, op_name, [s.iloc[0]] * len(s), exc=TypeError)\n\n def test_divmod(self, data):\n s = pd.Series(data)\n self._check_divmod_op(s, divmod, 1, exc=TypeError)\n self._check_divmod_op(1, ops.rdivmod, s, exc=TypeError)\n\n def test_error(self, data, all_arithmetic_operators):\n # invalid ops\n op_name = all_arithmetic_operators\n with pytest.raises(AttributeError):\n getattr(data, op_name)\n\n\nclass BaseComparisonOpsTests(BaseOpsUtil):\n \"\"\"Various Series and DataFrame comparison ops methods.\"\"\"\n\n def _compare_other(self, s, data, op_name, other):\n op = self.get_op_from_name(op_name)\n if op_name == '__eq__':\n assert getattr(data, op_name)(other) is NotImplemented\n assert not op(s, other).all()\n elif op_name == '__ne__':\n assert getattr(data, op_name)(other) is NotImplemented\n assert op(s, other).all()\n\n else:\n\n # array\n assert getattr(data, op_name)(other) is NotImplemented\n\n # series\n s = pd.Series(data)\n with pytest.raises(TypeError):\n op(s, other)\n\n def test_compare_scalar(self, data, all_compare_operators):\n op_name = all_compare_operators\n s = pd.Series(data)\n self._compare_other(s, data, op_name, 0)\n\n def test_compare_array(self, data, all_compare_operators):\n op_name = all_compare_operators\n s = pd.Series(data)\n other = [0] * len(data)\n self._compare_other(s, data, op_name, other)\n",
"\"\"\"\nArithmetic operations for PandasObjects\n\nThis is not a public API.\n\"\"\"\n# necessary to enforce truediv in Python 2.X\nfrom __future__ import division\nimport datetime\nimport operator\nimport textwrap\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandas._libs import lib, algos as libalgos, ops as libops\n\nfrom pandas import compat\nfrom pandas.util._decorators import Appender\n\nfrom pandas.compat import bind_method\nimport pandas.core.missing as missing\nimport pandas.core.common as com\n\nfrom pandas.errors import NullFrequencyError\nfrom pandas.core.dtypes.missing import notna, isna\nfrom pandas.core.dtypes.common import (\n needs_i8_conversion,\n is_datetimelike_v_numeric,\n is_integer_dtype, is_categorical_dtype,\n is_object_dtype, is_timedelta64_dtype,\n is_datetime64_dtype, is_datetime64tz_dtype,\n is_bool_dtype,\n is_list_like,\n is_scalar,\n is_extension_array_dtype,\n ensure_object)\nfrom pandas.core.dtypes.cast import (\n maybe_upcast_putmask, find_common_type,\n construct_1d_object_array_from_listlike)\nfrom pandas.core.dtypes.generic import (\n ABCSeries,\n ABCDataFrame, ABCPanel,\n ABCIndex,\n ABCSparseSeries, ABCSparseArray)\n\n\n# -----------------------------------------------------------------------------\n# Ops Wrapping Utilities\n\ndef get_op_result_name(left, right):\n \"\"\"\n Find the appropriate name to pin to an operation result. This result\n should always be either an Index or a Series.\n\n Parameters\n ----------\n left : {Series, Index}\n right : object\n\n Returns\n -------\n name : object\n Usually a string\n \"\"\"\n # `left` is always a pd.Series when called from within ops\n if isinstance(right, (ABCSeries, pd.Index)):\n name = _maybe_match_name(left, right)\n else:\n name = left.name\n return name\n\n\ndef _maybe_match_name(a, b):\n \"\"\"\n Try to find a name to attach to the result of an operation between\n a and b. If only one of these has a `name` attribute, return that\n name. 
Otherwise return a consensus name if they match of None if\n they have different names.\n\n Parameters\n ----------\n a : object\n b : object\n\n Returns\n -------\n name : str or None\n\n See also\n --------\n pandas.core.common.consensus_name_attr\n \"\"\"\n a_has = hasattr(a, 'name')\n b_has = hasattr(b, 'name')\n if a_has and b_has:\n if a.name == b.name:\n return a.name\n else:\n # TODO: what if they both have np.nan for their names?\n return None\n elif a_has:\n return a.name\n elif b_has:\n return b.name\n return None\n\n\n# -----------------------------------------------------------------------------\n# Reversed Operations not available in the stdlib operator module.\n# Defining these instead of using lambdas allows us to reference them by name.\n\ndef radd(left, right):\n return right + left\n\n\ndef rsub(left, right):\n return right - left\n\n\ndef rmul(left, right):\n return right * left\n\n\ndef rdiv(left, right):\n return right / left\n\n\ndef rtruediv(left, right):\n return right / left\n\n\ndef rfloordiv(left, right):\n return right // left\n\n\ndef rmod(left, right):\n # check if right is a string as % is the string\n # formatting operation; this is a TypeError\n # otherwise perform the op\n if isinstance(right, compat.string_types):\n raise TypeError(\"{typ} cannot perform the operation mod\".format(\n typ=type(left).__name__))\n\n return right % left\n\n\ndef rdivmod(left, right):\n return divmod(right, left)\n\n\ndef rpow(left, right):\n return right ** left\n\n\ndef rand_(left, right):\n return operator.and_(right, left)\n\n\ndef ror_(left, right):\n return operator.or_(right, left)\n\n\ndef rxor(left, right):\n return operator.xor(right, left)\n\n\n# -----------------------------------------------------------------------------\n\ndef make_invalid_op(name):\n \"\"\"\n Return a binary method that always raises a TypeError.\n\n Parameters\n ----------\n name : str\n\n Returns\n -------\n invalid_op : function\n \"\"\"\n def invalid_op(self, other=None):\n raise TypeError(\"cannot perform {name} with this index type: \"\n \"{typ}\".format(name=name, typ=type(self).__name__))\n\n invalid_op.__name__ = name\n return invalid_op\n\n\ndef _gen_eval_kwargs(name):\n \"\"\"\n Find the keyword arguments to pass to numexpr for the given operation.\n\n Parameters\n ----------\n name : str\n\n Returns\n -------\n eval_kwargs : dict\n\n Examples\n --------\n >>> _gen_eval_kwargs(\"__add__\")\n {}\n\n >>> _gen_eval_kwargs(\"rtruediv\")\n {\"reversed\": True, \"truediv\": True}\n \"\"\"\n kwargs = {}\n\n # Series and Panel appear to only pass __add__, __radd__, ...\n # but DataFrame gets both these dunder names _and_ non-dunder names\n # add, radd, ...\n name = name.replace('__', '')\n\n if name.startswith('r'):\n if name not in ['radd', 'rand', 'ror', 'rxor']:\n # Exclude commutative operations\n kwargs['reversed'] = True\n\n if name in ['truediv', 'rtruediv']:\n kwargs['truediv'] = True\n\n if name in ['ne']:\n kwargs['masker'] = True\n\n return kwargs\n\n\ndef _gen_fill_zeros(name):\n \"\"\"\n Find the appropriate fill value to use when filling in undefined values\n in the results of the given operation caused by operating on\n (generally dividing by) zero.\n\n Parameters\n ----------\n name : str\n\n Returns\n -------\n fill_value : {None, np.nan, np.inf}\n \"\"\"\n name = name.strip('__')\n if 'div' in name:\n # truediv, floordiv, div, and reversed variants\n fill_value = np.inf\n elif 'mod' in name:\n # mod, rmod\n fill_value = np.nan\n else:\n fill_value = None\n return 
fill_value\n\n\ndef _get_frame_op_default_axis(name):\n \"\"\"\n Only DataFrame cares about default_axis, specifically:\n special methods have default_axis=None and flex methods\n have default_axis='columns'.\n\n Parameters\n ----------\n name : str\n\n Returns\n -------\n default_axis: str or None\n \"\"\"\n if name.replace('__r', '__') in ['__and__', '__or__', '__xor__']:\n # bool methods\n return 'columns'\n elif name.startswith('__'):\n # __add__, __mul__, ...\n return None\n else:\n # add, mul, ...\n return 'columns'\n\n\ndef _get_opstr(op, cls):\n \"\"\"\n Find the operation string, if any, to pass to numexpr for this\n operation.\n\n Parameters\n ----------\n op : binary operator\n cls : class\n\n Returns\n -------\n op_str : string or None\n \"\"\"\n # numexpr is available for non-sparse classes\n subtyp = getattr(cls, '_subtyp', '')\n use_numexpr = 'sparse' not in subtyp\n\n if not use_numexpr:\n # if we're not using numexpr, then don't pass a str_rep\n return None\n\n return {operator.add: '+',\n radd: '+',\n operator.mul: '*',\n rmul: '*',\n operator.sub: '-',\n rsub: '-',\n operator.truediv: '/',\n rtruediv: '/',\n operator.floordiv: '//',\n rfloordiv: '//',\n operator.mod: None, # TODO: Why None for mod but '%' for rmod?\n rmod: '%',\n operator.pow: '**',\n rpow: '**',\n operator.eq: '==',\n operator.ne: '!=',\n operator.le: '<=',\n operator.lt: '<',\n operator.ge: '>=',\n operator.gt: '>',\n operator.and_: '&',\n rand_: '&',\n operator.or_: '|',\n ror_: '|',\n operator.xor: '^',\n rxor: '^',\n divmod: None,\n rdivmod: None}[op]\n\n\ndef _get_op_name(op, special):\n \"\"\"\n Find the name to attach to this method according to conventions\n for special and non-special methods.\n\n Parameters\n ----------\n op : binary operator\n special : bool\n\n Returns\n -------\n op_name : str\n \"\"\"\n opname = op.__name__.strip('_')\n if special:\n opname = '__{opname}__'.format(opname=opname)\n return opname\n\n\n# -----------------------------------------------------------------------------\n# Docstring Generation and Templates\n\n_add_example_FRAME = \"\"\"\n>>> a = pd.DataFrame([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'],\n... columns=['one'])\n>>> a\n one\na 1.0\nb 1.0\nc 1.0\nd NaN\n>>> b = pd.DataFrame(dict(one=[1, np.nan, 1, np.nan],\n... two=[np.nan, 2, np.nan, 2]),\n... index=['a', 'b', 'd', 'e'])\n>>> b\n one two\na 1.0 NaN\nb NaN 2.0\nd 1.0 NaN\ne NaN 2.0\n>>> a.add(b, fill_value=0)\n one two\na 2.0 NaN\nb 1.0 2.0\nc 1.0 NaN\nd 1.0 NaN\ne NaN 2.0\n\"\"\"\n\n_sub_example_FRAME = \"\"\"\n>>> a = pd.DataFrame([2, 1, 1, np.nan], index=['a', 'b', 'c', 'd'],\n... columns=['one'])\n>>> a\n one\na 2.0\nb 1.0\nc 1.0\nd NaN\n>>> b = pd.DataFrame(dict(one=[1, np.nan, 1, np.nan],\n... two=[3, 2, np.nan, 2]),\n... index=['a', 'b', 'd', 'e'])\n>>> b\n one two\na 1.0 3.0\nb NaN 2.0\nd 1.0 NaN\ne NaN 2.0\n>>> a.sub(b, fill_value=0)\n one two\na 1.0 -3.0\nb 1.0 -2.0\nc 1.0 NaN\nd -1.0 NaN\ne NaN -2.0\n\"\"\"\n\n_mod_example_FRAME = \"\"\"\n**Using a scalar argument**\n\n>>> df = pd.DataFrame([2, 4, np.nan, 6.2], index=[\"a\", \"b\", \"c\", \"d\"],\n... columns=['one'])\n>>> df\n one\na 2.0\nb 4.0\nc NaN\nd 6.2\n>>> df.mod(3, fill_value=-1)\n one\na 2.0\nb 1.0\nc 2.0\nd 0.2\n\n**Using a DataFrame argument**\n\n>>> df = pd.DataFrame(dict(one=[np.nan, 2, 3, 14], two=[np.nan, 1, 1, 3]),\n... index=['a', 'b', 'c', 'd'])\n>>> df\n one two\na NaN NaN\nb 2.0 1.0\nc 3.0 1.0\nd 14.0 3.0\n>>> other = pd.DataFrame(dict(one=[np.nan, np.nan, 6, np.nan],\n... three=[np.nan, 10, np.nan, -7]),\n... 
index=['a', 'b', 'd', 'e'])\n>>> other\n one three\na NaN NaN\nb NaN 10.0\nd 6.0 NaN\ne NaN -7.0\n>>> df.mod(other, fill_value=3)\n one three two\na NaN NaN NaN\nb 2.0 3.0 1.0\nc 0.0 NaN 1.0\nd 2.0 NaN 0.0\ne NaN -4.0 NaN\n\"\"\"\n\n_op_descriptions = {\n # Arithmetic Operators\n 'add': {'op': '+',\n 'desc': 'Addition',\n 'reverse': 'radd',\n 'df_examples': _add_example_FRAME},\n 'sub': {'op': '-',\n 'desc': 'Subtraction',\n 'reverse': 'rsub',\n 'df_examples': _sub_example_FRAME},\n 'mul': {'op': '*',\n 'desc': 'Multiplication',\n 'reverse': 'rmul',\n 'df_examples': None},\n 'mod': {'op': '%',\n 'desc': 'Modulo',\n 'reverse': 'rmod',\n 'df_examples': _mod_example_FRAME},\n 'pow': {'op': '**',\n 'desc': 'Exponential power',\n 'reverse': 'rpow',\n 'df_examples': None},\n 'truediv': {'op': '/',\n 'desc': 'Floating division',\n 'reverse': 'rtruediv',\n 'df_examples': None},\n 'floordiv': {'op': '//',\n 'desc': 'Integer division',\n 'reverse': 'rfloordiv',\n 'df_examples': None},\n 'divmod': {'op': 'divmod',\n 'desc': 'Integer division and modulo',\n 'reverse': None,\n 'df_examples': None},\n\n # Comparison Operators\n 'eq': {'op': '==',\n 'desc': 'Equal to',\n 'reverse': None,\n 'df_examples': None},\n 'ne': {'op': '!=',\n 'desc': 'Not equal to',\n 'reverse': None,\n 'df_examples': None},\n 'lt': {'op': '<',\n 'desc': 'Less than',\n 'reverse': None,\n 'df_examples': None},\n 'le': {'op': '<=',\n 'desc': 'Less than or equal to',\n 'reverse': None,\n 'df_examples': None},\n 'gt': {'op': '>',\n 'desc': 'Greater than',\n 'reverse': None,\n 'df_examples': None},\n 'ge': {'op': '>=',\n 'desc': 'Greater than or equal to',\n 'reverse': None,\n 'df_examples': None}}\n\n_op_names = list(_op_descriptions.keys())\nfor key in _op_names:\n _op_descriptions[key]['reversed'] = False\n reverse_op = _op_descriptions[key]['reverse']\n if reverse_op is not None:\n _op_descriptions[reverse_op] = _op_descriptions[key].copy()\n _op_descriptions[reverse_op]['reversed'] = True\n _op_descriptions[reverse_op]['reverse'] = key\n\n_flex_doc_SERIES = \"\"\"\n{desc} of series and other, element-wise (binary operator `{op_name}`).\n\nEquivalent to ``{equiv}``, but with support to substitute a fill_value for\nmissing data in one of the inputs.\n\nParameters\n----------\nother : Series or scalar value\nfill_value : None or float value, default None (NaN)\n Fill existing missing (NaN) values, and any new element needed for\n successful Series alignment, with this value before computation.\n If data in both corresponding Series locations is missing\n the result will be missing\nlevel : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level\n\nReturns\n-------\nresult : Series\n\nExamples\n--------\n>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])\n>>> a\na 1.0\nb 1.0\nc 1.0\nd NaN\ndtype: float64\n>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])\n>>> b\na 1.0\nb NaN\nd 1.0\ne NaN\ndtype: float64\n>>> a.add(b, fill_value=0)\na 2.0\nb 1.0\nc 1.0\nd 1.0\ne NaN\ndtype: float64\n\nSee also\n--------\nSeries.{reverse}\n\"\"\"\n\n_arith_doc_FRAME = \"\"\"\nBinary operator %s with support to substitute a fill_value for missing data in\none of the inputs\n\nParameters\n----------\nother : Series, DataFrame, or constant\naxis : {0, 1, 'index', 'columns'}\n For Series input, axis to match Series index on\nfill_value : None or float value, default None\n Fill existing missing (NaN) values, and any new element needed for\n successful DataFrame alignment, with this 
value before computation.\n If data in both corresponding DataFrame locations is missing\n the result will be missing\nlevel : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level\n\nNotes\n-----\nMismatched indices will be unioned together\n\nReturns\n-------\nresult : DataFrame\n\"\"\"\n\n_flex_doc_FRAME = \"\"\"\n{desc} of dataframe and other, element-wise (binary operator `{op_name}`).\n\nEquivalent to ``{equiv}``, but with support to substitute a fill_value for\nmissing data in one of the inputs.\n\nParameters\n----------\nother : Series, DataFrame, or constant\naxis : {{0, 1, 'index', 'columns'}}\n For Series input, axis to match Series index on\nlevel : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level\nfill_value : None or float value, default None\n Fill existing missing (NaN) values, and any new element needed for\n successful DataFrame alignment, with this value before computation.\n If data in both corresponding DataFrame locations is missing\n the result will be missing\n\nNotes\n-----\nMismatched indices will be unioned together\n\nReturns\n-------\nresult : DataFrame\n\nExamples\n--------\n{df_examples}\n\nSee also\n--------\nDataFrame.{reverse}\n\"\"\"\n\n_flex_doc_PANEL = \"\"\"\n{desc} of series and other, element-wise (binary operator `{op_name}`).\nEquivalent to ``{equiv}``.\n\nParameters\n----------\nother : DataFrame or Panel\naxis : {{items, major_axis, minor_axis}}\n Axis to broadcast over\n\nReturns\n-------\nPanel\n\nSee also\n--------\nPanel.{reverse}\n\"\"\"\n\n\n_agg_doc_PANEL = \"\"\"\nWrapper method for {op_name}\n\nParameters\n----------\nother : DataFrame or Panel\naxis : {{items, major_axis, minor_axis}}\n Axis to broadcast over\n\nReturns\n-------\nPanel\n\"\"\"\n\n\ndef _make_flex_doc(op_name, typ):\n \"\"\"\n Make the appropriate substitutions for the given operation and class-typ\n into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring\n to attach to a generated method.\n\n Parameters\n ----------\n op_name : str {'__add__', '__sub__', ... 
'__eq__', '__ne__', ...}\n typ : str {series, 'dataframe']}\n\n Returns\n -------\n doc : str\n \"\"\"\n op_name = op_name.replace('__', '')\n op_desc = _op_descriptions[op_name]\n\n if op_desc['reversed']:\n equiv = 'other ' + op_desc['op'] + ' ' + typ\n else:\n equiv = typ + ' ' + op_desc['op'] + ' other'\n\n if typ == 'series':\n base_doc = _flex_doc_SERIES\n doc = base_doc.format(desc=op_desc['desc'], op_name=op_name,\n equiv=equiv, reverse=op_desc['reverse'])\n elif typ == 'dataframe':\n base_doc = _flex_doc_FRAME\n doc = base_doc.format(desc=op_desc['desc'], op_name=op_name,\n equiv=equiv, reverse=op_desc['reverse'],\n df_examples=op_desc['df_examples'])\n elif typ == 'panel':\n base_doc = _flex_doc_PANEL\n doc = base_doc.format(desc=op_desc['desc'], op_name=op_name,\n equiv=equiv, reverse=op_desc['reverse'])\n else:\n raise AssertionError('Invalid typ argument.')\n return doc\n\n\n# -----------------------------------------------------------------------------\n# Masking NA values and fallbacks for operations numpy does not support\n\ndef fill_binop(left, right, fill_value):\n \"\"\"\n If a non-None fill_value is given, replace null entries in left and right\n with this value, but only in positions where _one_ of left/right is null,\n not both.\n\n Parameters\n ----------\n left : array-like\n right : array-like\n fill_value : object\n\n Returns\n -------\n left : array-like\n right : array-like\n\n Notes\n -----\n Makes copies if fill_value is not None\n \"\"\"\n # TODO: can we make a no-copy implementation?\n if fill_value is not None:\n left_mask = isna(left)\n right_mask = isna(right)\n left = left.copy()\n right = right.copy()\n\n # one but not both\n mask = left_mask ^ right_mask\n left[left_mask & mask] = fill_value\n right[right_mask & mask] = fill_value\n return left, right\n\n\ndef mask_cmp_op(x, y, op, allowed_types):\n \"\"\"\n Apply the function `op` to only non-null points in x and y.\n\n Parameters\n ----------\n x : array-like\n y : array-like\n op : binary operation\n allowed_types : class or tuple of classes\n\n Returns\n -------\n result : ndarray[bool]\n \"\"\"\n # TODO: Can we make the allowed_types arg unnecessary?\n xrav = x.ravel()\n result = np.empty(x.size, dtype=bool)\n if isinstance(y, allowed_types):\n yrav = y.ravel()\n mask = notna(xrav) & notna(yrav)\n result[mask] = op(np.array(list(xrav[mask])),\n np.array(list(yrav[mask])))\n else:\n mask = notna(xrav)\n result[mask] = op(np.array(list(xrav[mask])), y)\n\n if op == operator.ne: # pragma: no cover\n np.putmask(result, ~mask, True)\n else:\n np.putmask(result, ~mask, False)\n result = result.reshape(x.shape)\n return result\n\n\ndef invalid_comparison(left, right, op):\n \"\"\"\n If a comparison has mismatched types and is not necessarily meaningful,\n follow python3 conventions by:\n\n - returning all-False for equality\n - returning all-True for inequality\n - raising TypeError otherwise\n\n Parameters\n ----------\n left : array-like\n right : scalar, array-like\n op : operator.{eq, ne, lt, le, gt}\n\n Raises\n ------\n TypeError : on inequality comparisons\n \"\"\"\n if op is operator.eq:\n res_values = np.zeros(left.shape, dtype=bool)\n elif op is operator.ne:\n res_values = np.ones(left.shape, dtype=bool)\n else:\n raise TypeError(\"Invalid comparison between dtype={dtype} and {typ}\"\n .format(dtype=left.dtype, typ=type(right).__name__))\n return res_values\n\n\n# -----------------------------------------------------------------------------\n# Functions that add arithmetic methods to objects, 
given arithmetic factory\n# methods\n\ndef _get_method_wrappers(cls):\n \"\"\"\n Find the appropriate operation-wrappers to use when defining flex/special\n arithmetic, boolean, and comparison operations with the given class.\n\n Parameters\n ----------\n cls : class\n\n Returns\n -------\n arith_flex : function or None\n comp_flex : function or None\n arith_special : function\n comp_special : function\n bool_special : function\n\n Notes\n -----\n None is only returned for SparseArray\n \"\"\"\n if issubclass(cls, ABCSparseSeries):\n # Be sure to catch this before ABCSeries and ABCSparseArray,\n # as they will both come see SparseSeries as a subclass\n arith_flex = _flex_method_SERIES\n comp_flex = _flex_method_SERIES\n arith_special = _arith_method_SPARSE_SERIES\n comp_special = _arith_method_SPARSE_SERIES\n bool_special = _bool_method_SERIES\n # TODO: I don't think the functions defined by bool_method are tested\n elif issubclass(cls, ABCSeries):\n # Just Series; SparseSeries is caught above\n arith_flex = _flex_method_SERIES\n comp_flex = _flex_method_SERIES\n arith_special = _arith_method_SERIES\n comp_special = _comp_method_SERIES\n bool_special = _bool_method_SERIES\n elif issubclass(cls, ABCSparseArray):\n arith_flex = None\n comp_flex = None\n arith_special = _arith_method_SPARSE_ARRAY\n comp_special = _arith_method_SPARSE_ARRAY\n bool_special = _arith_method_SPARSE_ARRAY\n elif issubclass(cls, ABCPanel):\n arith_flex = _flex_method_PANEL\n comp_flex = _comp_method_PANEL\n arith_special = _arith_method_PANEL\n comp_special = _comp_method_PANEL\n bool_special = _arith_method_PANEL\n elif issubclass(cls, ABCDataFrame):\n # Same for DataFrame and SparseDataFrame\n arith_flex = _arith_method_FRAME\n comp_flex = _flex_comp_method_FRAME\n arith_special = _arith_method_FRAME\n comp_special = _comp_method_FRAME\n bool_special = _arith_method_FRAME\n return arith_flex, comp_flex, arith_special, comp_special, bool_special\n\n\ndef _create_methods(cls, arith_method, comp_method, bool_method,\n special=False):\n # creates actual methods based upon arithmetic, comp and bool method\n # constructors.\n\n have_divmod = issubclass(cls, ABCSeries)\n # divmod is available for Series and SparseSeries\n\n # yapf: disable\n new_methods = dict(\n add=arith_method(cls, operator.add, special),\n radd=arith_method(cls, radd, special),\n sub=arith_method(cls, operator.sub, special),\n mul=arith_method(cls, operator.mul, special),\n truediv=arith_method(cls, operator.truediv, special),\n floordiv=arith_method(cls, operator.floordiv, special),\n # Causes a floating point exception in the tests when numexpr enabled,\n # so for now no speedup\n mod=arith_method(cls, operator.mod, special),\n pow=arith_method(cls, operator.pow, special),\n # not entirely sure why this is necessary, but previously was included\n # so it's here to maintain compatibility\n rmul=arith_method(cls, rmul, special),\n rsub=arith_method(cls, rsub, special),\n rtruediv=arith_method(cls, rtruediv, special),\n rfloordiv=arith_method(cls, rfloordiv, special),\n rpow=arith_method(cls, rpow, special),\n rmod=arith_method(cls, rmod, special))\n # yapf: enable\n new_methods['div'] = new_methods['truediv']\n new_methods['rdiv'] = new_methods['rtruediv']\n if have_divmod:\n # divmod doesn't have an op that is supported by numexpr\n new_methods['divmod'] = arith_method(cls, divmod, special)\n\n new_methods.update(dict(\n eq=comp_method(cls, operator.eq, special),\n ne=comp_method(cls, operator.ne, special),\n lt=comp_method(cls, operator.lt, 
special),\n gt=comp_method(cls, operator.gt, special),\n le=comp_method(cls, operator.le, special),\n ge=comp_method(cls, operator.ge, special)))\n\n if bool_method:\n new_methods.update(\n dict(and_=bool_method(cls, operator.and_, special),\n or_=bool_method(cls, operator.or_, special),\n # For some reason ``^`` wasn't used in original.\n xor=bool_method(cls, operator.xor, special),\n rand_=bool_method(cls, rand_, special),\n ror_=bool_method(cls, ror_, special),\n rxor=bool_method(cls, rxor, special)))\n\n if special:\n dunderize = lambda x: '__{name}__'.format(name=x.strip('_'))\n else:\n dunderize = lambda x: x\n new_methods = {dunderize(k): v for k, v in new_methods.items()}\n return new_methods\n\n\ndef add_methods(cls, new_methods):\n for name, method in new_methods.items():\n # For most methods, if we find that the class already has a method\n # of the same name, it is OK to over-write it. The exception is\n # inplace methods (__iadd__, __isub__, ...) for SparseArray, which\n # retain the np.ndarray versions.\n force = not (issubclass(cls, ABCSparseArray) and\n name.startswith('__i'))\n if force or name not in cls.__dict__:\n bind_method(cls, name, method)\n\n\n# ----------------------------------------------------------------------\n# Arithmetic\ndef add_special_arithmetic_methods(cls):\n \"\"\"\n Adds the full suite of special arithmetic methods (``__add__``,\n ``__sub__``, etc.) to the class.\n\n Parameters\n ----------\n cls : class\n special methods will be defined and pinned to this class\n \"\"\"\n _, _, arith_method, comp_method, bool_method = _get_method_wrappers(cls)\n new_methods = _create_methods(cls, arith_method, comp_method, bool_method,\n special=True)\n # inplace operators (I feel like these should get passed an `inplace=True`\n # or just be removed\n\n def _wrap_inplace_method(method):\n \"\"\"\n return an inplace wrapper for this method\n \"\"\"\n\n def f(self, other):\n result = method(self, other)\n\n # this makes sure that we are aligned like the input\n # we are updating inplace so we want to ignore is_copy\n self._update_inplace(result.reindex_like(self, copy=False)._data,\n verify_is_copy=False)\n\n return self\n\n return f\n\n new_methods.update(\n dict(__iadd__=_wrap_inplace_method(new_methods[\"__add__\"]),\n __isub__=_wrap_inplace_method(new_methods[\"__sub__\"]),\n __imul__=_wrap_inplace_method(new_methods[\"__mul__\"]),\n __itruediv__=_wrap_inplace_method(new_methods[\"__truediv__\"]),\n __ifloordiv__=_wrap_inplace_method(new_methods[\"__floordiv__\"]),\n __imod__=_wrap_inplace_method(new_methods[\"__mod__\"]),\n __ipow__=_wrap_inplace_method(new_methods[\"__pow__\"])))\n if not compat.PY3:\n new_methods[\"__idiv__\"] = _wrap_inplace_method(new_methods[\"__div__\"])\n\n new_methods.update(\n dict(__iand__=_wrap_inplace_method(new_methods[\"__and__\"]),\n __ior__=_wrap_inplace_method(new_methods[\"__or__\"]),\n __ixor__=_wrap_inplace_method(new_methods[\"__xor__\"])))\n\n add_methods(cls, new_methods=new_methods)\n\n\ndef add_flex_arithmetic_methods(cls):\n \"\"\"\n Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)\n to the class.\n\n Parameters\n ----------\n cls : class\n flex methods will be defined and pinned to this class\n \"\"\"\n flex_arith_method, flex_comp_method, _, _, _ = _get_method_wrappers(cls)\n new_methods = _create_methods(cls, flex_arith_method,\n flex_comp_method, bool_method=None,\n special=False)\n new_methods.update(dict(multiply=new_methods['mul'],\n subtract=new_methods['sub'],\n 
divide=new_methods['div']))\n # opt out of bool flex methods for now\n assert not any(kname in new_methods for kname in ('ror_', 'rxor', 'rand_'))\n\n add_methods(cls, new_methods=new_methods)\n\n\n# -----------------------------------------------------------------------------\n# Series\n\ndef _align_method_SERIES(left, right, align_asobject=False):\n \"\"\" align lhs and rhs Series \"\"\"\n\n # ToDo: Different from _align_method_FRAME, list, tuple and ndarray\n # are not coerced here\n # because Series has inconsistencies described in #13637\n\n if isinstance(right, ABCSeries):\n # avoid repeated alignment\n if not left.index.equals(right.index):\n\n if align_asobject:\n # to keep original value's dtype for bool ops\n left = left.astype(object)\n right = right.astype(object)\n\n left, right = left.align(right, copy=False)\n\n return left, right\n\n\ndef _construct_result(left, result, index, name, dtype=None):\n \"\"\"\n If the raw op result has a non-None name (e.g. it is an Index object) and\n the name argument is None, then passing name to the constructor will\n not be enough; we still need to override the name attribute.\n \"\"\"\n out = left._constructor(result, index=index, dtype=dtype)\n\n out.name = name\n return out\n\n\ndef _construct_divmod_result(left, result, index, name, dtype=None):\n \"\"\"divmod returns a tuple of like indexed series instead of a single series.\n \"\"\"\n constructor = left._constructor\n return (\n constructor(result[0], index=index, name=name, dtype=dtype),\n constructor(result[1], index=index, name=name, dtype=dtype),\n )\n\n\ndef dispatch_to_extension_op(op, left, right):\n \"\"\"\n Assume that left or right is a Series backed by an ExtensionArray,\n apply the operator defined by op.\n \"\"\"\n\n # The op calls will raise TypeError if the op is not defined\n # on the ExtensionArray\n # TODO(jreback)\n # we need to listify to avoid ndarray, or non-same-type extension array\n # dispatching\n\n if is_extension_array_dtype(left):\n\n new_left = left.values\n if isinstance(right, np.ndarray):\n\n # handle numpy scalars, this is a PITA\n # TODO(jreback)\n new_right = lib.item_from_zerodim(right)\n if is_scalar(new_right):\n new_right = [new_right]\n new_right = list(new_right)\n elif is_extension_array_dtype(right) and type(left) != type(right):\n new_right = list(new_right)\n else:\n new_right = right\n\n else:\n\n new_left = list(left.values)\n new_right = right\n\n res_values = op(new_left, new_right)\n res_name = get_op_result_name(left, right)\n\n if op.__name__ == 'divmod':\n return _construct_divmod_result(\n left, res_values, left.index, res_name)\n\n return _construct_result(left, res_values, left.index, res_name)\n\n\ndef _arith_method_SERIES(cls, op, special):\n \"\"\"\n Wrapper function for Series arithmetic operations, to avoid\n code duplication.\n \"\"\"\n str_rep = _get_opstr(op, cls)\n op_name = _get_op_name(op, special)\n eval_kwargs = _gen_eval_kwargs(op_name)\n fill_zeros = _gen_fill_zeros(op_name)\n construct_result = (_construct_divmod_result\n if op is divmod else _construct_result)\n\n def na_op(x, y):\n import pandas.core.computation.expressions as expressions\n try:\n result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)\n except TypeError:\n if isinstance(y, (np.ndarray, ABCSeries, pd.Index)):\n dtype = find_common_type([x.dtype, y.dtype])\n result = np.empty(x.size, dtype=dtype)\n mask = notna(x) & notna(y)\n result[mask] = op(x[mask], com.values_from_object(y[mask]))\n else:\n assert isinstance(x, np.ndarray)\n result 
= np.empty(len(x), dtype=x.dtype)\n mask = notna(x)\n result[mask] = op(x[mask], y)\n\n result, changed = maybe_upcast_putmask(result, ~mask, np.nan)\n\n result = missing.fill_zeros(result, x, y, op_name, fill_zeros)\n return result\n\n def safe_na_op(lvalues, rvalues):\n \"\"\"\n return the result of evaluating na_op on the passed in values\n\n try coercion to object type if the native types are not compatible\n\n Parameters\n ----------\n lvalues : array-like\n rvalues : array-like\n\n Raises\n ------\n TypeError: invalid operation\n \"\"\"\n try:\n with np.errstate(all='ignore'):\n return na_op(lvalues, rvalues)\n except Exception:\n if is_object_dtype(lvalues):\n return libalgos.arrmap_object(lvalues,\n lambda x: op(x, rvalues))\n raise\n\n def wrapper(left, right):\n if isinstance(right, ABCDataFrame):\n return NotImplemented\n\n left, right = _align_method_SERIES(left, right)\n res_name = get_op_result_name(left, right)\n\n if is_categorical_dtype(left):\n raise TypeError(\"{typ} cannot perform the operation \"\n \"{op}\".format(typ=type(left).__name__, op=str_rep))\n\n elif (is_extension_array_dtype(left) or\n is_extension_array_dtype(right)):\n return dispatch_to_extension_op(op, left, right)\n\n elif is_datetime64_dtype(left) or is_datetime64tz_dtype(left):\n result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex)\n return construct_result(left, result,\n index=left.index, name=res_name,\n dtype=result.dtype)\n\n elif is_timedelta64_dtype(left):\n result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex)\n return construct_result(left, result,\n index=left.index, name=res_name,\n dtype=result.dtype)\n\n lvalues = left.values\n rvalues = right\n if isinstance(rvalues, ABCSeries):\n rvalues = rvalues.values\n\n result = safe_na_op(lvalues, rvalues)\n return construct_result(left, result,\n index=left.index, name=res_name, dtype=None)\n\n return wrapper\n\n\ndef dispatch_to_index_op(op, left, right, index_class):\n \"\"\"\n Wrap Series left in the given index_class to delegate the operation op\n to the index implementation. DatetimeIndex and TimedeltaIndex perform\n type checking, timezone handling, overflow checks, etc.\n\n Parameters\n ----------\n op : binary operator (operator.add, operator.sub, ...)\n left : Series\n right : object\n index_class : DatetimeIndex or TimedeltaIndex\n\n Returns\n -------\n result : object, usually DatetimeIndex, TimedeltaIndex, or Series\n \"\"\"\n left_idx = index_class(left)\n\n # avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes,\n # left_idx may inherit a freq from a cached DatetimeIndex.\n # See discussion in GH#19147.\n if getattr(left_idx, 'freq', None) is not None:\n left_idx = left_idx._shallow_copy(freq=None)\n try:\n result = op(left_idx, right)\n except NullFrequencyError:\n # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError\n # on add/sub of integers (or int-like). 
We re-raise as a TypeError.\n raise TypeError('incompatible type for a datetime/timedelta '\n 'operation [{name}]'.format(name=op.__name__))\n return result\n\n\ndef _comp_method_OBJECT_ARRAY(op, x, y):\n if isinstance(y, list):\n y = construct_1d_object_array_from_listlike(y)\n if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):\n if not is_object_dtype(y.dtype):\n y = y.astype(np.object_)\n\n if isinstance(y, (ABCSeries, ABCIndex)):\n y = y.values\n\n result = libops.vec_compare(x, y, op)\n else:\n result = libops.scalar_compare(x, y, op)\n return result\n\n\ndef _comp_method_SERIES(cls, op, special):\n \"\"\"\n Wrapper function for Series arithmetic operations, to avoid\n code duplication.\n \"\"\"\n op_name = _get_op_name(op, special)\n masker = _gen_eval_kwargs(op_name).get('masker', False)\n\n def na_op(x, y):\n # TODO:\n # should have guarantess on what x, y can be type-wise\n # Extension Dtypes are not called here\n\n # dispatch to the categorical if we have a categorical\n # in either operand\n if is_categorical_dtype(y) and not is_scalar(y):\n # The `not is_scalar(y)` check excludes the string \"category\"\n return op(y, x)\n\n elif is_object_dtype(x.dtype):\n result = _comp_method_OBJECT_ARRAY(op, x, y)\n\n elif is_datetimelike_v_numeric(x, y):\n return invalid_comparison(x, y, op)\n\n else:\n\n # we want to compare like types\n # we only want to convert to integer like if\n # we are not NotImplemented, otherwise\n # we would allow datetime64 (but viewed as i8) against\n # integer comparisons\n\n # we have a datetime/timedelta and may need to convert\n assert not needs_i8_conversion(x)\n mask = None\n if not is_scalar(y) and needs_i8_conversion(y):\n mask = isna(x) | isna(y)\n y = y.view('i8')\n x = x.view('i8')\n\n method = getattr(x, op_name, None)\n if method is not None:\n with np.errstate(all='ignore'):\n result = method(y)\n if result is NotImplemented:\n raise TypeError(\"invalid type comparison\")\n else:\n result = op(x, y)\n\n if mask is not None and mask.any():\n result[mask] = masker\n\n return result\n\n def wrapper(self, other, axis=None):\n # Validate the axis parameter\n if axis is not None:\n self._get_axis_number(axis)\n\n res_name = get_op_result_name(self, other)\n\n if isinstance(other, ABCDataFrame): # pragma: no cover\n # Defer to DataFrame implementation; fail early\n return NotImplemented\n\n elif isinstance(other, ABCSeries) and not self._indexed_same(other):\n raise ValueError(\"Can only compare identically-labeled \"\n \"Series objects\")\n\n elif is_categorical_dtype(self):\n # Dispatch to Categorical implementation; pd.CategoricalIndex\n # behavior is non-canonical GH#19513\n res_values = dispatch_to_index_op(op, self, other, pd.Categorical)\n return self._constructor(res_values, index=self.index,\n name=res_name)\n\n if is_datetime64_dtype(self) or is_datetime64tz_dtype(self):\n # Dispatch to DatetimeIndex to ensure identical\n # Series/Index behavior\n if (isinstance(other, datetime.date) and\n not isinstance(other, datetime.datetime)):\n # https://github.com/pandas-dev/pandas/issues/21152\n # Compatibility for difference between Series comparison w/\n # datetime and date\n msg = (\n \"Comparing Series of datetimes with 'datetime.date'. \"\n \"Currently, the 'datetime.date' is coerced to a \"\n \"datetime. In the future pandas will not coerce, \"\n \"and {future}. 
\"\n \"To retain the current behavior, \"\n \"convert the 'datetime.date' to a datetime with \"\n \"'pd.Timestamp'.\"\n )\n\n if op in {operator.lt, operator.le, operator.gt, operator.ge}:\n future = \"a TypeError will be raised\"\n else:\n future = (\n \"'the values will not compare equal to the \"\n \"'datetime.date'\"\n )\n msg = '\\n'.join(textwrap.wrap(msg.format(future=future)))\n warnings.warn(msg, FutureWarning, stacklevel=2)\n other = pd.Timestamp(other)\n\n res_values = dispatch_to_index_op(op, self, other,\n pd.DatetimeIndex)\n\n return self._constructor(res_values, index=self.index,\n name=res_name)\n\n elif is_timedelta64_dtype(self):\n res_values = dispatch_to_index_op(op, self, other,\n pd.TimedeltaIndex)\n return self._constructor(res_values, index=self.index,\n name=res_name)\n\n elif (is_extension_array_dtype(self) or\n (is_extension_array_dtype(other) and\n not is_scalar(other))):\n return dispatch_to_extension_op(op, self, other)\n\n elif isinstance(other, ABCSeries):\n # By this point we have checked that self._indexed_same(other)\n res_values = na_op(self.values, other.values)\n # rename is needed in case res_name is None and res_values.name\n # is not.\n return self._constructor(res_values, index=self.index,\n name=res_name).rename(res_name)\n\n elif isinstance(other, (np.ndarray, pd.Index)):\n # do not check length of zerodim array\n # as it will broadcast\n if other.ndim != 0 and len(self) != len(other):\n raise ValueError('Lengths must match to compare')\n\n res_values = na_op(self.values, np.asarray(other))\n result = self._constructor(res_values, index=self.index)\n # rename is needed in case res_name is None and self.name\n # is not.\n return result.__finalize__(self).rename(res_name)\n\n elif isinstance(other, pd.Categorical):\n # ordering of checks matters; by this point we know\n # that not is_categorical_dtype(self)\n res_values = op(self.values, other)\n return self._constructor(res_values, index=self.index,\n name=res_name)\n\n elif is_scalar(other) and isna(other):\n # numpy does not like comparisons vs None\n if op is operator.ne:\n res_values = np.ones(len(self), dtype=bool)\n else:\n res_values = np.zeros(len(self), dtype=bool)\n return self._constructor(res_values, index=self.index,\n name=res_name, dtype='bool')\n\n else:\n values = self.get_values()\n if isinstance(other, list):\n other = np.asarray(other)\n\n with np.errstate(all='ignore'):\n res = na_op(values, other)\n if is_scalar(res):\n raise TypeError('Could not compare {typ} type with Series'\n .format(typ=type(other)))\n\n # always return a full value series here\n res_values = com.values_from_object(res)\n return self._constructor(res_values, index=self.index,\n name=res_name, dtype='bool')\n\n return wrapper\n\n\ndef _bool_method_SERIES(cls, op, special):\n \"\"\"\n Wrapper function for Series arithmetic operations, to avoid\n code duplication.\n \"\"\"\n\n def na_op(x, y):\n try:\n result = op(x, y)\n except TypeError:\n if isinstance(y, list):\n y = construct_1d_object_array_from_listlike(y)\n\n if isinstance(y, (np.ndarray, ABCSeries)):\n if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)):\n result = op(x, y) # when would this be hit?\n else:\n x = ensure_object(x)\n y = ensure_object(y)\n result = libops.vec_binop(x, y, op)\n else:\n # let null fall thru\n if not isna(y):\n y = bool(y)\n try:\n result = libops.scalar_binop(x, y, op)\n except:\n raise TypeError(\"cannot compare a dtyped [{dtype}] array \"\n \"with a scalar of type [{typ}]\"\n .format(dtype=x.dtype,\n 
typ=type(y).__name__))\n\n return result\n\n fill_int = lambda x: x.fillna(0)\n fill_bool = lambda x: x.fillna(False).astype(bool)\n\n def wrapper(self, other):\n is_self_int_dtype = is_integer_dtype(self.dtype)\n\n self, other = _align_method_SERIES(self, other, align_asobject=True)\n\n if isinstance(other, ABCDataFrame):\n # Defer to DataFrame implementation; fail early\n return NotImplemented\n\n elif isinstance(other, ABCSeries):\n name = get_op_result_name(self, other)\n is_other_int_dtype = is_integer_dtype(other.dtype)\n other = fill_int(other) if is_other_int_dtype else fill_bool(other)\n\n filler = (fill_int if is_self_int_dtype and is_other_int_dtype\n else fill_bool)\n\n res_values = na_op(self.values, other.values)\n unfilled = self._constructor(res_values,\n index=self.index, name=name)\n return filler(unfilled)\n\n else:\n # scalars, list, tuple, np.array\n filler = (fill_int if is_self_int_dtype and\n is_integer_dtype(np.asarray(other)) else fill_bool)\n\n res_values = na_op(self.values, other)\n unfilled = self._constructor(res_values, index=self.index)\n return filler(unfilled).__finalize__(self)\n\n return wrapper\n\n\ndef _flex_method_SERIES(cls, op, special):\n name = _get_op_name(op, special)\n doc = _make_flex_doc(name, 'series')\n\n @Appender(doc)\n def flex_wrapper(self, other, level=None, fill_value=None, axis=0):\n # validate axis\n if axis is not None:\n self._get_axis_number(axis)\n if isinstance(other, ABCSeries):\n return self._binop(other, op, level=level, fill_value=fill_value)\n elif isinstance(other, (np.ndarray, list, tuple)):\n if len(other) != len(self):\n raise ValueError('Lengths must be equal')\n other = self._constructor(other, self.index)\n return self._binop(other, op, level=level, fill_value=fill_value)\n else:\n if fill_value is not None:\n self = self.fillna(fill_value)\n\n return self._constructor(op(self, other),\n self.index).__finalize__(self)\n\n flex_wrapper.__name__ = name\n return flex_wrapper\n\n\n# -----------------------------------------------------------------------------\n# DataFrame\n\ndef _combine_series_frame(self, other, func, fill_value=None, axis=None,\n level=None, try_cast=True):\n \"\"\"\n Apply binary operator `func` to self, other using alignment and fill\n conventions determined by the fill_value, axis, level, and try_cast kwargs.\n\n Parameters\n ----------\n self : DataFrame\n other : Series\n func : binary operator\n fill_value : object, default None\n axis : {0, 1, 'columns', 'index', None}, default None\n level : int or None, default None\n try_cast : bool, default True\n\n Returns\n -------\n result : DataFrame\n \"\"\"\n if fill_value is not None:\n raise NotImplementedError(\"fill_value {fill} not supported.\"\n .format(fill=fill_value))\n\n if axis is not None:\n axis = self._get_axis_number(axis)\n if axis == 0:\n return self._combine_match_index(other, func, level=level)\n else:\n return self._combine_match_columns(other, func, level=level,\n try_cast=try_cast)\n else:\n if not len(other):\n return self * np.nan\n\n if not len(self):\n # Ambiguous case, use _series so works with DataFrame\n return self._constructor(data=self._series, index=self.index,\n columns=self.columns)\n\n # default axis is columns\n return self._combine_match_columns(other, func, level=level,\n try_cast=try_cast)\n\n\ndef _align_method_FRAME(left, right, axis):\n \"\"\" convert rhs to meet lhs dims if input is list, tuple or np.ndarray \"\"\"\n\n def to_series(right):\n msg = ('Unable to coerce to Series, length must be {req_len}: '\n 
'given {given_len}')\n if axis is not None and left._get_axis_name(axis) == 'index':\n if len(left.index) != len(right):\n raise ValueError(msg.format(req_len=len(left.index),\n given_len=len(right)))\n right = left._constructor_sliced(right, index=left.index)\n else:\n if len(left.columns) != len(right):\n raise ValueError(msg.format(req_len=len(left.columns),\n given_len=len(right)))\n right = left._constructor_sliced(right, index=left.columns)\n return right\n\n if isinstance(right, np.ndarray):\n\n if right.ndim == 1:\n right = to_series(right)\n\n elif right.ndim == 2:\n if left.shape != right.shape:\n raise ValueError(\"Unable to coerce to DataFrame, shape \"\n \"must be {req_shape}: given {given_shape}\"\n .format(req_shape=left.shape,\n given_shape=right.shape))\n\n right = left._constructor(right, index=left.index,\n columns=left.columns)\n elif right.ndim > 2:\n raise ValueError('Unable to coerce to Series/DataFrame, dim '\n 'must be <= 2: {dim}'.format(dim=right.shape))\n\n elif (is_list_like(right) and\n not isinstance(right, (ABCSeries, ABCDataFrame))):\n # GH17901\n right = to_series(right)\n\n return right\n\n\ndef _arith_method_FRAME(cls, op, special):\n str_rep = _get_opstr(op, cls)\n op_name = _get_op_name(op, special)\n eval_kwargs = _gen_eval_kwargs(op_name)\n fill_zeros = _gen_fill_zeros(op_name)\n default_axis = _get_frame_op_default_axis(op_name)\n\n def na_op(x, y):\n import pandas.core.computation.expressions as expressions\n\n try:\n result = expressions.evaluate(op, str_rep, x, y, **eval_kwargs)\n except TypeError:\n xrav = x.ravel()\n if isinstance(y, (np.ndarray, ABCSeries)):\n dtype = find_common_type([x.dtype, y.dtype])\n result = np.empty(x.size, dtype=dtype)\n yrav = y.ravel()\n mask = notna(xrav) & notna(yrav)\n xrav = xrav[mask]\n\n if yrav.shape != mask.shape:\n # FIXME: GH#5284, GH#5035, GH#19448\n # Without specifically raising here we get mismatched\n # errors in Py3 (TypeError) vs Py2 (ValueError)\n raise ValueError('Cannot broadcast operands together.')\n\n yrav = yrav[mask]\n if xrav.size:\n with np.errstate(all='ignore'):\n result[mask] = op(xrav, yrav)\n\n elif isinstance(x, np.ndarray):\n # mask is only meaningful for x\n result = np.empty(x.size, dtype=x.dtype)\n mask = notna(xrav)\n xrav = xrav[mask]\n if xrav.size:\n with np.errstate(all='ignore'):\n result[mask] = op(xrav, y)\n else:\n raise TypeError(\"cannot perform operation {op} between \"\n \"objects of type {x} and {y}\"\n .format(op=op_name, x=type(x), y=type(y)))\n\n result, changed = maybe_upcast_putmask(result, ~mask, np.nan)\n result = result.reshape(x.shape)\n\n result = missing.fill_zeros(result, x, y, op_name, fill_zeros)\n\n return result\n\n if op_name in _op_descriptions:\n # i.e. 
include \"add\" but not \"__add__\"\n doc = _make_flex_doc(op_name, 'dataframe')\n else:\n doc = _arith_doc_FRAME % op_name\n\n @Appender(doc)\n def f(self, other, axis=default_axis, level=None, fill_value=None):\n\n other = _align_method_FRAME(self, other, axis)\n\n if isinstance(other, ABCDataFrame): # Another DataFrame\n return self._combine_frame(other, na_op, fill_value, level)\n elif isinstance(other, ABCSeries):\n return _combine_series_frame(self, other, na_op,\n fill_value=fill_value, axis=axis,\n level=level, try_cast=True)\n else:\n if fill_value is not None:\n self = self.fillna(fill_value)\n\n return self._combine_const(other, na_op, try_cast=True)\n\n f.__name__ = op_name\n\n return f\n\n\ndef _flex_comp_method_FRAME(cls, op, special):\n str_rep = _get_opstr(op, cls)\n op_name = _get_op_name(op, special)\n default_axis = _get_frame_op_default_axis(op_name)\n\n def na_op(x, y):\n try:\n with np.errstate(invalid='ignore'):\n result = op(x, y)\n except TypeError:\n result = mask_cmp_op(x, y, op, (np.ndarray, ABCSeries))\n return result\n\n @Appender('Wrapper for flexible comparison methods {name}'\n .format(name=op_name))\n def f(self, other, axis=default_axis, level=None):\n\n other = _align_method_FRAME(self, other, axis)\n\n if isinstance(other, ABCDataFrame):\n # Another DataFrame\n if not self._indexed_same(other):\n self, other = self.align(other, 'outer',\n level=level, copy=False)\n return self._compare_frame(other, na_op, str_rep)\n\n elif isinstance(other, ABCSeries):\n return _combine_series_frame(self, other, na_op,\n fill_value=None, axis=axis,\n level=level, try_cast=False)\n else:\n return self._combine_const(other, na_op, try_cast=False)\n\n f.__name__ = op_name\n\n return f\n\n\ndef _comp_method_FRAME(cls, func, special):\n str_rep = _get_opstr(func, cls)\n op_name = _get_op_name(func, special)\n\n @Appender('Wrapper for comparison method {name}'.format(name=op_name))\n def f(self, other):\n if isinstance(other, ABCDataFrame):\n # Another DataFrame\n if not self._indexed_same(other):\n raise ValueError('Can only compare identically-labeled '\n 'DataFrame objects')\n return self._compare_frame(other, func, str_rep)\n\n elif isinstance(other, ABCSeries):\n return _combine_series_frame(self, other, func,\n fill_value=None, axis=None,\n level=None, try_cast=False)\n else:\n\n # straight boolean comparisons we want to allow all columns\n # (regardless of dtype to pass thru) See #4537 for discussion.\n res = self._combine_const(other, func,\n errors='ignore',\n try_cast=False)\n return res.fillna(True).astype(bool)\n\n f.__name__ = op_name\n\n return f\n\n\n# -----------------------------------------------------------------------------\n# Panel\n\ndef _arith_method_PANEL(cls, op, special):\n # work only for scalars\n op_name = _get_op_name(op, special)\n\n def f(self, other):\n if not is_scalar(other):\n raise ValueError('Simple arithmetic with {name} can only be '\n 'done with scalar values'\n .format(name=self._constructor.__name__))\n\n return self._combine(other, op)\n\n f.__name__ = op_name\n return f\n\n\ndef _comp_method_PANEL(cls, op, special):\n str_rep = _get_opstr(op, cls)\n op_name = _get_op_name(op, special)\n\n def na_op(x, y):\n import pandas.core.computation.expressions as expressions\n\n try:\n result = expressions.evaluate(op, str_rep, x, y)\n except TypeError:\n result = mask_cmp_op(x, y, op, np.ndarray)\n return result\n\n @Appender('Wrapper for comparison method {name}'.format(name=op_name))\n def f(self, other, axis=None):\n # Validate the axis 
parameter\n if axis is not None:\n self._get_axis_number(axis)\n\n if isinstance(other, self._constructor):\n return self._compare_constructor(other, na_op, try_cast=False)\n elif isinstance(other, (self._constructor_sliced, ABCDataFrame,\n ABCSeries)):\n raise Exception(\"input needs alignment for this object [{object}]\"\n .format(object=self._constructor))\n else:\n return self._combine_const(other, na_op, try_cast=False)\n\n f.__name__ = op_name\n\n return f\n\n\ndef _flex_method_PANEL(cls, op, special):\n str_rep = _get_opstr(op, cls)\n op_name = _get_op_name(op, special)\n eval_kwargs = _gen_eval_kwargs(op_name)\n fill_zeros = _gen_fill_zeros(op_name)\n\n def na_op(x, y):\n import pandas.core.computation.expressions as expressions\n\n try:\n result = expressions.evaluate(op, str_rep, x, y,\n errors='raise',\n **eval_kwargs)\n except TypeError:\n result = op(x, y)\n\n # handles discrepancy between numpy and numexpr on division/mod\n # by 0 though, given that these are generally (always?)\n # non-scalars, I'm not sure whether it's worth it at the moment\n result = missing.fill_zeros(result, x, y, op_name, fill_zeros)\n return result\n\n if op_name in _op_descriptions:\n doc = _make_flex_doc(op_name, 'panel')\n else:\n # doc strings substitors\n doc = _agg_doc_PANEL.format(op_name=op_name)\n\n @Appender(doc)\n def f(self, other, axis=0):\n return self._combine(other, na_op, axis=axis)\n\n f.__name__ = op_name\n return f\n\n\n# -----------------------------------------------------------------------------\n# Sparse\n\ndef _cast_sparse_series_op(left, right, opname):\n \"\"\"\n For SparseSeries operation, coerce to float64 if the result is expected\n to have NaN or inf values\n\n Parameters\n ----------\n left : SparseArray\n right : SparseArray\n opname : str\n\n Returns\n -------\n left : SparseArray\n right : SparseArray\n \"\"\"\n opname = opname.strip('_')\n\n if is_integer_dtype(left) and is_integer_dtype(right):\n # series coerces to float64 if result should have NaN/inf\n if opname in ('floordiv', 'mod') and (right.values == 0).any():\n left = left.astype(np.float64)\n right = right.astype(np.float64)\n elif opname in ('rfloordiv', 'rmod') and (left.values == 0).any():\n left = left.astype(np.float64)\n right = right.astype(np.float64)\n\n return left, right\n\n\ndef _arith_method_SPARSE_SERIES(cls, op, special):\n \"\"\"\n Wrapper function for Series arithmetic operations, to avoid\n code duplication.\n \"\"\"\n op_name = _get_op_name(op, special)\n\n def wrapper(self, other):\n if isinstance(other, ABCDataFrame):\n return NotImplemented\n elif isinstance(other, ABCSeries):\n if not isinstance(other, ABCSparseSeries):\n other = other.to_sparse(fill_value=self.fill_value)\n return _sparse_series_op(self, other, op, op_name)\n elif is_scalar(other):\n with np.errstate(all='ignore'):\n new_values = op(self.values, other)\n return self._constructor(new_values,\n index=self.index,\n name=self.name)\n else: # pragma: no cover\n raise TypeError('operation with {other} not supported'\n .format(other=type(other)))\n\n wrapper.__name__ = op_name\n return wrapper\n\n\ndef _sparse_series_op(left, right, op, name):\n left, right = left.align(right, join='outer', copy=False)\n new_index = left.index\n new_name = get_op_result_name(left, right)\n\n from pandas.core.sparse.array import _sparse_array_op\n lvalues, rvalues = _cast_sparse_series_op(left.values, right.values, name)\n result = _sparse_array_op(lvalues, rvalues, op, name)\n return left._constructor(result, index=new_index, 
name=new_name)\n\n\ndef _arith_method_SPARSE_ARRAY(cls, op, special):\n \"\"\"\n Wrapper function for Series arithmetic operations, to avoid\n code duplication.\n \"\"\"\n op_name = _get_op_name(op, special)\n\n def wrapper(self, other):\n from pandas.core.sparse.array import (\n SparseArray, _sparse_array_op, _wrap_result, _get_fill)\n if isinstance(other, np.ndarray):\n if len(self) != len(other):\n raise AssertionError(\"length mismatch: {self} vs. {other}\"\n .format(self=len(self), other=len(other)))\n if not isinstance(other, SparseArray):\n dtype = getattr(other, 'dtype', None)\n other = SparseArray(other, fill_value=self.fill_value,\n dtype=dtype)\n return _sparse_array_op(self, other, op, op_name)\n elif is_scalar(other):\n with np.errstate(all='ignore'):\n fill = op(_get_fill(self), np.asarray(other))\n result = op(self.sp_values, other)\n\n return _wrap_result(op_name, result, self.sp_index, fill)\n else: # pragma: no cover\n raise TypeError('operation with {other} not supported'\n .format(other=type(other)))\n\n wrapper.__name__ = op_name\n return wrapper\n"
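The ops module in the record above builds every arithmetic, comparison, and boolean method from a factory (arith_method(cls, operator.add, special), etc.), optionally "dunderizes" the names, and then pins the wrappers onto the class. A minimal standalone sketch of that factory-and-pin pattern (illustrative only, not pandas internals; Vector, make_arith_method, and add_special_methods are hypothetical names):

import operator

def make_arith_method(op):
    # Factory: returns a wrapper that applies `op` elementwise,
    # analogous to arith_method(cls, operator.add, special) above.
    def method(self, other):
        return type(self)(op(a, b) for a, b in zip(self.data, other.data))
    return method

class Vector:
    def __init__(self, data):
        self.data = list(data)
    def __repr__(self):
        return 'Vector({})'.format(self.data)

def add_special_methods(cls):
    new_methods = {'add': make_arith_method(operator.add),
                   'sub': make_arith_method(operator.sub)}
    for name, method in new_methods.items():
        # dunderize and pin onto the class, as _create_methods/add_methods do
        setattr(cls, '__{}__'.format(name), method)

add_special_methods(Vector)
print(Vector([1, 2]) + Vector([10, 20]))   # Vector([11, 22])

The payoff of this design, as in the record above, is that one factory per operation family covers the full suite of operators without hand-writing each dunder method.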
] | [
[
"numpy.dot",
"pandas.to_datetime",
"pandas.Series",
"numpy.linspace",
"numpy.asarray",
"pandas.util.testing.assert_produces_warning",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"numpy.round",
"pandas.util.testing.assert_index_equal",
"numpy.random.randn",
"numpy.var",
"pandas.isna",
"numpy.random.randint",
"pandas.util.testing.makeTimeDataFrame",
"pandas.notna",
"pandas.util.testing._make_skipna_wrapper",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.util.testing.assert_categorical_equal",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.DatetimeIndex",
"numpy.std",
"scipy.stats.skew",
"numpy.zeros",
"pandas.concat",
"pandas.core.nanops.nansem",
"pandas.MultiIndex",
"numpy.isnan",
"pandas.Categorical",
"numpy.median",
"pandas.util.testing.assert_almost_equal",
"pandas.option_context",
"pandas.Timedelta",
"numpy.cov",
"numpy.random.rand",
"pandas.MultiIndex.from_product",
"pandas.date_range",
"scipy.stats.kurtosis",
"numpy.corrcoef",
"numpy.array",
"pandas.timedelta_range",
"pandas.CategoricalIndex",
"numpy.random.random",
"pandas.util.testing.assert_raises_regex",
"numpy.ones",
"numpy.random.permutation",
"numpy.shape",
"pandas.to_timedelta",
"pandas.Timestamp",
"pandas.compat.lrange",
"pandas.core.nanops.nanvar"
],
[
"pandas.Series",
"pandas.DataFrame"
],
[
"pandas._libs.ops.vec_compare",
"pandas.core.computation.expressions.evaluate",
"pandas.core.dtypes.cast.maybe_upcast_putmask",
"pandas.core.dtypes.common.is_extension_array_dtype",
"pandas._libs.ops.scalar_binop",
"pandas.core.sparse.array._sparse_array_op",
"numpy.asarray",
"pandas.core.dtypes.missing.notna",
"pandas.core.dtypes.common.is_datetime64tz_dtype",
"pandas.core.sparse.array._wrap_result",
"pandas.core.dtypes.common.is_datetime64_dtype",
"pandas.core.dtypes.common.ensure_object",
"pandas.core.common.values_from_object",
"pandas._libs.ops.vec_binop",
"pandas._libs.ops.scalar_compare",
"pandas.core.missing.fill_zeros",
"numpy.zeros",
"pandas.core.dtypes.common.is_datetimelike_v_numeric",
"pandas.core.dtypes.common.is_categorical_dtype",
"numpy.putmask",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.util._decorators.Appender",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.dtypes.common.is_timedelta64_dtype",
"pandas.core.sparse.array._get_fill",
"pandas.core.dtypes.cast.construct_1d_object_array_from_listlike",
"pandas.compat.bind_method",
"numpy.errstate",
"pandas.core.dtypes.common.needs_i8_conversion",
"pandas.core.dtypes.common.is_bool_dtype",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.cast.find_common_type",
"numpy.ones",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.core.dtypes.missing.isna",
"pandas.core.sparse.array.SparseArray",
"pandas.Timestamp",
"pandas._libs.lib.item_from_zerodim",
"numpy.empty"
]
] |
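One detail of the record above worth making concrete is the fill_binop masking rule: a fill_value replaces nulls only where exactly one of the two operands is null (left_mask ^ right_mask), so positions that are missing on both sides stay missing. A standalone sketch of that rule, using np.isnan as a stand-in for pandas' isna (fill_binop_sketch is a hypothetical name):

import numpy as np

def fill_binop_sketch(left, right, fill_value):
    # Copies mirror the "Makes copies if fill_value is not None" note above.
    left = left.copy()
    right = right.copy()
    left_mask = np.isnan(left)
    right_mask = np.isnan(right)
    mask = left_mask ^ right_mask          # one but not both
    left[left_mask & mask] = fill_value
    right[right_mask & mask] = fill_value
    return left, right

l = np.array([1.0, np.nan, np.nan])
r = np.array([10.0, 20.0, np.nan])
print(fill_binop_sketch(l, r, 0.0))
# (array([ 1., 0., nan]), array([10., 20., nan]))
# -> after filling, l + r keeps NaN only where both sides were missing.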
MehariBZ/pydca | [
"034e0707a13e6e43da1343630047d47caeca896e"
] | [
"pydca/meanfield_dca/meanfield_dca.py"
] | [
"from __future__ import absolute_import, division\nfrom . import msa_numerics\nfrom pydca.fasta_reader import fasta_reader\nimport logging\nimport numpy as np\n\n\"\"\"This module implements Direc Coupling Analysis (DCA) of residue coevolution\nfor protein and RNA sequences using the mean-field algorithm. The final\ncoevolution score is computed from the direct probability. The general steps\ncarried out are outlined as follows\n\nFor a detailed information about Direct Coupling Analysis, one can refer to the\nfollowing articles:\n\n a) Identification of direct residue contacts in protein-protein interaction\n by message-passing\n Martin Weigt, Robert A White, Hendrik Szurmant, James A Hoch, Terence Hwa\n Journal: Proceedings of the National Academy of Sciences\n Volume: 106\n Issue: 1\n Pages: 67-72\n b) Direct-coupling analysis of residue coevolution captures native contacts\n across many protein families\n Faruck Morcos, Andrea Pagnani, Bryan Lunt, Arianna Bertolino,\n Debora S Marks, Chris Sander, Riccardo Zecchina, Jose N Onuchic,\n Terence Hwa, Martin Weigt\n Journal: Proceedings of the National Academy of Sciences\n Volume: 108\n Issue: 49\n Pages: E1293-E1301\n\nAuthor(s) Mehari B. Zerihun, Alexander Schug\n\"\"\"\n\nlogger = logging.getLogger(__name__)\n\nclass MeanFieldDCAException(Exception):\n \"\"\"\n \"\"\"\n\nclass MeanFieldDCA:\n \"\"\"MeanFieldDCA class. Instances of this class are used to carry out Direct\n Coupling Analysis (DCA) of residue coevolution using the mean-field DCA\n algorithm.\n \"\"\"\n def __init__(self, msa_file_name, biomolecule, pseudocount=None, seqid=None):\n \"\"\"MeanFieldDCA object class initializer\n Parameters\n ----------\n msa_file : str\n Name of the FASTA formatted file containing alignmnet\n biomolecule : str\n Type of biomolecule (must be protein or RNA, lower or\n upper case)\n pseudocount : float\n Parameter for regularizing data before DCA analysis.\n Default value is 0.5\n seqid : float\n This parameter's value measure the maximum\n similarity two or more sequences can have so that they can be\n considered distinct, or lumped together otherwise.\n Returns\n -------\n None : None\n \"\"\"\n\n self.__pseudocount = pseudocount if pseudocount is not None else 0.5\n self.__seqid = seqid if seqid is not None else 0.8\n #Validate the value of pseudo count incase user provide an invalid one\n if self.__pseudocount >= 1.0 or self.__pseudocount < 0:\n logger.error('\\n\\tValue of relative pseudo-count must be'\n ' between 0 and 1.0. Typical value is 0.5')\n raise ValueError\n #Validate the value of sequence identity\n if self.__seqid > 1.0 or self.__seqid <= 0.0:\n logger.error('\\n\\tValue of sequence-identity must'\n ' not exceed 1 nor less than 0. Typical values are 0.7, 0.8., 0.9')\n raise ValueError\n biomolecule = biomolecule.strip().upper()\n self.__msa_file_name = msa_file_name\n if biomolecule=='RNA':\n self.__num_site_states = 5\n elif biomolecule=='PROTEIN':\n self.__num_site_states = 21\n else:\n logger.error(\n '\\n\\tUnknown biomolecule ... 
must be protein (PROTEIN) or rna (RNA)',\n )\n raise ValueError\n \n self.__sequences = fasta_reader.get_alignment_int_form(\n self.__msa_file_name,\n biomolecule=biomolecule,\n )\n\n self.__num_sequences = len(self.__sequences)\n self.__sequences_len = len(self.__sequences[0])\n self.__biomolecule = biomolecule\n if self.__seqid < 1.0:\n self.__sequences_weight = self.compute_sequences_weight()\n else :\n # assign each sequence a weight of one\n self.__sequences_weight = np.ones((self.__num_sequences,), dtype = np.float64)\n self.__effective_num_sequences = np.sum(self.__sequences_weight)\n #sometimes users might enter the wrong biomolecule type\n #verify biomolecule type\n\n mf_dca_info = \"\"\"\\n\\tCreated a MeanFieldDCA object with the following attributes\n \\tbiomolecule: {}\n \\ttotal states at sites: {}\n \\tpseudocount: {}\n \\tsequence identity: {}\n \\talignment length: {}\n \\ttotal number of unique sequences (excluding redundant sequences with 100 percent similarity): {}\n \\teffective number of sequences (with sequence identity {}): {}\n \"\"\".format(\n biomolecule,\n self.__num_site_states,\n self.__pseudocount,\n self.__seqid,\n self.__sequences_len,\n self.__num_sequences,\n self.__seqid,\n self.__effective_num_sequences,\n )\n logger.info(mf_dca_info)\n return None\n\n\n def __str__(self):\n \"\"\"Describes the MeanFieldDCA object.\n\n Parameters\n ----------\n self: MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n description : str\n A representation about objects created from\n the MeanFieldDCA class.\n \"\"\"\n description = '<instance of MeanFieldDCA>'\n return description\n\n\n def __call__(self, pseudocount = 0.5 , seqid = 0.8):\n \"\"\"Resets the value of pseudo count and sequence identity through\n the instance.\n\n Parameters\n ----------\n self : MeanFieldDCA\n MeanFieldDCA instance.\n pseudocount : float\n The value of the raltive pseudo count. It must be between\n 0 and 1. Default value is 0.5.\n seqid : float\n Threshold sequence similarity for computing sequences weight.\n This parameter must be between 0 and 1. Typical values are\n 0.7, 0.8, 0.9 or something in between these numbers.\n\n Returns\n -------\n None : None\n \"\"\"\n\n #warn the user that paramertes are being reset\n self.__pseudocount = pseudocount\n self.__seqid = seqid\n logger.warning('\\n\\tYou have changed one of the parameters (pseudo count or sequence identity)'\n '\\n\\tfrom their default values'\n '\\n\\tpseudocount: {} \\n\\tsequence_identity: {}'.format(\n self.__pseudocount, self.__seqid,\n )\n )\n return None\n\n\n @property\n def alignment(self):\n \"\"\"Alignment data getter.\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n --------\n self.__sequences : list\n A 2d list of alignment sequences in integer representation.\n \"\"\"\n\n return self.__sequences\n\n @property\n def biomolecule(self):\n \"\"\"Sequence type getter\n\n Parameters\n ----------\n Self : MeanFieldDCA\n Instance of MeanFieldDCA class\n Returns\n -------\n self.__biomolecule : str\n Biomolecule type (protein or RNA)\n \"\"\"\n return self.__biomolecule\n @property\n def sequences_len(self):\n \"\"\"Sequences length getter.\n\n Parameters\n ---------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n self.__sequences_len : int\n Sequences length in alignment data\n \"\"\"\n\n return self.__sequences_len\n\n\n @property\n def num_site_states(self):\n \"\"\"Get number of states for an MSA (eg. 
5 for RNAs and 21 for proteins)\n\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n self.__num_site_states : int\n Maximum number of states in a sequence site\n \"\"\"\n\n return self.__num_site_states\n\n @property\n def num_sequences(self):\n \"\"\"Getter for the number of sequences read from alignment file\n\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n self.__num_sequences : int\n The total number of sequences in alignment data\n \"\"\"\n\n return self.__num_sequences\n\n\n @property\n def sequence_identity(self):\n \"\"\"Getter for the value of sequence indentity.\n\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n self.__seqid : float\n Cut-off value for sequences similarity above which sequences are\n considered identical\n \"\"\"\n\n return self.__seqid\n\n\n @property\n def pseudocount(self):\n \"\"\"Getter for value of pseudo count\n\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n self.__pseudocount : float\n Value of pseudo count usef for regularization\n \"\"\"\n\n return self.__pseudocount\n\n\n @property\n def sequences_weight(self):\n \"\"\"Getter for the weight of each sequences in alignment data.\n\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n self.__sequences_weight : np.array(dtype=np.float64)\n A 1d numpy array containing the weight of each sequences in the\n alignment.\n \"\"\"\n\n return self.__sequences_weight\n\n\n @property\n def effective_num_sequences(self):\n \"\"\"Getter for the effective number of sequences.\n\n Parameters\n ----------\n self : MeanFieldDCA\n Instance of MeanFieldDCA class\n\n Returns\n -------\n np.sum(self.__sequences_weight) : float\n The sum of each sequence's weight.\n \"\"\"\n\n return np.sum(self.__sequences_weight)\n\n\n def compute_sequences_weight(self):\n \"\"\"Computes the weight of each sequences in the alignment. If the\n sequences identity is one, each sequences has equal weight and this is\n the maximum weight a sequence in the alignment data can have. Whenever\n the sequence identity is set a value less than one, sequences that have\n similarity beyond the sequence identity are lumped together. If there are\n m similar sequences, their corresponding weight is the reciprocal.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance\n\n Returns\n -------\n weights : np.array\n A 1d numpy array of size self.__num_sequences containing the\n weight of each sequence.\n \"\"\"\n\n logger.info('\\n\\tComputing sequences weights')\n weights = msa_numerics.compute_sequences_weight(\n alignment_data= np.array(self.__sequences, dtype=np.int32),\n seqid = self.__seqid,\n )\n return weights\n\n\n def get_single_site_freqs(self):\n \"\"\"Computes single site frequency counts.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n\n Returns\n -------\n single_site_freqs : np.array\n A 2d numpy array of shape (L, q) containing the frequency\n count of residues at sequence sites. L is the length of\n sequences in the alignment, and q is the maximum possible\n states a site can accommodate. 
The last state (q) of each\n site represents a gap.\n \"\"\"\n\n logger.info('\\n\\tComputing single site frequencies')\n\n single_site_freqs = msa_numerics.compute_single_site_freqs(\n alignment_data = np.array(self.__sequences),\n num_site_states = self.__num_site_states,\n seqs_weight = self.__sequences_weight,\n )\n return single_site_freqs\n\n\n def get_reg_single_site_freqs(self):\n \"\"\"Regularizes single site frequencies.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance\n\n Returns\n -------\n reg_single_site_freqs : np.array\n A 2d numpy array of shape (L, q) containing regularized single\n site frequencies. L and q are the sequences length and maximum\n number of site-states respectively.\n \"\"\"\n\n single_site_freqs = self.get_single_site_freqs()\n\n logger.info('\\n\\tRegularizing single site frequencies')\n\n reg_single_site_freqs = msa_numerics.get_reg_single_site_freqs(\n single_site_freqs = single_site_freqs,\n seqs_len = self.__sequences_len,\n num_site_states = self.__num_site_states,\n pseudocount = self.__pseudocount,\n )\n return reg_single_site_freqs\n\n\n def get_pair_site_freqs(self):\n \"\"\"Computes pair site frequencies\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n\n Returns\n -------\n pair_site_freqs : np.array\n A 3d numpy array of pair site frequencies. It has a shape of\n (N, q-1, q-1) where N is the number of unique site pairs and q\n is the maximum number of states a site can accommodate. Note\n site pairing is performed in the following order: (0, 1), (0, 2),\n ..., (0, L-1), ..., (L-2, L-1) where L is the sequences length. This\n ordering is critical: any computation involving pair site\n frequencies must be implemented in the right order of pairs.\n \"\"\"\n\n logger.info('\\n\\tComputing pair site frequencies')\n pair_site_freqs = msa_numerics.compute_pair_site_freqs(\n alignment_data = np.array(self.__sequences),\n num_site_states = self.__num_site_states,\n seqs_weight = self.__sequences_weight,\n )\n return pair_site_freqs\n\n\n def get_reg_pair_site_freqs(self):\n \"\"\"Regularizes pair site frequencies\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n\n Returns\n -------\n reg_pair_site_freqs : np.array\n A 3d numpy array of shape (N, q-1, q-1) containing regularized\n pair site frequencies. N is the number of unique site pairs and\n q is the maximum number of states in a sequence site.
The\n ordering of pairs follows numbering like (unregularized) pair\n site frequencies.\n \"\"\"\n\n pair_site_freqs = self.get_pair_site_freqs()\n logger.info('\\n\\tRegularizing pair site frequencies')\n reg_pair_site_freqs = msa_numerics.get_reg_pair_site_freqs(\n pair_site_freqs = pair_site_freqs,\n seqs_len = self.__sequences_len,\n num_site_states = self.__num_site_states,\n pseudocount = self.__pseudocount,\n )\n return reg_pair_site_freqs\n\n\n def construct_corr_mat(self, reg_fi, reg_fij):\n \"\"\"Constructs the correlation matrix from regularized frequencies.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n reg_fi : np.array\n Regularized single site frequencies.\n reg_fij : np.array\n Regularized pair site frequncies.\n\n Returns\n -------\n corr_mat : np.array\n A 2d numpy array of (N, N) where N = L*(q-1) where L and q are\n the length of sequences and number of states in a site\n respectively.\n \"\"\"\n\n logger.info('\\n\\tConstructing the correlation matrix')\n corr_mat = msa_numerics.construct_corr_mat(\n reg_fi = reg_fi,\n reg_fij = reg_fij,\n seqs_len = self.__sequences_len,\n num_site_states = self.__num_site_states,\n )\n return corr_mat\n\n\n def compute_couplings(self, corr_mat):\n \"\"\"Computing couplings by inverting the matrix of correlations. Note that\n the couplings are the negative of the inverse of the correlation matrix.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n corr_mat : np.array\n The correlation matrix formed from regularized pair site and\n single site frequencies.\n\n Returns\n -------\n couplings : np.array\n A 2d numpy array of the same shape as the correlation matrix.\n \"\"\"\n\n logger.info('\\n\\tComputing couplings')\n try:\n couplings = msa_numerics.compute_couplings(corr_mat = corr_mat)\n except Exception as e:\n logger.error('\\n\\tCorrelation {}\\n\\tYou set the pseudocount {}.'\n ' You might need to increase it.'.format(e, self.__pseudocount)\n )\n raise\n # capture couplings to avoid recomputing\n self.__couplings = couplings \n logger.info('\\n\\tMaximum and minimum couplings: {}, {}'.format(\n np.max(couplings), np.min(couplings)))\n return couplings\n\n\n def compute_two_site_model_fields(self, couplings, reg_fi):\n \"\"\"Computes two site model fields by fitting the marginal probabilities\n of the direct probability with the empirical data obtained from the\n alignment\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n couplings : np.array\n A 2d numpy array of couplings computed from the correlation matrix.\n reg_fi : np.array\n A 3d numpy array of regularized single site frequencies.\n Returns\n -------\n two_site_model_fields : np.array\n A 3d numpy array of shape (N, q, q) where N is the total number\n of unique site pairs and q is the maximum number of states a site\n can accommodate. The ordering of site pairs is the same as those\n in pair site frequencies.\n \"\"\"\n\n logger.info('\\n\\tComputing two site model fields')\n two_site_model_fields = msa_numerics.compute_two_site_model_fields(\n couplings = couplings,\n reg_fi = reg_fi,\n seqs_len = self.__sequences_len,\n num_site_states = self.__num_site_states,\n )\n return two_site_model_fields\n\n\n def compute_fields(self, couplings=None):\n \"\"\"Computes the local fields of the global probability of sequence space.\n\n Parameters\n ----------\n self : MeanFieldDCA\n An instance of MeanFieldDCA class\n\n couplings : np.array\n A 2d numpy array of the couplings. 
If not given, will be computed.\n\n Returns\n -------\n fields : dict \n A dictionary of fields whose keys are sites in MSA and whose values\n are arrays of fields per site.\n \"\"\"\n\n if couplings is None:\n reg_fi = self.get_reg_single_site_freqs()\n reg_fij = self.get_reg_pair_site_freqs()\n corr_mat = self.construct_corr_mat(reg_fi, reg_fij)\n couplings = self.compute_couplings(corr_mat)\n else:\n reg_fi = self.get_reg_single_site_freqs()\n q = self.__num_site_states\n fields = dict()\n logger.info('\\n\\tComputing local fields of the global probability function')\n for i in range(self.__sequences_len):\n pi = reg_fi[i]\n piq = pi[-1]\n sum = np.zeros((q-1, 1))\n row_start = i * (q - 1)\n row_end = row_start + (q - 1)\n for j in range(self.__sequences_len):\n if j != i:\n pj = reg_fi[j]\n col_start = j * (q - 1)\n col_end = col_start + (q - 1)\n couplings_ij = couplings[row_start:row_end, col_start:col_end]\n pj_col_vec = np.reshape(pj[:-1], (q-1, 1))\n sum += np.dot(couplings_ij, pj_col_vec)\n\n fields_i = np.log(pi[:-1]/piq) - np.reshape(sum, (q-1, ))\n fields[i] = fields_i\n return fields\n \n \n def shift_couplings(self, couplings_ij):\n \"\"\"Shifts the couplings value.\n\n Parameters\n ----------\n self : MeanFieldDCA \n An instance of MeanFieldDCA class\n couplings_ij : np.array\n 1d array of couplings for site pair (i, j)\n Returns\n -------\n shifted_couplings_ij : np.array\n A 2d array of the couplings for site pair (i, j)\n \"\"\"\n qm1 = self.__num_site_states - 1\n couplings_ij = np.reshape(couplings_ij, (qm1,qm1))\n avx = np.mean(couplings_ij, axis=1)\n avx = np.reshape(avx, (qm1, 1))\n avy = np.mean(couplings_ij, axis=0)\n avy = np.reshape(avy, (1, qm1))\n av = np.mean(couplings_ij)\n couplings_ij = couplings_ij - avx - avy + av\n return couplings_ij \n\n \n def compute_params(self, seqbackmapper=None, ranked_by=None, linear_dist=None, num_site_pairs=None):\n \"\"\"Computes fields and couplings with the couplings ranked by DCA score.\n\n Parameters\n ----------\n self : MeanFieldDCA\n An instance of MeanFieldDCA class\n seqbackmapper : SequenceBackmapper\n An instance of SequenceBackmapper class\n ranked_by : str\n DCA score type used to rank the couplings by their site pairs.\n By default they are ranked by the Frobenius Norm of couplings with\n average product correction.\n linear_dist : int\n Minimum separation between site pairs (i, j).\n num_site_pairs : int \n Number of site pairs whose couplings are to be obtained. \n \n Returns\n -------\n fields, couplings : tuple \n A tuple of lists of fields and couplings.
\n \"\"\"\n if ranked_by is None: ranked_by = 'fn_apc'\n if linear_dist is None: linear_dist = 4\n\n RANKING_METHODS = ('FN', 'FN_APC', 'DI', 'DI_APC')\n ranked_by = ranked_by.strip().upper()\n if ranked_by not in RANKING_METHODS:\n logger.error('\\n\\tInvalid ranking criterion {}.\\nChoose from {}'.format(ranked_by, RANKING_METHODS))\n raise MeanFieldDCAException\n if ranked_by == 'FN': dca_scores = self.compute_sorted_FN(seqbackmapper=seqbackmapper)\n if ranked_by == 'FN_APC': dca_scores = self.compute_sorted_FN_APC(seqbackmapper=seqbackmapper)\n if ranked_by == 'DI': dca_scores = self.compute_sorted_DI(seqbackmapper=seqbackmapper)\n if ranked_by == 'DI_APC': dca_scores = self.compute_sorted_DI_APC(seqbackmapper=seqbackmapper)\n\n fields = self.compute_fields(couplings=self.__couplings)\n\n qm1 = self.__num_site_states - 1 \n\n if seqbackmapper is not None:\n # mapping_dict has keys from MSA sites and values from refseq sites\n # we need to reverse this mapping as the fields and couplings are from MSA sites\n mapping_dict = {\n value : key for key, value in self.__refseq_mapping_dict.items()\n }\n else:\n mapping_dict = {\n i : i for i in range(self.__sequences_len)\n }\n # set default number of site pairs whose couplings are to be extracted\n if num_site_pairs is None :\n num_site_pairs = len(seqbackmapper.ref_sequence) if seqbackmapper is not None else len(mapping_dict.keys()) \n # we need only the fields corresponding to mapped sites \n fields_mapped = list()\n logger.info('\\n\\tExtracting fields')\n for i in mapping_dict.keys():\n site_in_msa = mapping_dict[i]\n fields_im = fields[site_in_msa]\n site_fields = i, fields_im\n fields_mapped.append(site_fields)\n # extract couplings\n logger.info('\\n\\tExtracting couplings for top {} site pairs (i, j) with |i - j| > {} and ranked by {}'.format(\n num_site_pairs, linear_dist, ranked_by)\n )\n couplings_ranked_by_dca_score = list()\n count_pairs = 0\n for pair, score in dca_scores:\n site_1_in_refseq, site_2_in_refseq = pair[0], pair[1]\n if abs(site_1_in_refseq - site_2_in_refseq) > linear_dist:\n count_pairs += 1\n if count_pairs > num_site_pairs: break \n i, j = mapping_dict[site_1_in_refseq], mapping_dict[site_2_in_refseq]\n if(i > j): \n logger.error('\\n\\tInvalid site pair. Site pair (i, j) should be ordered in i < j')\n raise MeanFieldDCAException\n row_start = i * qm1 \n row_end = row_start + qm1 \n column_start = j * qm1 \n column_end = column_start + qm1 \n couplings_ij = self.__couplings[row_start:row_end, column_start:column_end]\n couplings_ij = self.shift_couplings(couplings_ij) # now couplings_ij is a 2d numpy array\n couplings_ij = np.reshape(couplings_ij, (qm1*qm1,))\n pair_couplings_ij = pair, couplings_ij \n couplings_ranked_by_dca_score.append(pair_couplings_ij)\n if count_pairs < num_site_pairs:\n logger.warning('\\n\\tObtained couplings for only {} ranked site pairs.' \n '\\n\\tThis is the maximum number of site paris we can obtain under ' \n 'the given criteria'.format(count_pairs)\n )\n \n return tuple(fields_mapped), tuple(couplings_ranked_by_dca_score) \n\n\n def get_mapped_site_pairs_dca_scores(self, sorted_dca_scores, seqbackmapper):\n \"\"\"Filters mapped site pairs with a reference sequence. 
\n\n Parameters\n ----------\n self : MeanFieldDCA\n An instance of MeanFieldDCA class\n sorted_dca_scores : tuple of tuples\n A tuple of tuples of site-pair and DCA score sorted by DCA scores \n in reverse order.\n seqbackmapper : SequenceBackmapper \n An instance of SequenceBackmapper class\n \n Returns\n -------\n sorted_scores_mapped : tuple\n A tuple of tuples of site pairs and dca score\n \"\"\"\n mapping_dict = seqbackmapper.map_to_reference_sequence()\n # Add attribute __refseq_mapping_dict\n self.__refseq_mapping_dict = mapping_dict \n sorted_scores_mapped = list()\n num_mapped_pairs = 0\n for pair, score in sorted_dca_scores:\n try:\n mapped_pair = mapping_dict[pair[0]], mapping_dict[pair[1]]\n except KeyError:\n pass \n else:\n current_pair_score = mapped_pair, score \n sorted_scores_mapped.append(current_pair_score)\n num_mapped_pairs += 1\n # sort mapped pairs in case they were not\n sorted_scores_mapped = sorted(sorted_scores_mapped, key = lambda k : k[1], reverse=True)\n logger.info('\\n\\tTotal number of mapped sites: {}'.format(num_mapped_pairs))\n return tuple(sorted_scores_mapped)\n\n \n def get_site_pair_di_score(self):\n \"\"\"Obtains computed direct information (DI) scores from the backend and\n puts them in a dict keyed by site pair.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n\n Returns\n -------\n site_pair_di_score : dict\n A dict of DCA scores keyed by site pair, i.e.,\n {(i, j): score, ...} for all unique site pairs (i, j) \n such that j > i.\n \"\"\"\n reg_fi = self.get_reg_single_site_freqs()\n reg_fij = self.get_reg_pair_site_freqs()\n corr_mat = self.construct_corr_mat(reg_fi, reg_fij)\n couplings = self.compute_couplings(corr_mat)\n fields_ij = self.compute_two_site_model_fields(couplings, reg_fi)\n logger.info('\\n\\tComputing direct information')\n unsorted_DI = msa_numerics.compute_direct_info(\n couplings = couplings,\n fields_ij = fields_ij,\n reg_fi = reg_fi,\n seqs_len = self.__sequences_len,\n num_site_states = self.__num_site_states,\n )\n\n site_pair_di_score= dict()\n pair_counter = 0\n for i in range(self.__sequences_len - 1):\n for j in range(i + 1, self.__sequences_len):\n site_pair = (i , j)\n site_pair_di_score[site_pair] = unsorted_DI[pair_counter]\n pair_counter += 1\n return site_pair_di_score\n\n def compute_sorted_DI(self, seqbackmapper=None):\n \"\"\"Computes direct information for each pair of sites and sorts the pairs in\n descending order of DCA score.\n\n Parameters\n ----------\n self : MeanFieldDCA\n The instance.\n seqbackmapper : SequenceBackmapper\n An instance of SequenceBackmapper class.\n\n Returns\n -------\n sorted_DI : list\n A list of tuples containing site pairs and DCA score, i.e., the\n contents of sorted_DI are [((i, j), score), ...]
for all unique\n site pairs (i, j) such that j > i.\n \"\"\"\n unsorted_DI = self.get_site_pair_di_score()\n sorted_DI = sorted(unsorted_DI.items(), key = lambda k : k[1], reverse=True)\n if seqbackmapper is not None:\n sorted_DI = self.get_mapped_site_pairs_dca_scores(sorted_DI, seqbackmapper)\n return sorted_DI\n\n\n def compute_sorted_DI_APC(self, seqbackmapper=None):\n \"\"\"Computes the average DI score for every site.\n\n Parameters\n ----------\n self : MeanFieldDCA\n An instance of MeanFieldDCA class\n seqbackmapper : SequenceBackmapper\n An instance of SequenceBackmapper class.\n Returns\n -------\n sorted_DI_APC : list\n A list of tuples containing site pairs and DCA score, i.e., the\n contents of sorted_DI are [((i, j), score), ...] for all unique\n site pairs (i, j) such that j > i. These DI scores are average \n product corrected.\n \"\"\"\n\n sorted_DI = self.compute_sorted_DI() # we must not supply seqbackmapper at this point. \n # the backmapping is done at the end of APC step\n logger.info('\\n\\tPerforming average product correction (APC) of DI scores')\n # compute the average score of each site\n av_score_sites = list()\n N = self.__sequences_len\n for i in range(N):\n i_scores = [score for pair, score in sorted_DI if i in pair]\n assert len(i_scores) == N - 1\n i_scores_sum = sum(i_scores)\n i_scores_ave = i_scores_sum/float(N - 1)\n av_score_sites.append(i_scores_ave)\n # compute average product corrected DI\n av_all_scores = sum(av_score_sites)/float(N)\n sorted_DI_APC = list()\n for pair, score in sorted_DI:\n i, j = pair\n score_apc = score - av_score_sites[i] * (av_score_sites[j]/av_all_scores)\n sorted_DI_APC.append((pair, score_apc))\n # sort the scores as doing APC may have disrupted the ordering\n sorted_DI_APC = sorted(sorted_DI_APC, key = lambda k : k[1], reverse=True)\n # Now we must do backmapping if seqbackmapper is provided.\n if seqbackmapper is not None:\n sorted_DI_APC = self.get_mapped_site_pairs_dca_scores(sorted_DI_APC, seqbackmapper)\n return sorted_DI_APC\n\n\n def compute_sorted_FN(self, seqbackmapper=None):\n \"\"\"Computes the Frobenius norm of couplings.\n Parameters\n ----------\n self : MeanFieldDCA\n An instance of MeanFieldDCA class.\n seqbackmapper : SequenceBackmapper\n An instance of SequenceBackmapper class.\n\n Returns\n -------\n fn_sorted : list\n A list of tuples containing site pairs and DCA score, i.e., the\n list [((i, j), score), ...] 
for all unique\n site pairs (i, j) such that j > i.\n \"\"\"\n reg_fi = self.get_reg_single_site_freqs()\n reg_fij = self.get_reg_pair_site_freqs()\n corr_mat = self.construct_corr_mat(reg_fi, reg_fij)\n couplings = self.compute_couplings(corr_mat)\n logger.info('\\n\\tComputing Frobenius norm of couplings')\n num_sites = self.__sequences_len\n q = self.__num_site_states\n frobenius_norm = list()\n for i in range(num_sites):\n row_start = i * (q - 1)\n row_end = row_start + (q - 1)\n for j in range(i + 1, num_sites):\n site_pair = (i, j)\n col_start = j * (q - 1)\n col_end = col_start + (q - 1)\n cij = couplings[row_start:row_end, col_start:col_end]\n cij_mean_1 = np.reshape(np.mean(cij, axis=0), (1, q-1))\n cij_mean_2 = np.reshape(np.mean(cij, axis=1), (q-1, 1))\n cij_mean = np.mean(cij)\n cij_new = cij - cij_mean_1 - cij_mean_2 + cij_mean\n fn_ij = np.sqrt(np.sum(cij_new * cij_new))\n frobenius_norm.append((site_pair, fn_ij))\n fn_sorted = sorted(frobenius_norm, key = lambda x : x[1], reverse=True)\n if seqbackmapper is not None:\n fn_sorted = self.get_mapped_site_pairs_dca_scores(fn_sorted, seqbackmapper)\n return fn_sorted\n\n\n def compute_sorted_FN_APC(self, seqbackmapper = None):\n \"\"\"Performs average product correction (APC) on DCA scores\n\n Parameters\n ----------\n self : MeanFieldDCA\n An instance of MeanFieldDCA class.\n seqbackmapper : SequenceBackmapper\n An instance of SequenceBackmapper class.\n\n Returns\n -------\n sorted_FN_APC : list\n A list of tuples containing site pairs and DCA score, i.e., the\n list [((i, j), score), ...] for all unique site pairs (i, j) \n such that j > i. The DCA scores are average product corrected.\n \"\"\"\n raw_FN = self.compute_sorted_FN() # Must not supply seqbackmapper at this stage.\n logger.info('\\n\\tPerforming average product correction (APC) to Frobenius'\n ' norm of couplings.'\n )\n\n # compute the average score of each site\n av_score_sites = list()\n N = self.__sequences_len\n for i in range(N):\n i_scores = [score for pair, score in raw_FN if i in pair]\n assert len(i_scores) == N - 1\n i_scores_sum = sum(i_scores)\n i_scores_ave = i_scores_sum/float(N - 1)\n av_score_sites.append(i_scores_ave)\n # compute average product corrected DI\n av_all_scores = sum(av_score_sites)/float(N)\n sorted_FN_APC = list()\n for pair, score in raw_FN:\n i, j = pair\n score_apc = score - av_score_sites[i] * (av_score_sites[j]/av_all_scores)\n sorted_FN_APC.append((pair, score_apc))\n sorted_FN_APC = sorted(sorted_FN_APC, key=lambda x : x[1], reverse=True)\n # Must do backmapping is sebackmapper is not None\n if seqbackmapper is not None:\n sorted_FN_APC = self.get_mapped_site_pairs_dca_scores(sorted_FN_APC, seqbackmapper) \n return sorted_FN_APC\n\n\nif __name__ == '__main__':\n \"\"\"\n \"\"\"\n"
] | [
[
"numpy.dot",
"numpy.log",
"numpy.min",
"numpy.reshape",
"numpy.ones",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
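Both compute_sorted_DI_APC and compute_sorted_FN_APC in the code above apply the same average product correction: each pair score S_ij is reduced by mean_i(S) * mean_j(S) / mean(S). Below is a minimal standalone sketch of that step, assuming a hypothetical symmetric (N, N) matrix of raw pair scores with a zero diagonal; the helper name apc_correct and the toy data are illustrative, not part of the repo.

import numpy as np

def apc_correct(scores):
    """Apply average product correction to a symmetric score matrix."""
    N = scores.shape[0]
    # average score of each site over its N - 1 partners (diagonal is zero),
    # mirroring the av_score_sites loop in the class above
    av_score_sites = scores.sum(axis=1) / float(N - 1)
    av_all_scores = av_score_sites.sum() / float(N)
    # APC: subtract the product of the two site averages over the overall average
    corrected = scores - np.outer(av_score_sites, av_score_sites) / av_all_scores
    np.fill_diagonal(corrected, 0.0)
    return corrected

raw = np.random.rand(5, 5)
raw = (raw + raw.T) / 2.0      # symmetrize to mimic pair scores
np.fill_diagonal(raw, 0.0)
print(apc_correct(raw))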
wakafengfan/CPM-1-Finetune | [
"b2c30bd94df31bcd6ee75ba90c347113563d4075"
] | [
"arguments.py"
] | [
"# coding=utf-8\n# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"argparser configuration\"\"\"\n\nimport argparse\nimport os\nimport torch\nimport deepspeed\n\n\ndef add_model_config_args(parser):\n \"\"\"Model arguments\"\"\"\n\n group = parser.add_argument_group('model', 'model configuration')\n\n group.add_argument('--pretrained-bert', action='store_true',\n help='use a pretrained bert-large-uncased model instead'\n 'of initializing from scratch. See '\n '--tokenizer-model-type to specify which pretrained '\n 'BERT model to use')\n group.add_argument('--attention-dropout', type=float, default=0.1,\n help='dropout probability for attention weights')\n group.add_argument('--num-attention-heads', type=int, default=16,\n help='num of transformer attention heads')\n group.add_argument('--hidden-size', type=int, default=1024,\n help='tansformer hidden size')\n group.add_argument('--intermediate-size', type=int, default=None,\n help='transformer embedding dimension for FFN'\n 'set to 4*`--hidden-size` if it is None')\n group.add_argument('--num-layers', type=int, default=24,\n help='num decoder layers')\n group.add_argument('--layernorm-epsilon', type=float, default=1e-5,\n help='layer norm epsilon')\n group.add_argument('--hidden-dropout', type=float, default=0.1,\n help='dropout probability for hidden state transformer')\n group.add_argument('--max-position-embeddings', type=int, default=512,\n help='maximum number of position embeddings to use')\n group.add_argument('--vocab-size', type=int, default=30522,\n help='vocab size to use for non-character-level '\n 'tokenization. This value will only be used when '\n 'creating a tokenizer')\n group.add_argument('--deep-init', action='store_true',\n help='initialize bert model similar to gpt2 model.'\n 'scales initialization of projection layers by a '\n 'factor of 1/sqrt(2N). 
Necessary to train bert '\n 'models larger than BERT-Large.')\n group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,\n help='Pad the vocab size to be divisible by this value.'\n 'This is added for computational efficieny reasons.')\n group.add_argument('--cpu-optimizer', action='store_true',\n help='Run optimizer on CPU')\n group.add_argument('--cpu_torch_adam', action='store_true',\n help='Use Torch Adam as optimizer on CPU.')\n\n return parser\n\n\ndef add_fp16_config_args(parser):\n \"\"\"Mixed precision arguments.\"\"\"\n\n group = parser.add_argument_group('fp16', 'fp16 configurations')\n\n group.add_argument('--fp16', action='store_true',\n help='Run model in fp16 mode')\n group.add_argument('--fp32-embedding', action='store_true',\n help='embedding in fp32')\n group.add_argument('--fp32-layernorm', action='store_true',\n help='layer norm in fp32')\n group.add_argument('--fp32-tokentypes', action='store_true',\n help='embedding token types in fp32')\n group.add_argument('--fp32-allreduce', action='store_true',\n help='all-reduce in fp32')\n group.add_argument('--hysteresis', type=int, default=2,\n help='hysteresis for dynamic loss scaling')\n group.add_argument('--loss-scale', type=float, default=None,\n help='Static loss scaling, positive power of 2 '\n 'values can improve fp16 convergence. If None, dynamic'\n 'loss scaling is used.')\n group.add_argument('--loss-scale-window', type=float, default=1000,\n help='Window over which to raise/lower dynamic scale')\n group.add_argument('--min-scale', type=float, default=1,\n help='Minimum loss scale for dynamic loss scale')\n\n return parser\n\n\ndef add_training_args(parser):\n \"\"\"Training arguments.\"\"\"\n\n group = parser.add_argument_group('train', 'training configurations')\n\n group.add_argument('--do_train', action='store_true',\n help=\"Do training\")\n group.add_argument('--do_eval', action='store_true',\n help=\"Do evaluation\")\n group.add_argument('--zero_shot', action=\"store_true\",\n help=\"do zero-shot\")\n group.add_argument('--batch-size', type=int, default=4,\n help='Data Loader batch size')\n group.add_argument('--weight-decay', type=float, default=0.01,\n help='weight decay coefficient for L2 regularization')\n group.add_argument('--checkpoint-activations', action='store_true',\n help='checkpoint activation to allow for training '\n 'with larger models and sequences')\n group.add_argument('--checkpoint-num-layers', type=int, default=1,\n help='chunk size (number of layers) for checkpointing')\n group.add_argument('--deepspeed-activation-checkpointing', action='store_true',\n help='uses activation checkpointing from deepspeed')\n group.add_argument('--clip-grad', type=float, default=1.0,\n help='gradient clipping')\n group.add_argument('--epoch', type=int, default=10,\n help='total number of iterations to train over all training runs')\n group.add_argument('--log-interval', type=int, default=100,\n help='report interval')\n group.add_argument('--exit-interval', type=int, default=None,\n help='Exit the program after this many new iterations.')\n\n group.add_argument('--seed', type=int, default=1234,\n help='random seed')\n # Batch prodecuer arguments\n group.add_argument('--reset-position-ids', action='store_true',\n help='Reset posistion ids after end-of-document token.')\n group.add_argument('--reset-attention-mask', action='store_true',\n help='Reset self attention maske after '\n 'end-of-document token.')\n \n # Learning rate.\n group.add_argument('--lr-decay-iters', type=int, 
default=None,\n help='number of iterations to decay LR over,'\n ' If None defaults to `--train-iters`*`--epochs`')\n group.add_argument('--lr-decay-style', type=str, default='linear',\n choices=['constant', 'linear', 'cosine', 'exponential'],\n help='learning rate decay function')\n group.add_argument('--lr', type=float, default=1.0e-4,\n help='initial learning rate')\n group.add_argument('--warmup', type=float, default=0.01,\n help='percentage of data to warmup on (.01 = 1% of all '\n 'training iters). Default 0.01')\n # model checkpointing\n group.add_argument('--save', type=str, default=None,\n help='Output directory to save checkpoints to.')\n group.add_argument('--save-interval', type=int, default=5000,\n help='number of iterations between saves')\n group.add_argument('--no-save-optim', action='store_true',\n help='Do not save current optimizer.')\n group.add_argument('--no-save-rng', action='store_true',\n help='Do not save current rng state.')\n group.add_argument('--load', type=str, default=None,\n help='Path to a directory containing a model checkpoint.')\n group.add_argument('--no-load-optim', action='store_true',\n help='Do not load optimizer when loading checkpoint.')\n group.add_argument('--no-load-rng', action='store_true',\n help='Do not load rng state when loading checkpoint.')\n group.add_argument('--finetune', action='store_true',\n help='Load model for finetuning. Do not load optimizer '\n 'or rng state from checkpoint and set iteration to 0. '\n 'Assumed when loading a release checkpoint.')\n # distributed training args\n group.add_argument('--distributed-backend', default='nccl',\n help='which backend to use for distributed '\n 'training. One of [gloo, nccl]')\n\n group.add_argument('--local_rank', type=int, default=None,\n help='local rank passed from distributed launcher.')\n\n group.add_argument('--results_dir', type=str, default=None,\n help='The dir to save the model.')\n group.add_argument('--model_name', type=str, default=\"test\",\n help=\"The name you give to the model.\")\n\n # eval\n group.add_argument('--eval_ckpt_path', type=str, default=None,\n help='The checkpoint path used for evaluation')\n\n return parser\n\n\ndef add_evaluation_args(parser):\n \"\"\"Evaluation arguments.\"\"\"\n\n group = parser.add_argument_group('validation', 'validation configurations')\n\n group.add_argument('--eval-batch-size', type=int, default=None,\n help='Data Loader batch size for evaluation datasets.'\n 'Defaults to `--batch-size`')\n group.add_argument('--eval-iters', type=int, default=100,\n help='number of iterations to run for evaluation'\n 'validation/test for')\n group.add_argument('--eval-interval', type=int, default=1000,\n help='interval between running evaluation on validation set')\n group.add_argument('--eval-seq-length', type=int, default=None,\n help='Maximum sequence length to process for '\n 'evaluation. Defaults to `--seq-length`')\n group.add_argument('--eval-max-preds-per-seq', type=int, default=None,\n help='Maximum number of predictions to use for '\n 'evaluation. 
Defaults to '\n 'math.ceil(`--eval-seq-length`*.15/10)*10')\n group.add_argument('--overlapping-eval', type=int, default=32,\n help='sliding window for overlapping eval ')\n group.add_argument('--cloze-eval', action='store_true',\n help='Evaluation dataset from `--valid-data` is a cloze task')\n group.add_argument('--eval-hf', action='store_true',\n help='perform evaluation with huggingface openai model.'\n 'use `--load` to specify weights path to be loaded')\n group.add_argument('--load-openai', action='store_true',\n help='load openai weights into our model. Use `--load` '\n 'to specify weights path to be loaded')\n\n return parser\n\ndef add_text_generate_args(parser):\n \"\"\"Text generate arguments.\"\"\"\n\n group = parser.add_argument_group('Text generation', 'configurations')\n group.add_argument(\"--temperature\", type=float, default=1.0)\n group.add_argument(\"--top_p\", type=float, default=0.0)\n group.add_argument(\"--top_k\", type=int, default=0)\n group.add_argument(\"--out-seq-length\", type=int, default=256)\n return parser\n\n\ndef add_data_args(parser):\n \"\"\"Train/valid/test data arguments.\"\"\"\n\n group = parser.add_argument_group('data', 'data configurations')\n group.add_argument('--data_dir', type=str, required=True,\n help=\"Training data dir\")\n group.add_argument('--mmap-warmup', action='store_true',\n help='Warm up mmap files.')\n group.add_argument('--model-parallel-size', type=int, default=1,\n help='size of the model parallel.')\n group.add_argument('--shuffle', action='store_true',\n help='Shuffle data. Shuffling is deterministic '\n 'based on seed and current epoch.')\n group.add_argument('--use-npy-data-loader', action='store_true',\n help='Use the numpy data loader. If set, then'\n 'train-data-path, val-data-path, and test-data-path'\n 'should also be provided.')\n group.add_argument('--num-workers', type=int, default=2,\n help=\"\"\"Number of workers to use for dataloading\"\"\")\n group.add_argument('--tokenizer-model-type', type=str,\n default='bert-large-uncased',\n help=\"Model type to use for sentencepiece tokenization \\\n (one of ['bpe', 'char', 'unigram', 'word']) or \\\n bert vocab to use for BertWordPieceTokenizer (one of \\\n ['bert-large-uncased', 'bert-large-cased', etc.])\")\n group.add_argument('--tokenizer-path', type=str, default='tokenizer.model',\n help='path used to save/load sentencepiece tokenization '\n 'models')\n group.add_argument('--tokenizer-type', type=str,\n default='BertWordPieceTokenizer',\n choices=['CharacterLevelTokenizer',\n 'SentencePieceTokenizer',\n 'BertWordPieceTokenizer',\n 'GPT2BPETokenizer'],\n help='what type of tokenizer to use')\n group.add_argument(\"--cache-dir\", default=None, type=str,\n help=\"Where to store pre-trained BERT downloads\")\n group.add_argument('--use-tfrecords', action='store_true',\n help='load `--train-data`, `--valid-data`, '\n '`--test-data` from BERT tf records instead of '\n 'normal data pipeline')\n group.add_argument('--seq-length', type=int, default=512,\n help=\"Maximum sequence length to process\")\n group.add_argument('--max-preds-per-seq', type=int, default=None,\n help='Maximum number of predictions to use per sequence.'\n 'Defaults to math.ceil(`--seq-length`*.15/10)*10.'\n 'MUST BE SPECIFIED IF `--use-tfrecords` is True.')\n\n return parser\n\ndef get_args():\n \"\"\"Parse all the args.\"\"\"\n\n parser = argparse.ArgumentParser(description='PyTorch BERT Model')\n parser = add_model_config_args(parser)\n parser = add_fp16_config_args(parser)\n parser = 
add_training_args(parser)\n parser = add_evaluation_args(parser)\n parser = add_text_generate_args(parser)\n parser = add_data_args(parser)\n\n # Include DeepSpeed configuration arguments\n parser = deepspeed.add_config_arguments(parser)\n\n args = parser.parse_args()\n\n if not args.data_dir:\n print('WARNING: No data specified')\n\n args.cuda = torch.cuda.is_available()\n\n args.rank = int(os.getenv('RANK', '0'))\n args.world_size = int(os.getenv(\"WORLD_SIZE\", '1'))\n\n if os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'):\n # We are using (OpenMPI) mpirun for launching distributed data parallel processes\n local_rank = int(os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'))\n local_size = int(os.getenv('OMPI_COMM_WORLD_LOCAL_SIZE'))\n\n # Possibly running with Slurm\n num_nodes = int(os.getenv('SLURM_JOB_NUM_NODES', '1'))\n nodeid = int(os.getenv('SLURM_NODEID', '0'))\n\n args.local_rank = local_rank\n args.rank = nodeid*local_size + local_rank\n args.world_size = num_nodes*local_size\n\n args.model_parallel_size = min(args.model_parallel_size, args.world_size)\n if args.rank == 0:\n print('using world size: {} and model-parallel size: {} '.format(\n args.world_size, args.model_parallel_size))\n\n args.dynamic_loss_scale = False\n if args.loss_scale is None:\n args.dynamic_loss_scale = True\n if args.rank == 0:\n print(' > using dynamic loss scaling')\n\n # The args fp32_* or fp16_* meant to be active when the\n # args fp16 is set. So the default behaviour should all\n # be false.\n if not args.fp16:\n args.fp32_embedding = False\n args.fp32_tokentypes = False\n args.fp32_layernorm = False\n\n return args\n"
] | [
[
"torch.cuda.is_available"
]
] |
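arguments.py above builds its parser by threading a single argparse.ArgumentParser through a chain of add_*_args functions, each of which attaches a named argument group and returns the parser. Below is a minimal sketch of that pattern with toy flags only (not the full CPM argument set).

import argparse

def add_training_args(parser):
    # Attach a named group of training flags, as each add_*_args above does.
    group = parser.add_argument_group('train', 'training configurations')
    group.add_argument('--batch-size', type=int, default=4,
                       help='Data Loader batch size')
    group.add_argument('--lr', type=float, default=1.0e-4,
                       help='initial learning rate')
    return parser

parser = argparse.ArgumentParser(description='toy parser')
parser = add_training_args(parser)
args = parser.parse_args(['--batch-size', '8'])
print(args.batch_size, args.lr)   # -> 8 0.0001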