repo_name
stringlengths
6
130
hexsha
list
file_path
list
code
list
apis
list
possible_versions
list
qianhk/FeiPython
[ "c87578d3c04b7345a99fef7390c8ea12c6f2c716", "c87578d3c04b7345a99fef7390c8ea12c6f2c716", "c87578d3c04b7345a99fef7390c8ea12c6f2c716" ]
[ "Python3Test/TensorflowTest.py", "Python3Test/Numpy/array_index.py", "Python3Test/matplotlib/google_test.py" ]
[ "#!/usr/bin/env python3\n# coding=utf-8\n\nimport tensorflow as tf\nimport numpy as np\n\n# with tf.device('/cpu:0'):\n#\n# sess = tf.Session()\n#\n# # a_gpu = tf.Variable(0, name=\"a_gup\")\n# # sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n#\n# hello = tf.constant('Hello, TensorFlow!')\n# print(sess.run(hello))\n#\n# a = tf.constant(10)\n# b = tf.constant(32)\n# print(sess.run(a + b))\n#\n# c = tf.constant('haHa')\n# print(sess.run(c))\n#\n# sess.close()\n\n\nidentity_matrix = tf.diag([1.0, 3.0, 1.0])\nA = tf.truncated_normal([2, 3])\nB = tf.fill([2, 3], 5.0)\nC = tf.random_uniform([3, 2], maxval=100)\nD = tf.convert_to_tensor(np.array([[1., 2., 3.], [-3., -7., -1.], [0., 5., -2.]]))\nsess = tf.Session()\n# sess.run(tf.global_variables_initializer())\n\n# print(sess.run(tf.random_normal(mean=10, shape=[10])))\n\n# A = tf.Variable(tf.random_normal(shape=[1, 1]))\n# sess.run(tf.global_variables_initializer())\n# print(sess.run(A))\n\nprint('\\nI=')\nprint(sess.run(identity_matrix))\nprint('\\nA=')\nprint(sess.run(A))\nprint('\\nB=')\nprint(sess.run(B))\nprint('\\nC=')\nC = sess.run(C)\nprint(C)\nprint('\\nD=')\nprint(sess.run(D))\n\nprint('\\nA+B=')\nprint(sess.run(A + B))\n\nprint('\\nB-B=')\nprint(sess.run(B - B))\n\nprint('\\nB*I=')\nBI = tf.matmul(B, identity_matrix)\nprint(sess.run(BI))\n\nprint('\\ntranspose(C)=')\nprint(sess.run(tf.transpose(C)))\n\nprint('\\ntranspose(D)=')\nprint(sess.run(tf.transpose(D)))\n\nprint('\\ninverse(D)=')\nprint(sess.run(tf.matrix_inverse(D)))\n\nprint('\\ndeterminant(D)={:.1f}'.format(sess.run(tf.matrix_determinant(D))))\n\nprint('\\ncholesky(D):')\nprint(sess.run(tf.cholesky(identity_matrix)))\n\nprint('\\nselfAdjointEig(D):')\nprint(sess.run(tf.self_adjoint_eig(D)))\n\nprint(sess.run(tf.div(13, 4)))\nprint(sess.run(tf.truediv(13, 4)))\nprint(sess.run(tf.floordiv(13, 4)))\nprint(sess.run(tf.mod(13.2, 4)))\n\nprint(sess.run(tf.cross([1, 0, 0], [0, 1, 0])))\nprint(sess.run(tf.square([1, 2, 3])))\n\n\ndef 
custom_polynomial(local_tf, value):\n return local_tf.subtract(3 * local_tf.square(value), value) + 10\n\n\nprint((sess.run(custom_polynomial(tf, 11))))\n\nalpha = 0.1\nval = tf.constant([[2, 3], [1, 4]], dtype=tf.float32)\nl1 = tf.contrib.layers.l1_regularizer(alpha)(val)\nl2 = tf.contrib.layers.l2_regularizer(alpha)(val)\n\nA = [[0.8, 0.6, 0.3], [0.1, 0.6, 0.4]]\nB = [1, 1]\ntop_k = tf.nn.top_k(A, 2)\nin_top_k = tf.nn.in_top_k(A, B, 1)\n\nsess.run(tf.global_variables_initializer())\n\nprint(f'\\nl1={sess.run(l1)} l2={sess.run(l2)}')\n\na = np.array([1, 2, 3], dtype=np.float32)\ntf_v = tf.Variable(5, dtype=tf.float32)\n\nsess.run(tf.global_variables_initializer())\n\nprint(f'a * tf_v = {sess.run(a * tf_v)}')\n\nweights = tf.constant([[1.0, -2], [-3, 4]]);\nregular_l1 = tf.contrib.layers.l1_regularizer(0.5)(weights)\nregular_l2 = tf.contrib.layers.l2_regularizer(0.5)(weights)\nprint(f'\\nregular_l1={sess.run(regular_l1)} regular_l2={sess.run(regular_l2)}')\n\nval_val = sess.run(val)\nprint('\\nval=' + str(val_val))\nprint(f'\\nargmax_0={val_val.argmax(0)} argmax_1={val_val.argmax(1)}')\nprint('\\ntf.argmax(val, 0)=' + str(sess.run(tf.argmax(val, 0))))\nprint('tf.argmax(val, 1)=' + str(sess.run(tf.argmax(val, 1))))\n\nvalues, indices = sess.run(top_k)\nprint(f'\\ntop_k: values={values}\\nindices={indices}')\nprint(f'in_top_k = {sess.run(in_top_k)}')\n\nsess.close()\n", "#!/usr/bin/env python3\n# coding=utf-8\n\nimport numpy as np\n\nbase_data = np.arange(100, 200)\nprint(\"base_data\\n={}\\n\".format(base_data))\n\nprint(\"base_data[10] = {}\\n\".format(base_data[10]))\n\nevery_five = np.arange(0, 100, 5)\nprint(\"every_five={}\\n\".format(every_five))\nprint(\"base_data[every_five] = \\n{}\\n\".format(base_data[every_five]))\n\na = np.array([(1, 2), (10, 20)])\nprint(\"a = \\n{}\\n\".format(a))\nprint(\"base_data[a] = \\n{}\\n\".format(base_data[a]))\n\nbase_data2 = base_data.reshape(10, -1)\nprint(\"base_data2 = np.reshape(base_data, (10, -1)) = 
\\n{}\\n\".format(base_data2))\n\nprint(\"base_data2[2] = \\n{}\\n\".format(base_data2[2]))\nprint(\"base_data2[2, 3] = \\n{}\\n\".format(base_data2[2, 3]))\nprint(\"base_data2[-1, -1] = \\n{}\\n\".format(base_data2[-1, -1]))\n\nprint(\"base_data2[2, :]] = \\n{}\\n\".format(base_data2[2, :]))\nprint(\"base_data2[:, 3]] = \\n{}\\n\".format(base_data2[:, 3]))\nprint(\"base_data2[2:5, 2:4]] = \\n{}\\n\".format(base_data2[2:5, 2:4]))\n", "#!/usr/bin/env python3\n# coding=utf-8\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef one(x):\n return 1 / x\n\n\ndef two(x):\n return -1 / (x * x)\n\n\n# data = np.array([2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\ndata_a = np.arange(2, 12, 1)\nprint('data_a = %s' % data_a)\none_a = one(data_a)\ntwo_a = two(data_a)\n\nplt.plot(range(len(data_a)), one_a, 'r')\nplt.plot(range(len(data_a)), two_a, 'b')\nplt.show()\n\ndata_b = np.arange(1, 12)\nprint('data_b = %s' % data_b)\none_b = np.log(data_b)\ntwo_b = np.log(data_b + 10)\nplt.plot(range(len(data_b)), one_b, 'r')\nplt.plot(range(len(data_b)), two_b, 'b')\nplt.show()\n\n# plt.axis('off')\n# plt.xticks([])\n# plt.yticks([])\n\n# frame = plt.gca()\n# frame.axes.get_yaxis().set_visible(False)\n# frame.axes.get_xaxis().set_visible(False)\n" ]
[ [ "tensorflow.floordiv", "tensorflow.matrix_determinant", "tensorflow.diag", "tensorflow.cholesky", "tensorflow.Variable", "tensorflow.div", "tensorflow.nn.top_k", "tensorflow.self_adjoint_eig", "tensorflow.Session", "tensorflow.square", "tensorflow.nn.in_top_k", "tensorflow.cross", "tensorflow.argmax", "tensorflow.matmul", "tensorflow.fill", "tensorflow.truncated_normal", "tensorflow.contrib.layers.l1_regularizer", "tensorflow.global_variables_initializer", "numpy.array", "tensorflow.truediv", "tensorflow.constant", "tensorflow.transpose", "tensorflow.matrix_inverse", "tensorflow.mod", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.random_uniform" ], [ "numpy.arange", "numpy.array" ], [ "numpy.arange", "numpy.log", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Kreidl/pymailtojira
[ "abca5ad83d5cb9dcf526b2f3aa661d42ba69d9d6" ]
[ "main.py" ]
[ "#import pandas for reading the xlsx File\nimport pandas as pd\n#import pymsgbox for displaying a messagebox, the request to check if the URL from the mapping is available, the Handler for outlook, the time for the sleep, the custom py for jira\nimport pymsgbox, urllib.request,urllib.parse,urllib.error, win32com.client, time, sys, jira_py\n\n#impot the os for creating a tempo attachment\nimport os\n\ndef changeMailName(mail, issue, addJIRAKeytoMailName):\n if addJIRAKeytoMailName:\n mail.Subject = str(issue) + \"_\" + mail.Subject\n return mail\n\n\ndef fileHandler(jiraURL, jiraUSER, jirapw, issue, attachment):\n path = os.getcwd()+ \"\\\\\" + attachment.FileName\n attachment.SaveAsFile(path)\n if jira_py.add_attachment(jiraURL, jiraUSER, jirapw, issue, path):\n os.remove(path)\n print(\"removed\")\n \n \n\n#Get Arguments from Batfile\nif sys.argv:\n iterateTimeInSeconds = int(sys.argv[1])\n addJIRAKeytoMailName = sys.argv[2]\n mailCounter = int(sys.argv[3])\n desiredFolder = sys.argv[4]\n\n\n#Create a Messagebox with Yes/No\nresult = pymsgbox.confirm('Shall we create JIRA Issues from Mail?', 'Outlook to JIRA', [\"Yes\", 'No'])\n\n#Declare the filepath to the mappingtable\nfilepath = \"Mappingtable.xlsx\"\n\n#End the Script if the Selection was NO or None\nif result == 'No' or result is None:\n print(\"End\")\n quit()\n\n#Get Outlook from the Computer\noutlook = win32com.client.Dispatch(\"Outlook.Application\").GetNamespace(\"MAPI\")\n#Get the Outlook Accounts\naccounts= win32com.client.Dispatch(\"Outlook.Application\").Session.Accounts\n\n\n#Load file into variable\ndata = pd.read_excel(filepath)\n\n\nglobal inbox\n\n\nwhile True:\n \n #Iterate over Mappingtable.xlsx\n for index, row in data.iterrows():\n counter = 0\n #Iterate over all Accounts in the account variable\n for account in accounts:\n #Only do Further Stuff if account is like the defined Account in the Mappingtable\n if account.DisplayName == row['MailAccount']:\n #get All Folders from the Account\n 
inbox = outlook.Folders(account.DeliveryStore.DisplayName)\n folders = inbox.Folders\n for folder in folders:\n #Check if the Folder is like the searchingFolder\n if folder.Name == desiredFolder:\n messages = folder.Items\n #Iterate over the First 50 Messages from newest to latest\n for message in reversed(messages):\n if counter == mailCounter:\n break\n #Check if the Category of the Message is like the defined Category in the Mappingtable\n if message.Categories == row['Label']:\n try:\n #Try to open the URL to check if it is reachable\n url_requested = urllib.request.urlopen(row['JIRAURL'])\n if 200 == url_requested.code:\n #Create JIRA Issue and clear Category if jira Issue was created\n new_issue = jira_py.createjiraIssue(row['JIRAURL'], row['JIRAUser'], row['JiraPW'], row['ProjectKey'], message.Subject, message.Body, row['IssueType'])\n if new_issue:\n #Add All Attacments to JIRA Issue if there are any\n if message.Attachments:\n for attachment in message.Attachments:\n fileHandler(row['JIRAURL'], row['JIRAUser'], row['JiraPW'], new_issue, attachment)\n message = changeMailName(message, new_issue, addJIRAKeytoMailName)\n message.Categories = \"\"\n message.save() \n #Except if the URL could not be read\n except urllib.error.URLError as e: print('URL ' + row['JIRAURL'] + ' could not be read')\n #Except a ValueError and prints it\n except ValueError as e: print(e)\n counter += 1\n\n print(\"Iterate\")\n time.sleep(iterateTimeInSeconds)\n \n\n\n" ]
[ [ "pandas.read_excel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
mparthasarathy25/DiscordBotPython
[ "057a528ed6ee8852abe6e54ab5cc21a299f12d66" ]
[ "main.py" ]
[ "#imports with methods labeled, some imports were for the full library\nfrom discord import File, Embed\nfrom discord.ext import commands, tasks\nfrom sqlite3 import connect\nfrom datetime import datetime, timedelta, date\nfrom matplotlib.pyplot import xticks, yticks, xlabel, ylabel, show, close, bar, savefig, plot, figure, title\nimport numpy as np\nfrom io import BytesIO, StringIO\nfrom os import remove, path, environ\nfrom pandas import Index, DataFrame, read_csv, concat\nfrom requests import get\nfrom random import randint\nfrom scipy.stats import norm\n\n#creating the bot and setting the value of the 'client' variable to the bot\nclient = commands.Bot(command_prefix = '$', case_insensitive = True)\n\n#reading in the tickers.csv, which includes symbol names (the main purpose)\nticker_data = read_csv('tickers.csv')\nfor element in range(len(ticker_data)):\n #setting a global dictionary as to make it easier to create every variable\n globals()[f\"count_{element}\"] = 0\n globals()[f\"count_{element}_unique\"] = 0\n\n#global trace_list for the tracing api\nglobal trace_list\ntrace_list = []\n\n\n\n\"\"\"\nA method used when the discord bot first goes online\n\n...\n\nAttributes\n----------\nN/A\n\nDescription\n-----------\nThis method will insert each message from the discord server's relevant channels into a SQLlite database.\nIt will also iterate through each message to determine if there is a keyword match. If there is, the message\nwill be traced using Pat's API. After these operations are completed, two counters are started. 
One counter is\nfor tracing (60 minute loop) and the other counter is for appending to the SQLlite database (5 minute loop).\n\"\"\"\[email protected]\nasync def on_ready():\n #connecting to the sqllite database\n db = connect('sigma7.sqlite')\n cursor = db.cursor()\n #creating a table if it doesn't already exist\n cursor.execute('''\n CREATE TABLE IF NOT EXISTS sigma7(\n member TEXT,\n message TEXT,\n date TEXT,\n keyword TEXT\n )\n ''')\n clear = (\"DELETE FROM sigma7\")\n #removing anything that was already in the database when the bot goes online\n cursor.execute(clear)\n market_channel = client.get_channel(852551268056825879)\n chat_channel = client.get_channel(862525581685948416)\n #for loop to iterate through each channel's history\n async for msg in market_channel.history(limit=10000):\n #inserting each message, if the author isn't a bot, into the sqllite database\n if not msg.author.bot:\n time = msg.created_at\n sql = (\"INSERT INTO sigma7(member, message, date) VALUES(?,?,?)\")\n val = (str(msg.author), str(msg.content), time.strftime('%Y-%m-%d %H:%M:%S')) \n cursor.execute(sql,val)\n async for msg in chat_channel.history(limit=10000): \n #inserting each message, if the author isn't a bot, into the sqllite database\n if not msg.author.bot:\n time = msg.created_at\n sql = (\"INSERT INTO sigma7(member, message, date) VALUES(?,?,?)\")\n val = (str(msg.author), str(msg.content), time.strftime('%Y-%m-%d %H:%M:%S')) \n cursor.execute(sql,val)\n #channel = client.get_channel(854077038514929687)\n # async for msg in channel.history(limit=10000):\n # if not msg.author.bot:\n # time = msg.created_at\n # sql = (\"INSERT INTO sigma7(member, message, date) VALUES(?,?,?)\")\n # val = (str(msg.author), str(msg.content), time.strftime('%Y-%m-%d %H:%M:%S')) \n # cursor.execute(sql,val)\n \n #same idea as the previous for loops, except this time, adding the messages to the tracing api and the global count variables\n #to determine whether there is a keyword match, each 
message in the server is split and checked\n msg_authors = []\n async for msg in market_channel.history(limit = 10000, after = datetime.now() + timedelta(hours=5) - timedelta(weeks=2)):\n if not msg.author.bot:\n for element in range(len(ticker_data)):\n if ticker_data.get('Symbol')[element] == msg.content:\n api_url_Trace = f'https://sigma7-trends.azurewebsites.net/api/trace?code=O8f03FccQJ8csnd8KAHq0E6s9VTaaGXBJMyXfRjduOlUvCstecmowg==&symbol={msg.content}'\n trace_list.append(get(api_url_Trace).json()) \n if msg.author not in msg_authors:\n msg_authors.append(msg.author)\n globals()[f\"count_{element}_unique\"] += 1\n break\n globals()[f\"count_{element}\"] += 1\n else:\n for i in range(len(msg.content.split())):\n if ticker_data.get('Symbol')[element] == msg.content.split()[i]:\n api_url_Trace = f'https://sigma7-trends.azurewebsites.net/api/trace?code=O8f03FccQJ8csnd8KAHq0E6s9VTaaGXBJMyXfRjduOlUvCstecmowg==&symbol={msg.content.split()[i]}'\n trace_list.append(get(api_url_Trace).json())\n if msg.author not in msg_authors:\n msg_authors.append(msg.author)\n globals()[f\"count_{element}_unique\"] += 1\n globals()[f\"count_{element}\"] += 1\n \n\n #same as the above for loops but for the other relevant channel \n async for msg in chat_channel.history(limit = 10000, after = datetime.now() + timedelta(hours=5) - timedelta(weeks=2)):\n if not msg.author.bot:\n for element in range(len(ticker_data)):\n if ticker_data.get('Symbol')[element] == msg.content:\n api_url_Trace = f'https://sigma7-trends.azurewebsites.net/api/trace?code=O8f03FccQJ8csnd8KAHq0E6s9VTaaGXBJMyXfRjduOlUvCstecmowg==&symbol={msg.content}'\n trace_list.append(get(api_url_Trace).json())\n if msg.author not in msg_authors:\n msg_authors.append(msg.author)\n globals()[f\"count_{element}_unique\"] += 1\n break\n globals()[f\"count_{element}\"] += 1\n else:\n for i in range(len(msg.content.split())):\n if ticker_data.get('Symbol')[element] == msg.content.split()[i]:\n api_url_Trace = 
f'https://sigma7-trends.azurewebsites.net/api/trace?code=O8f03FccQJ8csnd8KAHq0E6s9VTaaGXBJMyXfRjduOlUvCstecmowg==&symbol={msg.content.split()[i]}'\n trace_list.append(get(api_url_Trace).json())\n if msg.author not in msg_authors:\n msg_authors.append(msg.author)\n globals()[f\"count_{element}_unique\"] += 1\n globals()[f\"count_{element}\"] += 1\n \n\n # async for msg in channel.history(limit = None, after = datetime.datetime.now() + datetime.timedelta(hours=5) - datetime.timedelta(weeks=2)):\n # if not msg.author.bot:\n # for element in range(len(ticker_data)):\n # for i in range(len(msg.content.split())):\n # if ticker_data.get('Symbol')[element] == msg.content.split()[i]:\n # globals()[f\"count_{element}\"] += 1\n # break\n\n #commiting the changes to the database and starting the counter and trendcounter loop methods\n db.commit()\n cursor.close()\n db.close() \n counter.start()\n trendcounter.start()\n\n\"\"\"\nA 60-min looped method that traces keyword matched messages.\n\n...\n\nAttributes\n----------\nN/A\n\nDescription\n-----------\nThis method will perform one of the key operations described for the on_ready command: tracing matched messages.\nHowever, rather than iterating through every message every 5 minutes, the method will run every 60 minutes, so as \nto make operations a little easier. 
This method is mainly for accounting for the newest messages.\n\"\"\"\[email protected](minutes = 60)\nasync def trendcounter():\n #resetting the global dictionary to all values of 0\n for element in range(len(ticker_data)):\n globals()[f\"count_{element}\"] = 0\n market_channel = client.get_channel(852551268056825879)\n chat_channel = client.get_channel(862525581685948416)\n channel = client.get_channel(854077038514929687)\n msg_authors = []\n #essentially this performs the same task as what was done in the on_ready command, except every 60 minutes\n async for msg in market_channel.history(limit = 10000, after = datetime.now() + timedelta(hours=5) - timedelta(weeks=2)):\n if not msg.author.bot:\n for element in range(len(ticker_data)):\n if ticker_data.get('Symbol')[element] == msg.content:\n api_url_Trace = f'https://sigma7-trends.azurewebsites.net/api/trace?code=O8f03FccQJ8csnd8KAHq0E6s9VTaaGXBJMyXfRjduOlUvCstecmowg==&symbol={msg.content}'\n trace_list.append(get(api_url_Trace).json())\n if msg.author not in msg_authors:\n msg_authors.append(msg.author)\n globals()[f\"count_{element}_unique\"] += 1\n break\n globals()[f\"count_{element}\"] += 1\n else:\n for i in range(len(msg.content.split())):\n if ticker_data.get('Symbol')[element] == msg.content.split()[i]:\n api_url_Trace_1 = f'https://sigma7-trends.azurewebsites.net/api/trace?code=O8f03FccQJ8csnd8KAHq0E6s9VTaaGXBJMyXfRjduOlUvCstecmowg==&symbol={msg.content.split()[i]}'\n trace_list.append(get(api_url_Trace_1).json())\n if msg.author not in msg_authors:\n msg_authors.append(msg.author)\n globals()[f\"count_{element}_unique\"] += 1\n globals()[f\"count_{element}\"] += 1\n async for msg in chat_channel.history(limit = 10000, after = datetime.now() + timedelta(hours=5) - timedelta(weeks=2)):\n if not msg.author.bot:\n for element in range(len(ticker_data)):\n if ticker_data.get('Symbol')[element] == msg.content:\n api_url_Trace_2 = 
f'https://sigma7-trends.azurewebsites.net/api/trace?code=O8f03FccQJ8csnd8KAHq0E6s9VTaaGXBJMyXfRjduOlUvCstecmowg==&symbol={msg.content}'\n trace_list.append(get(api_url_Trace_2).json())\n if msg.author not in msg_authors:\n msg_authors.append(msg.author)\n globals()[f\"count_{element}_unique\"] += 1\n break\n globals()[f\"count_{element}\"] += 1\n else:\n for i in range(len(msg.content.split())):\n if ticker_data.get('Symbol')[element] == msg.content.split()[i]:\n api_url_Trace_3 = f'https://sigma7-trends.azurewebsites.net/api/trace?code=O8f03FccQJ8csnd8KAHq0E6s9VTaaGXBJMyXfRjduOlUvCstecmowg==&symbol={msg.content.split()[i]}'\n trace_list.append(get(api_url_Trace_3).json())\n if msg.author not in msg_authors:\n msg_authors.append(msg.author)\n globals()[f\"count_{element}_unique\"] += 1\n globals()[f\"count_{element}\"] += 1\n # async for msg in channel.history(limit = 10000, after = datetime.datetime.now() + datetime.timedelta(hours=5) - datetime.timedelta(weeks=2)):\n # if not msg.author.bot:\n # for element in range(len(ticker_data)):\n # for i in range(len(msg.content.split())):\n # if ticker_data.get('Symbol')[element] == msg.content.split()[i]:\n # globals()[f\"count_{element}\"] += 1\n # break\n\n\n\"\"\"\nA 5-min looped method that appends to the SQLlite database.\n\n...\n\nAttributes\n----------\nN/A\n\nDescription\n-----------\nThis method will perform one of the key operations described for the on_ready command: appending to SQLlite.\nSimilar to the method above, this method will run every 60 minutes so as to make operations easier. 
\nThis method is mainly for accounting for the newest messages.\n\"\"\"\[email protected](minutes = 60)\nasync def counter():\n #same task as the on_ready function, but every 60 minutes\n db = connect('sigma7.sqlite')\n cursor = db.cursor()\n market_channel = client.get_channel(852551268056825879)\n chat_channel = client.get_channel(862525581685948416)\n channel = client.get_channel(854077038514929687)\n async for msg in market_channel.history(limit = 10000, after = datetime.now() + timedelta(hours=5) - timedelta(minutes=5)):\n if not msg.author.bot:\n time = msg.created_at\n sql = (\"INSERT INTO sigma7(member, message, date) VALUES(?,?,?)\")\n val = (str(msg.author), str(msg.content), time.strftime('%Y-%m-%d %H:%M:%S')) \n cursor.execute(sql,val)\n async for msg in chat_channel.history(limit = 10000, after = datetime.now() + timedelta(hours=5) - timedelta(minutes=5)):\n if not msg.author.bot:\n time = msg.created_at\n sql = (\"INSERT INTO sigma7(member, message, date) VALUES(?,?,?)\")\n val = (str(msg.author), str(msg.content), time.strftime('%Y-%m-%d %H:%M:%S')) \n cursor.execute(sql,val)\n # async for msg in channel.history(limit = None, after = datetime.datetime.now() + datetime.timedelta(hours=5) - datetime.timedelta(minutes=5)):\n # if not msg.author.bot:\n # time = msg.created_at\n # sql = (\"INSERT INTO sigma7(member, message, date) VALUES(?,?,?)\")\n # val = (str(msg.author), str(msg.content), time.strftime('%Y-%m-%d %H:%M:%S')) \n # cursor.execute(sql,val)\n db.commit()\n cursor.close()\n db.close() \n\n# def trending_unique():\n# x_trends = (first_trend, second_trend, third_trend, fourth_trend, fifth_trend)\n# pos = arange(len(x_trends))\n# numbers = [new_list[0],new_list[1],new_list[2],new_list[3],new_list[4]]\n# bar(pos, numbers, align = 'center')\n# xticks(pos, x_trends, color = 'white')\n# yticks(color = 'white')\n# ylabel('Number of Messages', color = 'white')\n# savefig('trend_image.png', transparent = True)\n# close()\n# with 
open('trend_image.png', 'rb') as f:\n# file = BytesIO(f.read())\n \n# image = File(file, filename = 'trend.png')\n# trending_embed = Embed(title = \"Trending Stocks on Discord\")\n# trending_embed.set_image(url = 'attachment://trend.png')\n# if path.exists('trend_image.png'):\n# remove('trend_image.png')\n# trending_embed.add_field(name = (\"1. \" + first_trend), value = ticker_data['Name'][company_index1].split()[0], inline = False)\n# trending_embed.add_field(name = (\"2. \" + second_trend), value = ticker_data['Name'][company_index2].split()[0], inline = False)\n# trending_embed.add_field(name = (\"3. \" + third_trend), value = ticker_data['Name'][company_index3].split()[0], inline = False)\n# trending_embed.add_field(name = (\"4. \" + fourth_trend), value = ticker_data['Name'][company_index4].split()[0], inline = False)\n# trending_embed.add_field(name = (\"5. \" + fifth_trend), value = ticker_data['Name'][company_index5].split()[0], inline = False)\n# return_list = [trending_embed, image]\n# return return_list\n\n\n\"\"\"\nA method that returns the trending stocks online and in discord.\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\n\nDescription\n-----------\nThis method will store the json output of the tracking api call, and a discord embed will be sent which contains the trending stocks.\n\n\"\"\"\[email protected]()\nasync def trending(ctx: commands.Context):\n #capturing data from the tracking api\n api_url_Track = 'https://sigma7-trends.azurewebsites.net/api/pull_symbols?code=O8f03FccQJ8csnd8KAHq0E6s9VTaaGXBJMyXfRjduOlUvCstecmowg=='\n #converting the json output to something viewable\n df_Track = get(api_url_Track).json()\n trending_test_embed = Embed(title = \"Trending Stock (not in order)\")\n #running a for loop and adding a field for each trending stock in the api call\n for i in range(len(df_Track)):\n # ticker = df_Track[i]\n # iex_api_key = 
'Tsk_30a2677082d54c7b8697675d84baf94b'\n # api_url = f'https://sandbox.iexapis.com/stable/ref-data/options/symbols/{ticker}/?token={iex_api_key}'\n # df1 = get(api_url).json()\n trending_test_embed.add_field(name = (f\"{i+1}.\"), value = df_Track[i], inline = False)\n await ctx.send(embed = trending_test_embed) \n # ticker_data = read_csv('tickers.csv')\n # db = connect('sigma7.sqlite')\n # cursor = db.cursor() \n # currentDate = date.today()\n # days = timedelta(14)\n # pastDate = currentDate - days\n # cursor.execute(\"SELECT message FROM sigma7 WHERE CAST(date as DATE) >= CAST(\" + pastDate.strftime('%Y-%m-%d') + \" as DATE)\")\n # result3 = cursor.fetchall()\n # db.commit()\n # cursor.close()\n # list = []\n # first_trend = \"\"\n # second_trend = \"\"\n # third_trend = \"\"\n # fourth_trend = \"\"\n # fifth_trend = \"\"\n # for element in range(len(ticker_data)):\n # list.append(globals()[f\"count_{element}\"])\n # list.sort(reverse = True)\n # new_list = list.copy()\n # for element in range(len(ticker_data)):\n # if globals()[f\"count_{element}\"] == list[0]:\n # first_trend = ticker_data.get('Symbol')[element]\n # list[0] = -1\n # elif globals()[f\"count_{element}\"] == list[1]:\n # second_trend = ticker_data.get('Symbol')[element]\n # list[1] = -1\n # elif globals()[f\"count_{element}\"] == list[2]:\n # third_trend = ticker_data.get('Symbol')[element]\n # list[2] = -1\n # elif globals()[f\"count_{element}\"] == list[3]:\n # fourth_trend = ticker_data.get('Symbol')[element]\n # list[3] = -1\n # elif globals()[f\"count_{element}\"] == list[4]:\n # fifth_trend = ticker_data.get('Symbol')[element]\n # list[4] = -1\n\n # data_index = Index(ticker_data['Symbol'])\n # company_index1 = data_index.get_loc(first_trend)\n # company_index2 = data_index.get_loc(second_trend)\n # company_index3 = data_index.get_loc(third_trend)\n # company_index4 = data_index.get_loc(fourth_trend)\n # company_index5 = data_index.get_loc(fifth_trend)\n # x_trends = (first_trend, 
second_trend, third_trend, fourth_trend, fifth_trend)\n # pos = arange(len(x_trends))\n # numbers = [new_list[0],new_list[1],new_list[2],new_list[3],new_list[4]]\n # bar(pos, numbers, align = 'center')\n # xticks(pos, x_trends, color = 'white')\n # yticks(color = 'white')\n # ylabel('Number of Messages', color = 'white')\n # savefig('trend_image.png', transparent = True)\n # close()\n # with open('trend_image.png', 'rb') as f:\n # file = BytesIO(f.read())\n \n # image =File(file, filename = 'trend.png')\n # trending_embed = Embed(title = \"Trending Stocks on Discord\")\n # trending_embed.set_image(url = 'attachment://trend.png')\n # if path.exists('trend_image.png'):\n # remove('trend_image.png')\n # trending_embed.add_field(name = (\"1. \" + first_trend), value = ticker_data['Name'][company_index1].split()[0], inline = False)\n # trending_embed.add_field(name = (\"2. \" + second_trend), value = ticker_data['Name'][company_index2].split()[0], inline = False)\n # trending_embed.add_field(name = (\"3. \" + third_trend), value = ticker_data['Name'][company_index3].split()[0], inline = False)\n # trending_embed.add_field(name = (\"4. \" + fourth_trend), value = ticker_data['Name'][company_index4].split()[0], inline = False)\n # trending_embed.add_field(name = (\"5. \" + fifth_trend), value = ticker_data['Name'][company_index5].split()[0], inline = False)\n # await ctx.send(embed = trending_embed, file = image)\n\n\"\"\"\nA helper method that will create a graph of a company's pricing data for a certain period of time.\n\n...\n\nAttributes\n----------\nsymbol: String, company symbol\nlength: String, length of time\nmethod: String, open, high, low, close\n\nDescription\n-----------\nThis method will use the iexcloud api's data for charting graphs. 
Additionally, I have added each method \nto a separate concatenated variable, which allows for the inputted method to be used in creating the graphs.\n\n\"\"\"\ndef get_historic_data(symbol, length, method):\n #using the iex cloud api\n iex_api_key = 'pk_6fdc6387a2ae4f8e9783b029fc2a3774'\n api_url = f'https://cloud.iexapis.com/stable/stock/{symbol}/chart/{length}?token={iex_api_key}'\n #converting the json output\n df = get(api_url).json()\n \n date = []\n open = []\n high = []\n low = []\n close1 = []\n \n #for loop to add data to a list for charting graphs\n for i in range(len(df)):\n date.append(df[i]['date'])\n open.append(df[i]['open'])\n high.append(df[i]['high'])\n low.append(df[i]['low'])\n close1.append(df[i]['close'])\n \n #renaming the columns for a future command\n date_df = DataFrame(date).rename(columns = {0:'date'})\n open_df = DataFrame(open).rename(columns = {0:'open'})\n high_df = DataFrame(high).rename(columns = {0:'high'})\n low_df = DataFrame(low).rename(columns = {0:'low'})\n close_df = DataFrame(close1).rename(columns = {0:'close'})\n \n #concatenating the lists to one\n frames = [date_df, open_df, high_df, low_df, close_df]\n df = concat(frames, axis = 1, join = 'inner')\n df = df.set_index('date')\n \n #plotting the graph given the method and making stylistic changes\n df[method].plot()\n xlabel('Date', fontsize = 10, color = 'white')\n ylabel('Price', fontsize = 10, color = 'white')\n xticks(fontsize = 8, color = 'white')\n yticks(fontsize = 10, color = 'white')\n savefig('stock_image.png', transparent = True)\n close()\n\n\"\"\"\nA method that has two options: graph and updates.\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\ncompany_name: String, company symbol\noption: String, graph or updates\nmethod: String, high, low, open, close\nlength: String, length of time\n\nDescription\n-----------\nThe graph option uses the get_historic_data helper method and 
adds the image to a discord embed. \nThe updates method uses the iexcloud api data and simply outputs the data in a different form.\n\n\"\"\"\[email protected]()\nasync def stock(ctx: commands.Context, company_name, option, method = '', length = ''):\n # ticker = yf.Ticker(company_name)\n # delta = datetime.timedelta(days = 30)\n # delta1 = datetime.timedelta(days = 1)\n # dates = drange(date.today()-delta, date.today(), delta1)\n # prices = ticker.history(period = \"30d\").get('Open')\n\n #if statement for the different operations\n if option == 'graph':\n #try statement for an assertion error with the company_name\n try:\n assert company_name != None\n except AssertionError:\n ctx.send(\"In order to use this command, you must input a company ticker name, for example AAPL\")\n \n #try statement for an assertion error with the method\n try:\n assert method != ''\n except AssertionError:\n ctx.send(\"In order to use this command, you must input a method, for example low\")\n \n #try statement for an assertion error with the length\n try:\n assert length != ''\n except AssertionError:\n ctx.send(\"In order to use this command, you must input a length, for example 1d\")\n if method != '':\n if length != '':\n #using the get_historic_data helper method to graph\n get_historic_data(company_name, length, method)\n embed = Embed(title= method.capitalize() + ' Price Change Of ' + company_name + ' Over The Last ' + length, colour= 0x00b2ff)\n with open('stock_image.png', 'rb') as f:\n file = BytesIO(f.read())\n if path.exists('stock_image.png'):\n remove('stock_image.png')\n image = File(file, filename='graph' + company_name + '.png')\n embed.set_image(url=f'attachment://graph' + company_name + '.png')\n #sending the embed with the image\n await ctx.send(embed=embed,file=image)\n #update operation\n elif option == 'updates':\n #try statement for an assertion error with the company_name\n try:\n assert company_name != None\n except AssertionError:\n ctx.send(\"In order to 
use this command, you must input a company ticker name, for example AAPL\")\n ticker = company_name\n # IEX_KEY = \"\"\n # token = environ[\"IEX_KEY\"]\n #using iex cloud api data for latest price, market cap, etc.\n iex_api_key = 'pk_6fdc6387a2ae4f8e9783b029fc2a3774'\n api_url = f'https://cloud.iexapis.com/stable/stock/{ticker}/quote/?token={iex_api_key}'\n df = get(api_url).json()\n api_url1 = f'https://cloud.iexapis.com/stable/time-series/CORE_ESTIMATES/{ticker}/?token={iex_api_key}'\n df1 = get(api_url1).json()\n embed = Embed(title='Latest Updates of ' + ticker, colour= 0x00b2ff)\n embed.add_field(name = \"Symbol : \" + str(df['symbol']), value = \"Latest Price : \" + str(df['latestPrice']) + \"\\n\" + \"Market Cap : \" + str(df['marketCap']) + \"\\n\" + \"Percent Change (from last close) : \" + str(df['changePercent']) + \"\\n\" + \"Market Consensus : \" + str(df1[0]['marketConsensus']))\n await ctx.send(embed = embed)\n\n\n\"\"\"\nA method that will print out news articles given a stock ticker\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\ncompany_name: String, company symbol\nnumber: int, number of articles to be printed\n\nDescription\n-----------\nThis method will use the polygon api and will print out news articles dependent on how many articles to be printed.\nThe discord embed that will be outputed prints out the title, article_url, description, and image.\n\n\"\"\"\napi = '3c3wFYiCLgsFup0dHv0p4kxJnSVx_mrG'\[email protected]()\nasync def news(ctx: commands.Context, company_name, number: int = 5):\n #try statement for an assertion error with the company_name\n try:\n assert company_name != None\n except AssertionError:\n ctx.send(\"In order to use this command, you must input a company name, for example AAPL\")\n\n bool_test = False\n data_index = Index(ticker_data['Symbol'])\n company_index = data_index.get_loc(company_name)\n\n ticker = company_name\n limit = '100'\n 
#using the api from polygon to get the news\n api_url = f'https://api.polygon.io/v2/reference/news?limit={limit}&order=descending&sort=published_utc&ticker={ticker}&published_utc.gte=2021-04-26&apiKey={api}'\n data = get(api_url).json()\n count = 0\n #iterating the resulting news data\n for element in data['results']:\n if count >= number:\n break\n if (ticker_data['Name'][company_index].split()[0].lower()) in element['article_url']:\n #using try statements for if there aren't descriptions or image_urls\n try: \n element['description']\n except KeyError:\n discord_embed = Embed(title= element['title'], url= element['article_url'])\n else:\n discord_embed = Embed(title= element['title'], url= element['article_url'], description = element['description'])\n try:\n element['image_url']\n except KeyError:\n continue\n else:\n discord_embed.set_image(url = element['image_url'])\n #sending the discord embed\n await ctx.send(embed = discord_embed)\n count += 1\n\n\"\"\"\nA method that will round a number to however many decimal places\n\n...\n\nAttributes\n----------\nn: int, number to be rounded\ndecimals: int, decimal to be rounded to\n\nDescription\n-----------\nThis method take a number and use the decimals variable to divide until the correct number of decimal places is reached.\n\n\"\"\"\ndef truncate(n, decimals=0):\n multiplier = 10 ** decimals\n #returning the correctly rounded number\n return int(n * multiplier) / multiplier\n\nmember_list = []\nportfolio_list = []\n\n\"\"\"\nA method that holds a user's portfolio in discord\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\noption: String, create, update, view\nitems: String, items for the portfolio\n\nDescription\n-----------\nThis method will create a portfolio by appending each user's portfolio\nto a portfolio list. 
Then, if the same user asks to view their portfolio,\nthe portfolio list will be matched to the member list position. Same goes \nfor the update method, where the user can add or remove from their previously\ncreated portfolio.\n\n\"\"\"\[email protected]()\nasync def portfolio(ctx: commands.Context, option, *, items=''):\n stock_list = []\n portfolio_embed = Embed(title = \"Your Portfolio\")\n #appending the author to the member list\n member_list.append(ctx.author.display_name)\n #iterating through the member list to add number to portfolio list\n for i in range(len(member_list)):\n portfolio_list.append(i)\n \n #if statement for the different options\n if option == \"create\":\n #try statement for an assertion error with the items\n try:\n assert items != ''\n except AssertionError:\n ctx.send(\"In order to create a portfolio, you must add stock items, for example: MSFT 2\")\n if len(items.split()) != 0:\n for i in range(len(items.split())):\n if i % 2 == 0: \n iex_api_key = 'pk_6fdc6387a2ae4f8e9783b029fc2a3774'\n api_url = f'https://cloud.iexapis.com/stable/stock/{items.split()[i]}/quote/?token={iex_api_key}'\n df = get(api_url).json()\n #essentially reprinting the items but in a form where you can view the latest price of the shares the user owns\n portfolio_embed.add_field(name = (items.split()[i+1] + \" shares of \" + items.split()[i]), value = \"$\" + str(truncate((int(items.split()[i+1])*int(df['latestPrice'])), 2)), inline = True)\n stock_list.append(items.split()[i])\n \n #portfolio_list.append(portfolio_embed)\n #setting the index of the member_list to the portfolio_embed, so the two lists match\n portfolio_list[member_list.index(ctx.author.display_name)] = portfolio_embed\n await ctx.author.send(embed = portfolio_list[member_list.index(ctx.author.display_name)])\n elif option == \"update\":\n #try statement for an assertion error with the items\n try:\n assert items != ''\n except AssertionError:\n ctx.send(\"In order to update a portfolio, you must 
describe what you want to update, for example: add AAPL 2 remove 1\")\n count = 0 \n if len(items.split()) != 0:\n for i in range(len(items.split())):\n #essentially performing the same task as create, but add will input more into the embed, while remove will take away\n #using split in a lot of these situations to run through a string and see if there are relevant matches\n if items.split()[i] == \"add\":\n for item in items.split()[(i+1):]:\n if item == \"remove\":\n break\n elif i+1 % 2 == 0:\n if items.split().index(item) % 2 == 0:\n iex_api_key = 'pk_6fdc6387a2ae4f8e9783b029fc2a3774'\n api_url = f'https://cloud.iexapis.com/stable/stock/{items.split()[items.split().index(item)]}/quote/?token={iex_api_key}'\n df = get(api_url).json()\n portfolio_list[member_list.index(ctx.author.display_name)].add_field(name = (items.split()[items.split().index(item)+1] + \" shares of \" + items.split()[items.split().index(item)]), value = \"$\" + str(truncate(int(items.split()[items.split().index(item)+1])*df['latestPrice'], 2)), inline = True)\n elif i+1 % 2 == 1:\n if items.split().index(item) % 2 == 1:\n iex_api_key = 'pk_6fdc6387a2ae4f8e9783b029fc2a3774'\n api_url = f'https://cloud.iexapis.com/stable/stock/{items.split()[items.split().index(item)]}/quote/?token={iex_api_key}'\n df = get(api_url).json()\n portfolio_list[member_list.index(ctx.author.display_name)].add_field(name = (items.split()[items.split().index(item)+1] + \" shares of \" + items.split()[items.split().index(item)]), value = \"$\" + str(truncate(int(items.split()[items.split().index(item)+1])*df['latestPrice'], 2)), inline = True) \n\n elif items.split()[i] == \"remove\":\n for item in items.split()[(i+1):]:\n if item == \"add\":\n break\n elif count == 0:\n portfolio_list[member_list.index(ctx.author.display_name)].remove_field(int(item) - 1)\n count += 1\n elif count > 0:\n portfolio_list[member_list.index(ctx.author.display_name)].remove_field(int(item) - 1 - count)\n count += 1\n\n\n await 
ctx.author.send(embed = portfolio_list[member_list.index(ctx.author.display_name)])\n\n\n elif option == \"view\":\n await ctx.author.send(embed = portfolio_list[member_list.index(ctx.author.display_name)])\n\n\"\"\"\nA method that will return a random number from 1 to n, a roll of a dice\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\nnumber: int, number of sides to roll\n\nDescription\n-----------\nThis method will simply perform the randint method and return a message of what number was rolled.\n\n\"\"\"\[email protected]()\nasync def roll(ctx: commands.Context, number: int = 6):\n #randint\n await ctx.send(\"Here's your lucky number: \" + str(randint(1,number)))\n\n\n\"\"\"\nA method that will return the description of a specific company\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\ncompany_name: String, company symbol\n\nDescription\n-----------\nThis method will use the iex cloud api method and return a description from the json output.\n\n\"\"\"\[email protected]()\nasync def description(ctx: commands.Context, company_name):\n #try statement for an assertion error with the company_name\n try:\n assert company_name != None\n except AssertionError:\n ctx.send(\"In order to use this command, you must input a company name, for example AAPL\")\n iex_api_key = 'pk_6fdc6387a2ae4f8e9783b029fc2a3774'\n api_url = f'https://cloud.iexapis.com/stable/stock/{company_name}/company/?token={iex_api_key}'\n df = get(api_url).json()\n desc_embed = Embed(title = company_name)\n #add field with the description\n desc_embed.add_field(name = 'Description', value = df['description'])\n await ctx.send(embed = desc_embed)\n\n\n\"\"\"\nA method that will simulate a company's price performance over the future 30 days.\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent 
wherever the command was made.)\ncompany_name: String, company symbol\n\nDescription\n-----------\nThis method takes in data from the iex cloud for the past 5 years, and takes in the pct change for\neach day. Then, the mean and stdev of the pct changes are taken. After this, 10 simulations are done\nbased off of the probability distribution of the returns (pct changes). The 10 simulations are graphed\nwith the same starting point, being the previous day's close.\n\n\"\"\"\[email protected]()\nasync def simulation(ctx: commands.Context, company_name):\n #try statement for an assertion error with the company name\n try:\n assert company_name != None\n assert company_name != \" \"\n except AssertionError:\n ctx.send(\"In order to use this command, you must input a company name, for example AAPL\")\n #using iex cloud api to take historic data\n iex_api_key = 'pk_6fdc6387a2ae4f8e9783b029fc2a3774'\n api_url = f'https://cloud.iexapis.com/stable/stock/{company_name}/chart/5y/?token={iex_api_key}'\n df = get(api_url).json()\n data = concat([DataFrame([df[i]['close']], columns=['Close']) for i in range(len(df))],\n ignore_index=True)\n #calculating the percent change day-to-day\n returns = data.pct_change()\n returns.dropna(inplace = True)\n l = norm.ppf(0.10)\n u = norm.ppf(0.85)\n #taking mean and standard deviation, which will help with probability distribution\n mean = returns.mean()\n stdev = returns.std()\n np.random.seed(42)\n n = np.random.normal(size = (30,10))\n rows = n.shape[0]\n cols = n.shape[1]\n #for loop to sift through the random.normal\n for i in range(0,rows) :\n for j in range(0,cols) :\n #with the upper limit and lower limit, restrictions are made\n if n[i][j] > u :\n n[i][j] = u #sets upper limit\n elif n[i][j] < l :\n n[i][j] = l #sets lower limit\n else :\n n[i][j] = n[i][j]\n n[i][j] = (stdev * n[i][j]) + mean\n s = data.iloc[-1]\n pred = np.zeros_like(n) + 1\n #sets beginning point of simulations\n pred[0] = s \n #for each of the 30 days, setting 
the data by looking at i-1 data \n for i in range(1,30) :\n pred[i] = pred[(i-1)] * (1 + n[(i-1)]) \n for i in range(0,10) :\n plot(pred[:, i])\n xlabel('Days Past Present', fontsize = 10, color = 'white')\n ylabel('Close Price', fontsize = 10, color = 'white')\n xticks(fontsize = 8, color = 'white')\n yticks(fontsize = 10, color = 'white')\n savefig('sim_image.png', transparent = True)\n close()\n embed = Embed(title= 'Monty Carlo Simulation of the Close Price Change Of ' + company_name + ' Over The Last 30 Days (Each Line Represents a Different Simulation)', colour= 0x00b2ff)\n with open('sim_image.png', 'rb') as f:\n file = BytesIO(f.read())\n if path.exists('sim_image.png'):\n remove('sim_image.png')\n image = File(file, filename='graphsim' + company_name + '.png')\n embed.set_image(url=f'attachment://graphsim' + company_name + '.png')\n await ctx.send(embed=embed,file=image)\n\n \n\"\"\"\nA method that will print out all commands that can be printed. \n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\n\nDescription\n-----------\nThis method that returns the command list and examples in a discord embed.\n\n\"\"\"\[email protected]()\nasync def allcommands(ctx: commands.Context):\n command_embed = Embed(title = 'Available Commands')\n command_embed.add_field(name = 'Command List: ', value = '$trending \\n$news tickername numberofarticles \\n$stock tickername (graph or updates) (if graph, choose what to graph: low, high, close, open) (if graph, choose time period: 5d, 1m, 3m, 1y) \\n$portfolio (create or update or view) (if create, input the ticker followed by how many shares owned; ex: AAPL 2 MSFT 3) <- (If update instead of create, input add (add MSFT 3 NFLX 2) or remove (remove 1, will remove first stock in portfolio) \\n$simulation tickername \\n$description tickername \\n$roll number')\n command_embed.add_field(name = 'Examples: ', value = 'Examples: \\n$trending \\n$news AAPL 
3 \\n$stock GS updates \\n$stock GS graph low 3d \\n$portfolio create MSFT 3 GOOGL 5 AA 2 \\n$portfolio update add AAPL 3 \\n$portfolio update remove 1 \\n$portfolio update add NFLX 2 remove 2 \\n$portfolio view \\n$simulation AAPL \\n$description NFLX \\n$roll 6')\n await ctx.send(embed = command_embed)\n \n\n\"\"\"\nA method that will purge messages in the server. This method is only for people who can manage messages in the server.\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\nnumber: int, number of messages to delete\n\nDescription\n-----------\nThis method will remove a specified number of messages in the channel the message was sent in. \n\n\"\"\"\[email protected]()\nasync def purge(ctx: commands.Context, *, number:int=None):\n if ctx.message.author.guild_permissions.manage_messages:\n try:\n if number is None:\n await ctx.send(\"You must input a number\")\n else:\n deleted = await ctx.message.channel.purge(limit = number)\n await ctx.send(f\"Messages purged by {ctx.message.author.mention}: '{len(deleted)}'\")\n except:\n await ctx.send(\"I can't purge messages here.\")\n else:\n await ctx.send(\"You do not have permissions to use this command.\")\n\n\n\"\"\"\nAn error method\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\nerror: String, command\n\nDescription\n-----------\nIf CommandNotFound error is thrown, an error message will be sent in discord\n\n\"\"\"\[email protected]\nasync def on_command_error(ctx: commands.Context, error):\n if isinstance(error, commands.CommandNotFound):\n await ctx.send(\"I'm not sure what that means...\")\n\n\n\"\"\"\nAn error method\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\nerror: String, command\n\nDescription\n-----------\nIf CommandNotFound error is thrown, an 
error message will be sent in discord.\nIf MissingRequiredArgument error is thrown, an error message will be sent in discord.\n\n\"\"\"\[email protected]\nasync def portfolio_error(ctx: commands.Context, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"I think you're missing something...\")\n elif isinstance(error, commands.CommandNotFound):\n await ctx.send(\"I think you typed something wrong...\")\n\n\n\"\"\"\nAn error method\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\nerror: String, command\n\nDescription\n-----------\nIf CommandNotFound error is thrown, an error message will be sent in discord\nIf MissingRequiredArgument error is thrown, an error message will be sent in discord.\n\n\"\"\"\[email protected]\nasync def stock_error(ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"I think you're missing something...\")\n elif isinstance(error, commands.CommandNotFound):\n await ctx.send(\"I think you typed something wrong...\")\n\n\n\"\"\"\nAn error method\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\nerror: String, command\n\nDescription\n-----------\nIf CommandNotFound error is thrown, an error message will be sent in discord\nIf MissingRequiredArgument error is thrown, an error message will be sent in discord.\n\n\"\"\"\[email protected]\nasync def news_error(ctx: commands.Context, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"I think you're missing something...\")\n elif isinstance(error, commands.CommandNotFound):\n await ctx.send(\"I think you typed something wrong...\")\n\n\n\"\"\"\nAn error method\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\nerror: String, 
command\n\nDescription\n-----------\nIf CommandNotFound error is thrown, an error message will be sent in discord\nIf MissingRequiredArgument error is thrown, an error message will be sent in discord.\n\n\"\"\"\[email protected]\nasync def roll_error(ctx: commands.Context, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"I think you're missing something...\")\n elif isinstance(error, commands.CommandNotFound):\n await ctx.send(\"I think you typed something wrong...\")\n\n\n\"\"\"\nAn error method\n\n...\n\nAttributes\n----------\nctx: commands.Context (allows for the returned message to be sent wherever the command was made.)\nerror: String, command\n\nDescription\n-----------\nIf CommandNotFound error is thrown, an error message will be sent in discord\nIf MissingRequiredArgument error is thrown, an error message will be sent in discord.\n\n\"\"\"\[email protected]\nasync def description_error(ctx: commands.Context, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"I think you're missing something...\")\n elif isinstance(error, commands.CommandNotFound):\n await ctx.send(\"I think you typed something wrong...\")\n\n\nclient.run('ODUzODY1MDkyNTg4NjM0MTMz.YMbl1g.NR_nKTOXqiP4NBwpov9zw-mFYtU')\n" ]
[ [ "scipy.stats.norm.ppf", "pandas.concat", "pandas.read_csv", "matplotlib.pyplot.yticks", "numpy.random.seed", "pandas.Index", "matplotlib.pyplot.savefig", "pandas.DataFrame", "matplotlib.pyplot.plot", "numpy.random.normal", "numpy.zeros_like", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
NiallJeffrey/BornRaytrace
[ "cb07ed78d206563243ace6e9015804e87c6513e5" ]
[ "nersc/run_pkd_grav.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport healpy as hp\nimport os, sys, gc\n\nfrom astropy.io import fits\nfrom astropy.cosmology import FlatLambdaCDM\nfrom astropy import units as u\nfrom astropy.cosmology import z_at_value\n\nsys.path = ['../'] + sys.path\nimport born_raytrace as br\n\nindex = int(sys.argv[1])\nprint(index)\n\ncosmo_par = np.genfromtxt('/global/cscratch1/sd/dominikz/DES_Y3_PKDGRAV_SIMS/grid_run_1/cosmo.par').T\n\n# make list of files\nfilenames = []\nom_list = []\ns8_list = []\nfor om, s8 in zip(cosmo_par[0],cosmo_par[6]):\n for i in range(5):\n filenames.append('cosmo_Om=' + str(om) + '_num=' + str(i) + '_s8=' + str(s8))\n om_list.append(om)\n s8_list.append(s8)\n\n# inputs\n\nom = om_list[index]\ns8 = s8_list[index]\nh = 0.6736\nnside = 1024\nzmax= 3.\nprint(om,s8,filenames[index], flush=True)\n\ndirectory_input = '/global/cscratch1/sd/dominikz/DES_Y3_PKDGRAV_SIMS/grid_run_1/' + str(filenames[index])\ndirectory_output = '/global/cscratch1/sd/ucapnje/DES_Y3_PKDGRAV_kappa/grid_run_1/' + str(filenames[index])\noutput_name_base = 'kappa_DES-Y3-shell_z'\n\n# generate filenames and z_bin_edges\nlog = np.genfromtxt(os.path.join(directory_input, 'DES-Y3.log'))\nz_bin_edges = log.T[1][::-1]\nz_bin_edges = z_bin_edges[np.where(z_bin_edges<zmax)]\n\nsim_filenames = [filename for filename in os.listdir(directory_input) if '.fits' in filename]\nsim_filenames = sorted(sim_filenames)\nsim_filenames = sim_filenames[:-1]\n\n# cosmo code\n\ncosmo_fiducial = FlatLambdaCDM(H0= h * 100. 
* u.km / u.s / u.Mpc, Om0=om)\n\nkappa_pref_evaluated = br.kappa_prefactor(cosmo_fiducial.H0, cosmo_fiducial.Om0, length_unit = 'Mpc')\n\noverdensity_array = np.zeros((len(sim_filenames),hp.nside2npix(nside)), dtype=np.float32)\nfor i in range(overdensity_array.shape[0]):\n map_read = hp.read_map(os.path.join(directory_input, sim_filenames[i]), verbose=False).astype(np.float32)\n print(sim_filenames[i], z_bin_edges[i], flush=True)\n overdensity_array[i] = hp.ud_grade(map_read/np.mean(map_read)-1.,nside)\n \n\ncomoving_edges = cosmo_fiducial.comoving_distance(z_bin_edges)\n\nz_centre = np.empty(len(comoving_edges)-1)\nfor i in range(len(comoving_edges)-1):\n z_centre[i] = z_at_value(cosmo_fiducial.comoving_distance,\n 0.5*(comoving_edges[i]+comoving_edges[i+1]))\n \nfor source_edge_index in np.arange(1,len(z_bin_edges)):\n print(z_bin_edges[source_edge_index], flush=True)\n map_temp = br.raytrace_integration(kappa_prefactor=kappa_pref_evaluated,\n overdensity_array=overdensity_array[:source_edge_index].T,\n a_centre=1./(1.+z_centre[:source_edge_index]),\n comoving_edges=comoving_edges[:source_edge_index+1])\n try:\n hp.write_map(os.path.join(directory_output,str(output_name_base)+str(z_bin_edges[source_edge_index])+'.fits'),\n map_temp, overwrite=True)\n except:\n print(str(os.path.join(directory_output) + ' does not exist - mkdir command'))\n os.mkdir(os.path.join(directory_output)) \n hp.write_map(os.path.join(directory_output,str(output_name_base)+str(z_bin_edges[source_edge_index])+'.fits'),\n map_temp, overwrite=True)\n \n gc.collect()\n" ]
[ [ "numpy.where", "numpy.mean", "numpy.genfromtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cescalara/icecube_tools
[ "d1695294f7cfab17500838ffeb72ac0ba06d3f8d" ]
[ "icecube_tools/cosmology.py" ]
[ "import numpy as np\n\nOm = 0.3\nOl = 0.7\nH0 = 70 # km s^-1 Mpc^-1\nc = 3e5 # km s^-1\nDH = c / H0 # Mpc\n\nMpc_to_cm = 3.086e24\nm_to_cm = 100\nyr_to_s = 3.154e7\n\n\ndef xx(z):\n \"\"\"\n Helper function for the computation of\n :py:func:`icecube_tools.cosmology.luminosity_distance`.\n \"\"\"\n\n return ((1 - Om) / Om) / pow(1 + z, 3)\n\n\ndef phi(x):\n \"\"\"\n Helper function for the computation of\n :py:func:`icecube_tools.cosmology.luminosity_distance`.\n \"\"\"\n\n x2 = np.power(x, 2)\n x3 = pow(x, 3)\n numerator = 1.0 + (1.320 * x) + (0.4415 * x2) + (0.02656 * x3)\n denominator = 1.0 + (1.392 * x) + (0.5121 * x2) + (0.03944 * x3)\n\n return numerator / denominator\n\n\ndef luminosity_distance(z):\n \"\"\"\n Luminosity distance based on approximation used in Adachi & Kasai 2012.\n\n :param z: Redshift\n :type z: float\n :return: Luminosity distance in Mpc\n :rtype: float\n \"\"\"\n\n x = xx(z)\n zp = 1 + z\n\n A = 2 * DH * zp / np.sqrt(Om)\n B = phi(xx(0)) - ((1 / np.sqrt(zp)) * phi(x))\n\n return A * B\n\n\ndef comoving_distance(z):\n \"\"\"\n Comoving distance.\n\n :param z: Redshift\n :type z: float\n :return: Comoving distance in Mpc\n :rtype float:\n \"\"\"\n return luminosity_distance(z) / (1 + z)\n\n\ndef E_fac(z):\n \"\"\"\n E(z) = sqrt(Om * (1+z)^3)\n\n :param z: Redshift\n :type z: float\n :return: E(z)\n :rtype float:\n \"\"\"\n\n Omp = Om * (1 + z) ** 3\n\n return np.sqrt(Omp + Ol)\n\n\ndef differential_comoving_volume(z):\n \"\"\"\n Differential comoving volume.\n\n :param z: Redshift\n :type z: float\n :return: Differential comoving volume in Mpc^3\n :rtype: float\n \"\"\"\n\n dc = comoving_distance(z)\n\n return (DH * dc ** 2) / E_fac(z)\n" ]
[ [ "numpy.sqrt", "numpy.power" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
drewlinsley/ffn_membrane
[ "4b4638c00eed847fa6a7958a7fdbeedca4236561", "4b4638c00eed847fa6a7958a7fdbeedca4236561", "4b4638c00eed847fa6a7958a7fdbeedca4236561", "4b4638c00eed847fa6a7958a7fdbeedca4236561" ]
[ "merge_predictions.py", "membrane/membrane_ops/initialization.py", "ffn/training/models/prc/feedback_hgru_3l_temporal.py", "ffn/training/models/htd_cnn_3l.py" ]
[ "import numpy as np\nfrom matplotlib import pyplot as plt\nfrom skimage.measure import regionprops as rgp\n\n\nv1 = np.load('seg-0_0_0.npz')['segmentation']\nv2 = np.load('ding_segmentations/x0015/y0015/z0017/v0/0/0/seg-0_0_0.npz')['segmentation']\nbg = 0\n\n# 1. Count size of all segments in v1 and v2\nv1segs = rgp(v1)\nv1segs = [x for x in v1segs if x.label != bg]\nv2segs = rgp(v2)\nv2segs = [x for x in v2segs if x.label != bg]\n\n# 2. Sort segs by their areas\narea_v1 = np.array([x.area for x in v1segs])\narea_v2 = np.array([x.area for x in v2segs])\nv1_idx = np.argsort(area_v1)[::-1]\nv2_idx = np.argsort(area_v2)[::-1]\ngroup_idx = np.zeros((len(v1_idx), len(v2_idx)))\ncombined_areas = np.concatenate((area_v1, area_v2))\ncombined_idx = np.argsort(combined_areas)\n\n\n\n\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Weight initializers for use with layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import random_ops\n\n\n__all__ = [\n 'xavier_initializer',\n 'xavier_initializer_conv2d',\n 'variance_scaling_initializer'\n]\n\n\ndef xavier_initializer(\n shape,\n uniform=True,\n seed=None,\n dtype=dtypes.float32,\n mask=None):\n 
\"\"\"Returns an initializer performing \"Xavier\" initialization for weights.\n\n This function implements the weight initialization from:\n\n Xavier Glorot and Yoshua Bengio (2010):\n [Understanding the difficulty of training deep feedforward neural\n networks. International conference on artificial intelligence and\n statistics.](\n http://www.jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)\n\n This initializer is designed to keep the scale of the gradients roughly the\n same in all layers. In uniform distribution this ends up being the range:\n `x = sqrt(6. / (in + out)); [-x, x]` and for normal distribution a standard\n deviation of `sqrt(2. / (in + out))` is used.\n\n Args:\n uniform: Whether to use uniform or normal random initialization.\n seed: A Python integer. Used to create random seeds. See\n @{tf.set_random_seed} for behavior.\n dtype: The data type. Only floating point types are supported.\n\n Returns:\n An initializer for a weight matrix.\n \"\"\"\n return variance_scaling_initializer(\n shape,\n factor=1.0,\n mode='FAN_AVG',\n uniform=uniform,\n seed=seed,\n dtype=dtype,\n mask=mask)\n\n\nxavier_initializer_conv2d = xavier_initializer\n\n\ndef variance_scaling_initializer(\n shape,\n factor=2.0,\n mode='FAN_IN',\n uniform=False,\n seed=None,\n dtype=dtypes.float32,\n mask=None):\n \"\"\"Returns an initializer that generates tensors without scaling variance.\n\n When initializing a deep network, it is in principle advantageous to keep\n the scale of the input variance constant, so it does not explode / diminish\n by reaching the final layer. 
This initializer use the following formula:\n\n ```python\n if mode='FAN_IN': # Count only number of input connections.\n n = fan_in\n elif mode='FAN_OUT': # Count only number of output connections.\n n = fan_out\n elif mode='FAN_AVG': # Average number of inputs and output connections.\n n = (fan_in + fan_out)/2.0\n truncated_normal(shape, 0.0, stddev=sqrt(factor / n))\n ```\n\n * To get [Delving Deep into Rectifiers](\n http://arxiv.org/pdf/1502.01852v1.pdf), use (Default):<br/>\n `factor=2.0 mode='FAN_IN' uniform=False`\n * To get [Convolutional Architecture for Fast Feature Embedding](\n http://arxiv.org/abs/1408.5093), use:<br/>\n `factor=1.0 mode='FAN_IN' uniform=True`\n * To get [Understanding the difficulty of training deep feedforward neural\n networks](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf),\n use:<br/>\n `factor=1.0 mode='FAN_AVG' uniform=True.`\n * To get `xavier_initializer` use either:<br/>\n `factor=1.0 mode='FAN_AVG' uniform=True`, or<br/>\n `factor=1.0 mode='FAN_AVG' uniform=False`.\n\n Args:\n factor: Float. A multiplicative factor.\n mode: String. 'FAN_IN', 'FAN_OUT', 'FAN_AVG'.\n uniform: Whether to use uniform or normal distributed random.\n seed: A Python integer. Used to create random seeds. See\n @{tf.set_random_seed} for behavior.\n dtype: The data type. 
Only floating point types are supported.\n\n Returns:\n An initializer that generates tensors with unit variance.\n\n Raises:\n ValueError: if `dtype` is not a floating point type.\n TypeError: if `mode` is not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG'].\n \"\"\"\n if not dtype.is_floating:\n raise TypeError(\n 'Cannot create initializer for non-floating point type.')\n if mode not in ['FAN_IN', 'FAN_OUT', 'FAN_AVG']:\n raise TypeError('Unknow mode %s [FAN_IN, FAN_OUT, FAN_AVG]', mode)\n\n if not dtype.is_floating:\n raise TypeError(\n 'Cannot create initializer for non-floating point type.')\n\n # Estimating fan_in and fan_out is not perfect, but we try.\n # This is the right thing for matrix multiply and convolutions.\n if shape:\n fan_in = float(shape[-2]) if len(shape) > 1 else float(shape[-1])\n fan_out = float(shape[-1])\n else:\n fan_in = 1.0\n fan_out = 1.0\n for dim in shape[:-2]:\n fan_in *= float(dim)\n fan_out *= float(dim)\n if mode == 'FAN_IN':\n # Count only number of input connections.\n n = fan_in\n elif mode == 'FAN_OUT':\n # Count only number of output connections.\n n = fan_out\n elif mode == 'FAN_AVG':\n # Average number of inputs and output connections.\n n = (fan_in + fan_out) / 2.0\n if uniform:\n # To get stddev = math.sqrt(factor / n) need to adjust for uniform.\n limit = math.sqrt(3.0 * factor / n)\n init = random_ops.random_uniform(\n shape,\n -limit,\n limit,\n dtype,\n seed=seed)\n else:\n # To get stddev = math.sqrt(factor / n) adjust for truncated.\n trunc_stddev = math.sqrt(1.3 * factor / n)\n init = random_ops.truncated_normal(\n shape,\n 0.0,\n trunc_stddev,\n dtype,\n seed=seed)\n if mask is not None:\n return mask * init\n else:\n return init\n\n\ndef Identity_init():\n \"\"\"Wrapper for TF Identity initialization.\"\"\"\n raise NotImplementedError\n\n", "\"\"\"Contextual model with partial filters.\"\"\"\nimport warnings\nimport numpy as np\nimport tensorflow as tf\nimport initialization\nfrom pooling import max_pool3d\n\n\n# 
# Dependency for symmetric weight ops is in models/layers/ff.py
class hGRU(object):
    """Three-level horizontal gated recurrent unit (hGRU) tower with feedback.

    Builds a TF-1.x static graph: feedforward conv/pool stages between three
    hGRU layers ('h1', 'h2', 'h3'), plus top-down feedback stages ('fb2',
    'fb1') that resize high-level activity back to lower-level grids. The
    recurrence is unrolled with ``tf.while_loop`` for ``timesteps`` steps in
    :meth:`build`.

    NOTE(review): ``update_params`` assigns the boolean aux value
    ``symmetric_weights`` onto the instance, which shadows the
    :meth:`symmetric_weights` method for attribute access via ``self``. The
    method is kept for external callers; internal code only reads the boolean.
    """

    def __getitem__(self, name):
        # Dict-style read access: hgru['foo'] == hgru.foo
        return getattr(self, name)

    def __contains__(self, name):
        # Dict-style membership test: 'foo' in hgru
        return hasattr(self, name)

    def __init__(
            self,
            layer_name,
            num_in_feats,
            timesteps,
            hgru_dhw,
            hgru_k,
            ff_conv_dhw,
            ff_conv_k,
            ff_conv_strides=[[1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],
            ff_pool_dhw=[[1, 2, 2], [1, 2, 2]],
            ff_pool_strides=[[1, 2, 2], [1, 2, 2]],
            fb_mode='transpose',
            fb_dhw=[[1, 2, 2], [1, 2, 2]],
            padding='SAME',
            peephole=False,
            aux=None,
            train=True):
        """Global initializations and settings.

        Args:
            layer_name: str prefix for all variable names created here.
            num_in_feats: int, channel count of the input volume.
            timesteps: int, number of recurrent iterations in ``build``.
            hgru_dhw: per-hGRU [depth, height, width] kernel sizes.
            hgru_k: per-hGRU channel counts.
            ff_conv_dhw / ff_conv_k / ff_conv_strides: feedforward conv specs.
            ff_pool_dhw / ff_pool_strides: feedforward pooling specs.
            fb_mode: feedback resize strategy, 'transpose' or
                'replicate_n_transpose' (see ``resize_x_to_y``).
            fb_dhw: feedback (transpose-conv) kernel sizes.
            padding: conv padding mode, 'SAME' or 'VALID'.
            peephole: if True, add feedback output to the previous hidden
                state instead of replacing it.
            aux: optional dict overriding entries of ``defaults()``.
            train: passed to batch norm as ``is_training``.

        Note: the list-valued defaults above are never mutated here; they are
        kept as-is for signature compatibility.
        """
        self.in_k = num_in_feats
        self.timesteps = timesteps
        self.padding = padding
        self.train = train
        self.layer_name = layer_name
        self.fb_mode = fb_mode  # 'transpose', 'replicate_n_transpose'
        self.peephole = peephole

        # Sort through and assign the auxilliary variables.
        # Py3 fix: dict.iteritems() does not exist in Python 3; items()
        # behaves identically for this use on both versions.
        default_vars = self.defaults()
        if aux is not None and isinstance(aux, dict):
            for k, v in aux.items():
                default_vars[k] = v
        self.update_params(default_vars)

        # Kernel shapes
        self.ff_conv_dhw = ff_conv_dhw
        self.ff_conv_k = ff_conv_k
        self.ff_conv_strides = ff_conv_strides
        self.ff_pool_dhw = ff_pool_dhw
        self.ff_pool_strides = ff_pool_strides
        self.hgru_dhw = hgru_dhw
        self.hgru_k = hgru_k
        self.fb_dhw = fb_dhw

        # Nonlinearities and initializations.
        # Py3 fix: basestring is Py2-only; str is the correct py3 check.
        if isinstance(self.recurrent_nl, str):
            self.recurrent_nl = self.interpret_nl(self.recurrent_nl)

        # Handle BN scope reuse
        if self.reuse:
            self.scope_reuse = tf.AUTO_REUSE
        else:
            self.scope_reuse = None
        self.param_initializer = {
            'moving_mean': tf.constant_initializer(0., dtype=self.dtype),
            'moving_variance': tf.constant_initializer(1., dtype=self.dtype),
            'gamma': tf.constant_initializer(0.1, dtype=self.dtype)
        }
        self.param_trainable = {
            'moving_mean': False,
            'moving_variance': False,
            'gamma': True
        }
        self.param_collections = {
            'moving_mean': None,  # [tf.GraphKeys.UPDATE_OPS],
            'moving_variance': None,  # [tf.GraphKeys.UPDATE_OPS],
            'gamma': None
        }

    def defaults(self):
        """A dictionary containing defaults for auxilliary variables.

        These are adjusted by a passed aux dict variable."""
        return {
            'lesion_alpha': False,
            'lesion_mu': False,
            'lesion_omega': False,
            'lesion_kappa': False,
            'dtype': tf.float32,
            'hidden_init': 'random',
            'gate_bias_init': 'chronos',
            'train': True,
            'recurrent_nl': tf.nn.tanh,
            'gate_nl': tf.nn.sigmoid,
            'ff_nl': tf.nn.elu,
            'normal_initializer': True,
            'symmetric_weights': False,
            'symmetric_gate_weights': False,
            'hgru_gate_dhw': [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]],  # Gate kernel size
            'hgru_dilations': [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1]],
            'gamma': True,  # Scale P
            'alpha': True,  # divisive eCRF
            'mu': True,  # subtractive eCRF
            'adapation': False,
            'reuse': False,
            'multiplicative_excitation': True,
            'readout': 'fb',  # l2 or fb
            'hgru_ids': ['h1', 'h2', 'h3', 'fb2', 'fb1'],  # Labels for the hGRUs
            'include_pooling': True,
            'resize_kernel': tf.image.ResizeMethod.BILINEAR,
            'batch_norm': False,  # Not working
        }

    def interpret_nl(self, nl_type):
        """Return activation function for the given string name."""
        if nl_type == 'tanh':
            return tf.nn.tanh
        elif nl_type == 'relu':
            return tf.nn.relu
        elif nl_type == 'elu':
            return tf.nn.elu
        elif nl_type == 'selu':
            return tf.nn.selu
        elif nl_type == 'leaky_relu':
            return tf.nn.leaky_relu
        elif nl_type == 'hard_tanh':
            return lambda z: tf.maximum(tf.minimum(z, 1), 0)
        else:
            raise NotImplementedError(nl_type)

    def update_params(self, kwargs):
        """Update the class attributes with kwargs.

        Py3 fix: iteritems() -> items()."""
        if kwargs is not None:
            for k, v in kwargs.items():
                setattr(self, k, v)

    def symmetric_weights(self, w, name):
        """Apply symmetric weight sharing across the last two kernel axes."""
        conv_w_t = tf.transpose(w, (2, 3, 0, 1))
        conv_w_symm = 0.5 * (conv_w_t + tf.transpose(conv_w_t, (1, 0, 2, 3)))
        conv_w = tf.transpose(conv_w_symm, (2, 3, 0, 1), name=name)
        return conv_w

    def prepare_tensors(self):
        """ Prepare recurrent/forward weight matrices.
        (np.prod([h, w, k]) / 2) - k params in the surround filter
        """
        # FEEDFORWARD AND FEEDBACK KERNELS
        lower_feats = self.in_k
        for idx, (higher_feats, ff_dhw, fb_dhw) in enumerate(
                zip(self.ff_conv_k,
                    self.ff_conv_dhw,
                    self.fb_dhw)):
            # Feedback (transpose-conv) kernel maps higher -> lower feats.
            setattr(
                self,
                'fb_kernel_%s' % idx,
                tf.get_variable(
                    name='%s_fb_kernel__%s' % (self.layer_name, idx),
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        shape=fb_dhw + [lower_feats, higher_feats],
                        dtype=self.dtype,
                        uniform=self.normal_initializer),
                    trainable=True))
            setattr(
                self,
                'fb_bias_%s' % idx,
                tf.get_variable(
                    name='%s_fb_bias_%s' % (self.layer_name, idx),
                    dtype=self.dtype,
                    initializer=tf.ones([lower_feats], dtype=self.dtype),
                    trainable=True))
            # Feedforward conv kernel maps lower -> higher feats.
            setattr(
                self,
                'ff_kernel_%s' % idx,
                tf.get_variable(
                    name='%s_ff_kernel_%s' % (self.layer_name, idx),
                    dtype=self.dtype,
                    initializer=initialization.xavier_initializer(
                        shape=ff_dhw + [lower_feats, higher_feats],
                        dtype=self.dtype,
                        uniform=self.normal_initializer),
                    trainable=True))
            setattr(
                self,
                'ff_bias_%s' % idx,
                tf.get_variable(
                    name='%s_ff_bias_%s' % (self.layer_name, idx),
                    dtype=self.dtype,
                    initializer=tf.ones([higher_feats], dtype=self.dtype),
                    trainable=True))
            lower_feats = higher_feats

        # HGRU KERNELS
        for idx, layer in enumerate(self.hgru_ids):
            with tf.variable_scope(
                    '%s_hgru_weights_%s' % (self.layer_name, layer)):
                # Horizontal (recurrent) kernel.
                setattr(
                    self,
                    'horizontal_kernels_%s' % layer,
                    tf.get_variable(
                        name='%s_horizontal' % self.layer_name,
                        dtype=self.dtype,
                        initializer=initialization.xavier_initializer(
                            shape=self.hgru_dhw[idx] + [self.hgru_k[idx], self.hgru_k[idx]],
                            dtype=self.dtype,
                            uniform=self.normal_initializer),
                        trainable=True))
                # Input-gate kernel.
                g_shape = self.hgru_gate_dhw[idx] + [self.hgru_k[idx], self.hgru_k[idx]]
                setattr(
                    self,
                    'gain_kernels_%s' % layer,
                    tf.get_variable(
                        name='%s_gain' % self.layer_name,
                        dtype=self.dtype,
                        trainable=True,
                        initializer=initialization.xavier_initializer(
                            shape=g_shape,
                            dtype=self.dtype,
                            uniform=self.normal_initializer,
                            mask=None)))
                # Output-mix-gate kernel.
                m_shape = self.hgru_gate_dhw[idx] + [self.hgru_k[idx], self.hgru_k[idx]]
                setattr(
                    self,
                    'mix_kernels_%s' % layer,
                    tf.get_variable(
                        name='%s_mix' % self.layer_name,
                        dtype=self.dtype,
                        trainable=True,
                        initializer=initialization.xavier_initializer(
                            shape=m_shape,
                            dtype=self.dtype,
                            uniform=self.normal_initializer,
                            mask=None)))

                # Gain bias: 'chronos' draws -log(U[1, T-1]) as in
                # Tallec & Ollivier; mix bias is its negation below.
                bias_shape = [1, 1, 1, 1, self.hgru_k[idx]]
                if self.gate_bias_init == 'chronos':
                    bias_init = -tf.log(
                        tf.random_uniform(
                            bias_shape,
                            minval=1,
                            maxval=self.timesteps - 1,
                            dtype=self.dtype))
                else:
                    bias_init = tf.ones(bias_shape, dtype=self.dtype)
                setattr(
                    self,
                    'gain_bias_%s' % layer,
                    tf.get_variable(
                        name='%s_gain_bias' % self.layer_name,
                        dtype=self.dtype,
                        trainable=True,
                        initializer=bias_init))
                if self.gate_bias_init == 'chronos':
                    bias_init = -bias_init
                else:
                    bias_init = tf.ones(bias_shape, dtype=self.dtype)
                setattr(
                    self,
                    'mix_bias_%s' % layer,
                    tf.get_variable(
                        name='%s_mix_bias' % self.layer_name,
                        dtype=self.dtype,
                        trainable=True,
                        initializer=bias_init))

                # Divisive params
                if self.alpha and not self.lesion_alpha:
                    setattr(
                        self,
                        'alpha_%s' % layer,
                        tf.get_variable(
                            name='%s_alpha' % self.layer_name,
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=bias_shape,
                                dtype=self.dtype,
                                uniform=self.normal_initializer,
                                mask=None)))
                elif self.lesion_alpha:
                    setattr(
                        self,
                        'alpha_%s' % layer,
                        tf.constant(0.))
                else:
                    setattr(
                        self,
                        'alpha_%s' % layer,
                        tf.constant(1.))

                if self.mu and not self.lesion_mu:
                    setattr(
                        self,
                        'mu_%s' % layer,
                        tf.get_variable(
                            name='%s_mu' % self.layer_name,
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=bias_shape,
                                dtype=self.dtype,
                                uniform=self.normal_initializer,
                                mask=None)))
                elif self.lesion_mu:
                    setattr(
                        self,
                        'mu_%s' % layer,
                        tf.constant(0.))
                else:
                    setattr(
                        self,
                        'mu_%s' % layer,
                        tf.constant(1.))

                if self.gamma:
                    setattr(
                        self,
                        'gamma_%s' % layer,
                        tf.get_variable(
                            name='%s_gamma' % self.layer_name,
                            dtype=self.dtype,
                            initializer=initialization.xavier_initializer(
                                shape=bias_shape,
                                dtype=self.dtype,
                                uniform=self.normal_initializer,
                                mask=None)))
                else:
                    setattr(
                        self,
                        'gamma_%s' % layer,
                        tf.constant(1.))

                if self.multiplicative_excitation:
                    if self.lesion_kappa:
                        setattr(
                            self,
                            'kappa_%s' % layer,
                            tf.constant(0.))
                    else:
                        setattr(
                            self,
                            'kappa_%s' % layer,
                            tf.get_variable(
                                name='%s_kappa' % self.layer_name,
                                dtype=self.dtype,
                                initializer=initialization.xavier_initializer(
                                    shape=bias_shape,
                                    dtype=self.dtype,
                                    uniform=self.normal_initializer,
                                    mask=None)))
                    if self.lesion_omega:
                        setattr(
                            self,
                            'omega_%s' % layer,
                            tf.constant(0.))
                    else:
                        setattr(
                            self,
                            'omega_%s' % layer,
                            tf.get_variable(
                                name='%s_omega' % self.layer_name,
                                dtype=self.dtype,
                                initializer=initialization.xavier_initializer(
                                    shape=bias_shape,
                                    dtype=self.dtype,
                                    uniform=self.normal_initializer,
                                    mask=None)))
                else:
                    setattr(
                        self,
                        'kappa_%s' % layer,
                        tf.constant(1.))
                    setattr(
                        self,
                        'omega_%s' % layer,
                        tf.constant(1.))
                if self.adapation:
                    # Per-timestep adaptation gains; indexed by i0 in hgru_ops.
                    setattr(
                        self,
                        'eta_%s' % layer,
                        tf.get_variable(
                            name='%s_eta' % self.layer_name,
                            dtype=self.dtype,
                            initializer=tf.random_uniform(
                                [self.timesteps], dtype=tf.float32)))
                # Lesions override any variables created above.
                if self.lesion_omega:
                    setattr(
                        self,
                        'omega_%s' % layer,
                        tf.constant(0.))
                if self.lesion_kappa:
                    setattr(
                        self,
                        'kappa_%s' % layer,
                        tf.constant(0.))
        if self.reuse:
            # Make the batchnorm variables up front so later
            # batch_norm(reuse=True) calls can find them.
            scopes = ['g1_bn', 'g2_bn', 'c1_bn', 'c2_bn']
            bn_vars = ['moving_mean', 'moving_variance', 'gamma']
            for s in scopes:
                with tf.variable_scope(s):
                    for v in bn_vars:
                        tf.get_variable(
                            trainable=self.param_trainable[v],
                            name=v,
                            dtype=self.dtype,
                            shape=[self.hgru_k[idx]],
                            collections=self.param_collections[v],
                            initializer=self.param_initializer[v])
            self.param_initializer = None

    def resize_x_to_y(
            self,
            x,
            y,
            kernel,
            bias,
            strides,
            mode='transpose',
            use_bias=True):
        """Resize activity x to the size of y using interpolation.

        Modes: 'resize' (pure image resize), 'transpose' (strided
        conv3d_transpose + bias + ff_nl), 'replicate_n_transpose'
        (resize then stride-1 conv3d_transpose + bias + ff_nl).
        """
        y_size = y.get_shape().as_list()
        if mode == 'resize':
            return tf.image.resize_images(
                x,
                y_size[:-1],
                kernel,
                align_corners=True)
        elif mode == 'transpose':
            resized = tf.nn.conv3d_transpose(
                value=x,
                filter=kernel,
                output_shape=y_size,
                strides=[1] + strides + [1],
                padding=self.padding,
                name='resize_x_to_y')
            resized = tf.nn.bias_add(
                resized,
                bias)
            resized = self.ff_nl(resized)
            return resized
        elif mode == 'replicate_n_transpose':
            resized = tf.image.resize_images(
                x,
                y_size[:-1],
                kernel,
                align_corners=False)
            resized = tf.nn.conv3d_transpose(
                value=resized,
                filter=kernel,
                output_shape=y_size,
                strides=[1, 1, 1, 1, 1],
                padding='SAME',
                name='resize_x_to_y')
            resized = tf.nn.bias_add(
                resized,
                bias)
            resized = self.ff_nl(resized)
            return resized
        else:
            raise NotImplementedError(mode)

    def conv_3d_op(
            self,
            data,
            weights,
            strides,
            symmetric_weights=False,
            dilations=None):
        """3D convolutions for hgru.

        When symmetric_weights is set, the Conv3D gradient is overridden
        with the custom 'SymmetricConv3D' op registered elsewhere.
        """
        if dilations is None:
            dilations = [1, 1, 1, 1, 1]
        w_shape = [int(w) for w in weights.get_shape()]
        if len(w_shape) > 1 and int(w_shape[-2]) > 1:
            # Full convolutions
            if symmetric_weights:
                g = tf.get_default_graph()
                with g.gradient_override_map({'Conv3D': 'SymmetricConv3D'}):
                    activities = tf.nn.conv3d(
                        data,
                        weights,
                        strides,
                        padding=self.padding)
                    # TODO (jk): removed dilations=dilations to accommodate r1.4
            else:
                activities = tf.nn.conv3d(
                    data,
                    weights,
                    strides,
                    padding=self.padding)
                # TODO (jk): removed dilations=dilations to accommodate r1.4
        else:
            raise RuntimeError
        return activities

    def circuit_input(self, h2, layer, var_scope, layer_idx):
        """Calculate gain and inh horizontal activities."""
        gain_kernels = getattr(self, 'gain_kernels_%s' % layer)
        gain_bias = getattr(self, 'gain_bias_%s' % layer)
        horizontal_kernels = getattr(self, 'horizontal_kernels_%s' % layer)
        g1_intermediate = self.conv_3d_op(
            data=h2,
            weights=gain_kernels,
            strides=[1, 1, 1, 1, 1],
            symmetric_weights=self.symmetric_gate_weights,
            dilations=self.hgru_dilations[layer_idx])
        with tf.variable_scope(
                '%s/g1_bn' % var_scope,
                reuse=self.scope_reuse) as scope:
            g1_intermediate = tf.contrib.layers.batch_norm(
                inputs=g1_intermediate + gain_bias,
                scale=True,
                center=False,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                scope=scope,
                reuse=self.reuse,
                is_training=self.train)
        g1 = self.gate_nl(g1_intermediate)
        # Gate the recurrent state before computing horizontal input.
        h2 *= g1

        # Horizontal activities
        c1 = self.conv_3d_op(
            data=h2,
            weights=horizontal_kernels,
            strides=[1, 1, 1, 1, 1],
            symmetric_weights=self.symmetric_weights,
            dilations=self.hgru_dilations[layer_idx])
        return c1, g1

    def circuit_output(self, h1, layer, var_scope, layer_idx):
        """Calculate mix and exc horizontal activities."""
        mix_kernels = getattr(self, 'mix_kernels_%s' % layer)
        mix_bias = getattr(self, 'mix_bias_%s' % layer)
        horizontal_kernels = getattr(self, 'horizontal_kernels_%s' % layer)
        g2_intermediate = self.conv_3d_op(
            data=h1,
            weights=mix_kernels,
            strides=[1, 1, 1, 1, 1],
            symmetric_weights=self.symmetric_gate_weights,
            dilations=self.hgru_dilations[layer_idx])

        with tf.variable_scope(
                '%s/g2_bn' % var_scope,
                reuse=self.scope_reuse) as scope:
            g2_intermediate = tf.contrib.layers.batch_norm(
                inputs=g2_intermediate + mix_bias,
                scale=True,
                center=False,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                scope=scope,
                reuse=self.reuse,
                is_training=self.train)
        g2 = self.gate_nl(g2_intermediate)

        # Horizontal activities
        c2 = self.conv_3d_op(
            data=h1,
            weights=horizontal_kernels,
            strides=[1, 1, 1, 1, 1],
            symmetric_weights=self.symmetric_weights,
            dilations=self.hgru_dilations[layer_idx])
        return c2, g2

    def input_integration(self, x, c1, h2, layer):
        """Integration on the input: x minus gated inhibition."""
        alpha = getattr(self, 'alpha_%s' % layer)
        mu = getattr(self, 'mu_%s' % layer)
        return self.recurrent_nl(x - ((alpha * h2 + mu) * c1))

    def output_integration(self, h1, c2, g2, h2, layer):
        """Integration on the output."""
        if self.multiplicative_excitation:
            # Multiplicative gating I * (P + Q)
            gamma = getattr(self, 'gamma_%s' % layer)
            kappa = getattr(self, 'kappa_%s' % layer)
            omega = getattr(self, 'omega_%s' % layer)
            e = gamma * c2
            a = kappa * (h1 + e)
            m = omega * (h1 * e)
            h2_hat = self.recurrent_nl(a + m)
        else:
            # Additive gating I + P + Q
            gamma = getattr(self, 'gamma_%s' % layer)
            h2_hat = self.recurrent_nl(
                h1 + gamma * c2)
        # Convex mix of old state and candidate via the mix gate g2.
        return (g2 * h2) + ((1 - g2) * h2_hat)

    def hgru_ops(self, i0, x, h2, layer, layer_idx):
        """hGRU body: one recurrent update for a single layer."""
        var_scope = '%s_hgru_weights' % layer
        # Circuit input receives recurrent output h2
        c1, g1 = self.circuit_input(
            h2=h2,
            layer=layer,
            var_scope=var_scope,
            layer_idx=layer_idx)
        with tf.variable_scope(
                '%s/c1_bn' % var_scope,
                reuse=self.scope_reuse) as scope:
            c1 = tf.contrib.layers.batch_norm(
                inputs=c1,
                scale=True,
                center=False,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                scope=scope,
                reuse=self.reuse,
                is_training=self.train)

        # Calculate input (-) integration: h1 (4)
        h1 = self.input_integration(
            x=x,
            c1=c1,
            h2=h2,
            layer=layer)

        # Circuit output receives recurrent input h1
        c2, g2 = self.circuit_output(
            h1=h1,
            layer=layer,
            var_scope=var_scope,
            layer_idx=layer_idx)

        with tf.variable_scope(
                '%s/c2_bn' % var_scope,
                reuse=self.scope_reuse) as scope:
            c2 = tf.contrib.layers.batch_norm(
                inputs=c2,
                scale=True,
                center=False,
                fused=True,
                renorm=False,
                param_initializers=self.param_initializer,
                updates_collections=None,
                scope=scope,
                reuse=self.reuse,
                is_training=self.train)

        # Calculate output (+) integration: h2 (8, 9)
        h2 = self.output_integration(
            h1=h1,
            c2=c2,
            g2=g2,
            h2=h2,
            layer=layer)

        if self.adapation:
            # Scale by this timestep's adaptation gain.
            eta = getattr(self, 'eta_%s' % layer)
            e = tf.gather(eta, i0, axis=-1)
            h2 *= e
        return h1, h2

    def full(self, i0, x, l1_h2, l2_h2, l3_h2):
        """hGRU body.
        Take the recurrent h2 from a low level and imbue it with
        information from a high layer. This means to treat the lower
        layer h2 as the X and the higher layer h2 as the recurrent state.
        This will serve as I/E from the high layer along with feedback
        kernels.

        h1 -> conv -> h2 -> conv -> h3 -> fb -> h2 h2 -> fb -> h1 h1 h1
        """
        # LAYER 1
        _, l1_h2 = self.hgru_ops(
            i0=i0,
            x=x,
            h2=l1_h2,
            layer='h1',
            layer_idx=0)

        # Intermediate FF
        if self.batch_norm:
            with tf.variable_scope(
                    'l1_h2_bn',
                    reuse=self.scope_reuse) as scope:
                l1_h2 = tf.contrib.layers.batch_norm(
                    inputs=l1_h2,
                    scale=True,
                    center=True,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)

        # Pool the preceding layer's drive
        if self.include_pooling:
            processed_l1_h2 = max_pool3d(
                bottom=l1_h2,
                k=self.ff_pool_dhw[0],
                s=self.ff_pool_strides[0],
                name='ff_pool_%s' % 0)
        else:
            processed_l1_h2 = l1_h2

        # LAYER 2
        idx = 0
        processed_l1_h2 = tf.nn.conv3d(
            input=processed_l1_h2,
            filter=getattr(self, 'ff_kernel_%s' % idx),
            strides=self.ff_conv_strides[idx],
            padding=self.padding)
        processed_l1_h2 = tf.nn.bias_add(
            processed_l1_h2,
            getattr(self, 'ff_bias_%s' % idx))
        processed_l1_h2 = self.ff_nl(processed_l1_h2)
        if self.batch_norm:
            with tf.variable_scope(
                    'l1_h2_bn_ff_%s' % idx,
                    reuse=self.scope_reuse) as scope:
                processed_l1_h2 = tf.contrib.layers.batch_norm(
                    inputs=processed_l1_h2,
                    scale=True,
                    center=True,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)
        _, l2_h2 = self.hgru_ops(
            i0=i0,
            x=processed_l1_h2,
            h2=l2_h2,
            layer='h2',
            layer_idx=1)
        if self.batch_norm:
            with tf.variable_scope(
                    'l2_h2_bn',
                    reuse=self.scope_reuse) as scope:
                l2_h2 = tf.contrib.layers.batch_norm(
                    inputs=l2_h2,
                    scale=True,
                    center=True,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)

        # Pool the preceding layer's drive
        if self.include_pooling:
            processed_l2_h2 = max_pool3d(
                bottom=l2_h2,
                k=self.ff_pool_dhw[1],
                s=self.ff_pool_strides[1],
                name='ff_pool_%s' % idx)
        else:
            processed_l2_h2 = l2_h2

        # LAYER 3
        idx = 1
        processed_l2_h2 = tf.nn.conv3d(
            input=processed_l2_h2,
            filter=getattr(self, 'ff_kernel_%s' % idx),
            strides=self.ff_conv_strides[idx],
            padding=self.padding)
        processed_l2_h2 = tf.nn.bias_add(
            processed_l2_h2,
            getattr(self, 'ff_bias_%s' % idx))
        processed_l2_h2 = self.ff_nl(processed_l2_h2)
        if self.batch_norm:
            with tf.variable_scope(
                    'l3_h2_bn_ff_%s' % idx,
                    reuse=self.scope_reuse) as scope:
                processed_l2_h2 = tf.contrib.layers.batch_norm(
                    inputs=processed_l2_h2,
                    scale=True,
                    center=True,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)
        _, l3_h2 = self.hgru_ops(
            i0=i0,
            x=processed_l2_h2,
            h2=l3_h2,
            layer='h3',
            layer_idx=1)
        if self.batch_norm:
            with tf.variable_scope(
                    'l3_h2_bn',
                    reuse=self.scope_reuse) as scope:
                l3_h2 = tf.contrib.layers.batch_norm(
                    inputs=l3_h2,
                    scale=True,
                    center=True,
                    fused=True,
                    renorm=False,
                    param_initializers=self.param_initializer,
                    updates_collections=None,
                    scope=scope,
                    reuse=self.reuse,
                    is_training=self.train)

        # l3-l2 feedback (FEEDBACK KERNEL is 2x channels)
        _, temp_l2_h2 = self.hgru_ops(
            i0=i0,
            x=l2_h2,
            h2=self.resize_x_to_y(x=l3_h2, y=l2_h2,
                                  kernel=self.fb_kernel_1,
                                  bias=self.fb_bias_1,
                                  mode=self.fb_mode,
                                  strides=self.ff_pool_strides[1]),
            layer='fb2',
            layer_idx=3)

        # Peephole
        if self.peephole:
            l2_h2 = temp_l2_h2 + l2_h2
        else:
            l2_h2 = temp_l2_h2

        # l2 horizontal postprocessing
        _, l2_h2 = self.hgru_ops(
            i0=i0,
            x=l2_h2,
            h2=l2_h2,
            layer='h2',
            layer_idx=1)
        _, l2_h2 = self.hgru_ops(
            i0=i0,
            x=l2_h2,
            h2=l2_h2,
            layer='h2',
            layer_idx=1)

        # l2-l1 feedback (FEEDBACK KERNEL is 2x channels)
        _, temp_l1_h2 = self.hgru_ops(
            i0=i0,
            x=l1_h2,
            h2=self.resize_x_to_y(x=l2_h2, y=l1_h2,
                                  kernel=self.fb_kernel_0,
                                  bias=self.fb_bias_0,
                                  mode=self.fb_mode,
                                  strides=self.ff_pool_strides[0]),
            layer='fb1',
            layer_idx=4)

        # Peephole
        if self.peephole:
            l1_h2 = temp_l1_h2 + l1_h2
        else:
            l1_h2 = temp_l1_h2

        # l1 horizontal postprocessing
        _, l1_h2 = self.hgru_ops(
            i0=i0,
            x=x,
            h2=l1_h2,
            layer='h1',
            layer_idx=0)
        _, l1_h2 = self.hgru_ops(
            i0=i0,
            x=x,
            h2=l1_h2,
            layer='h1',
            layer_idx=0)

        # Iterate loop
        i0 += 1
        return i0, x, l1_h2, l2_h2, l3_h2

    def condition(self, i0, x, l1_h2, l2_h2, l3_h2):
        """While loop halting condition."""
        return i0 < self.timesteps

    def compute_shape(self, in_length, stride):
        """Ceil-divide a spatial size by a stride.

        Py3 fix: use floor division so shape entries stay ints; '/' would
        return floats on Python 3 and break tf.zeros/tf.random_normal.
        """
        if in_length % stride == 0:
            return in_length // stride
        else:
            return in_length // stride + 1

    def build(self, x):
        """Run the backprop version of the Circuit."""
        self.prepare_tensors()
        i0 = tf.constant(0)

        # Calculate l2 hidden state size
        x_shape = x.get_shape().as_list()
        if self.include_pooling and len(self.ff_conv_k):
            if len(self.ff_conv_k):
                final_dim = self.ff_conv_k[-1]
            else:
                final_dim = x_shape[-1]
            l2_shape = [
                x_shape[0],
                self.compute_shape(x_shape[1], self.ff_pool_strides[0][0]),
                self.compute_shape(x_shape[2], self.ff_pool_strides[0][1]),
                self.compute_shape(x_shape[3], self.ff_pool_strides[0][2]),
                final_dim]
            l3_shape = [
                x_shape[0],
                self.compute_shape(l2_shape[1], self.ff_pool_strides[1][0]),
                self.compute_shape(l2_shape[2], self.ff_pool_strides[1][1]),
                self.compute_shape(l2_shape[3], self.ff_pool_strides[1][2]),
                final_dim]
        else:
            # NOTE(review): this branch leaves l3_shape undefined and would
            # raise below; only the pooled configuration is exercised.
            l2_shape = tf.identity(x_shape)

        # Initialize hidden layer activities
        if self.hidden_init == 'identity':
            l1_h2 = tf.identity(x)
            l2_h2 = tf.zeros(l2_shape, dtype=self.dtype)
            l3_h2 = tf.zeros(l3_shape, dtype=self.dtype)
        elif self.hidden_init == 'random':
            l1_h2 = tf.random_normal(x_shape, dtype=self.dtype)
            l2_h2 = tf.random_normal(l2_shape, dtype=self.dtype)
            l3_h2 = tf.random_normal(l3_shape, dtype=self.dtype)
        elif self.hidden_init == 'zeros':
            l1_h2 = tf.zeros(x_shape, dtype=self.dtype)
            l2_h2 = tf.zeros(l2_shape, dtype=self.dtype)
            l3_h2 = tf.zeros(l3_shape, dtype=self.dtype)
        else:
            raise RuntimeError

        # While loop
        elems = [
            i0,
            x,
            l1_h2,
            l2_h2,
            l3_h2
        ]
        returned = tf.while_loop(
            self.condition,
            self.full,
            loop_vars=elems,
            back_prop=True,
            swap_memory=False)

        # Prepare output
        i0, x, l1_h2, l2_h2, l3_h2 = returned
        return l1_h2
import model\n\n# Note: this model was originally trained with conv3d layers initialized with\n# TruncatedNormalInitializedVariable with stddev = 0.01.\ndef _predict_object_mask(input_patches, input_seed, depth=9, is_training=True, adabn=False):\n \"\"\"Computes single-object mask prediction.\"\"\"\n\n train_bn = True\n bn_decay = 0.95\n if not is_training:\n if not adabn:\n bn_decay = 1.0\n train_bn = False\n\n in_k = 14\n\n if input_patches.get_shape().as_list()[-1] == 2:\n image = tf.expand_dims(input_patches[:,:,:,:,0], axis=4)\n membrane = tf.expand_dims(input_patches[:,:,:,:,1], axis=4)\n image_k = in_k-1\n else:\n image = input_patches\n image_k = in_k\n\n x = tf.contrib.layers.conv3d(image,\n scope='conv0_a',\n num_outputs=image_k,\n kernel_size=(1, 12, 12),\n padding='SAME')\n if input_patches.get_shape().as_list()[-1] == 2:\n print('FFN-hgru-v5: using membrane as input')\n membrane = membrane*33 + 128.\n x = tf.concat([x, membrane], axis=4)\n\n from .layers.recurrent import htd_cnn_3l\n with tf.variable_scope('htd_net'):\n hgru_net = htd_cnn_3l.hGRU(var_scope='htd_net',\n timesteps=8,\n dtype=tf.float32,\n use_3d=True,\n train=is_training,\n train_bn=train_bn,\n use_in=False,\n bn_decay=bn_decay,\n in_k=in_k,\n\n hgru1_fsiz=[1, 7, 7],\n hgru2_fsiz=[3, 5, 5],\n hgru3_fsiz=[3, 3, 3],\n hgru_td3_fsiz=[1, 1, 1],\n hgru_td2_fsiz=[1, 1, 1],\n hgru_td1_fsiz=[1, 1, 1],\n hgru_h1_nl=tf.nn.relu,\n hgru_h2_nl=tf.nn.relu,\n hgru_bistream_weights='independent',\n hgru_in_place_integration=False, #########\n hgru_symmetric_weights=True,\n hgru_soft_coefficients=True,\n belly_up_td=False,\n\n ds_fsiz_list=[[1, 7, 7], [1, 5, 5], [1, 3, 3]],\n ds_conv_repeat=1,\n ds_k_list=[18, 18, 18],\n ds_pool_list=[[1, 2, 2], [2, 2, 2], [1, 2, 2]],\n ds_stride_list=[[1, 2, 2], [2, 2, 2], [1, 2, 2]],\n use_trainable_states=False)\n\n net = hgru_net.build(x, ffn_seed=input_seed)\n\n finalbn_param_initializer = {\n 'moving_mean': tf.constant_initializer(0., dtype=tf.float32),\n 
'moving_variance': tf.constant_initializer(1., dtype=tf.float32),\n 'gamma': tf.constant_initializer(0.1, dtype=tf.float32)\n }\n net = tf.nn.relu(net)\n net = tf.contrib.layers.batch_norm(\n inputs=net,\n scale=True,\n center=True,\n fused=True,\n renorm=False,\n decay=bn_decay,\n param_initializers=finalbn_param_initializer,\n is_training=train_bn)\n logits = tf.contrib.layers.conv3d(net,\n scope='conv_lom1',\n num_outputs=in_k,\n kernel_size=(1, 1, 1),\n activation_fn=None)\n logits = tf.contrib.layers.batch_norm(\n inputs=logits,\n scale=True,\n center=False,\n fused=True,\n renorm=False,\n decay=bn_decay,\n param_initializers=finalbn_param_initializer,\n is_training=train_bn)\n logits = tf.nn.relu(logits)\n logits = tf.contrib.layers.conv3d(logits,\n scope='conv_lom2',\n num_outputs=1,\n kernel_size=(1, 1, 1),\n activation_fn=None)\n import numpy as np\n extras = 0\n hgru_w = 0\n ff_fb = 0\n for x in tf.trainable_variables():\n prod = np.prod(x.get_shape().as_list())\n if ('hgru' in x.name):\n if ('w' in x.name):\n hgru_w += prod/2\n elif ('mlp' in x.name):\n hgru_w += prod\n else:\n print(x.name + ' '+ str(prod))\n extras += prod\n elif ('ff' in x.name) | ('fb' in x.name) | ('conv0' in x.name) | ('conv_lom' in x.name):\n if ('weight' in x.name):\n ff_fb += prod\n else:\n print(x.name + ' ' + str(prod))\n extras += prod\n else:\n print(x.name + ' ' + str(prod))\n extras += prod\n hgru_w = int(hgru_w)\n print('>>>>>>>>>>>>>>>>>>>>>>TRAINABLE VARS: ' + 'horizontal('+str(hgru_w)+') vertical('+str(ff_fb)+') extras('+str(extras)+')')\n print('>>>>>>>>>>>>>>>>>>>>>>TRAINABLE VARS: ' + 'total(' + str(hgru_w+ff_fb+extras) + ')')\n print('>>>>>>>>>>>>>>>>>>>>>>BN-TRAIN: ' + str(train_bn))\n return logits\n\n\nclass ConvStack3DFFNModel(model.FFNModel):\n dim = 3\n\n def __init__(self, with_membrane=False, fov_size=None, optional_output_size=None, deltas=None, batch_size=None, depth=9, is_training=True, adabn=False, reuse=False, tag='', TA=None):\n 
super(ConvStack3DFFNModel, self).__init__(deltas, batch_size, with_membrane, validation_mode=not(is_training), tag=tag)\n\n self.optional_output_size = optional_output_size\n self.set_uniform_io_size(fov_size)\n self.depth = depth\n self.reuse=reuse\n self.TA=TA\n self.is_training=is_training\n self.adabn=adabn\n\n def define_tf_graph(self):\n self.show_center_slice(self.input_seed)\n\n if self.input_patches is None:\n self.input_patches = tf.placeholder(\n tf.float32, [1] + list(self.input_image_size[::-1]) +[1],\n name='patches')\n\n with tf.variable_scope('seed_update', reuse=self.reuse):\n logit_update = _predict_object_mask(self.input_patches, self.input_seed,\n depth=self.depth, is_training=self.is_training, adabn=self.adabn)\n if self.optional_output_size is not None:\n dx = self.input_seed_size[0] - self.optional_output_size[0]\n dy = self.input_seed_size[1] - self.optional_output_size[1]\n dz = self.input_seed_size[2] - self.optional_output_size[2]\n logit_update_cropped = logit_update[:,\n dz // 2: -(dz - dz // 2),\n dy // 2: -(dy - dy // 2),\n dx // 2: -(dx - dx // 2),\n :]\n logit_update_padded = tf.pad(logit_update_cropped, [[0, 0],\n [dz // 2, dz - dz // 2],\n [dy // 2, dy - dy // 2],\n [dx // 2, dx - dx // 2],\n [0, 0]])\n mask = tf.pad(tf.ones_like(logit_update_cropped), [[0, 0],\n [dz // 2, dz - dz // 2],\n [dy // 2, dy - dy // 2],\n [dx // 2, dx - dx // 2],\n [0, 0]])\n self.loss_weights *= mask\n logit_seed = self.update_seed(self.input_seed, logit_update_padded)\n else:\n logit_seed = self.update_seed(self.input_seed, logit_update)\n\n # Make predictions available, both as probabilities and logits.\n self.logits = logit_seed\n\n if self.labels is not None:\n self.logistic = tf.sigmoid(logit_seed)\n self.set_up_sigmoid_pixelwise_loss(logit_seed)\n self.show_center_slice(logit_seed)\n self.show_center_slice(self.labels, sigmoid=False)\n if self.TA is None:\n self.set_up_optimizer(max_gradient_entry_mag=0.0)\n else:\n 
self.set_up_optimizer(max_gradient_entry_mag=0.0, TA=self.TA)\n\n self.add_summaries()\n\n # ADABN: Add only non-bn vars to saver\n var_list = tf.global_variables()\n moving_ops_names = ['moving_mean', 'moving_variance']\n # var_list = [\n # x for x in var_list\n # if x.name.split('/')[-1].split(':')[0] + ':'\n # not in moving_ops_names]\n # self.saver = tf.train.Saver(\n # var_list=var_list,\n # keep_checkpoint_every_n_hours=100)\n # ADABN: Define bn-var initializer to reset moments every iteration\n moment_list = [x for x in tf.global_variables() if (moving_ops_names[0] in x.name) | (moving_ops_names[1] in x.name)]\n self.moment_list = None\n # self.moment_list = moment_list\n self.ada_initializer = tf.variables_initializer( var_list=moment_list)\n\n self.fgru_moment_list = [x for x in moment_list if 'recurrent' in x.name]\n self.fgru_ada_initializer = tf.variables_initializer(var_list=self.fgru_moment_list)\n self.ext_moment_list = [x for x in moment_list if 'recurrent' not in x.name]\n self.ext_ada_initializer = tf.variables_initializer(var_list=self.ext_moment_list)\n\n self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=1)\n" ]
[ [ "numpy.concatenate", "numpy.argsort", "numpy.array", "numpy.load" ], [ "tensorflow.python.ops.random_ops.truncated_normal", "tensorflow.python.ops.random_ops.random_uniform" ], [ "tensorflow.get_variable", "tensorflow.zeros", "tensorflow.minimum", "tensorflow.get_default_graph", "tensorflow.while_loop", "tensorflow.gather", "tensorflow.nn.conv3d_transpose", "tensorflow.image.resize_images", "tensorflow.identity", "tensorflow.nn.conv3d", "tensorflow.contrib.layers.batch_norm", "tensorflow.nn.bias_add", "tensorflow.constant", "tensorflow.transpose", "tensorflow.ones", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.random_uniform", "tensorflow.random_normal" ], [ "tensorflow.nn.relu", "tensorflow.concat", "tensorflow.global_variables", "tensorflow.variables_initializer", "tensorflow.expand_dims", "tensorflow.trainable_variables", "tensorflow.sigmoid", "tensorflow.constant_initializer", "tensorflow.contrib.layers.conv3d", "tensorflow.ones_like", "tensorflow.variable_scope", "tensorflow.pad", "tensorflow.contrib.layers.batch_norm", "tensorflow.train.Saver" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
ssattari/neural_prophet
[ "e121234d2f64d2b81f9c53f52b30d21a2cf1c6e0" ]
[ "neuralprophet/forecaster.py" ]
[ "import time\nfrom collections import OrderedDict\nimport numpy as np\nimport pandas as pd\n\nimport torch\nfrom torch.utils.data import DataLoader\nimport logging\nfrom tqdm import tqdm\n\nfrom neuralprophet import configure\nfrom neuralprophet import time_net\nfrom neuralprophet import time_dataset\nfrom neuralprophet import df_utils\nfrom neuralprophet import utils\nfrom neuralprophet.plot_forecast import plot, plot_components\nfrom neuralprophet.plot_model_parameters import plot_parameters\nfrom neuralprophet import metrics\n\nlog = logging.getLogger(\"NP.forecaster\")\n\n\nMETRICS = {\n \"mae\": metrics.MAE,\n \"mse\": metrics.MSE,\n \"rmse\": metrics.RMSE,\n}\n\n\nclass NeuralProphet:\n \"\"\"NeuralProphet forecaster.\n\n A simple yet powerful forecaster that models:\n Trend, seasonality, events, holidays, auto-regression, lagged covariates, and future-known regressors.\n Can be regualrized and configured to model nonlinear relationships.\n\n Parameters\n ----------\n COMMENT\n Trend Config\n COMMENT\n growth : {'off' or 'linear'}, default 'linear'\n Set use of trend growth type.\n\n Options:\n * ``off``: no trend.\n * (default) ``linear``: fits a piece-wise linear trend with ``n_changepoints + 1`` segments\n * ``discontinuous``: For advanced users only - not a conventional trend,\n allows arbitrary jumps at each trend changepoint\n\n changepoints : {list of str, list of np.datetimes or np.array of np.datetimes}, optional\n Manually set dates at which to include potential changepoints.\n\n Note\n ----\n Does not accept ``np.array`` of ``np.str``. 
If not specified, potential changepoints are selected automatically.\n\n n_changepoints : int\n Number of potential trend changepoints to include.\n\n Note\n ----\n Changepoints are selected uniformly from the first ``changepoint_range`` proportion of the history.\n Ignored if manual ``changepoints`` list is supplied.\n changepoints_range : float\n Proportion of history in which trend changepoints will be estimated.\n\n e.g. set to 0.8 to allow changepoints only in the first 80% of training data.\n Ignored if manual ``changepoints`` list is supplied.\n trend_reg : float, optional\n Parameter modulating the flexibility of the automatic changepoint selection.\n\n Note\n ----\n Large values (~1-100) will limit the variability of changepoints.\n Small values (~0.001-1.0) will allow changepoints to change faster.\n default: 0 will fully fit a trend to each segment.\n\n trend_reg_threshold : bool, optional\n Allowance for trend to change without regularization.\n\n Options\n * ``True``: Automatically set to a value that leads to a smooth trend.\n * (default) ``False``: All changes in changepoints are regularized\n\n COMMENT\n Seasonality Config\n COMMENT\n yearly_seasonality : bool, int\n Fit yearly seasonality.\n\n Options\n * ``True`` or ``False``\n * ``auto``: set automatically\n * ``value``: number of Fourier/linear terms to generate\n weekly_seasonality : bool, int\n Fit monthly seasonality.\n\n Options\n * ``True`` or ``False``\n * ``auto``: set automatically\n * ``value``: number of Fourier/linear terms to generate\n daily_seasonality : bool, int\n Fit daily seasonality.\n\n Options\n * ``True`` or ``False``\n * ``auto``: set automatically\n * ``value``: number of Fourier/linear terms to generate\n seasonality_mode : str\n Specifies mode of seasonality\n\n Options\n * (default) ``additive``\n * ``multiplicative``\n seasonality_reg : float, optional\n Parameter modulating the strength of the seasonality model.\n\n Note\n ----\n Smaller values (~0.1-1) allow the 
model to fit larger seasonal fluctuations,\n larger values (~1-100) dampen the seasonality.\n default: None, no regularization\n\n COMMENT\n AR Config\n COMMENT\n n_lags : int\n Previous time series steps to include in auto-regression. Aka AR-order\n ar_reg : float, optional\n how much sparsity to enduce in the AR-coefficients\n\n Note\n ----\n Large values (~1-100) will limit the number of nonzero coefficients dramatically.\n Small values (~0.001-1.0) will allow more non-zero coefficients.\n default: 0 no regularization of coefficients.\n\n COMMENT\n Model Config\n COMMENT\n n_forecasts : int\n Number of steps ahead of prediction time step to forecast.\n num_hidden_layers : int, optional\n number of hidden layer to include in AR-Net (defaults to 0)\n d_hidden : int, optional\n dimension of hidden layers of the AR-Net. Ignored if ``num_hidden_layers`` == 0.\n\n COMMENT\n Train Config\n COMMENT\n learning_rate : float\n Maximum learning rate setting for 1cycle policy scheduler.\n\n Note\n ----\n Default ``None``: Automatically sets the ``learning_rate`` based on a learning rate range test.\n For manual user input, (try values ~0.001-10).\n epochs : int\n Number of epochs (complete iterations over dataset) to train model.\n\n Note\n ----\n Default ``None``: Automatically sets the number of epochs based on dataset size.\n For best results also leave batch_size to None. For manual values, try ~5-500.\n batch_size : int\n Number of samples per mini-batch.\n\n If not provided, ``batch_size`` is approximated based on dataset size.\n For manual values, try ~8-1024.\n For best results also leave ``epochs`` to ``None``.\n newer_samples_weight: float, default 2.0\n Sets factor by which the model fit is skewed towards more recent observations.\n\n Controls the factor by which final samples are weighted more compared to initial samples.\n Applies a positional weighting to each sample's loss value.\n\n e.g. 
``newer_samples_weight = 2``: final samples are weighted twice as much as initial samples.\n newer_samples_start: float, default 0.0\n Sets beginning of 'newer' samples as fraction of training data.\n\n Throughout the range of 'newer' samples, the weight is increased\n from ``1.0/newer_samples_weight`` initially to 1.0 at the end,\n in a monotonously increasing function (cosine from pi to 2*pi).\n loss_func : str, torch.nn.functional.loss\n Type of loss to use:\n\n Options\n * (default) ``Huber``: Huber loss function\n * ``MSE``: Mean Squared Error loss function\n * ``MAE``: Mean Absolute Error loss function\n * ``torch.nn.functional.loss.``: loss or callable for custom loss, eg. L1-Loss\n\n Examples\n --------\n >>> from neuralprophet import NeuralProphet\n >>> import torch\n >>> import torch.nn as nn\n >>> m = NeuralProphet(loss_func=torch.nn.L1Loss)\n\n collect_metrics : list of str, bool\n Set metrics to compute.\n\n Valid: [``mae``, ``rmse``, ``mse``]\n\n Options\n * (default) ``True``: [``mae``, ``rmse``]\n * ``False``: No metrics\n\n COMMENT\n Missing Data\n COMMENT\n impute_missing : bool\n whether to automatically impute missing dates/values\n\n Note\n ----\n imputation follows a linear method up to 10 missing values, more are filled with trend.\n\n COMMENT\n Data Normalization\n COMMENT\n normalize : str\n Type of normalization to apply to the time series.\n\n Options\n * ``off`` bypasses data normalization\n * (default, binary timeseries) ``minmax`` scales the minimum value to 0.0 and the maximum value to 1.0\n * ``standardize`` zero-centers and divides by the standard deviation\n * (default) ``soft`` scales the minimum value to 0.0 and the 95th quantile to 1.0\n * ``soft1`` scales the minimum value to 0.1 and the 90th quantile to 0.9\n global_normalization : bool\n Activation of global normalization\n\n Options\n * ``True``: dict of dataframes is used as global_time_normalization\n * (default) ``False``: local normalization\n global_time_normalization 
(bool):\n Specifies global time normalization\n\n Options\n * (default) ``True``: only valid in case of global modeling local normalization\n * ``False``: set time data_params locally\n unknown_data_normalization : bool\n Specifies unknown data normalization\n\n Options\n * ``True``: test data is normalized with global data params even if trained with local data params (global modeling with local normalization)\n * (default) ``False``: no global modeling with local normalization\n \"\"\"\n\n def __init__(\n self,\n growth=\"linear\",\n changepoints=None,\n n_changepoints=10,\n changepoints_range=0.9,\n trend_reg=0,\n trend_reg_threshold=False,\n yearly_seasonality=\"auto\",\n weekly_seasonality=\"auto\",\n daily_seasonality=\"auto\",\n seasonality_mode=\"additive\",\n seasonality_reg=0,\n n_forecasts=1,\n n_lags=0,\n num_hidden_layers=0,\n d_hidden=None,\n ar_reg=None,\n learning_rate=None,\n epochs=None,\n batch_size=None,\n loss_func=\"Huber\",\n optimizer=\"AdamW\",\n newer_samples_weight=2,\n newer_samples_start=0.0,\n impute_missing=True,\n collect_metrics=True,\n normalize=\"auto\",\n global_normalization=False,\n global_time_normalization=True,\n unknown_data_normalization=False,\n ):\n kwargs = locals()\n\n # General\n self.name = \"NeuralProphet\"\n self.n_forecasts = n_forecasts\n\n # Data Normalization settings\n self.config_normalization = configure.Normalization(\n normalize=normalize,\n global_normalization=global_normalization,\n global_time_normalization=global_time_normalization,\n unknown_data_normalization=unknown_data_normalization,\n )\n\n # Missing Data Preprocessing\n self.impute_missing = impute_missing\n self.impute_limit_linear = 5\n self.impute_rolling = 20\n\n # Training\n self.config_train = configure.from_kwargs(configure.Train, kwargs)\n\n if collect_metrics is None:\n collect_metrics = []\n elif collect_metrics is True:\n collect_metrics = [\"mae\", \"rmse\"]\n elif isinstance(collect_metrics, str):\n if not collect_metrics.lower() 
in METRICS.keys():\n raise ValueError(\"Received unsupported argument for collect_metrics.\")\n collect_metrics = [collect_metrics]\n elif isinstance(collect_metrics, list):\n if not all([m.lower() in METRICS.keys() for m in collect_metrics]):\n raise ValueError(\"Received unsupported argument for collect_metrics.\")\n elif collect_metrics is not False:\n raise ValueError(\"Received unsupported argument for collect_metrics.\")\n\n self.metrics = None\n if isinstance(collect_metrics, list):\n self.metrics = metrics.MetricsCollection(\n metrics=[metrics.LossMetric(self.config_train.loss_func)]\n + [METRICS[m.lower()]() for m in collect_metrics],\n value_metrics=[metrics.ValueMetric(\"RegLoss\")],\n )\n\n # AR\n self.config_ar = configure.from_kwargs(configure.AR, kwargs)\n self.n_lags = self.config_ar.n_lags\n self.max_lags = self.n_lags\n\n # Model\n self.config_model = configure.from_kwargs(configure.Model, kwargs)\n\n # Trend\n self.config_trend = configure.from_kwargs(configure.Trend, kwargs)\n\n # Seasonality\n self.season_config = configure.AllSeason(\n mode=seasonality_mode,\n reg_lambda=seasonality_reg,\n yearly_arg=yearly_seasonality,\n weekly_arg=weekly_seasonality,\n daily_arg=daily_seasonality,\n )\n self.config_train.reg_lambda_season = self.season_config.reg_lambda\n\n # Events\n self.events_config = None\n self.country_holidays_config = None\n\n # Extra Regressors\n self.config_covar = None\n self.regressors_config = None\n\n # set during fit()\n self.data_freq = None\n\n # Set during _train()\n self.fitted = False\n self.data_params = None\n self.optimizer = None\n self.scheduler = None\n self.model = None\n\n # set during prediction\n self.future_periods = None\n # later set by user (optional)\n self.highlight_forecast_step_n = None\n self.true_ar_weights = None\n\n def add_lagged_regressor(\n self,\n names,\n n_lags=\"auto\",\n regularization=None,\n normalize=\"auto\",\n ):\n \"\"\"Add a covariate or list of covariate time series as additional 
lagged regressors to be used for fitting and predicting.\n The dataframe passed to ``fit`` and ``predict`` will have the column with the specified name to be used as\n lagged regressor. When normalize=True, the covariate will be normalized unless it is binary.\n\n Parameters\n ----------\n names : string or list\n name of the regressor/list of regressors.\n n_lags : int\n previous regressors time steps to use as input in the predictor (covar order)\n if ``auto``, time steps will be equivalent to the AR order (default)\n if ``scalar``, all the regressors will only use last known value as input\n regularization : float\n optional scale for regularization strength\n normalize : bool\n optional, specify whether this regressor will benormalized prior to fitting.\n if ``auto``, binary regressors will not be normalized.\n \"\"\"\n if n_lags == 0 or n_lags is None:\n n_lags = 0\n log.warning(\n \"Please, set n_lags to a value greater than 0 or to the options 'scalar' or 'auto'. No lags will be added to regressors when n_lags = 0 or n_lags is None\"\n )\n if n_lags == \"auto\":\n if self.n_lags is not None and self.n_lags > 0:\n n_lags = self.n_lags\n log.info(\n \"n_lags = 'auto', number of lags for regressor is set to Autoregression number of lags ({})\".format(\n self.n_lags\n )\n )\n else:\n n_lags = 1\n log.info(\n \"n_lags = 'auto', but there is no lags for Autoregression. 
Number of lags for regressor is automatically set to 1\"\n )\n if n_lags == \"scalar\":\n n_lags = 1\n log.info(\"n_lags = 'scalar', number of lags for regressor is set to 1\")\n only_last_value = False if n_lags > 1 else True\n if self.fitted:\n raise Exception(\"Regressors must be added prior to model fitting.\")\n if not isinstance(names, list):\n names = [names]\n for name in names:\n self._validate_column_name(name)\n if self.config_covar is None:\n self.config_covar = OrderedDict({})\n self.config_covar[name] = configure.Covar(\n reg_lambda=regularization, normalize=normalize, as_scalar=only_last_value, n_lags=n_lags\n )\n return self\n\n def add_future_regressor(self, name, regularization=None, normalize=\"auto\", mode=\"additive\"):\n \"\"\"Add a regressor as lagged covariate with order 1 (scalar) or as known in advance (also scalar).\n The dataframe passed to :meth:`fit` and :meth:`predict` will have a column with the specified name to be used as\n a regressor. When normalize=True, the regressor will be normalized unless it is binary.\n\n Note\n ----\n Future Regressors have to be known for the entire forecast horizon, e.g. 
``n_forecasts`` into the future.\n\n Parameters\n ----------\n name : string\n name of the regressor.\n regularization : float\n optional scale for regularization strength\n normalize : bool\n optional, specify whether this regressor will be normalized prior to fitting.\n\n Note\n ----\n if ``auto``, binary regressors will not be normalized.\n mode : str\n ``additive`` (default) or ``multiplicative``.\n\n\n \"\"\"\n if self.fitted:\n raise Exception(\"Regressors must be added prior to model fitting.\")\n if regularization is not None:\n if regularization < 0:\n raise ValueError(\"regularization must be >= 0\")\n if regularization == 0:\n regularization = None\n self._validate_column_name(name)\n\n if self.regressors_config is None:\n self.regressors_config = {}\n self.regressors_config[name] = configure.Regressor(reg_lambda=regularization, normalize=normalize, mode=mode)\n return self\n\n def add_events(self, events, lower_window=0, upper_window=0, regularization=None, mode=\"additive\"):\n \"\"\"\n Add user specified events and their corresponding lower, upper windows and the\n regularization parameters into the NeuralProphet object\n\n Parameters\n ----------\n events : str, list\n name or list of names of user specified events\n lower_window : int\n the lower window for the events in the list of events\n upper_window : int\n the upper window for the events in the list of events\n regularization : float\n optional scale for regularization strength\n mode : str\n ``additive`` (default) or ``multiplicative``.\n\n \"\"\"\n if self.fitted:\n raise Exception(\"Events must be added prior to model fitting.\")\n\n if self.events_config is None:\n self.events_config = OrderedDict({})\n\n if regularization is not None:\n if regularization < 0:\n raise ValueError(\"regularization must be >= 0\")\n if regularization == 0:\n regularization = None\n\n if not isinstance(events, list):\n events = [events]\n\n for event_name in events:\n self._validate_column_name(event_name)\n 
self.events_config[event_name] = configure.Event(\n lower_window=lower_window, upper_window=upper_window, reg_lambda=regularization, mode=mode\n )\n return self\n\n def add_country_holidays(self, country_name, lower_window=0, upper_window=0, regularization=None, mode=\"additive\"):\n \"\"\"\n Add a country into the NeuralProphet object to include country specific holidays\n and create the corresponding configs such as lower, upper windows and the regularization\n parameters\n\n Parameters\n ----------\n country_name : string\n name of the country\n lower_window : int\n the lower window for all the country holidays\n upper_window : int\n the upper window for all the country holidays\n regularization : float\n optional scale for regularization strength\n mode : str\n ``additive`` (default) or ``multiplicative``.\n \"\"\"\n if self.fitted:\n raise Exception(\"Country must be specified prior to model fitting.\")\n\n if regularization is not None:\n if regularization < 0:\n raise ValueError(\"regularization must be >= 0\")\n if regularization == 0:\n regularization = None\n self.country_holidays_config = configure.Holidays(\n country=country_name,\n lower_window=lower_window,\n upper_window=upper_window,\n reg_lambda=regularization,\n mode=mode,\n )\n self.country_holidays_config.init_holidays()\n return self\n\n def add_seasonality(self, name, period, fourier_order):\n \"\"\"Add a seasonal component with specified period, number of Fourier components, and regularization.\n\n Increasing the number of Fourier components allows the seasonality to change more quickly\n (at risk of overfitting).\n Note: regularization and mode (additive/multiplicative) are set in the main init.\n\n Parameters\n ----------\n name : string\n name of the seasonality component.\n period : float\n number of days in one period.\n fourier_order : int\n number of Fourier components to use.\n\n \"\"\"\n if self.fitted:\n raise Exception(\"Seasonality must be added prior to model fitting.\")\n if 
name in [\"daily\", \"weekly\", \"yearly\"]:\n log.error(\"Please use inbuilt daily, weekly, or yearly seasonality or set another name.\")\n # Do not Allow overwriting built-in seasonalities\n self._validate_column_name(name, seasons=True)\n if fourier_order <= 0:\n raise ValueError(\"Fourier Order must be > 0\")\n self.season_config.append(name=name, period=period, resolution=fourier_order, arg=\"custom\")\n return self\n\n def fit(self, df, freq=\"auto\", validation_df=None, progress=\"bar\", minimal=False):\n \"\"\"Train, and potentially evaluate model.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n containing column ``ds``, ``y`` with all data\n freq : str\n Data step sizes. Frequency of data recording,\n\n Note\n ----\n Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.\n validation_df : pd.DataFrame, dict\n if provided, model with performance will be evaluated after each training epoch over this data.\n epochs : int\n number of epochs to train (overrides default setting).\n default: if not specified, uses self.epochs\n progress : str\n Method of progress display\n\n Options\n * (default) ``bar`` display updating progress bar (tqdm)\n * ``print`` print out progress (fallback option)\n * ``plot`` plot a live updating graph of the training loss, requires [live] install or livelossplot package installed.\n * ``plot-all`` extended to all recorded metrics.\n minimal : bool\n whether to train without any printouts or metrics collection\n\n Returns\n -------\n pd.DataFrame\n metrics with training and potentially evaluation metrics\n \"\"\"\n\n df_dict, _ = df_utils.prep_copy_df_dict(df)\n if self.fitted is True:\n log.error(\"Model has already been fitted. 
Re-fitting may break or produce different results.\")\n self.max_lags = df_utils.get_max_num_lags(self.config_covar, self.n_lags)\n if self.max_lags == 0 and self.n_forecasts > 1:\n self.n_forecasts = 1\n log.warning(\n \"Changing n_forecasts to 1. Without lags, the forecast can be \"\n \"computed for any future time, independent of lagged values\"\n )\n df_dict = self._check_dataframe(df_dict, check_y=True, exogenous=True)\n self.data_freq = df_utils.infer_frequency(df_dict, n_lags=self.max_lags, freq=freq)\n df_dict = self._handle_missing_data(df_dict, freq=self.data_freq)\n if validation_df is not None and (self.metrics is None or minimal):\n log.warning(\"Ignoring validation_df because no metrics set or minimal training set.\")\n validation_df = None\n if validation_df is None:\n if minimal:\n self._train_minimal(df_dict, progress_bar=progress == \"bar\")\n metrics_df = None\n else:\n metrics_df = self._train(df_dict, progress=progress)\n else:\n df_val_dict, _ = df_utils.prep_copy_df_dict(validation_df)\n df_val_dict = self._check_dataframe(df_val_dict, check_y=False, exogenous=False)\n df_val_dict = self._handle_missing_data(df_val_dict, freq=self.data_freq)\n metrics_df = self._train(df_dict, df_val_dict=df_val_dict, progress=progress)\n\n self.fitted = True\n return metrics_df\n\n def predict(self, df, decompose=True, raw=False):\n \"\"\"Runs the model to make predictions.\n\n Expects all data needed to be present in dataframe.\n If you are predicting into the unknown future and need to add future regressors or events,\n please prepare data with make_future_dataframe.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with data\n decompose : bool\n whether to add individual components of forecast to the dataframe\n raw : bool\n specifies raw data\n\n Options\n * (default) ``False``: returns forecasts sorted by target (highlighting forecast age)\n * ``True``: return the raw forecasts sorted 
by forecast start date\n\n Returns\n -------\n pd.DataFrame\n dependent on ``raw``\n\n Note\n ----\n\n ``raw == True``: columns ``ds``, ``y``, and [``step<i>``] where step<i> refers to the i-step-ahead\n prediction *made at* this row's datetime, e.g. step3 is the prediction for 3 steps into the future,\n predicted using information up to (excluding) this datetime.\n\n ``raw == False``: columns ``ds``, ``y``, ``trend`` and [``yhat<i>``] where yhat<i> refers to\n the i-step-ahead prediction for this row's datetime,\n e.g. yhat3 is the prediction for this datetime, predicted 3 steps ago, \"3 steps old\".\n \"\"\"\n if raw:\n log.warning(\"Raw forecasts are incompatible with plotting utilities\")\n if self.fitted is False:\n raise ValueError(\"Model has not been fitted. Predictions will be random.\")\n df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n # to get all forecasteable values with df given, maybe extend into future:\n df_dict, periods_added = self._maybe_extend_df(df_dict)\n df_dict = self._prepare_dataframe_to_predict(df_dict)\n # normalize\n df_dict = self._normalize(df_dict)\n for key, df_i in df_dict.items():\n dates, predicted, components = self._predict_raw(df_i, key, include_components=decompose)\n if raw:\n fcst = self._convert_raw_predictions_to_raw_df(dates, predicted, components)\n if periods_added[key] > 0:\n fcst = fcst[:-1]\n else:\n fcst = self._reshape_raw_predictions_to_forecst_df(df_i, predicted, components)\n if periods_added[key] > 0:\n fcst = fcst[: -periods_added[key]]\n df_dict[key] = fcst\n df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)\n return df\n\n def test(self, df):\n \"\"\"Evaluate model on holdout data.\n\n Parameters\n ----------\n df : pd.DataFrame,dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with with holdout data\n Returns\n -------\n pd.DataFrame\n evaluation metrics\n \"\"\"\n df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n if self.fitted 
is False:\n log.warning(\"Model has not been fitted. Test results will be random.\")\n df_dict = self._check_dataframe(df_dict, check_y=True, exogenous=True)\n _ = df_utils.infer_frequency(df_dict, n_lags=self.max_lags, freq=self.data_freq)\n df_dict = self._handle_missing_data(df_dict, freq=self.data_freq)\n loader = self._init_val_loader(df_dict)\n val_metrics_df = self._evaluate(loader)\n if not self.config_normalization.global_normalization:\n log.warning(\"Note that the metrics are displayed in normalized scale because of local normalization.\")\n return val_metrics_df\n\n def split_df(self, df, freq=\"auto\", valid_p=0.2, local_split=False):\n \"\"\"Splits timeseries df into train and validation sets.\n Prevents leakage of targets. Sharing/Overbleed of inputs can be configured.\n Also performs basic data checks and fills in missing data.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n freq : str\n data step sizes. Frequency of data recording,\n\n Note\n ----\n Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.\n valid_p : float\n fraction of data to use for holdout validation set, targets will still never be shared.\n local_split : bool\n Each dataframe will be split according to valid_p locally (in case of dict of dataframes\n\n Returns\n -------\n tuple of two pd.DataFrames\n\n training data\n\n validation data\n\n See Also\n --------\n crossvalidation_split_df : Splits timeseries data in k folds for crossvalidation.\n double_crossvalidation_split_df : Splits timeseries data in two sets of k folds for crossvalidation on training and testing data.\n\n Examples\n --------\n >>> df1 = pd.DataFrame({'ds': pd.date_range(start='2022-12-01', periods=5,\n ... freq='D'), 'y': [9.59, 8.52, 8.18, 8.07, 7.89]})\n >>> df2 = pd.DataFrame({'ds': pd.date_range(start='2022-12-09', periods=5,\n ... 
freq='D'), 'y': [8.71, 8.09, 7.84, 7.65, 8.02]})\n >>> df3 = pd.DataFrame({'ds': pd.date_range(start='2022-12-09', periods=5,\n ... freq='D'), 'y': [7.67, 7.64, 7.55, 8.25, 8.3]})\n >>> df3\n ds\t y\n 0\t2022-12-09\t7.67\n 1\t2022-12-10\t7.64\n 2\t2022-12-11\t7.55\n 3\t2022-12-12\t8.25\n 4\t2022-12-13\t8.30\n\n One can define a dict with many time series.\n >>> df_dict = {'data1': df1, 'data2': df2, 'data3': df3}\n\n You can split a single dataframe.\n >>> (df_train, df_val) = m.split_df(df3, valid_p=0.2)\n >>> df_train\n ds\t y\n 0\t2022-12-09\t7.67\n 1\t2022-12-10\t7.64\n 2\t2022-12-11\t7.55\n 3\t2022-12-12\t8.25\n >>> df_val\n ds\t y\n 0\t2022-12-13\t8.3\n\n You can also use a dict of dataframes (especially useful for global modeling), which will account for the time range of the whole group of time series as default.\n >>> (df_dict_train, df_dict_val) = m.split_df(df_dict, valid_p=0.2)\n >>> df_dict_train\n {'data1': ds y\n 0 2022-12-01 9.59\n 1 2022-12-02 8.52\n 2 2022-12-03 8.18\n 3 2022-12-04 8.07\n 4 2022-12-05 7.89,\n 'data2': ds y\n 0 2022-12-09 8.71\n 1 2022-12-10 8.09\n 2 2022-12-11 7.84,\n 'data3': ds y\n 0 2022-12-09 7.67\n 1 2022-12-10 7.64\n 2 2022-12-11 7.55}\n >>> df_dict_val\n {'data2': ds y\n 0 2022-12-12 7.65\n 1 2022-12-13 8.02,\n 'data3': ds y\n 0 2022-12-12 8.25\n 1 2022-12-13 8.30}\n\n In some applications, splitting locally each time series may be helpful. In this case, one should set `local_split` to True.\n >>> (df_dict_train, df_dict_val) = m.split_df(df_dict, valid_p=0.2,\n ... 
local_split=True)\n >>> df_dict_train\n {'data1': ds y\n 0 2022-12-01 9.59\n 1 2022-12-02 8.52\n 2 2022-12-03 8.18\n 3 2022-12-04 8.07,\n 'data2': ds y\n 0 2022-12-09 8.71\n 1 2022-12-10 8.09\n 2 2022-12-11 7.84\n 3 2022-12-12 7.65,\n 'data3': ds y\n 0 2022-12-09 7.67\n 1 2022-12-10 7.64\n 2 2022-12-11 7.55\n 3 2022-12-12 8.25}\n >>> df_dict_val\n {'data1': ds y\n 0 2022-12-05 7.89,\n 'data2': ds y\n 0 2022-12-13 8.02,\n 'data3': ds y\n 0 2022-12-13 8.3}\n \"\"\"\n df, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n df = self._check_dataframe(df, check_y=False, exogenous=False)\n freq = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=freq)\n df = self._handle_missing_data(df, freq=freq, predicting=False)\n df_train, df_val = df_utils.split_df(\n df,\n n_lags=self.max_lags,\n n_forecasts=self.n_forecasts,\n valid_p=valid_p,\n inputs_overbleed=True,\n local_split=local_split,\n )\n df_train = df_utils.maybe_get_single_df_from_df_dict(df_train, received_unnamed_df)\n df_val = df_utils.maybe_get_single_df_from_df_dict(df_val, received_unnamed_df)\n return df_train, df_val\n\n def crossvalidation_split_df(\n self, df, freq=\"auto\", k=5, fold_pct=0.1, fold_overlap_pct=0.5, global_model_cv_type=\"None\"\n ):\n \"\"\"Splits timeseries data in k folds for crossvalidation.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n freq : str\n data step sizes. 
Frequency of data recording,\n\n Note\n ----\n Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.\n k : int\n number of CV folds\n fold_pct : float\n percentage of overall samples to be in each fold\n fold_overlap_pct : float\n percentage of overlap between the validation folds.\n global_model_cv_type : str\n Type of crossvalidation to apply to the dict of time series.\n\n options:\n\n ``global-time`` (default) crossvalidation is performed according to a time stamp threshold.\n\n ``local`` each episode will be crosvalidated locally (may cause time leakage among different episodes)\n\n ``intersect`` only the time intersection of all the episodes will be considered. A considerable amount of data may not be used. However, this approach guarantees an equal number of train/test samples for each episode.\n\n Returns\n -------\n list of k tuples [(df_train, df_val), ...]\n\n training data\n\n validation data\n \"\"\"\n df, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n df = self._check_dataframe(df, check_y=False, exogenous=False)\n freq = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=freq)\n df = self._handle_missing_data(df, freq=freq, predicting=False)\n folds = df_utils.crossvalidation_split_df(\n df,\n n_lags=self.max_lags,\n n_forecasts=self.n_forecasts,\n k=k,\n fold_pct=fold_pct,\n fold_overlap_pct=fold_overlap_pct,\n global_model_cv_type=global_model_cv_type,\n )\n return folds\n\n def double_crossvalidation_split_df(self, df, freq=\"auto\", k=5, valid_pct=0.10, test_pct=0.10):\n \"\"\"Splits timeseries data in two sets of k folds for crossvalidation on training and testing data.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n freq : str\n data step sizes. 
Frequency of data recording,\n\n Note\n ----\n Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.\n k : int\n number of CV folds\n valid_pct : float\n percentage of overall samples to be in validation\n test_pct : float\n percentage of overall samples to be in test\n\n Returns\n -------\n tuple of k tuples [(folds_val, folds_test), …]\n elements same as :meth:`crossvalidation_split_df` returns\n \"\"\"\n if isinstance(df, dict):\n raise NotImplementedError(\"Double crossvalidation not implemented for multiple dataframes\")\n df = df.copy(deep=True)\n df = self._check_dataframe(df, check_y=False, exogenous=False)\n freq = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=freq)\n df = self._handle_missing_data(df, freq=freq, predicting=False)\n folds_val, folds_test = df_utils.double_crossvalidation_split_df(\n df,\n n_lags=self.max_lags,\n n_forecasts=self.n_forecasts,\n k=k,\n valid_pct=valid_pct,\n test_pct=test_pct,\n )\n\n return folds_val, folds_test\n\n def create_df_with_events(self, df, events_df):\n \"\"\"\n Create a concatenated dataframe with the time series data along with the events data expanded.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n events_df : dict, pd.DataFrame\n containing column ``ds`` and ``event``\n\n Returns\n -------\n dict, pd.DataFrame\n columns ``y``, ``ds`` and other user specified events\n \"\"\"\n if self.events_config is None:\n raise Exception(\n \"The events configs should be added to the NeuralProphet object (add_events fn)\"\n \"before creating the data with events features\"\n )\n df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n df_dict = self._check_dataframe(df_dict, check_y=True, exogenous=False)\n if isinstance(events_df, pd.DataFrame):\n events_df_i = events_df.copy(deep=True)\n for df_name, df_i in df_dict.items():\n if 
isinstance(events_df, dict):\n events_df_i = events_df[df_name].copy(deep=True)\n for name in events_df_i[\"event\"].unique():\n assert name in self.events_config\n df_out = df_utils.convert_events_to_features(\n df_i,\n events_config=self.events_config,\n events_df=events_df_i,\n )\n df_dict[df_name] = df_out.reset_index(drop=True)\n df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)\n return df\n\n def make_future_dataframe(self, df, events_df=None, regressors_df=None, periods=None, n_historic_predictions=False):\n \"\"\"\n Extends dataframe a number of periods (time steps) into the future.\n\n Only use if you predict into the *unknown* future.\n New timestamps are added to the historic dataframe, with the 'y' column being NaN, as it remains to be predicted.\n Further, the given future events and regressors are added to the periods new timestamps.\n The returned dataframe will include historic data needed to additionally produce `n_historic_predictions`,\n for which there are historic observances of the series 'y'.\n\n Parameters\n ----------\n df: pd.DataFrame\n History to date. DataFrame containing all columns up to present\n events_df : pd.DataFrame\n Future event occurences corresponding to `periods` steps into future.\n Contains columns ``ds`` and ``event``. 
The event column contains the name of the event.\n regressor_df : pd.DataFrame\n Future regressor values corresponding to `periods` steps into future.\n Contains column ``ds`` and one column for each of the external regressors.\n periods : int\n number of steps to extend the DataFrame into the future\n n_historic_predictions : bool, int\n Includes historic data needed to predict `n_historic_predictions` timesteps,\n for which there are historic observances of the series 'y'.\n False: drop historic data except for needed inputs to predict future.\n True: include entire history.\n\n Returns\n -------\n pd.DataFrame\n input df with ``ds`` extended into future, ``y`` set to None,\n with future events and regressors added.\n\n Examples\n --------\n >>> from neuralprophet import NeuralProphet\n >>> m = NeuralProphet()\n >>> # set the model to expect these events\n >>> m = m.add_events([\"playoff\", \"superbowl\"])\n >>> # create the data df with events\n >>> history_df = m.create_df_with_events(df, events_df)\n >>> metrics = m.fit(history_df, freq=\"D\")\n >>> # forecast with events known ahead\n >>> future = m.make_future_dataframe(\n >>> history_df, events_df, periods=365, n_historic_predictions=180\n >>> )\n >>> # get 180 past and 365 future predictions.\n >>> forecast = m.predict(df=future)\n\n \"\"\"\n df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n df_dict_events, received_unnamed_events_df = df_utils.prep_copy_df_dict(events_df)\n df_dict_regressors, received_unnamed_regressors_df = df_utils.prep_copy_df_dict(regressors_df)\n if received_unnamed_events_df:\n df_dict_events = {key: df_dict_events[\"__df__\"] for key in df_dict.keys()}\n elif df_dict_events is None:\n df_dict_events = {key: None for key in df_dict.keys()}\n else:\n df_utils.compare_dict_keys(df_dict, df_dict_events, \"dataframes\", \"events\")\n if received_unnamed_regressors_df:\n df_dict_regressors = {key: df_dict_regressors[\"__df__\"] for key in df_dict.keys()}\n elif 
df_dict_regressors is None:\n df_dict_regressors = {key: None for key in df_dict.keys()}\n else:\n df_utils.compare_dict_keys(df_dict, df_dict_regressors, \"dataframes\", \"regressors\")\n\n df_future_dataframe = {}\n for key in df_dict.keys():\n df_future_dataframe[key] = self._make_future_dataframe(\n df=df_dict[key],\n events_df=df_dict_events[key],\n regressors_df=df_dict_regressors[key],\n periods=periods,\n n_historic_predictions=n_historic_predictions,\n )\n df_future = df_utils.maybe_get_single_df_from_df_dict(df_future_dataframe, received_unnamed_df)\n return df_future\n\n def predict_trend(self, df):\n \"\"\"Predict only trend component of the model.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n\n Returns\n -------\n pd.DataFrame, dict\n trend on prediction dates.\n \"\"\"\n df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n df_dict = self._check_dataframe(df_dict, check_y=False, exogenous=False)\n df_dict = self._normalize(df_dict)\n for df_name, df in df_dict.items():\n t = torch.from_numpy(np.expand_dims(df[\"t\"].values, 1))\n trend = self.model.trend(t).squeeze().detach().numpy()\n data_params = self.config_normalization.get_data_params(df_name)\n trend = trend * data_params[\"y\"].scale + data_params[\"y\"].shift\n df_dict[df_name] = pd.DataFrame({\"ds\": df[\"ds\"], \"trend\": trend})\n df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)\n return df\n\n def predict_seasonal_components(self, df):\n \"\"\"Predict seasonality components\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing columns ``ds``, ``y`` with all data\n\n Returns\n -------\n pd.DataFrame, dict\n seasonal components with columns of name <seasonality component name>\n \"\"\"\n df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)\n df_dict = self._check_dataframe(df_dict, check_y=False, 
exogenous=False)\n df_dict = self._normalize(df_dict)\n for df_name, df in df_dict.items():\n dataset = time_dataset.TimeDataset(\n df,\n name=df_name,\n season_config=self.season_config,\n # n_lags=0,\n # n_forecasts=1,\n predict_mode=True,\n )\n loader = DataLoader(dataset, batch_size=min(4096, len(df)), shuffle=False, drop_last=False)\n predicted = {}\n for name in self.season_config.periods:\n predicted[name] = list()\n for inputs, _, _ in loader:\n for name in self.season_config.periods:\n features = inputs[\"seasonalities\"][name]\n y_season = torch.squeeze(self.model.seasonality(features=features, name=name))\n predicted[name].append(y_season.data.numpy())\n\n for name in self.season_config.periods:\n predicted[name] = np.concatenate(predicted[name])\n if self.season_config.mode == \"additive\":\n data_params = self.config_normalization.get_data_params(df_name)\n predicted[name] = predicted[name] * data_params[\"y\"].scale\n df_dict[df_name] = pd.DataFrame({\"ds\": df[\"ds\"], **predicted})\n df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)\n return df\n\n def set_true_ar_for_eval(self, true_ar_weights):\n \"\"\"Configures model to evaluate closeness of AR weights to true weights.\n\n Parameters\n ----------\n true_ar_weights : np.array\n true AR-parameters, if known.\n \"\"\"\n self.true_ar_weights = true_ar_weights\n\n def highlight_nth_step_ahead_of_each_forecast(self, step_number=None):\n \"\"\"Set which forecast step to focus on for metrics evaluation and plotting.\n\n Parameters\n ----------\n step_number : int\n i-th step ahead forecast to use for statistics and plotting.\n \"\"\"\n if step_number is not None:\n assert step_number <= self.n_forecasts\n self.highlight_forecast_step_n = step_number\n return self\n\n def plot(self, fcst, ax=None, xlabel=\"ds\", ylabel=\"y\", figsize=(10, 6)):\n \"\"\"Plot the NeuralProphet forecast, including history.\n\n Parameters\n ----------\n fcst : pd.DataFrame\n output of 
self.predict.\n ax : matplotlib axes\n optional, matplotlib axes on which to plot.\n xlabel : string\n label name on X-axis\n ylabel : string\n label name on Y-axis\n figsize : tuple\n width, height in inches. default: (10, 6)\n \"\"\"\n if isinstance(fcst, dict):\n log.error(\"Received more than one DataFrame. Use a for loop for many dataframes.\")\n if self.max_lags > 0:\n num_forecasts = sum(fcst[\"yhat1\"].notna())\n if num_forecasts < self.n_forecasts:\n log.warning(\n \"Too few forecasts to plot a line per forecast step.\" \"Plotting a line per forecast origin instead.\"\n )\n return self.plot_last_forecast(\n fcst,\n ax=ax,\n xlabel=xlabel,\n ylabel=ylabel,\n figsize=figsize,\n include_previous_forecasts=num_forecasts - 1,\n plot_history_data=True,\n )\n return plot(\n fcst=fcst,\n ax=ax,\n xlabel=xlabel,\n ylabel=ylabel,\n figsize=figsize,\n highlight_forecast=self.highlight_forecast_step_n,\n )\n\n def plot_last_forecast(\n self,\n fcst,\n ax=None,\n xlabel=\"ds\",\n ylabel=\"y\",\n figsize=(10, 6),\n include_previous_forecasts=0,\n plot_history_data=None,\n ):\n \"\"\"Plot the NeuralProphet forecast, including history.\n\n Parameters\n ----------\n fcst : pd.DataFrame\n output of self.predict.\n ax : matplotlib axes\n Optional, matplotlib axes on which to plot.\n xlabel : str\n label name on X-axis\n ylabel : str\n abel name on Y-axis\n figsize : tuple\n width, height in inches. default: (10, 6)\n include_previous_forecasts : int\n number of previous forecasts to include in plot\n plot_history_data : bool\n specifies plot of historical data\n Returns\n -------\n matplotlib.axes.Axes\n plot of NeuralProphet forecasting\n \"\"\"\n if self.max_lags == 0:\n raise ValueError(\"Use the standard plot function for models without lags.\")\n if isinstance(fcst, dict):\n log.error(\"Received more than one DataFrame. 
Use a for loop for many dataframes.\")\n if plot_history_data is None:\n fcst = fcst[-(include_previous_forecasts + self.n_forecasts + self.max_lags) :]\n elif plot_history_data is False:\n fcst = fcst[-(include_previous_forecasts + self.n_forecasts) :]\n elif plot_history_data is True:\n fcst = fcst\n fcst = utils.fcst_df_to_last_forecast(fcst, n_last=1 + include_previous_forecasts)\n return plot(\n fcst=fcst,\n ax=ax,\n xlabel=xlabel,\n ylabel=ylabel,\n figsize=figsize,\n highlight_forecast=self.highlight_forecast_step_n,\n line_per_origin=True,\n )\n\n def plot_components(self, fcst, figsize=None, residuals=False):\n \"\"\"Plot the NeuralProphet forecast components.\n\n Parameters\n ----------\n fcst : pd.DataFrame\n output of self.predict\n figsize : tuple\n width, height in inches.\n\n Note\n ----\n None (default): automatic (10, 3 * npanel)\n\n Returns\n -------\n matplotlib.axes.Axes\n plot of NeuralProphet components\n \"\"\"\n if isinstance(fcst, dict):\n log.error(\"Receiced more than one DataFrame. Use a for loop for many dataframes.\")\n return plot_components(\n m=self,\n fcst=fcst,\n figsize=figsize,\n forecast_in_focus=self.highlight_forecast_step_n,\n residuals=residuals,\n )\n\n def plot_parameters(self, weekly_start=0, yearly_start=0, figsize=None, df_name=None):\n \"\"\"Plot the NeuralProphet forecast components.\n\n Parameters\n ----------\n weekly_start : int\n specifying the start day of the weekly seasonality plot.\n\n Note\n ----\n 0 (default) starts the week on Sunday. 1 shifts by 1 day to Monday, and so on.\n yearly_start : int\n specifying the start day of the yearly seasonality plot.\n\n Note\n ----\n 0 (default) starts the year on Jan 1. 
1 shifts by 1 day to Jan 2, and so on.\n df_name : str\n name of dataframe to refer to data params from original keys of train dataframes (used for local normalization in global modeling)\n figsize : tuple\n width, height in inches.\n\n Note\n ----\n None (default): automatic (10, 3 * npanel)\n\n Returns\n -------\n matplotlib.axes.Axes\n plot of NeuralProphet forecasting\n \"\"\"\n return plot_parameters(\n m=self,\n forecast_in_focus=self.highlight_forecast_step_n,\n weekly_start=weekly_start,\n yearly_start=yearly_start,\n figsize=figsize,\n df_name=df_name,\n )\n\n def _init_model(self):\n \"\"\"Build Pytorch model with configured hyperparamters.\n\n Returns\n -------\n TimeNet model\n \"\"\"\n self.model = time_net.TimeNet(\n config_trend=self.config_trend,\n config_season=self.season_config,\n config_covar=self.config_covar,\n config_regressors=self.regressors_config,\n config_events=self.events_config,\n config_holidays=self.country_holidays_config,\n n_forecasts=self.n_forecasts,\n n_lags=self.n_lags,\n num_hidden_layers=self.config_model.num_hidden_layers,\n d_hidden=self.config_model.d_hidden,\n )\n log.debug(self.model)\n return self.model\n\n def _create_dataset(self, df_dict, predict_mode):\n \"\"\"Construct dataset from dataframe.\n\n (Configured Hyperparameters can be overridden by explicitly supplying them.\n Useful to predict a single model component.)\n\n Parameters\n ----------\n df_dict : dict\n containing pd.DataFrames of original and normalized columns ``ds``, ``y``, ``t``, ``y_scaled``\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` and\n normalized columns normalized columns ``ds``, ``y``, ``t``, ``y_scaled``\n predict_mode : bool\n specifies predict mode\n\n Options\n * ``False``: includes target values.\n * ``True``: does not include targets but includes entire dataset as input\n\n Returns\n -------\n TimeDataset\n \"\"\"\n return time_dataset.GlobalTimeDataset(\n df_dict,\n 
predict_mode=predict_mode,\n n_lags=self.n_lags,\n n_forecasts=self.n_forecasts,\n season_config=self.season_config,\n events_config=self.events_config,\n country_holidays_config=self.country_holidays_config,\n covar_config=self.config_covar,\n regressors_config=self.regressors_config,\n )\n\n def __handle_missing_data(self, df, freq, predicting):\n \"\"\"Checks, auto-imputes and normalizes new data\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n freq : str\n data step sizes. Frequency of data recording,\n\n Note\n ----\n Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.\n predicting : bool\n when no lags, allow NA values in ``y`` of forecast series or ``y`` to miss completely\n\n Returns\n -------\n pd.DataFrame\n preprocessed dataframe\n \"\"\"\n if self.max_lags == 0 and not predicting:\n # we can drop rows with NA in y\n sum_na = sum(df[\"y\"].isna())\n if sum_na > 0:\n df = df[df[\"y\"].notna()]\n log.info(\"dropped {} NAN row in 'y'\".format(sum_na))\n\n # add missing dates for autoregression modelling\n if self.max_lags > 0:\n df, missing_dates = df_utils.add_missing_dates_nan(df, freq=freq)\n if missing_dates > 0:\n if self.impute_missing:\n log.info(\"{} missing dates added.\".format(missing_dates))\n else:\n raise ValueError(\n \"{} missing dates found. 
Please preprocess data manually or set impute_missing to True.\".format(\n missing_dates\n )\n )\n\n if self.regressors_config is not None:\n # if future regressors, check that they are not nan at end, else drop\n # we ignore missing events, as those will be filled in with zeros.\n reg_nan_at_end = 0\n for col in self.regressors_config.keys():\n col_nan_at_end = 0\n while len(df) > col_nan_at_end and df[col].isnull().iloc[-(1 + col_nan_at_end)]:\n col_nan_at_end += 1\n reg_nan_at_end = max(reg_nan_at_end, col_nan_at_end)\n if reg_nan_at_end > 0:\n # drop rows at end due to missing future regressors\n df = df[:-reg_nan_at_end]\n log.info(\"Dropped {} rows at end due to missing future regressor values.\".format(reg_nan_at_end))\n\n df_end_to_append = None\n nan_at_end = 0\n while len(df) > nan_at_end and df[\"y\"].isnull().iloc[-(1 + nan_at_end)]:\n nan_at_end += 1\n if nan_at_end > 0:\n if predicting:\n # allow nans at end - will re-add at end\n if self.n_forecasts > 1 and self.n_forecasts < nan_at_end:\n # check that not more than n_forecasts nans, else drop surplus\n df = df[: -(nan_at_end - self.n_forecasts)]\n # correct new length:\n nan_at_end = self.n_forecasts\n log.info(\n \"Detected y to have more NaN values than n_forecast can predict. \"\n \"Dropped {} rows at end.\".format(nan_at_end - self.n_forecasts)\n )\n df_end_to_append = df[-nan_at_end:]\n df = df[:-nan_at_end]\n else:\n # training - drop nans at end\n df = df[:-nan_at_end]\n log.info(\n \"Dropped {} consecutive nans at end. 
\"\n \"Training data can only be imputed up to last observation.\".format(nan_at_end)\n )\n\n # impute missing values\n data_columns = []\n if self.max_lags > 0:\n data_columns.append(\"y\")\n if self.config_covar is not None:\n data_columns.extend(self.config_covar.keys())\n if self.regressors_config is not None:\n data_columns.extend(self.regressors_config.keys())\n if self.events_config is not None:\n data_columns.extend(self.events_config.keys())\n for column in data_columns:\n sum_na = sum(df[column].isnull())\n if sum_na > 0:\n if self.impute_missing:\n # use 0 substitution for holidays and events missing values\n if self.events_config is not None and column in self.events_config.keys():\n df[column].fillna(0, inplace=True)\n remaining_na = 0\n else:\n df.loc[:, column], remaining_na = df_utils.fill_linear_then_rolling_avg(\n df[column],\n limit_linear=self.impute_limit_linear,\n rolling=self.impute_rolling,\n )\n log.info(\"{} NaN values in column {} were auto-imputed.\".format(sum_na - remaining_na, column))\n if remaining_na > 0:\n raise ValueError(\n \"More than {} consecutive missing values encountered in column {}. \"\n \"{} NA remain. Please preprocess data manually.\".format(\n 2 * self.impute_limit_linear + self.impute_rolling, column, remaining_na\n )\n )\n else: # fail because set to not impute missing\n raise ValueError(\n \"Missing values found. \" \"Please preprocess data manually or set impute_missing to True.\"\n )\n if df_end_to_append is not None:\n df = df.append(df_end_to_append)\n return df\n\n def _handle_missing_data(self, df, freq, predicting=False):\n \"\"\"Checks, auto-imputes and normalizes new data\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n freq : str\n data step sizes. 
Frequency of data recording,\n\n Note\n ----\n Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.\n predicting (bool): when no lags, allow NA values in ``y`` of forecast series or ``y`` to miss completely\n\n Returns\n -------\n pre-processed df\n \"\"\"\n df_is_dict = True\n if isinstance(df, pd.DataFrame):\n df_is_dict = False\n df = {\"__df__\": df}\n elif not isinstance(df, dict):\n raise ValueError(\"Please insert valid df type (i.e. pd.DataFrame, dict)\")\n df_handled_missing_dict = {}\n for key in df:\n df_handled_missing_dict[key] = self.__handle_missing_data(df[key], freq, predicting)\n if not df_is_dict:\n df_handled_missing_dict = df_handled_missing_dict[\"__df__\"]\n return df_handled_missing_dict\n\n def _check_dataframe(self, df, check_y=True, exogenous=True):\n \"\"\"Performs basic data sanity checks and ordering\n\n Prepare dataframe for fitting or predicting.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n check_y : bool\n if df must have series values\n\n Note\n ----\n set to True if training or predicting with autoregression\n exogenous : bool\n whether to check covariates, regressors and events column names\n\n Returns\n -------\n pd.DataFrame\n checked dataframe\n \"\"\"\n df_is_dict = True\n if isinstance(df, pd.DataFrame):\n df_is_dict = False\n df = {\"__df__\": df}\n elif not isinstance(df, dict):\n raise ValueError(\"Please insert valid df type (i.e. 
pd.DataFrame, dict)\")\n checked_df = {}\n for key, df_i in df.items():\n checked_df[key] = df_utils.check_single_dataframe(\n df=df_i,\n check_y=check_y,\n covariates=self.config_covar if exogenous else None,\n regressors=self.regressors_config if exogenous else None,\n events=self.events_config if exogenous else None,\n )\n if not df_is_dict:\n checked_df = checked_df[\"__df__\"]\n return checked_df\n\n def _validate_column_name(self, name, events=True, seasons=True, regressors=True, covariates=True):\n \"\"\"Validates the name of a seasonality, event, or regressor.\n\n Parameters\n ----------\n name : str\n name of seasonality, event or regressor\n events : bool\n check if name already used for event\n seasons : bool\n check if name already used for seasonality\n regressors : bool\n check if name already used for regressor\n \"\"\"\n reserved_names = [\n \"trend\",\n \"additive_terms\",\n \"daily\",\n \"weekly\",\n \"yearly\",\n \"events\",\n \"holidays\",\n \"zeros\",\n \"extra_regressors_additive\",\n \"yhat\",\n \"extra_regressors_multiplicative\",\n \"multiplicative_terms\",\n ]\n rn_l = [n + \"_lower\" for n in reserved_names]\n rn_u = [n + \"_upper\" for n in reserved_names]\n reserved_names.extend(rn_l)\n reserved_names.extend(rn_u)\n reserved_names.extend([\"ds\", \"y\", \"cap\", \"floor\", \"y_scaled\", \"cap_scaled\"])\n if name in reserved_names:\n raise ValueError(\"Name {name!r} is reserved.\".format(name=name))\n if events and self.events_config is not None:\n if name in self.events_config.keys():\n raise ValueError(\"Name {name!r} already used for an event.\".format(name=name))\n if events and self.country_holidays_config is not None:\n if name in self.country_holidays_config.holiday_names:\n raise ValueError(\n \"Name {name!r} is a holiday name in {country_holidays}.\".format(\n name=name, country_holidays=self.country_holidays_config.country\n )\n )\n if seasons and self.season_config is not None:\n if name in self.season_config.periods:\n raise 
ValueError(\"Name {name!r} already used for a seasonality.\".format(name=name))\n if covariates and self.config_covar is not None:\n if name in self.config_covar:\n raise ValueError(\"Name {name!r} already used for an added covariate.\".format(name=name))\n if regressors and self.regressors_config is not None:\n if name in self.regressors_config.keys():\n raise ValueError(\"Name {name!r} already used for an added regressor.\".format(name=name))\n\n def _normalize(self, df_dict):\n \"\"\"Apply data scales.\n\n Applies data scaling factors to df using data_params.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n\n Returns\n -------\n df_dict: dict of pd.DataFrame, normalized\n \"\"\"\n for df_name, df_i in df_dict.items():\n data_params = self.config_normalization.get_data_params(df_name)\n df_dict[df_name] = df_utils.normalize(df_i, data_params)\n return df_dict\n\n def _init_train_loader(self, df_dict):\n \"\"\"Executes data preparation steps and initiates training procedure.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n\n Returns\n -------\n torch DataLoader\n \"\"\"\n if not isinstance(df_dict, dict):\n raise ValueError(\"df_dict must be a dict of pd.DataFrames.\")\n # if not self.fitted:\n self.config_normalization.init_data_params(\n df_dict=df_dict,\n covariates_config=self.config_covar,\n regressor_config=self.regressors_config,\n events_config=self.events_config,\n )\n\n df_dict = self._normalize(df_dict)\n # if not self.fitted:\n if self.config_trend.changepoints is not None:\n # scale user-specified changepoint times\n self.config_trend.changepoints = self._normalize(\n {\"__df__\": pd.DataFrame({\"ds\": pd.Series(self.config_trend.changepoints)})}\n )[\"__df__\"][\"t\"].values\n\n df_merged, _ = df_utils.join_dataframes(df_dict)\n df_merged = df_merged.sort_values(\"ds\")\n 
df_merged.drop_duplicates(inplace=True, keep=\"first\", subset=[\"ds\"])\n\n self.season_config = utils.set_auto_seasonalities(df_merged, season_config=self.season_config)\n if self.country_holidays_config is not None:\n self.country_holidays_config.init_holidays(df_merged)\n\n dataset = self._create_dataset(df_dict, predict_mode=False) # needs to be called after set_auto_seasonalities\n self.config_train.set_auto_batch_epoch(n_data=len(dataset))\n\n loader = DataLoader(dataset, batch_size=self.config_train.batch_size, shuffle=True)\n\n # if not self.fitted:\n self.model = self._init_model() # needs to be called after set_auto_seasonalities\n\n if self.config_train.learning_rate is None:\n self.config_train.learning_rate = self.config_train.find_learning_rate(self.model, dataset)\n log.info(\"lr-range-test selected learning rate: {:.2E}\".format(self.config_train.learning_rate))\n self.optimizer = self.config_train.get_optimizer(self.model.parameters())\n self.scheduler = self.config_train.get_scheduler(self.optimizer, steps_per_epoch=len(loader))\n return loader\n\n def _init_val_loader(self, df_dict):\n \"\"\"Executes data preparation steps and initiates evaluation procedure.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n\n Returns\n -------\n torch DataLoader\n \"\"\"\n df_dict = self._normalize(df_dict)\n dataset = self._create_dataset(df_dict, predict_mode=False)\n loader = DataLoader(dataset, batch_size=min(1024, len(dataset)), shuffle=False, drop_last=False)\n return loader\n\n def _get_time_based_sample_weight(self, t):\n weight = torch.ones_like(t)\n if self.config_train.newer_samples_weight > 1.0:\n end_w = self.config_train.newer_samples_weight\n start_t = self.config_train.newer_samples_start\n time = (t.detach() - start_t) / (1.0 - start_t)\n time = torch.maximum(torch.zeros_like(time), time)\n time = torch.minimum(torch.ones_like(time), time) # time = 0 to 1\n 
time = np.pi * (time - 1.0) # time = -pi to 0\n time = 0.5 * torch.cos(time) + 0.5 # time = 0 to 1\n # scales end to be end weight times bigger than start weight\n # with end weight being 1.0\n weight = (1.0 + time * (end_w - 1.0)) / end_w\n return weight\n\n def _train_epoch(self, e, loader):\n \"\"\"Make one complete iteration over all samples in dataloader and update model after each batch.\n\n Parameters\n ----------\n e : int\n current epoch number\n loader : torch DataLoader\n Training Dataloader\n \"\"\"\n self.model.train()\n for i, (inputs, targets, meta) in enumerate(loader):\n # Run forward calculation\n predicted = self.model.forward(inputs)\n # Compute loss. no reduction.\n loss = self.config_train.loss_func(predicted, targets)\n # Weigh newer samples more.\n loss = loss * self._get_time_based_sample_weight(t=inputs[\"time\"])\n loss = loss.mean()\n # Regularize.\n loss, reg_loss = self._add_batch_regualarizations(loss, e, i / float(len(loader)))\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.scheduler.step()\n if self.metrics is not None:\n self.metrics.update(\n predicted=predicted.detach(), target=targets.detach(), values={\"Loss\": loss, \"RegLoss\": reg_loss}\n )\n if self.metrics is not None:\n return self.metrics.compute(save=True)\n else:\n return None\n\n def _add_batch_regualarizations(self, loss, e, iter_progress):\n \"\"\"Add regulatization terms to loss, if applicable\n\n Parameters\n ----------\n loss : torch.Tensor, scalar\n current batch loss\n e : int\n current epoch number\n iter_progress : float\n this epoch's progress of iterating over dataset [0, 1]\n\n Returns\n -------\n loss, reg_loss\n \"\"\"\n delay_weight = self.config_train.get_reg_delay_weight(e, iter_progress)\n\n reg_loss = torch.zeros(1, dtype=torch.float, requires_grad=False)\n if delay_weight > 0:\n # Add regularization of AR weights - sparsify\n if self.max_lags > 0 and self.config_ar.reg_lambda is not None:\n reg_ar = 
self.config_ar.regularize(self.model.ar_weights)\n reg_ar = torch.sum(reg_ar).squeeze() / self.n_forecasts\n reg_loss += self.config_ar.reg_lambda * reg_ar\n\n # Regularize trend to be smoother/sparse\n l_trend = self.config_trend.trend_reg\n if self.config_trend.n_changepoints > 0 and l_trend is not None and l_trend > 0:\n reg_trend = utils.reg_func_trend(\n weights=self.model.get_trend_deltas,\n threshold=self.config_train.trend_reg_threshold,\n )\n reg_loss += l_trend * reg_trend\n\n # Regularize seasonality: sparsify fourier term coefficients\n l_season = self.config_train.reg_lambda_season\n if self.model.season_dims is not None and l_season is not None and l_season > 0:\n for name in self.model.season_params.keys():\n reg_season = utils.reg_func_season(self.model.season_params[name])\n reg_loss += l_season * reg_season\n\n # Regularize events: sparsify events features coefficients\n if self.events_config is not None or self.country_holidays_config is not None:\n reg_events_loss = utils.reg_func_events(self.events_config, self.country_holidays_config, self.model)\n reg_loss += reg_events_loss\n\n # Regularize regressors: sparsify regressor features coefficients\n if self.regressors_config is not None:\n reg_regressor_loss = utils.reg_func_regressors(self.regressors_config, self.model)\n reg_loss += reg_regressor_loss\n\n reg_loss = delay_weight * reg_loss\n loss = loss + reg_loss\n return loss, reg_loss\n\n def _evaluate_epoch(self, loader, val_metrics):\n \"\"\"Evaluates model performance.\n\n Parameters\n ----------\n loader : torch DataLoader\n instantiated Validation Dataloader (with TimeDataset)\n val_metrics : MetricsCollection\n alidation metrics to be computed.\n\n Returns\n -------\n dict with evaluation metrics\n \"\"\"\n with torch.no_grad():\n self.model.eval()\n for inputs, targets, meta in loader:\n predicted = self.model.forward(inputs)\n val_metrics.update(predicted=predicted.detach(), target=targets.detach())\n val_metrics = 
val_metrics.compute(save=True)\n return val_metrics\n\n def _train(self, df_dict, df_val_dict=None, progress=\"bar\"):\n \"\"\"Execute model training procedure for a configured number of epochs.\n\n Parameters\n ----------\n df_dict : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n df_val_dict : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with validation data\n progress : str\n Method of progress display.\n\n Options\n * (default) ``bar`` display updating progress bar (tqdm)\n * ``print`` print out progress (fallback option)\n * ``plot`` plot a live updating graph of the training loss, requires [live] install or livelossplot package installed.\n * ``plot-all`` \"plot\" extended to all recorded metrics.\n\n Returns\n -------\n pd.DataFrame\n metrics\n \"\"\"\n # parse progress arg\n progress_bar = False\n progress_print = False\n plot_live_loss = False\n plot_live_all_metrics = False\n if progress.lower() == \"bar\":\n progress_bar = True\n elif progress.lower() == \"print\":\n progress_print = True\n elif progress.lower() == \"plot\":\n plot_live_loss = True\n elif progress.lower() in [\"plot-all\", \"plotall\", \"plot all\"]:\n plot_live_loss = True\n plot_live_all_metrics = True\n elif not progress.lower() == \"none\":\n raise ValueError(\"received unexpected value for progress {}\".format(progress))\n\n if self.metrics is None:\n log.info(\"No progress prints or plots possible because metrics are deactivated.\")\n if df_val_dict is not None:\n log.warning(\"Ignoring supplied df_val as no metrics are specified.\")\n if plot_live_loss or plot_live_all_metrics:\n log.warning(\"Can not plot live loss as no metrics are specified.\")\n progress_bar = True\n if progress_print:\n log.warning(\"Can not print progress as no metrics are specified.\")\n return self._train_minimal(df_dict, progress_bar=progress_bar)\n\n # set up data loader\n loader = 
self._init_train_loader(df_dict)\n # set up Metrics\n if self.highlight_forecast_step_n is not None:\n self.metrics.add_specific_target(target_pos=self.highlight_forecast_step_n - 1)\n if not self.config_normalization.global_normalization:\n log.warning(\"When Global modeling with local normalization, metrics are displayed in normalized scale.\")\n else:\n if not self.config_normalization.normalize == \"off\":\n self.metrics.set_shift_scale(\n (\n self.config_normalization.global_data_params[\"y\"].shift,\n self.config_normalization.global_data_params[\"y\"].scale,\n )\n )\n\n validate = df_val_dict is not None\n if validate:\n val_loader = self._init_val_loader(df_val_dict)\n val_metrics = metrics.MetricsCollection([m.new() for m in self.metrics.batch_metrics])\n\n # set up printing and plotting\n if plot_live_loss:\n try:\n from livelossplot import PlotLosses\n\n live_out = [\"MatplotlibPlot\"]\n if not progress_bar:\n live_out.append(\"ExtremaPrinter\")\n live_loss = PlotLosses(outputs=live_out)\n plot_live_loss = True\n except:\n log.warning(\n \"To plot live loss, please install neuralprophet[live].\"\n \"Using pip: 'pip install neuralprophet[live]'\"\n \"Or install the missing package manually: 'pip install livelossplot'\",\n exc_info=True,\n )\n plot_live_loss = False\n progress_bar = True\n if progress_bar:\n training_loop = tqdm(\n range(self.config_train.epochs),\n total=self.config_train.epochs,\n leave=log.getEffectiveLevel() <= 20,\n )\n else:\n training_loop = range(self.config_train.epochs)\n\n start = time.time()\n # run training loop\n for e in training_loop:\n metrics_live = OrderedDict({})\n self.metrics.reset()\n if validate:\n val_metrics.reset()\n # run epoch\n epoch_metrics = self._train_epoch(e, loader)\n # collect metrics\n if validate:\n val_epoch_metrics = self._evaluate_epoch(val_loader, val_metrics)\n print_val_epoch_metrics = {k + \"_val\": v for k, v in val_epoch_metrics.items()}\n else:\n val_epoch_metrics = None\n 
print_val_epoch_metrics = OrderedDict({})\n # print metrics\n if progress_bar:\n training_loop.set_description(f\"Epoch[{(e+1)}/{self.config_train.epochs}]\")\n training_loop.set_postfix(ordered_dict=epoch_metrics, **print_val_epoch_metrics)\n elif progress_print:\n metrics_string = utils.print_epoch_metrics(epoch_metrics, e=e, val_metrics=val_epoch_metrics)\n if e == 0:\n log.info(metrics_string.splitlines()[0])\n log.info(metrics_string.splitlines()[1])\n else:\n log.info(metrics_string.splitlines()[1])\n # plot metrics\n if plot_live_loss:\n metrics_train = list(epoch_metrics)\n metrics_live[\"log-{}\".format(metrics_train[0])] = np.log(epoch_metrics[metrics_train[0]])\n if plot_live_all_metrics and len(metrics_train) > 1:\n for i in range(1, len(metrics_train)):\n metrics_live[\"{}\".format(metrics_train[i])] = epoch_metrics[metrics_train[i]]\n if validate:\n metrics_val = list(val_epoch_metrics)\n metrics_live[\"val_log-{}\".format(metrics_val[0])] = np.log(val_epoch_metrics[metrics_val[0]])\n if plot_live_all_metrics and len(metrics_val) > 1:\n for i in range(1, len(metrics_val)):\n metrics_live[\"val_{}\".format(metrics_val[i])] = val_epoch_metrics[metrics_val[i]]\n live_loss.update(metrics_live)\n if e % (1 + self.config_train.epochs // 20) == 0 or e + 1 == self.config_train.epochs:\n live_loss.send()\n\n # return metrics as df\n log.debug(\"Train Time: {:8.3f}\".format(time.time() - start))\n log.debug(\"Total Batches: {}\".format(self.metrics.total_updates))\n metrics_df = self.metrics.get_stored_as_df()\n if validate:\n metrics_df_val = val_metrics.get_stored_as_df()\n for col in metrics_df_val.columns:\n metrics_df[\"{}_val\".format(col)] = metrics_df_val[col]\n return metrics_df\n\n def _train_minimal(self, df_dict, progress_bar=False):\n \"\"\"Execute minimal model training procedure for a configured number of epochs.\n\n Parameters\n ----------\n df_dict : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all 
data\n\n Returns\n -------\n None\n \"\"\"\n loader = self._init_train_loader(df_dict)\n if progress_bar:\n training_loop = tqdm(\n range(self.config_train.epochs),\n total=self.config_train.epochs,\n leave=log.getEffectiveLevel() <= 20,\n )\n else:\n training_loop = range(self.config_train.epochs)\n for e in training_loop:\n if progress_bar:\n training_loop.set_description(f\"Epoch[{(e+1)}/{self.config_train.epochs}]\")\n _ = self._train_epoch(e, loader)\n\n def _eval_true_ar(self):\n assert self.max_lags > 0\n if self.highlight_forecast_step_n is None:\n if self.max_lags > 1:\n raise ValueError(\"Please define forecast_lag for sTPE computation\")\n forecast_pos = 1\n else:\n forecast_pos = self.highlight_forecast_step_n\n weights = self.model.ar_weights.detach().numpy()\n weights = weights[forecast_pos - 1, :][::-1]\n sTPE = utils.symmetric_total_percentage_error(self.true_ar_weights, weights)\n log.info(\"AR parameters: \", self.true_ar_weights, \"\\n\", \"Model weights: \", weights)\n return sTPE\n\n def _evaluate(self, loader):\n \"\"\"Evaluates model performance.\n\n Parameters\n ----------\n loader : torch DataLoader\n instantiated Validation Dataloader (with TimeDataset)\n\n Returns\n -------\n pd.DataFrame\n evaluation metrics\n \"\"\"\n val_metrics = metrics.MetricsCollection([m.new() for m in self.metrics.batch_metrics])\n if self.highlight_forecast_step_n is not None:\n val_metrics.add_specific_target(target_pos=self.highlight_forecast_step_n - 1)\n ## Run\n val_metrics_dict = self._evaluate_epoch(loader, val_metrics)\n\n if self.true_ar_weights is not None:\n val_metrics_dict[\"sTPE\"] = self._eval_true_ar()\n log.info(\"Validation metrics: {}\".format(utils.print_epoch_metrics(val_metrics_dict)))\n val_metrics_df = val_metrics.get_stored_as_df()\n return val_metrics_df\n\n def _make_future_dataframe(self, df, events_df, regressors_df, periods, n_historic_predictions):\n if periods == 0 and n_historic_predictions is True:\n log.warning(\n \"Not 
extending df into future as no periods specified.\" \"You can call predict directly instead.\"\n )\n df = df.copy(deep=True)\n _ = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=self.data_freq)\n last_date = pd.to_datetime(df[\"ds\"].copy(deep=True).dropna()).sort_values().max()\n if events_df is not None:\n events_df = events_df.copy(deep=True).reset_index(drop=True)\n if regressors_df is not None:\n regressors_df = regressors_df.copy(deep=True).reset_index(drop=True)\n if periods is None:\n periods = 1 if self.max_lags == 0 else self.n_forecasts\n else:\n assert periods >= 0\n\n if isinstance(n_historic_predictions, bool):\n if n_historic_predictions:\n n_historic_predictions = len(df) - self.max_lags\n else:\n n_historic_predictions = 0\n elif not isinstance(n_historic_predictions, int):\n log.error(\"non-integer value for n_historic_predictions set to zero.\")\n n_historic_predictions = 0\n\n if periods == 0 and n_historic_predictions == 0:\n raise ValueError(\"Set either history or future to contain more than zero values.\")\n\n # check for external regressors known in future\n if self.regressors_config is not None and periods > 0:\n if regressors_df is None:\n raise ValueError(\"Future values of all user specified regressors not provided\")\n else:\n for regressor in self.regressors_config.keys():\n if regressor not in regressors_df.columns:\n raise ValueError(\"Future values of user specified regressor {} not provided\".format(regressor))\n\n if len(df) < self.max_lags:\n raise ValueError(\"Insufficient data for a prediction\")\n elif len(df) < self.max_lags + n_historic_predictions:\n log.warning(\n \"Insufficient data for {} historic forecasts, reduced to {}.\".format(\n n_historic_predictions, len(df) - self.max_lags\n )\n )\n n_historic_predictions = len(df) - self.max_lags\n if (n_historic_predictions + self.max_lags) == 0:\n df = pd.DataFrame(columns=df.columns)\n else:\n df = df[-(self.max_lags + n_historic_predictions) :]\n\n if len(df) > 
0:\n if len(df.columns) == 1 and \"ds\" in df:\n assert self.max_lags == 0\n df = self._check_dataframe(df, check_y=False, exogenous=False)\n else:\n df = self._check_dataframe(df, check_y=self.max_lags > 0, exogenous=True)\n\n # future data\n # check for external events known in future\n if self.events_config is not None and periods > 0 and events_df is None:\n log.warning(\n \"Future values not supplied for user specified events. \"\n \"All events being treated as not occurring in future\"\n )\n\n if self.max_lags > 0:\n if periods > 0 and periods != self.n_forecasts:\n periods = self.n_forecasts\n log.warning(\n \"Number of forecast steps is defined by n_forecasts. \" \"Adjusted to {}.\".format(self.n_forecasts)\n )\n\n if periods > 0:\n future_df = df_utils.make_future_df(\n df_columns=df.columns,\n last_date=last_date,\n periods=periods,\n freq=self.data_freq,\n events_config=self.events_config,\n events_df=events_df,\n regressor_config=self.regressors_config,\n regressors_df=regressors_df,\n )\n if len(df) > 0:\n df = df.append(future_df)\n else:\n df = future_df\n df.reset_index(drop=True, inplace=True)\n return df\n\n def _get_maybe_extend_periods(self, df):\n periods_add = 0\n nan_at_end = 0\n while len(df) > nan_at_end and df[\"y\"].isnull().iloc[-(1 + nan_at_end)]:\n nan_at_end += 1\n if self.max_lags > 0:\n if self.regressors_config is None:\n # if dataframe has already been extended into future,\n # don't extend beyond n_forecasts.\n periods_add = max(0, self.n_forecasts - nan_at_end)\n else:\n # can not extend as we lack future regressor values.\n periods_add = 0\n return periods_add\n\n def _maybe_extend_df(self, df_dict):\n periods_add = {}\n for df_name, df in df_dict.items():\n _ = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=self.data_freq)\n # to get all forecasteable values with df given, maybe extend into future:\n periods_add[df_name] = self._get_maybe_extend_periods(df)\n if periods_add[df_name] > 0:\n # This does not include 
future regressors or events.\n # periods should be 0 if those are configured.\n last_date = pd.to_datetime(df[\"ds\"].copy(deep=True)).sort_values().max()\n future_df = df_utils.make_future_df(\n df_columns=df.columns,\n last_date=last_date,\n periods=periods_add[df_name],\n freq=self.data_freq,\n )\n df = df.append(future_df)\n df.reset_index(drop=True, inplace=True)\n df_dict[df_name] = df\n return df_dict, periods_add\n\n def _prepare_dataframe_to_predict(self, df_dict):\n for df_name, df in df_dict.items():\n df = df.copy(deep=True)\n _ = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=self.data_freq)\n # check if received pre-processed df\n if \"y_scaled\" in df.columns or \"t\" in df.columns:\n raise ValueError(\n \"DataFrame has already been normalized. \" \"Please provide raw dataframe or future dataframe.\"\n )\n # Checks\n if len(df) == 0 or len(df) < self.max_lags:\n raise ValueError(\"Insufficient data to make predictions.\")\n if len(df.columns) == 1 and \"ds\" in df:\n if self.max_lags != 0:\n raise ValueError(\"only datestamps provided but y values needed for auto-regression.\")\n df = self._check_dataframe(df, check_y=False, exogenous=False)\n else:\n df = self._check_dataframe(df, check_y=self.max_lags > 0, exogenous=False)\n # fill in missing nans except for nans at end\n df = self._handle_missing_data(df, freq=self.data_freq, predicting=True)\n df.reset_index(drop=True, inplace=True)\n df_dict[df_name] = df\n return df_dict\n\n def _predict_raw(self, df, df_name, include_components=False):\n \"\"\"Runs the model to make predictions.\n\n Predictions are returned in raw vector format without decomposition.\n Predictions are given on a forecast origin basis, not on a target basis.\n\n Parameters\n ----------\n df : pd.DataFrame, dict\n dataframe or dict of dataframes containing column ``ds``, ``y`` with all data\n df_name : str\n name of the data params from which the current dataframe refers to (only in case of local_normalization)\n 
include_components : bool\n whether to return individual components of forecast\n\n Returns\n -------\n pd.Series\n timestamps referring to the start of the predictions.\n np.array\n array containing the forecasts\n dict[np.array]\n Dictionary of components containing an array of each components contribution to the forecast\n \"\"\"\n if isinstance(df, dict):\n raise ValueError(\"Receiced more than one DataFrame. Use a for loop for many dataframes.\")\n if \"y_scaled\" not in df.columns or \"t\" not in df.columns:\n raise ValueError(\"Received unprepared dataframe to predict. \" \"Please call predict_dataframe_to_predict.\")\n dataset = self._create_dataset(df_dict={df_name: df}, predict_mode=True)\n loader = DataLoader(dataset, batch_size=min(1024, len(df)), shuffle=False, drop_last=False)\n if self.n_forecasts > 1:\n dates = df[\"ds\"].iloc[self.max_lags : -self.n_forecasts + 1]\n else:\n dates = df[\"ds\"].iloc[self.max_lags :]\n predicted_vectors = list()\n component_vectors = None\n\n with torch.no_grad():\n self.model.eval()\n for inputs, _, _ in loader:\n predicted = self.model.forward(inputs)\n predicted_vectors.append(predicted.detach().numpy())\n\n if include_components:\n components = self.model.compute_components(inputs)\n if component_vectors is None:\n component_vectors = {name: [value.detach().numpy()] for name, value in components.items()}\n else:\n for name, value in components.items():\n component_vectors[name].append(value.detach().numpy())\n\n predicted = np.concatenate(predicted_vectors)\n data_params = self.config_normalization.get_data_params(df_name)\n scale_y, shift_y = data_params[\"y\"].scale, data_params[\"y\"].shift\n predicted = predicted * scale_y + shift_y\n\n if include_components:\n components = {name: np.concatenate(value) for name, value in component_vectors.items()}\n for name, value in components.items():\n if \"multiplicative\" in name:\n continue\n elif \"event_\" in name:\n event_name = name.split(\"_\")[1]\n if 
self.events_config is not None and event_name in self.events_config:\n if self.events_config[event_name].mode == \"multiplicative\":\n continue\n elif (\n self.country_holidays_config is not None\n and event_name in self.country_holidays_config.holiday_names\n ):\n if self.country_holidays_config.mode == \"multiplicative\":\n continue\n elif \"season\" in name and self.season_config.mode == \"multiplicative\":\n continue\n\n # scale additive components\n components[name] = value * scale_y\n if \"trend\" in name:\n components[name] += shift_y\n else:\n components = None\n return dates, predicted, components\n\n def _convert_raw_predictions_to_raw_df(self, dates, predicted, components=None):\n \"\"\"Turns forecast-origin-wise predictions into forecast-target-wise predictions.\n\n Parameters\n ----------\n dates : pd.Series\n timestamps referring to the start of the predictions.\n predicted : np.array\n Array containing the forecasts\n components : dict[np.array]\n Dictionary of components containing an array of each components' contribution to the forecast\n\n Returns\n -------\n pd. DataFrame\n columns ``ds``, ``y``, and [``step<i>``]\n\n Note\n ----\n where step<i> refers to the i-step-ahead prediction *made at* this row's datetime.\n e.g. the first forecast step0 is the prediction for this timestamp,\n the step1 is for the timestamp after, ...\n ... step3 is the prediction for 3 steps into the future,\n predicted using information up to (excluding) this datetime.\n \"\"\"\n if isinstance(dates, dict):\n raise ValueError(\"Receiced more than one DataFrame. 
Use a for loop for many dataframes.\")\n predicted_names = [\"step{}\".format(i) for i in range(self.n_forecasts)]\n all_data = predicted\n all_names = predicted_names\n if components is not None:\n for comp_name, comp_data in components.items():\n all_data = np.concatenate((all_data, comp_data), 1)\n all_names += [\"{}{}\".format(comp_name, i) for i in range(self.n_forecasts)]\n\n df_raw = pd.DataFrame(data=all_data, columns=all_names)\n df_raw.insert(0, \"ds\", dates.values)\n return df_raw\n\n def _reshape_raw_predictions_to_forecst_df(self, df, predicted, components):\n \"\"\"Turns forecast-origin-wise predictions into forecast-target-wise predictions.\n\n Parameters\n ----------\n df : pd.DataFrame\n input dataframe\n predicted : np.array\n Array containing the forecasts\n components : dict[np.array]\n Dictionary of components containing an array of each components' contribution to the forecast\n\n Returns\n -------\n pd.DataFrame\n columns ``ds``, ``y``, ``trend`` and [``yhat<i>``]\n\n Note\n ----\n where yhat<i> refers to the i-step-ahead prediction for this row's datetime.\n e.g. yhat3 is the prediction for this datetime, predicted 3 steps ago, \"3 steps old\".\n \"\"\"\n if isinstance(df, dict):\n raise ValueError(\"Receiced more than one DataFrame. 
Use a for loop for many dataframes.\")\n cols = [\"ds\", \"y\"] # cols to keep from df\n df_forecast = pd.concat((df[cols],), axis=1)\n # create a line for each forecast_lag\n # 'yhat<i>' is the forecast for 'y' at 'ds' from i steps ago.\n for forecast_lag in range(1, self.n_forecasts + 1):\n forecast = predicted[:, forecast_lag - 1]\n pad_before = self.max_lags + forecast_lag - 1\n pad_after = self.n_forecasts - forecast_lag\n yhat = np.concatenate(([None] * pad_before, forecast, [None] * pad_after))\n df_forecast[\"yhat{}\".format(forecast_lag)] = yhat\n df_forecast[\"residual{}\".format(forecast_lag)] = yhat - df_forecast[\"y\"]\n if components is None:\n return df_forecast\n\n # else add components\n lagged_components = [\n \"ar\",\n ]\n if self.config_covar is not None:\n for name in self.config_covar.keys():\n lagged_components.append(\"lagged_regressor_{}\".format(name))\n for comp in lagged_components:\n if comp in components:\n for forecast_lag in range(1, self.n_forecasts + 1):\n forecast = components[comp][:, forecast_lag - 1]\n pad_before = self.max_lags + forecast_lag - 1\n pad_after = self.n_forecasts - forecast_lag\n yhat = np.concatenate(([None] * pad_before, forecast, [None] * pad_after))\n df_forecast[\"{}{}\".format(comp, forecast_lag)] = yhat\n\n # only for non-lagged components\n for comp in components:\n if comp not in lagged_components:\n forecast_0 = components[comp][0, :]\n forecast_rest = components[comp][1:, self.n_forecasts - 1]\n yhat = np.concatenate(([None] * self.max_lags, forecast_0, forecast_rest))\n df_forecast[comp] = yhat\n return df_forecast\n" ]
[ [ "pandas.concat", "numpy.expand_dims", "numpy.log", "pandas.Series", "torch.zeros", "torch.utils.data.DataLoader", "torch.zeros_like", "pandas.DataFrame", "torch.sum", "numpy.concatenate", "torch.no_grad", "torch.ones_like", "torch.cos" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
ActuallyRuben/home-assistant
[ "b09f5b67436d8db44825d146b78ddce391d4469c" ]
[ "homeassistant/components/iqvia/sensor.py" ]
[ "\"\"\"Support for IQVIA sensors.\"\"\"\nimport logging\nfrom statistics import mean\n\nimport numpy as np\n\nfrom homeassistant.components.iqvia import (\n DATA_CLIENT, DOMAIN, SENSORS, TYPE_ALLERGY_FORECAST, TYPE_ALLERGY_OUTLOOK,\n TYPE_ALLERGY_INDEX, TYPE_ALLERGY_TODAY, TYPE_ALLERGY_TOMORROW,\n TYPE_ASTHMA_FORECAST, TYPE_ASTHMA_INDEX, TYPE_ASTHMA_TODAY,\n TYPE_ASTHMA_TOMORROW, TYPE_DISEASE_FORECAST, IQVIAEntity)\nfrom homeassistant.const import ATTR_STATE\n\n_LOGGER = logging.getLogger(__name__)\n\nATTR_ALLERGEN_AMOUNT = 'allergen_amount'\nATTR_ALLERGEN_GENUS = 'allergen_genus'\nATTR_ALLERGEN_NAME = 'allergen_name'\nATTR_ALLERGEN_TYPE = 'allergen_type'\nATTR_CITY = 'city'\nATTR_OUTLOOK = 'outlook'\nATTR_RATING = 'rating'\nATTR_SEASON = 'season'\nATTR_TREND = 'trend'\nATTR_ZIP_CODE = 'zip_code'\n\nRATING_MAPPING = [{\n 'label': 'Low',\n 'minimum': 0.0,\n 'maximum': 2.4\n}, {\n 'label': 'Low/Medium',\n 'minimum': 2.5,\n 'maximum': 4.8\n}, {\n 'label': 'Medium',\n 'minimum': 4.9,\n 'maximum': 7.2\n}, {\n 'label': 'Medium/High',\n 'minimum': 7.3,\n 'maximum': 9.6\n}, {\n 'label': 'High',\n 'minimum': 9.7,\n 'maximum': 12\n}]\n\nTREND_FLAT = 'Flat'\nTREND_INCREASING = 'Increasing'\nTREND_SUBSIDING = 'Subsiding'\n\n\nasync def async_setup_platform(\n hass, config, async_add_entities, discovery_info=None):\n \"\"\"Configure the platform and add the sensors.\"\"\"\n iqvia = hass.data[DOMAIN][DATA_CLIENT]\n\n sensor_class_mapping = {\n TYPE_ALLERGY_FORECAST: ForecastSensor,\n TYPE_ALLERGY_TODAY: IndexSensor,\n TYPE_ALLERGY_TOMORROW: IndexSensor,\n TYPE_ASTHMA_FORECAST: ForecastSensor,\n TYPE_ASTHMA_TODAY: IndexSensor,\n TYPE_ASTHMA_TOMORROW: IndexSensor,\n TYPE_DISEASE_FORECAST: ForecastSensor,\n }\n\n sensors = []\n for sensor_type in iqvia.sensor_types:\n klass = sensor_class_mapping[sensor_type]\n name, icon = SENSORS[sensor_type]\n sensors.append(klass(iqvia, sensor_type, name, icon, iqvia.zip_code))\n\n async_add_entities(sensors, True)\n\n\ndef 
calculate_trend(indices):\n \"\"\"Calculate the \"moving average\" of a set of indices.\"\"\"\n index_range = np.arange(0, len(indices))\n index_array = np.array(indices)\n linear_fit = np.polyfit(index_range, index_array, 1)\n slope = round(linear_fit[0], 2)\n\n if slope > 0:\n return TREND_INCREASING\n\n if slope < 0:\n return TREND_SUBSIDING\n\n return TREND_FLAT\n\n\nclass ForecastSensor(IQVIAEntity):\n \"\"\"Define sensor related to forecast data.\"\"\"\n\n async def async_update(self):\n \"\"\"Update the sensor.\"\"\"\n if not self._iqvia.data:\n return\n\n data = self._iqvia.data[self._type].get('Location')\n if not data:\n return\n\n indices = [p['Index'] for p in data['periods']]\n average = round(mean(indices), 1)\n [rating] = [\n i['label'] for i in RATING_MAPPING\n if i['minimum'] <= average <= i['maximum']\n ]\n\n self._attrs.update({\n ATTR_CITY: data['City'].title(),\n ATTR_RATING: rating,\n ATTR_STATE: data['State'],\n ATTR_TREND: calculate_trend(indices),\n ATTR_ZIP_CODE: data['ZIP']\n })\n\n if self._type == TYPE_ALLERGY_FORECAST:\n outlook = self._iqvia.data[TYPE_ALLERGY_OUTLOOK]\n self._attrs[ATTR_OUTLOOK] = outlook.get('Outlook')\n self._attrs[ATTR_SEASON] = outlook.get('Season')\n\n self._state = average\n\n\nclass IndexSensor(IQVIAEntity):\n \"\"\"Define sensor related to indices.\"\"\"\n\n async def async_update(self):\n \"\"\"Update the sensor.\"\"\"\n if not self._iqvia.data:\n return\n\n data = {}\n if self._type in (TYPE_ALLERGY_TODAY, TYPE_ALLERGY_TOMORROW):\n data = self._iqvia.data[TYPE_ALLERGY_INDEX].get('Location')\n elif self._type in (TYPE_ASTHMA_TODAY, TYPE_ASTHMA_TOMORROW):\n data = self._iqvia.data[TYPE_ASTHMA_INDEX].get('Location')\n\n if not data:\n return\n\n key = self._type.split('_')[-1].title()\n [period] = [p for p in data['periods'] if p['Type'] == key]\n [rating] = [\n i['label'] for i in RATING_MAPPING\n if i['minimum'] <= period['Index'] <= i['maximum']\n ]\n\n self._attrs.update({\n ATTR_CITY: 
data['City'].title(),\n ATTR_RATING: rating,\n ATTR_STATE: data['State'],\n ATTR_ZIP_CODE: data['ZIP']\n })\n\n if self._type in (TYPE_ALLERGY_TODAY, TYPE_ALLERGY_TOMORROW):\n for idx, attrs in enumerate(period['Triggers']):\n index = idx + 1\n self._attrs.update({\n '{0}_{1}'.format(ATTR_ALLERGEN_GENUS, index):\n attrs['Genus'],\n '{0}_{1}'.format(ATTR_ALLERGEN_NAME, index):\n attrs['Name'],\n '{0}_{1}'.format(ATTR_ALLERGEN_TYPE, index):\n attrs['PlantType'],\n })\n elif self._type in (TYPE_ASTHMA_TODAY, TYPE_ASTHMA_TOMORROW):\n for idx, attrs in enumerate(period['Triggers']):\n index = idx + 1\n self._attrs.update({\n '{0}_{1}'.format(ATTR_ALLERGEN_NAME, index):\n attrs['Name'],\n '{0}_{1}'.format(ATTR_ALLERGEN_AMOUNT, index):\n attrs['PPM'],\n })\n\n self._state = period['Index']\n" ]
[ [ "numpy.polyfit", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
banne2266/UAV-autopilot-NCTU-2021
[ "1a25d4add2de9659516d045054935e3b6e04d06d", "1a25d4add2de9659516d045054935e3b6e04d06d", "1a25d4add2de9659516d045054935e3b6e04d06d" ]
[ "util.py", "final/evaluation.py", "lab5.py" ]
[ "from cv2 import cv2\r\nimport tello\r\nimport time\r\nimport numpy as np\r\nimport math\r\nfrom enum import Enum\r\n\r\ndef get_coloser(drone, tvec, rvec, go_distance, idx):\r\n up_down = tvec[idx][0][1] + 5\r\n distance = tvec[idx][0][2] - go_distance\r\n left_right = tvec[idx][0][0]\r\n\r\n dst, jaco = cv2.Rodrigues(rvec[idx][0])\r\n z_ = np.array([dst[0][2], dst[1][2], dst[2][2]])\r\n v = np.array([z_[0], 0, z_[2]])\r\n degree = math.atan2(z_[2], z_[0])\r\n degree = -degree * 180 / math.pi\r\n\r\n \r\n if up_down > 10:\r\n drone.move_down(up_down/100)\r\n elif up_down < -10:\r\n drone.move_up(-up_down/100)\r\n elif left_right > 15:\r\n drone.move_right(max(left_right*2/3, 20)/100)\r\n elif left_right < -15:\r\n drone.move_left(max(-left_right*2/3, 20)/100)\r\n\r\n elif degree > 100:\r\n drone.rotate_cw(10)\r\n elif degree < 80:\r\n drone.rotate_ccw(10)\r\n \r\n \r\n\r\n if distance > 0:\r\n print(drone.move_forward(max(distance*2/3, 20)/100))\r\n elif distance < -10:\r\n drone.move_backward(20/100)\r\n\r\n return degree, distance, left_right, up_down\r\n\r\ndef get_lowest_id(markerIds):\r\n idx = 0\r\n min_val = 9999\r\n for i in range(len(markerIds)):\r\n if markerIds[i][0] < min_val:\r\n idx = i\r\n min_val = markerIds[i][0]\r\n return idx\r\n\r\n\r\n\r\ndef pixelsum(frame):\r\n return np.sum(frame, axis = 0), np.sum(frame, axis = 1)", "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\norb_slam = pd.read_csv('KeyFrameTrajectory.txt', sep=' ', names=['frame', 'x', 'y', 'z', 'q_x', 'q_y', 'q_z', 'q_w'])\ncolmap = pd.read_csv('colmap.txt', sep=' ', names=['frame', 'focal_length', 'q_w', 'q_x', 'q_y', 'q_z', 'x', 'y', 'z', 'radial_distortion', '0'])\norb_slam = orb_slam.drop(['q_x', 'q_y', 'q_z', 'q_w'], axis=1)\ncolmap = colmap.drop(['focal_length', 'q_w', 'q_x', 'q_y', 'q_z', 'radial_distortion', '0'], axis=1)\nfor i in range(colmap.shape[0]):\n temp = ''\n for j in range(len(colmap.iloc[i, 0])):\n if colmap.iloc[i, 
0][j].isnumeric():\n temp += colmap.iloc[i, 0][j]\n colmap.iloc[i, 0] = temp\ncolmap['frame'] = colmap['frame'].astype(int)\norb_slam['frame'] = orb_slam['frame'].astype(int)\n\ncolmap = colmap.sort_values(by=['frame']).reset_index(drop=True)\norb_slam = orb_slam.sort_values(by=['frame']).reset_index(drop=True)\n#A*T=B, T=inverse(A)*B \ni = 25\nj = 40\nk = 87\ni0 = orb_slam['frame'][i]\nj0 = orb_slam['frame'][j]\nk0 = orb_slam['frame'][k]\nA = np.matrix([[orb_slam['x'][i], orb_slam['y'][i], orb_slam['z'][i]], [orb_slam['x'][j], orb_slam['y'][j], orb_slam['z'][j]], [orb_slam['x'][k], orb_slam['y'][k], orb_slam['z'][k]]])\nB = np.matrix([[colmap['x'][i0], colmap['y'][i0], colmap['z'][i0]],[colmap['x'][j0], colmap['y'][j0], colmap['z'][j0]],[colmap['x'][k0], colmap['y'][k0], colmap['z'][k0]]])\nT = np.linalg.inv(100*A)*(100*B)\norb_slam_transformed = pd.DataFrame(columns=['x', 'y', 'z'])\nfor i in range(orb_slam.shape[0]):\n a = np.matrix([[orb_slam['x'][i], orb_slam['y'][i], orb_slam['z'][i]]])\n b = pd.DataFrame(a*T,columns=['x', 'y', 'z'])\n orb_slam_transformed = pd.concat([orb_slam_transformed, b], ignore_index=True)\ntotal_error = 0\nj = 0\nfor i in range(orb_slam_transformed.shape[0]):\n frame = orb_slam['frame'][i]\n while colmap['frame'][j] != frame:\n if j+1<colmap.shape[0]:\n j += 1\n else:\n break\n total_error += ((orb_slam_transformed['x'][i]-colmap['x'][j])**2 + (orb_slam_transformed['y'][i]-colmap['y'][j])**2 + (orb_slam_transformed['z'][i]-colmap['z'][j])**2)**0.5\nprint('Total error: %.4f' % total_error)\nprint('Average error: %.4f' % (total_error/orb_slam_transformed.shape[0]))\n\nfig = plt.figure()\nax = fig.add_subplot(projection='3d')\nax.scatter3D(orb_slam_transformed['x'], orb_slam_transformed['y'], orb_slam_transformed['z'], color=[0.5, 0, 0], label='ORB-SLAM2')\nax.scatter3D(colmap['x'], colmap['y'], colmap['z'], color=[0, 0, 0.5], label='COLMAP')\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('z')\nplt.title('ORB-SLAM2 and 
COLMAP')\nplt.legend()\nplt.show()", "from cv2 import cv2\nimport tello\nimport time\nimport numpy as np\n\ndef main():\n drone = tello.Tello('', 8889)\n time.sleep(10)\n chase_count = 0\n chase_image_list = []\n chase_corner_list = []\n objp = np.zeros((9*6, 3), np.float32)\n objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n while(True):\n frame = drone.read()\n imageSize = (frame.shape[0], frame.shape[1])\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n ret, corner = cv2.findChessboardCorners(frame, (9,6), None)\n if ret == True:\n #print('detect')\n chase_count += 1\n cv2.cornerSubPix(frame, corner, (11,11), (-1,-1), criteria)\n chase_image_list.append(objp)\n chase_corner_list.append(corner)\n cv2.waitKey(500)\n \n if chase_count > 10:\n retval, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(chase_image_list, chase_corner_list, imageSize, None, None)\n break\n\n cv2.imshow('frame', frame)\n cv2.waitKey(33)\n \n \n time.sleep(2)\n dictionary = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)\n parameters = cv2.aruco.DetectorParameters_create()\n\n intrinsic = cameraMatrix\n distortion = distCoeffs\n \n while(True):\n frame = drone.read()\n imageSize = (frame.shape[0], frame.shape[1])\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n markerCorners, markerIds, rejectedCandidates = cv2.aruco.detectMarkers(frame, dictionary, parameters=parameters)\n if len(markerCorners) > 0:\n frame = cv2.aruco.drawDetectedMarkers(frame, markerCorners, markerIds)\n\n rvec, tvec, _objPoints = cv2.aruco.estimatePoseSingleMarkers(markerCorners, 15, intrinsic, distortion)\n frame = cv2.aruco.drawAxis(frame, intrinsic, distortion, rvec, tvec, 0.1)\n\n text = 'x = ' + str(tvec[0]) + 'y = ' + str(tvec[1]) + 'z = ' + str(tvec[2])\n\n cv2.putText(frame, str(tvec), (10, 40), cv2.FONT_HERSHEY_PLAIN,1, (0, 255, 255), 1, cv2.LINE_AA)\n\n 
cv2.imshow('frame', frame)\n cv2.waitKey(33)\n\n if key!= -1:\n drone.keyboard(key)\n cv2.destroyAllWindows()\n \n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.array", "numpy.sum" ], [ "numpy.matrix", "matplotlib.pyplot.legend", "pandas.concat", "pandas.read_csv", "matplotlib.pyplot.title", "numpy.linalg.inv", "pandas.DataFrame", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wd15/graspi
[ "4319cad2d5490903998094cdee85f039f70a4ff6" ]
[ "setupGraspiCython.py" ]
[ "from setuptools import Extension, setup\nfrom Cython.Build import cythonize\n\nimport numpy\n\nsourcefiles = ['cythonizeGraspi/graspi.pyx', 'src/graph_constructors.cpp']\n\nextensions = [\n Extension('graspi', sourcefiles,\n include_dirs=[numpy.get_include(), '/Users/owodo/Packages/boost_1_72_0', 'src'],\n extra_compile_args=['-std=c++11'],\n language='c++'\n ),\n ]\n\nsetup(\n ext_modules=cythonize(extensions,compiler_directives={'language_level' : \"3\"}),\n # extra_compile_args=['-Wunused-variable']\n )\n" ]
[ [ "numpy.get_include" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lukasz-tuz/kids-control-panel
[ "2f04087b198aa24d4039552fc61bbb4e4788a2f8" ]
[ "control-panel/conversions.py" ]
[ "# uint32_t RgbLed:: rectToRGB(float x, float y)\n# {\n# auto cval = [](float theta, float ro, float phase) {\n# float val = sin(0.6 * theta - phase)\n# if (val < 0)\n# val = 0\n# return val\n# }\n# float theta = atan2(y, x) * RAD_TO_DEG\n# float ro = sqrt(x * x + y * y)\n\n# float r = cval(theta, ro, -PI / 2)\n# float g = cval(theta, ro, 0)\n# float b = cval(theta, ro, PI / 2)\n# }\n\nimport math\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef cval(theta, ro, phase, neg_phase=math.pi):\n val = math.sin(0.666 * theta - phase)\n if val < 0:\n val = math.sin(0.666 * theta - neg_phase)\n\n return max(val, 0)\n\n\ndef rect2rgb(theta, ro):\n # theta = math.degrees(math.atan2(y, x))\n # ro = math.sqrt(x*x + y*y)\n\n r=cval(theta, ro, -math.pi/2)\n g=cval(theta, ro, 0, 3*math.pi/2)\n b=cval(theta, ro, math.pi/2, 5*math.pi/2)\n\n return [r, g, b]\n\nangles=[]\nred=[]\ngreen=[]\nblue=[]\n\n# for x in np.arange(-1, 1, 0.01):\n# for y in np.arange(-1, 1, 0.01):\nfor theta in np.arange(0, 4*math.pi, 0.01):\n r, g, b=rect2rgb(theta, 1)\n angles.append(theta)\n red.append(r)\n green.append(g)\n blue.append(b)\n\nplt.plot(angles, red, 'r-', angles, green, 'g-', angles, blue, 'b-')\nplt.savefig('graph.png')\n" ]
[ [ "matplotlib.pyplot.plot", "numpy.arange", "matplotlib.pyplot.savefig" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
martiansideofthemoon/language
[ "2aca4d197f48a96e79aac36c8b5a643b14204469", "2aca4d197f48a96e79aac36c8b5a643b14204469", "2aca4d197f48a96e79aac36c8b5a643b14204469", "2aca4d197f48a96e79aac36c8b5a643b14204469" ]
[ "language/conpono/cpc/run_cpc.py", "language/nql/util_test.py", "language/conpono/evals/run_squad.py", "language/conpono/cpc/model_builder.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT next sentence prediction / binary coherence finetuning runner.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\n\nfrom bert import modeling\nfrom bert import optimization\nfrom bert import tokenization\nfrom language.conpono.cpc import model_builder\nimport tensorflow as tf\n\n\nfrom tensorflow.contrib import cluster_resolver as contrib_cluster_resolver\nfrom tensorflow.contrib import tpu as contrib_tpu\nfrom tensorflow.contrib import training as contrib_training\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"eval_file\", None,\n \"The input data. Should be in tfrecord format ready to input to BERT.\")\n\nflags.DEFINE_string(\n \"train_file\", None,\n \"The input data. Should be in tfrecord format ready to input to BERT.\")\n\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. 
\"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"vocab_file\", None,\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\n\nflags.DEFINE_integer(\"num_choices\", 32, \"Number of negative samples + 1\")\n\nflags.DEFINE_bool(\"add_lv2loss\", False, \"Whether to use the level 2 loss.\")\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 128,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_eval\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"eval_batch_size\", 32, \"Total batch size for eval.\")\n\nflags.DEFINE_integer(\"train_data_size\", 10000, \"The number of examples in the\"\n \"training data\")\n\nflags.DEFINE_integer(\"eval_data_size\", -1, \"The number of examples in the\"\n \"validation data\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8, \"Total batch size for predict.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\n \"warmup_proportion\", 0.1,\n \"Proportion of training to perform linear learning rate warmup for. 
\"\n \"E.g., 0.1 = 10% of training.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 10000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\nflags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\nflags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\nflags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\nflags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. 
Total number of TPU cores to use.\")\n\n_SEP_TOKEN = \"[SEP]\"\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n input_ids,\n input_mask,\n segment_ids,\n label_id,\n is_real_example=True):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.label_id = label_id\n self.is_real_example = is_real_example\n\n\ndef file_based_input_fn_builder(input_file, seq_length, is_training,\n drop_remainder, num_choices):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n input_file = input_file.split(\",\")\n\n expanded_files = []\n for infile in input_file:\n try:\n sharded_files = tf.io.gfile.glob(infile)\n expanded_files.append(sharded_files)\n except tf.errors.OpError:\n expanded_files.append(infile)\n\n name_to_features = {}\n for i in range(50):\n name_to_features[\"input_ids\" + str(i)] = tf.FixedLenFeature([seq_length],\n tf.int64)\n name_to_features[\"input_mask\" + str(i)] = tf.FixedLenFeature([seq_length],\n tf.int64)\n name_to_features[\"segment_ids\" + str(i)] = tf.FixedLenFeature([seq_length],\n tf.int64)\n name_to_features[\"label_types\"] = tf.FixedLenFeature([4], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # total of 32 examples\n # 4 labels / shuffled\n # random samples from 23 + 23 last for distractors\n num_sampled = 14\n same_doc_idxs = tf.random.shuffle(tf.range(4, 27))[:num_sampled]\n rand_doc_idxs = tf.random.shuffle(tf.range(27, 50))[:num_sampled]\n batch_indexes = tf.concat([tf.range(4), same_doc_idxs, rand_doc_idxs],\n axis=0)\n batch_indexes = tf.random.shuffle(batch_indexes)\n # At this point, we have shuffled the indexes and sampled them such that\n # we still have the index of 4 targets, 14 sampled from the same doc\n # and 14 sampled from different docs. 
But these are just indexes.\n\n # Here we need to grab the inputs according to the indexes above\n # We stack all the inputs so we can gather on the matrix\n input_id_stack, input_mask_stack, segment_id_stack = [], [], []\n for i in range(50):\n input_id_stack.append(example[\"input_ids\" + str(i)])\n input_mask_stack.append(example[\"input_mask\" + str(i)])\n segment_id_stack.append(example[\"segment_ids\" + str(i)])\n input_id_stack = tf.stack(input_id_stack)\n input_mask_stack = tf.stack(input_mask_stack)\n segment_id_stack = tf.stack(segment_id_stack)\n\n input_ids = tf.gather(input_id_stack, batch_indexes)\n input_masks = tf.gather(input_mask_stack, batch_indexes)\n segment_ids = tf.gather(segment_id_stack, batch_indexes)\n\n # Note that we override the name of the input (eg. input_ids5)\n # So we replace the input with the shuffled and sampled input\n # We only set num_choices of them since those will be used.\n for i in range(num_choices):\n example[\"input_ids\" + str(i)] = input_ids[i]\n example[\"input_mask\" + str(i)] = input_masks[i]\n example[\"segment_ids\" + str(i)] = segment_ids[i]\n\n # Note that for inputs num_choices-50 will not be used so we must purge them\n for i in range(num_choices, 50):\n del example[\"input_ids\" + str(i)]\n del example[\"input_mask\" + str(i)]\n del example[\"segment_ids\" + str(i)]\n\n label_idx = []\n for i in range(4):\n label_idx.append(tf.where(tf.equal(batch_indexes, tf.constant(i)))[0])\n label_idx = tf.reshape(tf.concat(label_idx, axis=0), [-1])\n label_idx = tf.scatter_nd(\n tf.reshape(example[\"label_types\"], [4, 1]), label_idx, [8])\n example[\"labels\"] = label_idx\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = 
params[\"batch_size\"]\n\n if len(expanded_files) == 1:\n d = tf.data.TFRecordDataset(expanded_files[0])\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=256)\n else:\n dataset_list = [\n tf.data.TFRecordDataset(expanded_files[i])\n for i in range(len(expanded_files))\n ]\n if is_training:\n dataset_list = [d.repeat() for d in dataset_list]\n wiki_pct = 0.02222\n dset_weights = [wiki_pct, 1 - wiki_pct]\n d = tf.data.experimental.sample_from_datasets(dataset_list, dset_weights)\n # choice_dataset = tf.data.Dataset.range(len(dataset_list)).repeat()\n\n # d = tf.data.experimental.choose_from_datasets(dataset_list,\n # choice_dataset)\n if is_training:\n d = d.shuffle(buffer_size=256)\n\n d = d.apply(\n tf.data.experimental.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\ndef model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings, num_choices):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = [features[\"input_ids\" + str(i)] for i in range(num_choices)]\n input_mask = [features[\"input_mask\" + str(i)] for i in range(num_choices)]\n segment_ids = [features[\"segment_ids\" + str(i)] for i in range(num_choices)]\n label_ids = features[\"labels\"]\n label_types = features[\"label_types\"]\n\n seq_length = input_ids[0].shape[-1]\n input_ids = tf.reshape(tf.stack(input_ids, axis=1), [-1, seq_length])\n input_mask = tf.reshape(tf.stack(input_mask, axis=1), [-1, seq_length])\n segment_ids = tf.reshape(tf.stack(segment_ids, axis=1), [-1, seq_length])\n\n 
is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n is_real_example = tf.reduce_sum(tf.one_hot(label_types, 8), axis=1)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n (total_loss, per_example_loss, logits,\n probabilities) = model_builder.create_model(\n model, label_ids, label_types,\n FLAGS.train_batch_size if is_training else FLAGS.eval_batch_size,\n num_choices, use_tpu, FLAGS.add_lv2loss)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(total_loss, learning_rate,\n num_train_steps,\n num_warmup_steps, use_tpu)\n\n output_spec = contrib_tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, logits, is_real_example):\n \"\"\"Collect metrics for function.\"\"\"\n\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions, weights=is_real_example)\n loss = tf.metrics.mean(values=per_example_loss, 
weights=is_real_example)\n metric_dict = {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n for i in range(8):\n metric_dict[\"acc\" + str(i)] = tf.metrics.accuracy(\n labels=label_ids[:, i],\n predictions=predictions[:, i],\n weights=is_real_example[:, i])\n return metric_dict\n\n eval_metrics = (metric_fn,\n [per_example_loss, label_ids, logits, is_real_example])\n output_spec = contrib_tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metrics=eval_metrics,\n scaffold_fn=scaffold_fn)\n else:\n output_spec = contrib_tpu.TPUEstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n scaffold_fn=scaffold_fn)\n return output_spec\n\n return model_fn\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_eval:\n raise ValueError(\"At least one of `do_train`, `do_eval` must be True.\")\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2\n run_config = contrib_tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=contrib_tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n num_train_steps = None\n 
num_warmup_steps = None\n if FLAGS.do_train:\n num_train_steps = int(FLAGS.train_data_size / FLAGS.train_batch_size)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu,\n num_choices=FLAGS.num_choices)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = contrib_tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n\n if FLAGS.do_train:\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n train_input_fn = file_based_input_fn_builder(\n input_file=FLAGS.train_file,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True,\n num_choices=FLAGS.num_choices)\n estimator.train(input_fn=train_input_fn, steps=num_train_steps)\n\n if FLAGS.do_eval:\n # This tells the estimator to run through the entire set.\n if FLAGS.eval_data_size < 0:\n eval_steps = None\n else:\n eval_steps = int(FLAGS.eval_data_size / FLAGS.eval_batch_size)\n\n eval_drop_remainder = True if FLAGS.use_tpu else False\n eval_input_fn = file_based_input_fn_builder(\n input_file=FLAGS.eval_file,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=eval_drop_remainder,\n num_choices=FLAGS.num_choices)\n\n # checkpoints_iterator blocks until a new checkpoint appears.\n for ckpt in contrib_training.checkpoints_iterator(estimator.model_dir):\n try:\n result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)\n tf.logging.info(\"********** Eval 
results:*******\\n\")\n for key in sorted(result.keys()):\n tf.logging.info(\"%s = %s\" % (key, str(result[key])))\n except tf.errors.NotFoundError:\n tf.logging.error(\"Checkpoint path '%s' no longer exists.\", ckpt)\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"eval_file\")\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n app.run(main)\n", "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for util.\"\"\"\n\nimport os\nimport tempfile\n\nfrom language.nql import dataset\nfrom language.nql import nql\nfrom language.nql import util\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\n\ndef tabline(s):\n return \"\\t\".join(s.split(\" \")) + \"\\n\"\n\n\nTRIPPY_KG_LINES = [\n tabline(\"feature t1 purple\"),\n tabline(\"feature t1 green\"),\n tabline(\"feature t1 red\"),\n tabline(\"feature t2 purple\"),\n tabline(\"feature t2 red\"),\n tabline(\"feature t3 red\"),\n tabline(\"feature t3 black\"),\n tabline(\"feature b1 black\"),\n tabline(\"feature b1 tan\"),\n tabline(\"feature b2 white\"),\n tabline(\"feature b2 grey\"),\n tabline(\"feature b3 black\"),\n tabline(\"feature b3 white\"),\n tabline(\"feature b3 tan\"),\n tabline(\"feature u1 purple\"),\n tabline(\"feature t1 green\"),\n tabline(\"feature u2 green\"),\n tabline(\"feature t2 red\"),\n tabline(\"feature 
c1 black\"),\n tabline(\"feature b1 grey\"),\n tabline(\"feature c2 tan\"),\n tabline(\"feature c2 grey\")\n]\n\nTRAIN_DATA_LINES = [\n \"t1|trippy\", \"t2|trippy\", \"t3|trippy\", \"b1|boring\", \"b2|boring\", \"b3|boring\"\n]\n\nTEST_DATA_LINES = [\"u1|trippy\", \"u2|trippy\", \"c1|boring\", \"c2|boring\"]\n\n\ndef simple_tf_dataset(context,\n tuple_input,\n x_type,\n y_type,\n normalize_outputs=False,\n batch_size=1,\n shuffle_buffer_size=1000,\n feature_key=None,\n field_separator=\"\\t\"):\n \"\"\"A dataset with just two columns, x and y.\n\n Args:\n context: a NeuralQueryContext\n tuple_input: passed to util.tuple_dataset\n x_type: type of entities x\n y_type: type of entities y1,...,yk\n normalize_outputs: make the encoding of {y1,...,yk} sum to 1\n batch_size: size of minibatches\n shuffle_buffer_size: if zero, do not shuffle the dataset. Otherwise, this is\n passed in as argument to shuffle\n feature_key: if not None, wrap the x part of the minibatch in a dictionary\n with the given key\n field_separator: passed in to dataset.tuple_dataset\n\n Returns:\n a tf.data.Dataset formed by wrapping the generator\n \"\"\"\n dset = dataset.tuple_dataset(\n context,\n tuple_input, [x_type, y_type],\n normalize_outputs=normalize_outputs,\n field_separator=field_separator)\n if shuffle_buffer_size > 0:\n dset = dset.shuffle(shuffle_buffer_size)\n dset = dset.batch(batch_size)\n if feature_key is None:\n return dset\n else:\n wrap_x_in_dict = lambda x, y: ({feature_key: x}, y)\n return dset.map(wrap_x_in_dict)\n\n\nclass TrippyBuilder(util.ModelBuilder):\n\n def config_context(self, context, params=None):\n context.declare_relation(\"feature\", \"instance_t\", \"feature_t\")\n context.declare_relation(\n \"indicates\", \"feature_t\", \"label_t\", trainable=True)\n context.extend_type(\"label_t\", [\"trippy\", \"boring\"])\n context.load_kg(lines=TRIPPY_KG_LINES)\n context.set_initial_value(\n \"indicates\", np.ones(context.get_shape(\"indicates\"), 
dtype=\"float32\"))\n\n def config_model_prediction(self, model, feature_ph_dict, params=None):\n model.x = model.context.as_nql(feature_ph_dict[\"x\"], \"instance_t\")\n model.score = model.x.feature().indicates()\n model.predicted_y = model.score.tf_op(nql.nonneg_softmax)\n model.predictions = {\"y\": model.predicted_y}\n\n def config_model_training(self, model, labels_ph, params=None):\n model.labels = model.context.as_tf(labels_ph)\n model.loss = nql.nonneg_crossentropy(model.predicted_y.tf, model.labels)\n optimizer = tf.train.AdagradOptimizer(1.0)\n model.train_op = optimizer.minimize(\n loss=model.loss, global_step=tf.train.get_global_step())\n\n def config_model_evaluation(self, model, labels_ph, params=None):\n model.accuracy = tf.metrics.accuracy(\n tf.argmax(input=model.labels, axis=1),\n tf.argmax(input=model.predicted_y.tf, axis=1))\n model.top_labels = util.labels_of_top_ranked_predictions_in_batch(\n model.labels, model.predicted_y.tf)\n model.precision_at_one = tf.metrics.mean(model.top_labels)\n model.evaluations = {\n \"accuracy\": model.accuracy,\n \"precision@1\": model.precision_at_one\n }\n\n\nclass BaseTester(tf.test.TestCase):\n\n def setUp(self):\n super(BaseTester, self).setUp()\n self.tmp_dir = tempfile.mkdtemp()\n self.context = TrippyBuilder().build_context()\n\n def make_train_dset(self, num_epochs):\n # need to specify a non-default field separator\n # because tabs are disallowed in test input files\n return simple_tf_dataset(\n self.context,\n TRAIN_DATA_LINES,\n \"instance_t\",\n \"label_t\",\n feature_key=\"x\",\n field_separator=\"|\").repeat(num_epochs)\n\n def make_test_dset(self):\n return simple_tf_dataset(\n self.context,\n TEST_DATA_LINES,\n \"instance_t\",\n \"label_t\",\n shuffle_buffer_size=0,\n feature_key=\"x\",\n field_separator=\"|\")\n\n\nclass TestModelBuilder(BaseTester):\n\n def setUp(self):\n super(TestModelBuilder, self).setUp()\n self.session = tf.Session()\n\n def check_one_hot(self, m, i, typename):\n 
self.assertEqual(m.shape, (self.context.get_max_id(typename),))\n self.assertEqual(np.sum(m), 1.0)\n self.assertEqual(m[i], 1.0)\n\n def test_tf_dataset(self):\n dset1 = simple_tf_dataset(\n self.context,\n TRAIN_DATA_LINES,\n \"instance_t\",\n \"label_t\",\n shuffle_buffer_size=0,\n field_separator=\"|\")\n x, y = self.session.run(\n tf.data.make_one_shot_iterator(dset1).get_next())\n self.check_batch(x, 0, \"instance_t\")\n self.check_batch(y, 0, \"label_t\")\n\n def check_batch(self, m, i, typename):\n self.assertEqual(m.shape, (1, self.context.get_max_id(typename)))\n self.assertEqual(np.sum(m), 1.0)\n self.assertEqual(m[0, i], 1.0)\n\n def test_tf_minibatch_dataset(self):\n dset2 = simple_tf_dataset(\n self.context,\n TRAIN_DATA_LINES,\n \"instance_t\",\n \"label_t\",\n batch_size=2,\n shuffle_buffer_size=0,\n field_separator=\"|\")\n x, y = self.session.run(\n tf.data.make_one_shot_iterator(dset2).get_next())\n # check that this is a minibatch containing the first two instances\n self.assertEqual(x.shape[0], 2)\n self.assertEqual(y.shape[0], 2)\n self.assertEqual(x.shape[1], self.context.get_max_id(\"instance_t\"))\n self.assertEqual(y.shape[1], self.context.get_max_id(\"label_t\"))\n self.assertEqual(np.sum(x), 2.0)\n self.assertEqual(np.sum(y), 2.0)\n self.assertEqual(x[0, 0], 1.0)\n self.assertEqual(x[1, 1], 1.0)\n # both of the first two instances are negative\n self.assertEqual(y[0, 0], 1.0)\n self.assertEqual(y[1, 0], 1.0)\n\n def test_ph_learn(self):\n\n # build model\n feature_ph_dict = {\"x\": self.context.placeholder(\"x\", \"instance_t\")}\n labels_ph = self.context.placeholder(\"y\", \"label_t\")\n builder = TrippyBuilder()\n model = builder.build_model(feature_ph_dict, labels_ph)\n trainer = util.Trainer(self.session, model, feature_ph_dict, labels_ph)\n\n # train\n trainer.train(self.make_train_dset(5))\n\n # check the model fits the train data\n evaluation = trainer.evaluate(self.make_train_dset(1))\n self.assertEqual(evaluation[\"accuracy\"], 
1.0)\n self.assertEqual(evaluation[\"precision@1\"], 1.0)\n\n # try running the model on something\n for inst_name in [\"u1\", \"u2\", \"c1\", \"c2\"]:\n x = model.context.one_hot_numpy_array(inst_name, \"instance_t\")\n x_ph = feature_ph_dict[\"x\"]\n fd = {x_ph.name: x}\n y_dict = model.predicted_y.eval(self.session, feed_dict=fd)\n # the u's are class trippy\n if inst_name[0] == \"u\":\n self.assertGreater(y_dict[\"trippy\"], y_dict[\"boring\"])\n # the c's are class boring but c1 is hard to get\n elif inst_name == \"c2\":\n self.assertLess(y_dict[\"trippy\"], y_dict[\"boring\"])\n\n # test the model\n evaluation = trainer.evaluate(self.make_test_dset())\n self.assertGreaterEqual(evaluation[\"accuracy\"], 0.7)\n self.assertGreaterEqual(evaluation[\"precision@1\"], 0.7)\n\n # test callback\n cb_model = builder.build_model(feature_ph_dict, labels_ph)\n cb_model.loss_history = []\n\n def my_callback(fd, loss, secs):\n del fd, secs # unused\n cb_model.loss_history.append(loss)\n return None\n\n cb_model.training_callback = my_callback\n with tf.Session() as session:\n cb_trainer = util.Trainer(session, cb_model, feature_ph_dict, labels_ph)\n cb_trainer.train(self.make_train_dset(5))\n self.assertEqual(len(cb_model.loss_history), 30)\n self.assertLess(cb_model.loss_history[-1], 0.05)\n\n def test_estimator_learn(self):\n\n def train_input_fn():\n return self.make_train_dset(5)\n\n def test_input_fn():\n return self.make_test_dset()\n\n estimator = TrippyBuilder().build_estimator()\n estimator.train(input_fn=train_input_fn)\n evaluation = estimator.evaluate(input_fn=train_input_fn)\n self.assertEqual(evaluation[\"accuracy\"], 1.0)\n self.assertEqual(evaluation[\"global_step\"], 30)\n evaluation = estimator.evaluate(input_fn=test_input_fn)\n self.assertGreater(evaluation[\"accuracy\"], 0.7)\n self.assertGreaterEqual(evaluation[\"precision@1\"], 0.7)\n\n\nclass TestSaveRestore(BaseTester):\n\n def setUp(self):\n super(TestSaveRestore, self).setUp()\n tmp_dir = 
tempfile.mkdtemp(\"util_test\")\n self.checkpoint_location_a = os.path.join(tmp_dir, \"trippy.ckpt\")\n self.checkpoint_location_b = os.path.join(tmp_dir, \"trippy2.ckpt\")\n\n def test_est(self):\n\n def train_input_fn():\n return self.make_train_dset(5)\n\n def test_input_fn():\n return self.make_test_dset()\n\n estimator = TrippyBuilder().build_estimator(\n model_dir=self.checkpoint_location_a)\n estimator.train(input_fn=train_input_fn)\n evaluation = estimator.evaluate(input_fn=test_input_fn)\n self.assertGreater(evaluation[\"accuracy\"], 0.7)\n self.assertGreaterEqual(evaluation[\"precision@1\"], 0.7)\n\n def test_ph(self):\n\n def try_model_on_test_instances(model, sess, feature_ph_dict):\n trial = {}\n for inst_name in [\"u1\", \"u2\", \"c1\", \"c2\"]:\n x = model.context.one_hot_numpy_array(inst_name, \"instance_t\")\n x_ph = feature_ph_dict[\"x\"]\n fd = {x_ph.name: x}\n y_dict = model.predicted_y.eval(sess, feed_dict=fd)\n trial[inst_name] = y_dict[\"boring\"]\n return trial\n\n # Train and save.\n with tf.Graph().as_default():\n with tf.Session() as sess1:\n builder1 = TrippyBuilder()\n context1 = builder1.build_context()\n feature_ph_dict1 = {\"x\": context1.placeholder(\"x\", \"instance_t\")}\n labels_ph1 = context1.placeholder(\"y\", \"label_t\")\n model1 = builder1.build_model(feature_ph_dict1, labels_ph1)\n\n trainer1 = util.Trainer(sess1, model1, feature_ph_dict1, labels_ph1)\n trainer1.train(self.make_train_dset(5))\n trial1a = try_model_on_test_instances(model1, sess1, feature_ph_dict1)\n saver1 = tf.train.Saver()\n saver1.save(sess1, self.checkpoint_location_a)\n\n # Restore, evaluate, train, and save.\n with tf.Graph().as_default():\n with tf.Session() as sess2:\n builder2 = TrippyBuilder()\n context2 = builder2.build_context()\n feature_ph_dict2 = {\"x\": context2.placeholder(\"x\", \"instance_t\")}\n labels_ph2 = context2.placeholder(\"y\", \"label_t\")\n model2 = builder2.build_model(feature_ph_dict2, labels_ph2)\n saver2 = 
tf.train.Saver()\n\n trainer2 = util.Trainer(sess2, model2, feature_ph_dict2, labels_ph2)\n saver2.restore(sess2, self.checkpoint_location_a)\n trainer2.evaluate(self.make_test_dset())\n trial2a = try_model_on_test_instances(model2, sess2, feature_ph_dict2)\n self.assertDictEqual(trial1a, trial2a)\n\n trainer2.train(self.make_train_dset(5))\n saver2.save(sess2, self.checkpoint_location_b)\n trial2b = try_model_on_test_instances(model2, sess2, feature_ph_dict2)\n with self.assertRaises(tf.test.TestCase.failureException):\n self.assertDictEqual(trial2a, trial2b)\n\n # Restore and evaluate.\n with tf.Graph().as_default():\n with tf.Session() as sess3:\n builder3 = TrippyBuilder()\n context3 = builder3.build_context()\n feature_ph_dict3 = {\"x\": context3.placeholder(\"x\", \"instance_t\")}\n labels_ph3 = context3.placeholder(\"y\", \"label_t\")\n model3 = builder3.build_model(feature_ph_dict3, labels_ph3)\n saver3 = tf.train.Saver()\n\n trainer3 = util.Trainer(sess3, model3, feature_ph_dict3, labels_ph3)\n saver3.restore(sess3, self.checkpoint_location_b)\n trainer3.evaluate(self.make_test_dset())\n trial3b = try_model_on_test_instances(model3, sess3, feature_ph_dict3)\n self.assertDictEqual(trial2b, trial3b)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Run BERT on SQuAD 1.1 and SQuAD 2.0.\"\"\"\n\nfrom __future__ import 
absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport json\nimport math\nimport os\nimport random\nfrom absl import flags\nfrom bert import modeling\nfrom bert import optimization\nfrom bert import tokenization\nimport six\nimport tensorflow as tf\n\n\nfrom tensorflow.contrib import cluster_resolver as contrib_cluster_resolver\nfrom tensorflow.contrib import tpu as contrib_tpu\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"vocab_file\", None,\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\nflags.DEFINE_string(\"train_file\", None,\n \"SQuAD json for training. E.g., train-v1.1.json\")\n\nflags.DEFINE_string(\n \"predict_file\", None,\n \"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json\")\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 384,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_integer(\n \"doc_stride\", 128,\n \"When splitting up a long document into chunks, how much stride to \"\n \"take between chunks.\")\n\nflags.DEFINE_integer(\n \"max_query_length\", 64,\n \"The maximum number of tokens for the question. 
Questions longer than \"\n \"this will be truncated to this length.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_predict\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8,\n \"Total batch size for predictions.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\"num_train_epochs\", 3.0,\n \"Total number of training epochs to perform.\")\n\nflags.DEFINE_float(\n \"warmup_proportion\", 0.1,\n \"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10% of training.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_integer(\n \"n_best_size\", 20,\n \"The total number of n-best predictions to generate in the \"\n \"nbest_predictions.json output file.\")\n\nflags.DEFINE_integer(\n \"max_answer_length\", 30,\n \"The maximum length of an answer that can be generated. This is needed \"\n \"because the start and end predictions are not conditioned on one another.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. 
If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\nflags.DEFINE_bool(\n \"verbose_logging\", False,\n \"If true, all of the warnings related to data processing will be printed. \"\n \"A number of warnings are expected for a normal SQuAD evaluation.\")\n\nflags.DEFINE_bool(\n \"version_2_with_negative\", False,\n \"If true, the SQuAD examples contain some that do not have an answer.\")\n\nflags.DEFINE_float(\n \"null_score_diff_threshold\", 0.0,\n \"If null_score - best_non_null is greater than the threshold predict null.\")\n\n\nclass SquadExample(object):\n \"\"\"A single training/test example for simple sequence classification.\n\n For examples without an answer, the start and end position are -1.\n \"\"\"\n\n def __init__(self,\n qas_id,\n question_text,\n doc_tokens,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False):\n self.qas_id = qas_id\n self.question_text = question_text\n self.doc_tokens = doc_tokens\n self.orig_answer_text = orig_answer_text\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n s = \"\"\n s += \"qas_id: %s\" % (tokenization.printable_text(self.qas_id))\n s += \", question_text: %s\" % (\n tokenization.printable_text(self.question_text))\n s += \", doc_tokens: [%s]\" % (\" \".join(self.doc_tokens))\n if self.start_position:\n s += \", start_position: %d\" % (self.start_position)\n if self.start_position:\n s += \", end_position: %d\" % (self.end_position)\n if self.start_position:\n s += \", is_impossible: %r\" % (self.is_impossible)\n return s\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features 
of data.\"\"\"\n\n def __init__(self,\n unique_id,\n example_index,\n doc_span_index,\n tokens,\n token_to_orig_map,\n token_is_max_context,\n input_ids,\n input_mask,\n segment_ids,\n start_position=None,\n end_position=None,\n is_impossible=None):\n self.unique_id = unique_id\n self.example_index = example_index\n self.doc_span_index = doc_span_index\n self.tokens = tokens\n self.token_to_orig_map = token_to_orig_map\n self.token_is_max_context = token_is_max_context\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n\ndef read_squad_examples(input_file, is_training):\n \"\"\"Read a SQuAD json file into a list of SquadExample.\"\"\"\n with tf.gfile.Open(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for entry in input_data:\n for paragraph in entry[\"paragraphs\"]:\n paragraph_text = paragraph[\"context\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n for qa in paragraph[\"qas\"]:\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n start_position = None\n end_position = None\n orig_answer_text = None\n is_impossible = False\n if is_training:\n\n if FLAGS.version_2_with_negative:\n is_impossible = qa[\"is_impossible\"]\n if (len(qa[\"answers\"]) != 1) and (not is_impossible):\n raise ValueError(\n \"For training, each question should have exactly 1 answer.\")\n if not is_impossible:\n answer = qa[\"answers\"][0]\n orig_answer_text = answer[\"text\"]\n 
answer_offset = answer[\"answer_start\"]\n answer_length = len(orig_answer_text)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length -\n 1]\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example.\n #\n # Note that this means for training mode, every example is NOT\n # guaranteed to be preserved.\n actual_text = \" \".join(doc_tokens[start_position:(end_position +\n 1)])\n cleaned_answer_text = \" \".join(\n tokenization.whitespace_tokenize(orig_answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n tf.logging.warning(\"Could not find answer: '%s' vs. '%s'\",\n actual_text, cleaned_answer_text)\n continue\n else:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n\n example = SquadExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n is_impossible=is_impossible)\n examples.append(example)\n\n return examples\n\n\ndef convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and 
example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n 
segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\n \"tokens: %s\" %\n \" \".join([tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"token_to_orig_map: %s\" % \" \".join(\n [\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))\n tf.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n 
tf.logging.info(\"input_mask: %s\" %\n \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\"segment_ids: %s\" %\n \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n tf.logging.info(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\"answer: %s\" %\n (tokenization.printable_text(answer_text)))\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 1\n\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. 
Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. 
We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n final_hidden = model.get_sequence_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n hidden_size = final_hidden_shape[2]\n\n output_weights = tf.get_variable(\n \"cls/squad/output_weights\", [2, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"cls/squad/output_bias\", [2], initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden,\n [batch_size * seq_length, hidden_size])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = 
tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(logits, [2, 0, 1])\n\n unstacked_logits = tf.unstack(logits, axis=0)\n\n (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n return (start_logits, end_logits)\n\n\ndef model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (start_logits, end_logits) = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n 
seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n start_positions = features[\"start_positions\"]\n end_positions = features[\"end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) / 2.0\n\n train_op = optimization.create_optimizer(total_loss, learning_rate,\n num_train_steps,\n num_warmup_steps, use_tpu)\n\n output_spec = contrib_tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"start_logits\": start_logits,\n \"end_logits\": end_logits,\n }\n output_spec = contrib_tpu.TPUEstimatorSpec(\n mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\"Only TRAIN and PREDICT modes are supported: %s\" %\n (mode))\n\n return output_spec\n\n return model_fn\n\n\ndef input_fn_builder(input_file, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # 
tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.data.experimental.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\nRawResult = collections.namedtuple(\"RawResult\",\n [\"unique_id\", \"start_logits\", \"end_logits\"])\n\n\ndef write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n 
features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if FLAGS.version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. 
We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n\n if FLAGS.version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, 
orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # if we didn't inlude the empty option in the n-best, inlcude it\n if FLAGS.version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\", start_logit=null_start_logit,\n end_logit=null_end_logit))\n # In very rare edge cases we could have no valid predictions. So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not FLAGS.version_2_with_negative:\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (\n best_non_null_entry.end_logit)\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > FLAGS.null_score_diff_threshold:\n all_predictions[example.qas_id] = \"\"\n else:\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n 
writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n if FLAGS.version_2_with_negative:\n with tf.gfile.GFile(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")\n\n\ndef get_final_text(pred_text, orig_text, do_lower_case):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. 
If they are\n # NOT the same length, the heuristic has failed. If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Unable to find text: '%s' in '%s'\" %\n (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if FLAGS.verbose_logging:\n tf.logging.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text\n\n\ndef _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n 
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes\n\n\ndef _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs\n\n\nclass FeatureWriter(object):\n \"\"\"Writes InputFeature to TF example file.\"\"\"\n\n def __init__(self, filename, is_training):\n self.filename = filename\n self.is_training = is_training\n self.num_features = 0\n self._writer = tf.python_io.TFRecordWriter(filename)\n\n def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n 
self._writer.write(tf_example.SerializeToString())\n\n def close(self):\n self._writer.close()\n\n\ndef validate_flags_or_throw(bert_config):\n \"\"\"Validate the input FLAGS or throw an exception.\"\"\"\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_predict:\n raise ValueError(\"At least one of `do_train` or `do_predict` must be True.\")\n\n if FLAGS.do_train:\n if not FLAGS.train_file:\n raise ValueError(\n \"If `do_train` is True, then `train_file` must be specified.\")\n if FLAGS.do_predict:\n if not FLAGS.predict_file:\n raise ValueError(\n \"If `do_predict` is True, then `predict_file` must be specified.\")\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:\n raise ValueError(\n \"The max_seq_length (%d) must be greater than max_query_length \"\n \"(%d) + 3\" % (FLAGS.max_seq_length, FLAGS.max_query_length))\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n validate_flags_or_throw(bert_config)\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and FLAGS.tpu_name:\n tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2\n run_config = contrib_tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=contrib_tpu.TPUConfig(\n 
iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n train_examples = read_squad_examples(\n input_file=FLAGS.train_file, is_training=True)\n num_train_steps = int(\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n\n # Pre-shuffle the input to avoid having to make a very large shuffle\n # buffer in in the `input_fn`.\n rng = random.Random(12345)\n rng.shuffle(train_examples)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = contrib_tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n\n if FLAGS.do_train:\n # We write to a temporary file to avoid storing very large constant tensors\n # in memory.\n train_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"train.tf_record\"),\n is_training=True)\n convert_examples_to_features(\n examples=train_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=True,\n output_fn=train_writer.process_feature)\n train_writer.close()\n\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num orig examples = %d\", len(train_examples))\n tf.logging.info(\" Num split examples = %d\", train_writer.num_features)\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" 
Num steps = %d\", num_train_steps)\n del train_examples\n\n train_input_fn = input_fn_builder(\n input_file=train_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True)\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n\n if FLAGS.do_predict:\n eval_examples = read_squad_examples(\n input_file=FLAGS.predict_file, is_training=False)\n\n eval_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"eval.tf_record\"),\n is_training=False)\n eval_features = []\n\n def append_feature(feature):\n eval_features.append(feature)\n eval_writer.process_feature(feature)\n\n convert_examples_to_features(\n examples=eval_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=False,\n output_fn=append_feature)\n eval_writer.close()\n\n tf.logging.info(\"***** Running predictions *****\")\n tf.logging.info(\" Num orig examples = %d\", len(eval_examples))\n tf.logging.info(\" Num split examples = %d\", len(eval_features))\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n all_results = []\n\n predict_input_fn = input_fn_builder(\n input_file=eval_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=False)\n\n # If running eval on the TPU, you will need to specify the number of\n # steps.\n all_results = []\n for result in estimator.predict(\n predict_input_fn, yield_single_examples=True):\n if len(all_results) % 1000 == 0:\n tf.logging.info(\"Processing example: %d\" % (len(all_results)))\n unique_id = int(result[\"unique_ids\"])\n start_logits = [float(x) for x in result[\"start_logits\"].flat]\n end_logits = [float(x) for x in result[\"end_logits\"].flat]\n all_results.append(\n RawResult(\n unique_id=unique_id,\n start_logits=start_logits,\n end_logits=end_logits))\n\n output_prediction_file = os.path.join(FLAGS.output_dir, \"predictions.json\")\n 
output_nbest_file = os.path.join(FLAGS.output_dir, \"nbest_predictions.json\")\n output_null_log_odds_file = os.path.join(FLAGS.output_dir, \"null_odds.json\")\n\n write_predictions(eval_examples, eval_features, all_results,\n FLAGS.n_best_size, FLAGS.max_answer_length,\n FLAGS.do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file)\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n", "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Define the paragraph reconstruction model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom bert import modeling\n\nimport tensorflow as tf\n\n\ndef create_model(\n model,\n labels,\n label_types,\n batch_size,\n num_choices,\n use_tpu,\n lv2loss=False,\n use_margin_loss=True,\n margin=1.):\n \"\"\"Creates a classification model.\n\n Args:\n model: the BERT model from modeling.py\n labels: ground truth paragraph order\n label_types: which k distances are being predicted\n batch_size: the batch size\n num_choices: number of negatives samples + 1\n use_tpu: if use tpu\n lv2loss: (bool) add a second level loss\n use_margin_loss: (bool) use margin loss instead of CE\n margin: (float) eta used in max margin loss\n\n 
Returns:\n tuple of (loss, per_example_loss, logits, probabilities) for model\n \"\"\"\n\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n with tf.variable_scope(\"cpc_loss\"):\n\n softmax_weights = tf.get_variable(\n \"softmax_weights\", [hidden_size, 8],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n with tf.variable_scope(\"loss\"):\n # if is_training:\n # I.e., 0.1 dropout\n # output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n matmul_out = tf.matmul(output_layer, softmax_weights)\n\n logits = tf.reshape(matmul_out, (-1, num_choices, 8))\n logits = tf.transpose(logits, perm=[0, 2, 1])\n\n example_weights = tf.reduce_sum(tf.one_hot(label_types, 8), axis=1)\n\n if use_margin_loss:\n one_hot_labels = tf.one_hot(labels, num_choices)\n pos_logits = tf.reduce_sum(one_hot_labels * logits, axis=2)\n one_cold_labels = tf.ones_like(logits) - one_hot_labels\n downweighting = one_hot_labels * -9999\n neg_logits = tf.reduce_max(\n (one_cold_labels * logits) + downweighting, axis=2)\n per_example_loss = tf.maximum(0.,\n float(margin) - pos_logits + neg_logits)\n else:\n per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=labels)\n probabilities = tf.nn.softmax(logits, axis=-1)\n loss_weights = tf.constant([0.1, 0.25, 0.5, 1, 1, 0.5, 0.25, 0.1])\n # loss_weights = tf.constant([0, 0, 0, 1., 1., 0, 0, 0])\n if use_tpu:\n loss_weights = tf.broadcast_to(loss_weights,\n [example_weights.shape[0], 8])\n else:\n loss_weights = tf.broadcast_to(loss_weights, [batch_size, 8])\n loss = tf.reduce_mean(\n tf.reduce_sum(\n loss_weights * example_weights * per_example_loss, axis=-1))\n\n if lv2loss:\n seq_output = tf.reshape(output_layer, [-1, num_choices, hidden_size])\n attn = modeling.attention_layer(\n seq_output, seq_output, size_per_head=hidden_size)\n\n attn = tf.reshape(attn, [-1, hidden_size])\n attn = tf.concat([output_layer, attn], axis=-1)\n\n attn_softmax_weights = 
tf.get_variable(\n \"attn_softmax_weights\", [hidden_size * 2, 8],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n attn_matmul_out = tf.matmul(attn, attn_softmax_weights)\n\n attn_logits = tf.reshape(attn_matmul_out, (-1, num_choices, 8))\n attn_logits = tf.transpose(attn_logits, perm=[0, 2, 1])\n\n attn_per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=attn_logits, labels=labels)\n attn_probabilities = tf.nn.softmax(logits, axis=-1)\n attn_loss = tf.reduce_mean(\n example_weights * per_example_loss) / tf.to_float(batch_size)\n loss += attn_loss\n return (loss, attn_per_example_loss, attn_logits, attn_probabilities)\n\n return (loss, per_example_loss, logits, probabilities)\n\n\ndef gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions over a minibatch.\"\"\"\n sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.reshape(sequence_tensor,\n [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n return output_tensor\n\n\ndef get_masked_lm_output(bert_config, input_tensor, output_weights, positions,\n label_ids, label_weights):\n \"\"\"Get loss and log probs for the masked LM.\"\"\"\n input_tensor = gather_indexes(input_tensor, positions)\n\n with tf.variable_scope(\"cls/predictions\"):\n # We apply one more non-linear transformation before the output layer.\n # This matrix is not used after pre-training.\n with tf.variable_scope(\"transform\"):\n input_tensor = tf.layers.dense(\n input_tensor,\n units=bert_config.hidden_size,\n activation=modeling.get_activation(bert_config.hidden_act),\n kernel_initializer=modeling.create_initializer(\n 
bert_config.initializer_range))\n input_tensor = modeling.layer_norm(input_tensor)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n output_bias = tf.get_variable(\n \"output_bias\",\n shape=[bert_config.vocab_size],\n initializer=tf.zeros_initializer())\n logits = tf.matmul(input_tensor, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n label_ids = tf.reshape(label_ids, [-1])\n label_weights = tf.reshape(label_weights, [-1])\n\n one_hot_labels = tf.one_hot(\n label_ids, depth=bert_config.vocab_size, dtype=tf.float32)\n\n # The `positions` tensor might be zero-padded (if the sequence is too\n # short to have the maximum number of predictions). The `label_weights`\n # tensor has a value of 1.0 for every real prediction and 0.0 for the\n # padding predictions.\n per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])\n numerator = tf.reduce_sum(label_weights * per_example_loss)\n denominator = tf.reduce_sum(label_weights) + 1e-5\n loss = numerator / denominator\n\n return (loss, per_example_loss, log_probs)\n" ]
[ [ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.data.experimental.sample_from_datasets", "tensorflow.concat", "tensorflow.FixedLenFeature", "tensorflow.metrics.accuracy", "tensorflow.stack", "tensorflow.train.init_from_checkpoint", "tensorflow.gfile.MakeDirs", "tensorflow.to_int32", "tensorflow.contrib.training.checkpoints_iterator", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.data.TFRecordDataset", "tensorflow.io.gfile.glob", "tensorflow.gather", "tensorflow.logging.set_verbosity", "tensorflow.trainable_variables", "tensorflow.parse_single_example", "tensorflow.argmax", "tensorflow.metrics.mean", "tensorflow.random.shuffle", "tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.train.Scaffold", "tensorflow.constant", "tensorflow.range", "tensorflow.reshape", "tensorflow.logging.error" ], [ "tensorflow.compat.v1.train.AdagradOptimizer", "tensorflow.compat.v1.metrics.mean", "tensorflow.compat.v1.data.make_one_shot_iterator", "tensorflow.compat.v1.test.main", "tensorflow.compat.v1.argmax", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.Graph", "tensorflow.compat.v1.train.get_global_step", "tensorflow.compat.v1.train.Saver", "numpy.sum" ], [ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.logging.warning", "tensorflow.FixedLenFeature", "tensorflow.nn.log_softmax", "tensorflow.gfile.GFile", "tensorflow.reduce_sum", "tensorflow.train.init_from_checkpoint", "tensorflow.gfile.MakeDirs", "tensorflow.to_int32", "tensorflow.contrib.tpu.TPUEstimatorSpec", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.data.TFRecordDataset", "tensorflow.truncated_normal_initializer", "tensorflow.python_io.TFRecordWriter", "tensorflow.logging.set_verbosity", "tensorflow.trainable_variables", "tensorflow.parse_single_example", "tensorflow.app.run", "tensorflow.matmul", "tensorflow.unstack", "tensorflow.gfile.Open", "tensorflow.zeros_initializer", 
"tensorflow.logging.info", "tensorflow.one_hot", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.train.Features", "tensorflow.nn.bias_add", "tensorflow.train.Scaffold", "tensorflow.transpose", "tensorflow.flags.DEFINE_string", "tensorflow.reshape" ], [ "tensorflow.concat", "tensorflow.nn.log_softmax", "tensorflow.reduce_sum", "tensorflow.truncated_normal_initializer", "tensorflow.gather", "tensorflow.to_float", "tensorflow.matmul", "tensorflow.zeros_initializer", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "tensorflow.one_hot", "tensorflow.nn.bias_add", "tensorflow.reduce_max", "tensorflow.nn.softmax", "tensorflow.transpose", "tensorflow.constant", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.broadcast_to", "tensorflow.reshape", "tensorflow.ones_like", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
brio50/beam-calc
[ "7b05001ddbb8b45ab5c538973efab517730ae98d" ]
[ "main.py" ]
[ "from sympy.physics.continuum_mechanics.beam import Beam\nfrom sympy import *\nfrom sympy.plotting import plot, PlotGrid\nimport matplotlib.pyplot as plt\n\n# https://docs.sympy.org/latest/modules/physics/continuum_mechanics/beam_problems.html#example-7\n\ndef beam_me_up(rxn, __L, __E, __I, __F, color):\n\n ## sign convention\n # upward forces and clockwise moment are positive\n\n # symbols\n L = symbols('L', positive=True)\n E, I, F = symbols('E I F')\n\n ## definition\n\n # beam\n b = Beam(L, E, I)\n\n if rxn == 'fixed-fixed':\n\n # beam reactions\n R1, R2 = symbols('R1 R2')\n M1, M2 = symbols('M1, M2')\n b.apply_load(R1, 0, -1)\n b.apply_load(M1, 0, -2)\n b.apply_load(R2, L, -1)\n b.apply_load(M2, L, -2)\n\n # beam load\n b.apply_load(-F, L / 2, -1)\n\n # beam boundary conditions\n b.bc_deflection = [(0, 0), (L, 0)]\n b.bc_slope = [(0, 0), (L, 0)]\n\n # solve\n b.solve_for_reaction_loads(R1, R2, M1, M2)\n\n elif rxn == 'simple-simple':\n\n # beam reactions\n R1, R2 = symbols('R1 R2')\n b.apply_load(R1, 0, -1)\n b.apply_load(R2, L, -1)\n\n # beam load\n b.apply_load(-F, L / 2, -1)\n\n # beam boundary conditions\n b.bc_deflection = [(0, 0), (L, 0)]\n\n # solve\n b.solve_for_reaction_loads(R1, R2)\n\n else :\n print(\"No command defined!\")\n\n # print results\n print('Reaction Loads:')\n pprint(b.reaction_loads)\n\n print('Load:')\n pprint(b.load)\n\n print('Max Deflection:')\n pprint(b.max_deflection())\n\n ## plotting\n\n # free body diagram\n # https: // docs.sympy.org / latest / modules / physics / continuum_mechanics / beam.html # sympy.physics.continuum_mechanics.beam.Beam.draw\n # **Note:**: load F will not be rendered correctly if < -1, therefore use subs to make shear, bending, and moment diagrams\n #fbd = b.draw()\n #fbd.show()\n\n # shear, slope, moment, deflection\n #ax0 = b.plot_loading_results(subs={L: __L, E: __E, I: __I, F: __F})\n #ax1 = b.plot_shear_force()\n #ax2 = b.plot_slope()\n #ax3 = b.plot_bending_moment()\n #ax4 = 
b.plot_deflection()\n\n # how to access via backend\n # https://newbedev.com/display-two-sympy-plots-as-two-matplotlib-subplots\n #fig = ax0._backend.fig\n #ax0._backend = ax0.backend(ax0)\n #ax0._backend.process_series()\n #ax0._backend.ax[0].scatter([0, 60, 120], [0, 0, 0], marker='x', color='r')\n\n # extracting sympy plot data from beam.py for plotting outside of this function\n ax1 = plot(b.shear_force().subs({L: __L, E: __E, I: __I, F: __F}), (b.variable, 0, __L),\n title=\"Shear Force\", line_color=color, xlabel=r'$\\mathrm{x}$', ylabel=r'$\\mathrm{V\\quad(lb)}$', show=False)\n ax2 = plot(b.bending_moment().subs({L: __L, E: __E, I: __I, F: __F}), (b.variable, 0, __L),\n title=\"Bending Moment\", line_color=color, xlabel=r'$\\mathrm{x}$', ylabel=r'$\\mathrm{M\\quad(lb \\cdot in)}$', show=False)\n ax3 = plot(b.slope().subs({L: __L, E: __E, I: __I, F: __F}), (b.variable, 0, __L),\n title=\"Slope\", line_color=color, xlabel=r'$\\mathrm{x}$', ylabel=r'$\\theta$', show=False)\n ax4 = plot(b.deflection().subs({L: __L, E: __E, I: __I, F: __F}), (b.variable, 0, __L),\n title=\"Deflection\", line_color=color, xlabel=r'$\\mathrm{x}$', ylabel=r'$\\delta\\quad(in)$', show=False)\n\n return ax1, ax2, ax3, ax4\n\n# https://stackoverflow.com/q/63483960\ndef move_sympyplot_to_axes(p, ax):\n # move axes\n backend = p.backend(p)\n backend.ax = ax\n backend._process_series(backend.parent._series, ax, backend.parent)\n # remove top and right spines\n backend.ax.spines['top'].set_color('none')\n backend.ax.spines['right'].set_color('none')\n # move left and bottom spine to left and bottom of axis extents, previously 'bottom' was set to 'zero'\n backend.ax.spines['left'].set_position(('axes', 0))\n backend.ax.spines['bottom'].set_position(('axes', 0))\n # correct ylabel position, align vertically to center\n ax.yaxis.set_label_coords(-0.1, 0.5)\n plt.close(backend.fig)\n\n## MAIN\n\nbeam = {\n \"L\": {\n \"Name\": \"Length\",\n \"Value\": 120,\n \"Unit\": \"in\"\n },\n \"Ix\": {\n 
\"Name\": \"Moment of Inertia\",\n \"Value\": [6.04, 8.6, 12.83],\n \"Unit\": \"in4\"\n },\n \"E\": {\n \"Name\": \"Modulus of Elasticity\",\n \"Material\": [\"Aluminum\", \"Steel\"],\n \"Value\": [9900E3, 2.901E7],\n \"Unit\": \"lb/in2\"\n },\n \"F\": {\n \"Name\": \"Point Loads\",\n \"Value\": [2120, 2200],\n \"Unit\": \"lb\"\n },\n \"RXN\": {\n \"Name\": \"Reaction\",\n \"Value\": [\"fixed-fixed\", \"simple-simple\"]\n }\n}\n\nfor i, E in enumerate(beam['E']['Value']):\n\n MATERIAL = beam['E']['Material'][i]\n L = beam['L']['Value']\n F = beam['F']['Value'][i]\n\n for j, RXN in enumerate(beam['RXN']['Value']):\n\n Ix = beam['Ix']['Value']\n colors = ['red', 'blue', 'green']\n p = []\n\n title = f'Material = {MATERIAL}, Constraints = {RXN}'\n print(title)\n for k, I in enumerate(Ix):\n\n obj = beam_me_up(RXN, L, E, I, F, colors[k])\n p.append(obj)\n\n # https://www.spanco.com/blog/understanding-overhead-crane-deflection-and-criteria/\n # I think I could've used the b.max_deflection() formula with subs, but screw it!\n if RXN == 'fixed-fixed':\n delta = (F * L**3) / (192 * E * I)\n elif RXN == 'simple-simple':\n delta = (F * L ** 3) / (48 * E * I)\n allowable = L/450\n passed = False\n if delta < allowable:\n passed = True\n\n if k == 0:\n print(f'| Ix | &delta;<sub>max</sub> | &delta;<sub>allowable</sub> | Pass |')\n print(f'|----|-----------------------|-----------------------------|------|')\n\n print(f'| {I:10.2f} | {delta:10.2f} | {allowable:10.2f} | {passed} |')\n\n # matplotlib overlplotting\n fig, axes = plt.subplots(nrows=4)\n dpi = fig.get_dpi()\n fig.set_size_inches(800.0 / float(dpi), 600.0 / float(dpi))\n\n for P in p:\n move_sympyplot_to_axes(P[0], axes[0])\n move_sympyplot_to_axes(P[1], axes[1])\n move_sympyplot_to_axes(P[2], axes[2])\n move_sympyplot_to_axes(P[3], axes[3])\n\n # requirement\n axes[3].axhline(-allowable, linestyle='-', linewidth='1.0', color='magenta', zorder=0)\n\n # grid/limits/labels\n for ax in axes:\n ax.set_axisbelow(True)\n 
ax.minorticks_on()\n ax.grid(which='major', linestyle='-', linewidth='0.5', color='gray')\n ax.grid(which='minor', linestyle=':', linewidth='0.5', color='gray')\n ax.set_xlim([0, L])\n #ylabel = ax.yaxis.get_label()\n #ylabel.set_verticalalignment('center')\n\n # legend\n handles = axes[0].get_legend_handles_labels()[0] # return first value of function with [0]\n labels = [str(Ix) for Ix in Ix] # convert list of floats to list of strings\n axes[0].legend(handles, labels, loc='right', title='Moment of Inertia (Ix)', ncol=3)\n\n # voila\n fig.tight_layout()\n fig.suptitle(title, fontsize=10, x=0.5, y=0.05, color='gray')\n filename = f'./img/result_{MATERIAL.lower()}_{RXN}.png'\n fig.savefig(filename, dpi=100)\n plt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "matplotlib.pyplot.close" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
deb-intel/LPOTtest
[ "f7b7524c733e581668d15192b69f9d9a7ca5222d", "f7b7524c733e581668d15192b69f9d9a7ca5222d", "f7b7524c733e581668d15192b69f9d9a7ca5222d" ]
[ "lpot/adaptor/ox_utils/onnxrt_mid.py", "lpot/adaptor/tf_utils/quantize_graph/quantize_graph_concatv2.py", "examples/pytorch/huggingface_models/src/transformers/models/bart/modeling_bart.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n#\n# Copyright (c) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# -------------------------------------------------------------------------\n# Copyright (c) Microsoft, Intel Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\n\nimport copy\nimport logging\n\nimport numpy as np\nimport onnx\nimport onnxruntime\nfrom onnx import helper, TensorProto\nfrom .onnx_model import ONNXModel\n\nlogger = logging.getLogger()\n\n\nclass ONNXRTAugment:\n '''augment input model to dump tensor or for calibration'''\n\n def __init__(self, model_wrapper,\n dataloader,\n dump_op_types,\n augmented_model_path,\n black_nodes=[],\n white_nodes=[],\n iterations=[]):\n '''\n :param model: ONNX model to calibrate\n :param dataloader: user implemented object to read in and preprocess calibration dataset\n :param op_types: operator types to be calibrated and quantized, default = 'Conv,MatMul'\n :param black_nodes: operator names that should not be quantized, default = ''\n :param white_nodes: operator names that force to be quantized, default = ''\n :param augmented_model_path: save augmented_model to this path\n :param iterations: tensor of which iteration will be collected.\n '''\n self.model_wrapper = model_wrapper\n self.model = model_wrapper.model\n 
self.dataloader = dataloader\n self.dump_op_types = dump_op_types\n self.black_nodes = black_nodes\n self.white_nodes = white_nodes\n self.augmented_model = None\n self.augmented_model_path = augmented_model_path\n self.iterations = iterations\n self.augment_nodes = []\n self.already_quantized = False\n\n def augment_graph(self, activation_only=False, output_only=False):\n '''\n Adds nodes to all quantization_candidates op type nodes in\n model and ensures their outputs are stored as part of the graph output\n :param activation_only(bool): whether to dump activation tensor only\n :param output_only(bool): whether to dump output_only\n :return: augmented ONNX model\n '''\n\n model = copy.deepcopy(self.model)\n model_nodes_names = [node.name for node in model.graph.node]\n\n added_nodes = []\n added_outputs = []\n tensors_to_dump = set()\n\n for augment_node_type in self.augment_nodes:\n if augment_node_type not in ['ReduceMin', 'ReduceMax', 'DequantizeLinear']:\n raise ValueError(\"Unexpected augment_node {} only \\\n ReduceMin/ReduceMax are supported\".format(augment_node_type))\n\n if self.already_quantized:\n # mapping between fp32 node and int8 node\n new_white_nodes = []\n for white_node in self.white_nodes:\n new_white_node = white_node + \"_quant\"\n assert new_white_node in model_nodes_names, \"no quantized {} \\\n in the graph\".format(white_node)\n new_white_nodes.append(new_white_node)\n self.white_nodes = new_white_nodes\n\n initializer_names = [i.name for i in model.graph.initializer]\n for node in model.graph.node: # pylint: disable=no-member\n should_be_dump = ((node.op_type in self.dump_op_types) and\n (node.name not in self.black_nodes)) or \\\n (node.name in self.white_nodes)\n if should_be_dump:\n if not output_only:\n if node.op_type == \"Attention\":\n if len(node.input) >= 3:\n logger.debug(\"indice input {} of attention node {} is integer\"\n .format(node.input[3:], node.name))\n tensors_to_dump.update(node.input[:2])\n else:\n 
tensors_to_dump.update(node.input)\n elif node.op_type == \"Gather\":\n logger.debug(\"indice input {} of gather node {} is integer\"\n .format(node.input[-1], node.name))\n tensors_to_dump.update(node.input[:-1])\n else:\n tensors_to_dump.update(node.input)\n else:\n for input in node.input:\n if input in initializer_names:\n tensors_to_dump.add(input)\n tensors_to_dump.update(node.output)\n\n tensors_tmp = set()\n if activation_only:\n for tensor in tensors_to_dump:\n if tensor not in initializer_names: # pylint: disable=no-member\n tensors_tmp.add(tensor)\n tensors_to_dump = tensors_tmp\n\n for tensor in tensors_to_dump:\n if self.augment_nodes:\n for augment_node_type in self.augment_nodes:\n if augment_node_type in ['ReduceMin', 'ReduceMax']:\n # dump tensor for calibration\n augment_node_name = tensor + \"_\" + augment_node_type\n augment_node = onnx.helper.make_node(augment_node_type, [tensor],\n [augment_node_name],\n augment_node_name,\n keepdims=0)\n added_nodes.append(augment_node)\n added_outputs.append(helper.make_tensor_value_info(\n augment_node.output[0], # pylint: disable=no-member\n TensorProto.FLOAT, ())) # pylint: disable=no-member\n else:\n # insert DequantizeLinear node as output\n augment_node_name = tensor + \"_new_\" + augment_node_type\n scale, zero_point = self._get_scale_zo(tensor)\n if scale:\n # the tensor is in INT8 dtype\n augment_node = onnx.helper.make_node(augment_node_type,\n [tensor, scale, zero_point],\n [augment_node_name],\n augment_node_name,\n keepdims=0)\n added_nodes.append(augment_node)\n added_outputs.append(helper.make_tensor_value_info(\n augment_node.output[0], # pylint: disable=no-member\n TensorProto.FLOAT, ())) # pylint: disable=no-member\n else:\n # the tensor is in FP32 dtype\n if tensor not in [t.name for t in model.graph.output]:\n added_tensor = helper.ValueInfoProto()\n added_tensor.name = tensor\n added_outputs.append(added_tensor)\n else:\n if tensor not in [t.name for t in model.graph.output]:\n 
added_tensor = helper.ValueInfoProto()\n added_tensor.name = tensor\n added_outputs.append(added_tensor)\n\n if self.augment_nodes:\n model.graph.node.extend(added_nodes) # pylint: disable=no-member\n model.graph.output.extend(added_outputs) # pylint: disable=no-member\n\n self.augmented_model = model\n onnx.save(model, self.augmented_model_path)\n\n def _get_scale_zo(self, tensor):\n ''' help function to get scale and zero_point '''\n if not tensor.endswith(\"_quantized\"):\n logger.info(\"tensor {} in the quantized graph is not quantized\".format(tensor))\n return None, None\n scale = \"_\".join(tensor.split(\"_\")[:-1] + [\"scale\"])\n scale_tensor = self.model_wrapper.get_initializer(scale)\n assert scale_tensor, \"missing scale for tensor {}\".format(tensor)\n zo = \"_\".join(tensor.split(\"_\")[:-1] + [\"zero_point\"])\n zo_tensor = self.model_wrapper.get_initializer(zo)\n assert zo_tensor, \"missing zero point for tensor {}\".format(tensor)\n\n return scale, zo\n\n def get_intermediate_outputs(self):\n '''\n Gather intermediate model outputs after running inference\n :return: dictionary mapping: {node output tensor names: node output tensor }\n '''\n\n # conduct inference session and get intermediate outputs\n session = onnxruntime.InferenceSession(self.augmented_model.SerializeToString(), None)\n\n intermediate_outputs = []\n\n for idx, batch in enumerate(self.dataloader):\n ort_inputs = {}\n if self.iterations:\n if idx in self.iterations:\n for i in range(len(session.get_inputs())):\n ort_inputs.update({session.get_inputs()[i].name: batch[i]})\n intermediate_outputs.append(session.run(None, ort_inputs))\n else:\n for i in range(len(session.get_inputs())):\n ort_inputs.update({session.get_inputs()[i].name: batch[i]})\n intermediate_outputs.append(session.run(None, ort_inputs))\n node_output_names = [session.get_outputs()[i].name for i in\n range(len(intermediate_outputs[0]))]\n output_dicts_list = [\n dict(zip(node_output_names, intermediate_output)) for 
intermediate_output in\n intermediate_outputs\n ]\n\n return node_output_names, output_dicts_list\n\n def _map_calibration(self, node_output_names, output_dicts_list, calib_mode='naive'):\n model = self.model\n num_model_outputs = len(model.graph.output)\n merged_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_dict.setdefault(k, []).append(v)\n added_node_output_names = node_output_names[num_model_outputs:]\n node_names = [added_node_output_names[i].rpartition('_')[0]\n for i in range(0, len(added_node_output_names), 2)] # output names\n\n # Characterizing distribution of a node's values across test data sets\n clean_merged_dict = dict((i, merged_dict[i]) for i in merged_dict\n if i != list(merged_dict.keys())[0])\n if calib_mode == 'naive':\n pairs = [\n tuple([\n float(min(clean_merged_dict[added_node_output_names[i]])),\n float(max(clean_merged_dict[added_node_output_names[i + 1]]))\n ]) for i in range(0, len(added_node_output_names), 2)\n ]\n else:\n raise ValueError('Unknown value for calib_mode. 
\\\n Currently only naive mode is supported.')\n\n final_dict = dict(zip(node_names, pairs))\n\n return final_dict\n\n def dump_calibration(self, calib_mode='naive'):\n '''\n Gather calibration params for quantization\n parameter calib_mode: type 'naive' gives (ReduceMin, ReduceMax) pairs\n for each augmented node across test data sets, where\n the first element is a minimum of all ReduceMin values\n and the second element is a maximum of all ReduceMax\n values;\n :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }\n '''\n\n self.augment_nodes = [\"ReduceMin\", \"ReduceMax\"]\n self.augment_graph()\n node_output_names, output_dicts_list = self.get_intermediate_outputs()\n mapped_dict = self._map_calibration(node_output_names, output_dicts_list,\n calib_mode=calib_mode)\n\n return self.calculate_quantization_params(mapped_dict)\n\n def calculate_quantization_params(self, quantization_thresholds):\n '''\n Given quantization thresholds, calculate the quantization params.\n :param quantization_thresholds:\n Dictionary specifying the min and max values for outputs of conv and matmul nodes.\n The quantization_thresholds should be specified in the following format:\n {\n \"param_name\": [min, max]\n }\n example:\n {\n 'Conv_3:0': [np.float32(0), np.float32(0.5)],\n 'Conv_4:0': [np.float32(1), np.float32(3.5)]\n }\n :return: Dictionary containing the zero point and\n scale values for outputs of conv and matmul nodes.\n The dictionary format is\n {\n \"param_name\": [zero_point, scale]\n }\n '''\n if quantization_thresholds is None:\n raise ValueError(\n 'quantization thresholds is required to calculate quantization \\\n params (zero point and scale)')\n\n quantization_params = {}\n model = self.model\n\n input_name_to_nodes = self.model_wrapper.input_name_to_nodes()\n output_name_to_nodes = self.model_wrapper.output_name_to_node()\n\n for tensor_name in quantization_thresholds.keys():\n child = None\n if tensor_name in input_name_to_nodes:\n 
children = input_name_to_nodes[tensor_name]\n if len(children) == 1:\n child = children[0]\n parent = None\n if tensor_name in output_name_to_nodes:\n parent = output_name_to_nodes[tensor_name]\n node_thresholds = quantization_thresholds[tensor_name]\n node_params = calculate_scale_zeropoint(parent, child, node_thresholds[0],\n node_thresholds[1])\n quantization_params[tensor_name] = node_params\n\n return quantization_params\n\n def dump_tensor(self, activation_only=True):\n if \"QuantizeLinear\" in [node.op_type for node in self.model.graph.node]:\n self.augment_nodes = [\"DequantizeLinear\"]\n self.already_quantized = True\n self.augment_graph(activation_only=activation_only, output_only=True)\n _, output_dicts_list = self.get_intermediate_outputs()\n output_dicts = {}\n for output_dicts_iter in output_dicts_list:\n for output_name in output_dicts_iter:\n if output_name not in output_dicts:\n output_dicts[output_name] = []\n output_dicts[output_name].append(output_dicts_iter[output_name])\n iters = len(output_dicts_list)\n map_node_activation = [{} for _ in range(iters)]\n map_node_weight = [{} for _ in range(iters)]\n self.white_nodes = [node.replace(\"_quant\", \"\") for node in self.white_nodes]\n augmengted_wrapper = ONNXModel(self.augmented_model)\n map_output = augmengted_wrapper.output_name_to_node()\n map_input = augmengted_wrapper.input_name_to_nodes()\n model_output_names = [t.name for t in self.model.graph.output]\n model_initializer_names = [t.name for t in self.model.graph.initializer]\n for tensor_name, tensors in output_dicts.items():\n if tensor_name in model_initializer_names:\n node = map_input[tensor_name][0]\n else:\n node = map_output[tensor_name]\n if tensor_name in model_output_names and node.name not in self.white_nodes:\n continue\n while node.name not in self.white_nodes:\n node = augmengted_wrapper.get_parents(node, output_name_to_node=map_output)[0]\n if tensor_name not in model_initializer_names:\n for i in range(iters):\n 
map_node_activation[i][node.name] = {tensor_name: tensors[i]}\n else:\n for i in range(iters):\n map_node_weight[i][node.name] = {tensor_name: tensors[i]}\n return {\"weight\": map_node_weight, \"activation\": map_node_activation}\n\ndef calculate_scale_zeropoint(last_node, next_node, rmin, rmax):\n '''\n Given the source and destination node of tensor, \\\n return calculated zero point and scales.\n\n :param last_node: the source of the tensor\n :param next_node: the destination of the tensor\n :param rmin: min threshold of the tensor\n :param rmax: max threshold of the tensor\n :return (List): zero_point and scale\n\n '''\n\n zp_and_scale = []\n # adjust rmin and rmax such that 0 is included in the range. This is required\n # to make sure zero can be uniquely represented.\n rmin = min(rmin, 0)\n rmax = max(rmax, 0)\n if next_node:\n if next_node.op_type == 'Relu':\n if rmin < 0:\n rmin = 0\n\n if last_node:\n if last_node.op_type in ['Conv', 'FusedConv']:\n attrs = [attr for attr in last_node.attribute]\n attrs_names = [attr.name for attr in last_node.attribute]\n if 'activation' in attrs_names:\n if attrs[attrs_names.index('activation')].s == b'Relu':\n rmin = max(rmin, 0)\n if attrs[attrs_names.index('activation')].s == b'Clip':\n assert 'activation_params' in attrs_names, \"the model contains no \\\n params for clip node \\\n {}\".format(last_node)\n clip_params = attrs[attrs_names.index('activation_params')].floats\n rmin = min(rmin, clip_params[0], clip_params[1])\n rmax = max(rmax, clip_params[0], clip_params[1])\n\n scale = np.float32((rmax - rmin) / 255 if rmin != rmax else 1)\n initial_zero_point = (0 - rmin) / scale\n zero_point = np.uint8(round(max(0, min(255, initial_zero_point))))\n\n zp_and_scale.append(zero_point)\n zp_and_scale.append(scale)\n\n return zp_and_scale\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use 
this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.core.framework import node_def_pb2\nfrom .quantize_graph_base import QuantizeNodeBase\nfrom .quantize_graph_common import QuantizeGraphHelper as helper\n\n\nclass FuseNodeStartWithConcatV2(QuantizeNodeBase):\n def _apply_concatv2_transform(self, original_node):\n namespace_prefix = original_node.name + \"_eightbit\"\n quantized_concat_name = namespace_prefix + \"_quantized_concatv2\"\n reshape_dims_name, reduction_dims_name = self._add_common_quantization_nodes(\n namespace_prefix, helper.node_name_from_input(original_node.input[-1]))\n num_input = len(original_node.input)\n shape_input_name = original_node.input[num_input - 1]\n original_inputs = original_node.input[0:num_input - 1]\n input_names = []\n min_names = []\n max_names = []\n for original_input_name in original_inputs:\n quantize_input_name, min_input_name, max_input_name = (\n self._eightbitize_input_to_node(namespace_prefix,\n original_input_name,\n reshape_dims_name,\n reduction_dims_name,\n dtype=dtypes.quint8))\n input_names.append(quantize_input_name)\n min_names.append(min_input_name)\n max_names.append(max_input_name)\n all_input_names = input_names\n all_input_names.append(shape_input_name)\n all_input_names.extend(min_names)\n all_input_names.extend(max_names)\n quantized_concat_node = helper.create_node(\"QuantizedConcatV2\",\n quantized_concat_name,\n all_input_names)\n helper.set_attr_int(quantized_concat_node, \"N\", len(original_inputs))\n 
helper.set_attr_dtype(quantized_concat_node, \"T\", dtypes.quint8)\n self.add_output_graph_node(quantized_concat_node)\n self._intel_cpu_add_dequantize_result_node(quantized_concat_name, original_node.name)\n\n def _quantizable_concat(self, node):\n for input_node_name in node.input[:node.attr['N'].i]:\n node_name = helper.node_name_from_input(input_node_name)\n if self.node_name_mapping[node_name].node.op != \"Dequantize\":\n return False\n return True\n\n def _apply_concatv2_quantization(self):\n for _, v in self.node_name_mapping.items():\n if v.node.op in (\"ConcatV2\",) and self._quantizable_concat(v.node) and \\\n dtypes.as_dtype(v.node.attr[\"T\"].type) == dtypes.float32 and \\\n not re.search(r'map(_\\d+)?/while', v.node.name):\n self._apply_concatv2_transform(v.node)\n else:\n new_node = node_def_pb2.NodeDef()\n new_node.CopyFrom(v.node)\n self.add_output_graph_node(new_node)\n\n def get_longest_fuse(self):\n return 1\n\n def apply_the_transform(self):\n self._apply_concatv2_quantization()\n self._reset_output_node_maps()\n if self.remove_redundant_quant_flag:\n self.output_graph = self.remove_redundant_quantization(self.output_graph)\n return self.output_graph\n", "# coding=utf-8\n# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch BART model. 
\"\"\"\nimport copy\nimport math\nimport random\nimport warnings\nfrom typing import Optional, Tuple\n\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...activations import ACT2FN\nfrom ...file_utils import (\n add_code_sample_docstrings,\n add_end_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n replace_return_docstrings,\n)\nfrom ...modeling_outputs import (\n BaseModelOutput,\n BaseModelOutputWithPastAndCrossAttentions,\n CausalLMOutputWithCrossAttentions,\n Seq2SeqLMOutput,\n Seq2SeqModelOutput,\n Seq2SeqQuestionAnsweringModelOutput,\n Seq2SeqSequenceClassifierOutput,\n)\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import logging\nfrom .configuration_bart import BartConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"BartConfig\"\n_TOKENIZER_FOR_DOC = \"BartTokenizer\"\n\n\nBART_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"facebook/bart-large\",\n # See all BART models at https://huggingface.co/models?filter=bart\n]\n\n\ndef shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):\n \"\"\"\n Shift input ids one token to the right.\n \"\"\"\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()\n shifted_input_ids[:, 0] = decoder_start_token_id\n\n assert pad_token_id is not None, \"self.model.config.pad_token_id has to be defined.\"\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n return shifted_input_ids\n\n\ndef _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):\n \"\"\"\n Make causal mask used for bi-directional self-attention.\n \"\"\"\n bsz, tgt_len = input_ids_shape\n mask = torch.full((tgt_len, tgt_len), float(\"-inf\"))\n mask_cond = torch.arange(mask.size(-1))\n 
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)\n mask = mask.to(dtype)\n\n if past_key_values_length > 0:\n mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)\n return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)\n\n\ndef _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):\n \"\"\"\n Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.\n \"\"\"\n bsz, src_len = mask.size()\n tgt_len = tgt_len if tgt_len is not None else src_len\n\n expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)\n\n inverted_mask = 1.0 - expanded_mask\n\n return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)\n\n\nclass BartLearnedPositionalEmbedding(nn.Embedding):\n \"\"\"\n This module learns positional embeddings up to a fixed maximum size.\n \"\"\"\n\n def __init__(self, num_embeddings: int, embedding_dim: int):\n # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2\n # and adjust num_embeddings appropriately. 
Other models dont have this hack\n self.offset = 2\n super().__init__(num_embeddings + self.offset, embedding_dim)\n\n def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):\n \"\"\"`input_ids_shape` is expected to be [bsz x seqlen].\"\"\"\n bsz, seq_len = input_ids_shape[:2]\n positions = torch.arange(\n past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device\n )\n return super().forward(positions + self.offset)\n\n\nclass BartAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n is_decoder: bool = False,\n bias: bool = True,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n assert (\n self.head_dim * num_heads == self.embed_dim\n ), f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).\"\n self.scaling = self.head_dim ** -0.5\n self.is_decoder = is_decoder\n\n self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n key_value_states: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.Tensor] = None,\n layer_head_mask: Optional[torch.Tensor] = None,\n output_attentions: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"Input shape: Batch x Time x Channel\"\"\"\n\n # if key_value_states are 
provided this layer is used as a cross-attention layer\n # for the decoder\n is_cross_attention = key_value_states is not None\n bsz, tgt_len, embed_dim = hidden_states.size()\n\n # get query proj\n query_states = self.q_proj(hidden_states) * self.scaling\n # get key, value proj\n if is_cross_attention and past_key_value is not None:\n # reuse k,v, cross_attentions\n key_states = past_key_value[0]\n value_states = past_key_value[1]\n elif is_cross_attention:\n # cross_attentions\n key_states = self._shape(self.k_proj(key_value_states), -1, bsz)\n value_states = self._shape(self.v_proj(key_value_states), -1, bsz)\n elif past_key_value is not None:\n # reuse k, v, self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n key_states = torch.cat([past_key_value[0], key_states], dim=2)\n value_states = torch.cat([past_key_value[1], value_states], dim=2)\n else:\n # self_attention\n key_states = self._shape(self.k_proj(hidden_states), -1, bsz)\n value_states = self._shape(self.v_proj(hidden_states), -1, bsz)\n\n if self.is_decoder:\n # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.\n # Further calls to cross_attention layer can then reuse all cross-attention\n # key/value_states (first \"if\" case)\n # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of\n # all previous decoder key/value_states. 
Further calls to uni-directional self-attention\n # can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)\n # if encoder bi-directional self-attention `past_key_value` is always `None`\n past_key_value = (key_states, value_states)\n\n proj_shape = (bsz * self.num_heads, -1, self.head_dim)\n query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)\n key_states = key_states.view(*proj_shape)\n value_states = value_states.view(*proj_shape)\n\n src_len = key_states.size(1)\n attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))\n\n assert attn_weights.size() == (\n bsz * self.num_heads,\n tgt_len,\n src_len,\n ), f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}\"\n\n if attention_mask is not None:\n assert attention_mask.size() == (\n bsz,\n 1,\n tgt_len,\n src_len,\n ), f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}\"\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n attn_weights = F.softmax(attn_weights, dim=-1)\n\n if layer_head_mask is not None:\n assert layer_head_mask.size() == (\n self.num_heads,\n ), f\"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}\"\n attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if output_attentions:\n # this operation is a bit akward, but it's required to\n # make sure that attn_weights keeps its gradient.\n # In order to do so, attn_weights have to reshaped\n # twice and have to be reused in the following\n attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights = attn_weights_reshaped.view(bsz * 
self.num_heads, tgt_len, src_len)\n else:\n attn_weights_reshaped = None\n\n attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)\n\n attn_output = torch.bmm(attn_probs, value_states)\n\n assert attn_output.size() == (\n bsz * self.num_heads,\n tgt_len,\n self.head_dim,\n ), f\"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}\"\n\n attn_output = (\n attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)\n .transpose(1, 2)\n .reshape(bsz, tgt_len, embed_dim)\n )\n\n attn_output = self.out_proj(attn_output)\n\n return attn_output, attn_weights_reshaped, past_key_value\n\n\nclass BartEncoderLayer(nn.Module):\n def __init__(self, config: BartConfig):\n super().__init__()\n self.embed_dim = config.d_model\n self.self_attn = BartAttention(\n embed_dim=self.embed_dim,\n num_heads=config.encoder_attention_heads,\n dropout=config.attention_dropout,\n )\n self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n self.dropout = config.dropout\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = config.activation_dropout\n self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)\n self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)\n self.final_layer_norm = nn.LayerNorm(self.embed_dim)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: torch.Tensor,\n layer_head_mask: torch.Tensor,\n output_attentions: bool = False,\n ):\n \"\"\"\n Args:\n hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`\n attention_mask (:obj:`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size\n `(config.encoder_attention_heads,)`.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the 
attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n \"\"\"\n residual = hidden_states\n hidden_states, attn_weights, _ = self.self_attn(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n layer_head_mask=layer_head_mask,\n output_attentions=output_attentions,\n )\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n\n residual = hidden_states\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n\n if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():\n clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\nclass BartDecoderLayer(nn.Module):\n def __init__(self, config: BartConfig):\n super().__init__()\n self.embed_dim = config.d_model\n\n self.self_attn = BartAttention(\n embed_dim=self.embed_dim,\n num_heads=config.decoder_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=True,\n )\n self.dropout = config.dropout\n self.activation_fn = ACT2FN[config.activation_function]\n self.activation_dropout = config.activation_dropout\n\n self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n self.encoder_attn = BartAttention(\n self.embed_dim,\n config.decoder_attention_heads,\n dropout=config.attention_dropout,\n is_decoder=True,\n )\n self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)\n self.fc1 = 
nn.Linear(self.embed_dim, config.decoder_ffn_dim)\n self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)\n self.final_layer_norm = nn.LayerNorm(self.embed_dim)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.Tensor] = None,\n layer_head_mask: Optional[torch.Tensor] = None,\n encoder_layer_head_mask: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = True,\n ):\n \"\"\"\n Args:\n hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`\n attention_mask (:obj:`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size\n `(config.encoder_attention_heads,)`.\n encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of\n size `(config.encoder_attention_heads,)`.\n past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. 
See ``attentions`` under\n returned tensors for more detail.\n \"\"\"\n residual = hidden_states\n\n # Self Attention\n # decoder uni-directional self-attention cached key/values tuple is at positions 1,2\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n # add present self-attn cache to positions 1,2 of present_key_value tuple\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n past_key_value=self_attn_past_key_value,\n attention_mask=attention_mask,\n layer_head_mask=layer_head_mask,\n output_attentions=output_attentions,\n )\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n\n # Cross-Attention Block\n cross_attn_present_key_value = None\n cross_attn_weights = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n\n # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple\n cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None\n hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(\n hidden_states=hidden_states,\n key_value_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n layer_head_mask=encoder_layer_head_mask,\n past_key_value=cross_attn_past_key_value,\n output_attentions=output_attentions,\n )\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n\n # add cross-attn to positions 3,4 of present_key_value tuple\n present_key_value = present_key_value + cross_attn_present_key_value\n\n # Fully Connected\n residual = hidden_states\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = F.dropout(hidden_states, p=self.activation_dropout, 
training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (self_attn_weights, cross_attn_weights)\n\n if use_cache:\n outputs += (present_key_value,)\n\n return outputs\n\n\nclass BartClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(\n self,\n input_dim: int,\n inner_dim: int,\n num_classes: int,\n pooler_dropout: float,\n ):\n super().__init__()\n self.dense = nn.Linear(input_dim, inner_dim)\n self.dropout = nn.Dropout(p=pooler_dropout)\n self.out_proj = nn.Linear(inner_dim, num_classes)\n\n def forward(self, hidden_states: torch.Tensor):\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.dense(hidden_states)\n hidden_states = torch.tanh(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.out_proj(hidden_states)\n return hidden_states\n\n\nclass BartPretrainedModel(PreTrainedModel):\n config_class = BartConfig\n base_model_prefix = \"model\"\n\n def _init_weights(self, module):\n std = self.config.init_std\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n\n @property\n def dummy_inputs(self):\n pad_token = self.config.pad_token_id\n input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)\n dummy_inputs = {\n \"attention_mask\": input_ids.ne(pad_token),\n \"input_ids\": input_ids,\n }\n return dummy_inputs\n\n\nclass PretrainedBartModel(BartPretrainedModel):\n def __init_subclass__(self):\n warnings.warn(\n 
\"The class `PretrainedBartModel` has been depreciated, please use `BartPretrainedModel` instead.\",\n FutureWarning,\n )\n\n\nBART_START_DOCSTRING = r\"\"\"\n This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic\n methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,\n pruning heads etc.)\n\n This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__\n subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to\n general usage and behavior.\n\n Parameters:\n config (:class:`~transformers.BartConfig`):\n Model configuration class with all the parameters of the model. Initializing with a config file does not\n load the weights associated with the model, only the configuration. Check out the\n :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nBART_GENERATION_EXAMPLE = r\"\"\"\n Summarization example::\n\n >>> from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig\n\n >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large')\n >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')\n\n >>> ARTICLE_TO_SUMMARIZE = \"My friends are cool but they eat too many carbs.\"\n >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt')\n\n >>> # Generate Summary\n >>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)\n >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])\n\n Mask filling example::\n\n >>> from transformers import BartTokenizer, BartForConditionalGeneration\n >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')\n >>> TXT = \"My friends are <mask> but they eat too many carbs.\"\n\n >>> model 
= BartForConditionalGeneration.from_pretrained('facebook/bart-large')\n >>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids']\n >>> logits = model(input_ids).logits\n\n >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()\n >>> probs = logits[0, masked_index].softmax(dim=0)\n >>> values, predictions = probs.topk(5)\n\n >>> tokenizer.decode(predictions).split()\n\"\"\"\n\nBART_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using :class:`~transformers.BartTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`~transformers.BartTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for\n details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n\n Bart uses the :obj:`eos_token_id` as the starting token for :obj:`decoder_input_ids` generation. 
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
This is useful if you want more control over how to convert\n :obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.\n\n If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`\n takes the value of :obj:`inputs_embeds`.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned\n tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for\n more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\"\"\"\n\n\nclass BartEncoder(BartPretrainedModel):\n \"\"\"\n Transformer encoder consisting of *config.encoder_layers* self attention layers. 
Each layer is a\n :class:`BartEncoderLayer`.\n\n Args:\n config: BartConfig\n embed_tokens (torch.nn.Embedding): output embedding\n \"\"\"\n\n def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None):\n super().__init__(config)\n\n self.dropout = config.dropout\n self.layerdrop = config.encoder_layerdrop\n\n embed_dim = config.d_model\n self.padding_idx = config.pad_token_id\n self.max_source_positions = config.max_position_embeddings\n self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0\n\n if embed_tokens is not None:\n self.embed_tokens = embed_tokens\n else:\n self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)\n\n self.embed_positions = BartLearnedPositionalEmbedding(\n config.max_position_embeddings,\n embed_dim,\n )\n self.layers = nn.ModuleList([BartEncoderLayer(config) for _ in range(config.encoder_layers)])\n self.layernorm_embedding = nn.LayerNorm(embed_dim)\n\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.BartTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? 
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
inputs_embeds\")\n\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n\n embed_pos = self.embed_positions(input_shape)\n\n hidden_states = inputs_embeds + embed_pos\n hidden_states = self.layernorm_embedding(hidden_states)\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n\n # expand attention_mask\n if attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)\n\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n\n # check if head_mask has a correct number of layers specified if desired\n if head_mask is not None:\n assert head_mask.size()[0] == (\n len(self.layers)\n ), f\"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.\"\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = random.uniform(0, 1)\n if self.training and (dropout_probability < self.layerdrop): # skip the layer\n layer_outputs = (None, None)\n else:\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(encoder_layer),\n hidden_states,\n attention_mask,\n (head_mask[idx] if head_mask is not None else None),\n )\n else:\n layer_outputs = encoder_layer(\n hidden_states,\n attention_mask,\n layer_head_mask=(head_mask[idx] if head_mask is not None else None),\n output_attentions=output_attentions,\n )\n\n hidden_states = layer_outputs[0]\n\n if output_attentions:\n all_attentions = all_attentions + 
(layer_outputs[1],)\n\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions\n )\n\n\nclass BartDecoder(BartPretrainedModel):\n \"\"\"\n Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`BartDecoderLayer`\n\n Args:\n config: BartConfig\n embed_tokens (torch.nn.Embedding): output embedding\n \"\"\"\n\n def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None):\n super().__init__(config)\n self.dropout = config.dropout\n self.layerdrop = config.decoder_layerdrop\n self.padding_idx = config.pad_token_id\n self.max_target_positions = config.max_position_embeddings\n self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0\n\n if embed_tokens is not None:\n self.embed_tokens = embed_tokens\n else:\n self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)\n\n self.embed_positions = BartLearnedPositionalEmbedding(\n config.max_position_embeddings,\n config.d_model,\n )\n self.layers = nn.ModuleList([BartDecoderLayer(config) for _ in range(config.decoder_layers)])\n self.layernorm_embedding = nn.LayerNorm(config.d_model)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, value):\n self.embed_tokens = value\n\n def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):\n # create causal mask\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = None\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(\n input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length\n ).to(self.device)\n\n if attention_mask is not 
None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])\n combined_attention_mask = (\n expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask\n )\n\n return combined_attention_mask\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n head_mask=None,\n encoder_head_mask=None,\n past_key_values=None,\n inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.BartTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n of the decoder.\n encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. 
            head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
                Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
                Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
                on hidden heads. Mask values selected in ``[0, 1]``:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
See ``attentions`` under\n returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors\n for more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # retrieve input_ids and inputs_embeds\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either decoder_input_ids or decoder_inputs_embeds\")\n\n # past_key_values_length\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n\n attention_mask = self._prepare_decoder_attention_mask(\n attention_mask, input_shape, inputs_embeds, past_key_values_length\n )\n\n # expand encoder attention mask\n if encoder_hidden_states is not None and encoder_attention_mask is not None:\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])\n\n # embed positions\n positions = self.embed_positions(input_shape, 
past_key_values_length)\n\n hidden_states = inputs_embeds + positions\n hidden_states = self.layernorm_embedding(hidden_states)\n\n hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)\n\n # decoder layers\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None\n next_decoder_cache = () if use_cache else None\n\n # check if head_mask has a correct number of layers specified if desired\n if head_mask is not None:\n assert head_mask.size()[0] == (\n len(self.layers)\n ), f\"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.\"\n for idx, decoder_layer in enumerate(self.layers):\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n dropout_probability = random.uniform(0, 1)\n if self.training and (dropout_probability < self.layerdrop):\n continue\n\n past_key_value = past_key_values[idx] if past_key_values is not None else None\n\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n if use_cache:\n logger.warn(\n \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. 
Setting \"\n \"`use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for past_key_value\n return module(*inputs, output_attentions, use_cache)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(decoder_layer),\n hidden_states,\n attention_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n head_mask[idx] if head_mask is not None else None,\n encoder_head_mask[idx] if encoder_head_mask is not None else None,\n None,\n )\n else:\n\n layer_outputs = decoder_layer(\n hidden_states,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n layer_head_mask=(head_mask[idx] if head_mask is not None else None),\n encoder_layer_head_mask=(encoder_head_mask[idx] if encoder_head_mask is not None else None),\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n use_cache=use_cache,\n )\n hidden_states = layer_outputs[0]\n\n if use_cache:\n next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)\n\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n\n if encoder_hidden_states is not None:\n all_cross_attentions += (layer_outputs[2],)\n\n # add hidden states from the last decoder layer\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n\n next_cache = next_decoder_cache if use_cache else None\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]\n if v is not None\n )\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=next_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attns,\n cross_attentions=all_cross_attentions,\n )\n\n\n@add_start_docstrings(\n \"The bare BART Model outputting raw hidden-states without any specific head on top.\",\n 
BART_START_DOCSTRING,\n)\nclass BartModel(BartPretrainedModel):\n def __init__(self, config: BartConfig):\n super().__init__(config)\n\n padding_idx, vocab_size = config.pad_token_id, config.vocab_size\n self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)\n\n self.encoder = BartEncoder(config, self.shared)\n self.decoder = BartDecoder(config, self.shared)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, value):\n self.shared = value\n self.encoder.embed_tokens = self.shared\n self.decoder.embed_tokens = self.shared\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"facebook/bart-large\",\n output_type=Seq2SeqModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n\n # different to other models, Bart automatically creates decoder_input_ids from\n # input_ids if no decoder_input_ids are provided\n if decoder_input_ids is None and decoder_inputs_embeds is None:\n decoder_input_ids = shift_tokens_right(\n input_ids, self.config.pad_token_id, self.config.decoder_start_token_id\n )\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None 
else self.config.use_return_dict\n\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=encoder_outputs[0],\n encoder_attention_mask=attention_mask,\n head_mask=decoder_head_mask,\n encoder_head_mask=head_mask,\n past_key_values=past_key_values,\n inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"The BART Model with a language modeling head. 
Can be used for summarization.\", BART_START_DOCSTRING\n)\nclass BartForConditionalGeneration(BartPretrainedModel):\n base_model_prefix = \"model\"\n _keys_to_ignore_on_load_missing = [\n r\"final_logits_bias\",\n r\"encoder\\.version\",\n r\"decoder\\.version\",\n r\"lm_head\\.weight\",\n ]\n\n def __init__(self, config: BartConfig):\n super().__init__(config)\n self.model = BartModel(config)\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)\n\n self.init_weights()\n\n def get_encoder(self):\n return self.model.get_encoder()\n\n def get_decoder(self):\n return self.model.get_decoder()\n\n def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:\n new_embeddings = super().resize_token_embeddings(new_num_tokens)\n self._resize_final_logits_bias(new_num_tokens)\n return new_embeddings\n\n def _resize_final_logits_bias(self, new_num_tokens: int) -> None:\n old_num_tokens = self.final_logits_bias.shape[-1]\n if new_num_tokens <= old_num_tokens:\n new_bias = self.final_logits_bias[:, :new_num_tokens]\n else:\n extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)\n new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)\n self.register_buffer(\"final_logits_bias\", new_bias)\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n @add_end_docstrings(BART_GENERATION_EXAMPLE)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n encoder_outputs=None,\n past_key_values=None,\n inputs_embeds=None,\n 
decoder_inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,\n config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored\n (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.\n\n Returns:\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is not None:\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(\n labels, self.config.pad_token_id, self.config.decoder_start_token_id\n )\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n 
encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n def prepare_inputs_for_generation(\n self,\n decoder_input_ids,\n past=None,\n attention_mask=None,\n head_mask=None,\n use_cache=None,\n encoder_outputs=None,\n **kwargs\n ):\n # cut decoder_input_ids if past is used\n if past is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n\n return {\n \"input_ids\": None, # encoder_outputs is defined. input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"head_mask\": head_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\n return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n )\n return reordered_past\n\n\n@add_start_docstrings(\n \"\"\"\n Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. 
for GLUE\n tasks.\n \"\"\",\n BART_START_DOCSTRING,\n)\nclass BartForSequenceClassification(BartPretrainedModel):\n def __init__(self, config: BartConfig, **kwargs):\n super().__init__(config, **kwargs)\n self.model = BartModel(config)\n self.classification_head = BartClassificationHead(\n config.d_model,\n config.d_model,\n config.num_labels,\n config.classifier_dropout,\n )\n self.model._init_weights(self.classification_head.dense)\n self.model._init_weights(self.classification_head.out_proj)\n\n @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"facebook/bart-large\",\n output_type=Seq2SeqSequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n encoder_outputs=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. 
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n\n if input_ids is None and inputs_embeds is not None:\n raise NotImplementedError(\n f\"Passing input embeddings is currently not supported for {self.__class__.__name__}\"\n )\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n encoder_outputs=encoder_outputs,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = outputs[0] # last hidden state\n\n eos_mask = input_ids.eq(self.config.eos_token_id)\n\n if len(torch.unique(eos_mask.sum(1))) > 1:\n raise ValueError(\"All examples must have the same number of <eos> tokens.\")\n sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[\n :, -1, :\n ]\n logits = self.classification_head(sentence_representation)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return Seq2SeqSequenceClassifierOutput(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n 
)\n\n\n@add_start_docstrings(\n \"\"\"\n BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layer on top of the hidden-states output to compute `span start logits` and `span end logits`).\n \"\"\",\n BART_START_DOCSTRING,\n)\nclass BartForQuestionAnswering(BartPretrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n config.num_labels = 2\n self.num_labels = config.num_labels\n\n self.model = BartModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.model._init_weights(self.qa_outputs)\n\n @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"facebook/bart-large\",\n output_type=Seq2SeqQuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n encoder_outputs=None,\n start_positions=None,\n end_positions=None,\n inputs_embeds=None,\n decoder_inputs_embeds=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). 
Position outside of the sequence\n are not taken into account for computing the loss.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if start_positions is not None and end_positions is not None:\n use_cache = False\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n encoder_outputs=encoder_outputs,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if not return_dict:\n output = (\n start_logits,\n end_logits,\n ) + outputs[1:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return Seq2SeqQuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n past_key_values=outputs.past_key_values,\n 
decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n\nclass BartDecoderWrapper(BartPretrainedModel):\n \"\"\"\n This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is\n used in combination with the :class:`~transformers.EncoderDecoderModel` framework.\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.decoder = BartDecoder(config)\n\n def forward(self, *args, **kwargs):\n return self.decoder(*args, **kwargs)\n\n\nclass BartForCausalLM(BartPretrainedModel):\n def __init__(self, config):\n super().__init__(config)\n config = copy.deepcopy(config)\n config.is_decoder = True\n config.is_encoder_decoder = False\n self.model = BartDecoderWrapper(config)\n\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.model.decoder.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.decoder.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model.decoder = decoder\n\n def get_decoder(self):\n return self.model.decoder\n\n @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n head_mask=None,\n encoder_head_mask=None,\n past_key_values=None,\n inputs_embeds=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n Args:\n 
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using :class:`~transformers.BartTokenizer`. See\n :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`\n for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n if the model is configured as a decoder.\n encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used\n in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:\n head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the heas is **masked**.\n\n encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):\n Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention\n on hidden heads. 
Mask values selected in ``[0, 1]``:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the heas is **masked**.\n\n past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up\n decoding.\n\n If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``\n (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`\n instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,\n config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are\n ignored (masked), the loss is only computed for the tokens with labels in ``[0, ...,\n config.vocab_size]``.\n use_cache (:obj:`bool`, `optional`):\n If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up\n decoding (see :obj:`past_key_values`).\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n output_attentions (:obj:`bool`, `optional`):\n Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under\n returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`):\n Whether or not to return the hidden states of all layers. 
See ``hidden_states`` under returned tensors\n for more detail.\n return_dict (:obj:`bool`, `optional`):\n Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.\n\n Returns:\n\n Example::\n\n >>> from transformers import BartTokenizer, BartForCausalLM\n\n >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')\n >>> model = BartForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False)\n >>> assert model.config.is_decoder, f\"{model.__class__} has to be configured as a decoder.\"\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n \"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model.decoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n head_mask=head_mask,\n encoder_head_mask=encoder_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n logits = self.lm_head(outputs[0])\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=loss,\n logits=logits,\n 
past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n cross_attentions=outputs.cross_attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_ids.shape)\n\n if past:\n input_ids = input_ids[:, -1:]\n # first step, decoder_cached_states are empty\n return {\n \"input_ids\": input_ids, # encoder_outputs is defined. input_ids not needed\n \"attention_mask\": attention_mask,\n \"past_key_values\": past,\n \"use_cache\": use_cache,\n }\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n return reordered_past\n" ]
[ [ "numpy.float32" ], [ "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.core.framework.node_def_pb2.NodeDef" ], [ "torch.nn.functional.softmax", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.isinf", "torch.isnan", "torch.nn.functional.dropout", "torch.zeros", "torch.cat", "torch.nn.Embedding", "torch.nn.LayerNorm", "torch.tanh", "torch.nn.Linear", "torch.tensor", "torch.bmm", "torch.arange", "torch.finfo", "torch.clamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
giftmischer69/squash
[ "78ce87bf42911b5065e02d9622137a613bc78634" ]
[ "pypefx/shell.py" ]
[ "import logging\nimport os\nfrom cmd import Cmd\nfrom glob import glob\nfrom os import listdir\nfrom os.path import join, isfile\nfrom pathlib import Path\nfrom typing import List\n\nimport yaml\nfrom numpy.core.defchararray import isnumeric\nfrom wasabi import msg\n\nfrom pypefx._version import __version__\nfrom pypefx.memento import Memento\nfrom pypefx.payload import Payload\nfrom pypefx.pipeline import Pipeline\nfrom pypefx.steps import (\n ExportStep,\n SoxTempoStep,\n SoxSpeedStep,\n SoxBassStep,\n SoxDitherStep,\n SoxGainStep,\n PrintStep,\n VstStep,\n Vst32Step,\n SoxCombineType,\n SpleeterStep,\n Step,\n)\n\n\nclass Shell(Cmd):\n def __init__(self, pipeline: Pipeline):\n super().__init__()\n self.prompt = \"fxsh> \"\n self.pipeline = pipeline\n\n def preloop(self) -> None:\n msg.good(f\"Hello from pyepfx Version: {__version__}\")\n self.do_help(\"\")\n self.init_memento()\n\n def do_q(self, line):\n \"\"\" Quits the Shell \"\"\"\n msg.info(\"Goodbye\")\n return True\n\n def do_quit(self, line):\n \"\"\" Quits the Shell \"\"\"\n return self.do_q(line)\n\n def do_display(self, line):\n \"\"\" Displays the current pipeline configuration \"\"\"\n msg.info(f\"Display pipeline: {self.pipeline.name}\")\n for step in self.pipeline.steps:\n msg.info(f\"\\t{type(step).__name__}\")\n if type(step) == SpleeterStep:\n for stp in step.vocal_steps:\n msg.info(f\"\\t\\tvocal_step: {type(stp).__name__}\")\n for stp in step.bass_steps:\n msg.info(f\"\\t\\tbass_step: {type(stp).__name__}\")\n for stp in step.other_steps:\n msg.info(f\"\\t\\tother_step: {type(stp).__name__}\")\n for stp in step.drum_steps:\n msg.info(f\"\\t\\tdrum_step: {type(stp).__name__}\")\n msg.info(f\"\\t\\tcombine_type: {step.combine_type}\")\n\n def do_save(self, line):\n \"\"\" Saves the current pipeline to a file \"\"\"\n if self.pipeline.name == \"untitled\":\n self.pipeline.name = self.ask_string(\"enter project name\")\n\n Path(\"./projects/\").mkdir(exist_ok=True)\n file_path = 
os.path.join(\"./projects/\", f\"{self.pipeline.name}.yaml\")\n logging.info(f\"Saving pipeline: {self.pipeline.name}\")\n with open(file_path, \"w\") as f:\n f.write(yaml.dump(self.pipeline))\n\n @Memento.undoable\n def do_load(self, line):\n \"\"\" Loads the pipeline from a file \"\"\"\n if line is None or line == \"\" or line.strip() == \"\":\n project_path = self.ask_file_indexed(\"./projects/\", \".yaml\")\n else:\n if line.endswith(\".yaml\"):\n project_path = line\n else:\n project_path = os.path.join(\"./projects/\", f\"{line}.yaml\")\n\n if Path(project_path).exists():\n with open(project_path, \"r\") as f:\n logging.debug(f\"loading pipeline: {project_path}\")\n self.pipeline = yaml.load(f, Loader=yaml.Loader)\n logging.debug(f\"Pipeline: {self.pipeline.name}\")\n logging.debug(yaml.dump(self.pipeline))\n else:\n msg.fail(f\"Project Path does not exist! {project_path}\")\n\n def do_process(self, in_file):\n \"\"\" Processes a song through the pipeline \"\"\"\n # if there is no export step, ask if user wants to export\n # https://stackoverflow.com/a/32705845\n if not any(isinstance(x, ExportStep) for x in self.pipeline.steps):\n if self.ask_bool(\"do you want to export the result?\"):\n out_file = self.ask_string(\n \"enter file name for output file (remember .wav or .mp3 extension)\"\n )\n\n if (\n not out_file.endswith(\".mp3\")\n and not out_file.endswith(\".wav\")\n and not out_file.endswith(\".flac\")\n ):\n out_file += self.ask_indexed([\".mp3\", \".wav\", \".flac\"])\n\n self.pipeline.add_step(ExportStep(out_file))\n\n if not self.is_present(in_file):\n in_file = self.ask_file_indexed(\".\", \".mp3\")\n\n payload = Payload(in_file)\n self.pipeline.process(payload)\n\n @Memento.undoable\n def do_add(self, line):\n \"\"\" Adds a processing step to the pipeline \"\"\"\n step_choices = [\n SoxTempoStep,\n SoxSpeedStep,\n SoxBassStep,\n SoxDitherStep,\n SoxGainStep,\n PrintStep,\n VstStep,\n Vst32Step,\n SpleeterStep,\n ExportStep,\n ]\n step_class = 
self.ask_step_indexed(step_choices)\n step = None\n if step_class == SoxTempoStep:\n logging.debug(\"Chose: Chose: SoxTempoStep\")\n factor = self.ask_float_with_default(\"enter tempo factor\", 1.0)\n step = SoxTempoStep(factor)\n elif step_class == SoxSpeedStep:\n logging.debug(\"Chose: SoxSpeedStep\")\n factor = self.ask_float_with_default(\"enter tempo factor\", 1.0)\n step = SoxTempoStep(factor)\n elif step_class == SoxBassStep:\n logging.debug(\"Chose: SoxBassStep\")\n gain_db = self.ask_float_with_default(\"enter bass gain db\", 0)\n frequency = self.ask_float_with_default(\"enter bass frequency\", 100)\n slope = self.ask_float_with_default(\"enter bass slope\", 0.5)\n step = SoxBassStep(gain_db, frequency, slope)\n elif step_class == SoxDitherStep:\n logging.debug(\"Chose: SoxDitherStep\")\n step = SoxDitherStep()\n elif step_class == SoxGainStep:\n logging.debug(\"Chose: SoxGainStep\")\n gain_db = self.ask_float_with_default(\"enter gain db\", 0)\n normalize = self.ask_bool(\"normalize audio?\")\n limiter = self.ask_bool(\"use limiter?\")\n step = SoxGainStep(gain_db, normalize, limiter)\n elif step_class == PrintStep:\n logging.debug(\"Chose: PrintStep\")\n step = PrintStep()\n elif step_class == VstStep:\n logging.debug(\"Chose: VstStep\")\n plugin_path = Path(\"./plugins/effects/64bit\").absolute()\n dll = self.ask_file_recursive_indexed(plugin_path, \".dll\")\n fxp = self.ask_file_recursive_indexed(plugin_path, \".fxp\")\n step = VstStep(dll, fxp)\n elif step_class == Vst32Step:\n logging.debug(\"Chose: Vst32Step\")\n plugin_path = Path(\"./plugins/effects/32bit\").absolute()\n dll = self.ask_file_recursive_indexed(plugin_path, \".dll\")\n fxp = self.ask_file_recursive_indexed(plugin_path, \".fxp\")\n step = Vst32Step(dll, fxp)\n elif step_class == SpleeterStep:\n logging.debug(\"Chose: SpleeterStep\")\n bass_steps = self.ask_steps_loop(\n \"add processing steps for the bass part of the song\"\n )\n drum_steps = self.ask_steps_loop(\n \"add processing 
steps for the drums of the song\"\n )\n vocal_steps = self.ask_steps_loop(\n \"add processing steps for the vocals of the song\"\n )\n other_steps = self.ask_steps_loop(\n \"add processing steps for other parts of the song\"\n )\n combine_types = [SoxCombineType.MERGE, SoxCombineType.MIX, SoxCombineType.CONCATENATE,\n SoxCombineType.SEQUENCE]\n msg.info(\"Choose combine type: \")\n combine_type = self.ask_indexed(combine_types)\n # bass steps\n # drum steps\n # vocal steps\n # other steps\n # combine_type\n step = SpleeterStep(bass_steps, drum_steps, vocal_steps, other_steps, combine_type)\n elif step_class == ExportStep:\n logging.debug(\"Chose: ExportStep\")\n out_file = self.ask_string(\"enter file name for output file\")\n if (\n not out_file.endswith(\".mp3\")\n and not out_file.endswith(\".wav\")\n and not out_file.endswith(\".flac\")\n ):\n out_file += self.ask_indexed([\".mp3\", \".wav\", \".flac\"])\n step = ExportStep(out_file)\n\n if step is not None:\n self.pipeline.add_step(step)\n return\n\n msg.error(\"Something went wrong\")\n\n @Memento.undoable\n def do_remove(self, line):\n \"\"\" removes a step from the pipeline \"\"\"\n step = self.ask_indexed(self.pipeline.steps)\n self.pipeline.steps.remove(step)\n\n @Memento.undoable\n def do_rearrange(self, line):\n \"\"\" moves a step to a different position \"\"\"\n step = self.ask_indexed(self.pipeline.steps)\n index = self.ask_int(\"at which position should the step be?\")\n self.pipeline.steps.remove(step)\n self.pipeline.steps.insert(index, step)\n\n def do_undo(self, line):\n \"\"\" undoes the last operation \"\"\"\n self.pipeline = Memento.undo()\n\n def do_redo(self, line):\n \"\"\" redoes the last operation \"\"\"\n self.pipeline = Memento.redo()\n\n def do_rename(self, line):\n \"\"\" renames the current project \"\"\"\n self.pipeline.name = self.ask_string(\"enter new project name\")\n\n def ask_string(self, prompt):\n return input(f\"{prompt}\\n : \")\n\n def ask_int(self, dialog: str):\n 
return int(input(f\"{dialog}\\n: \"))\n\n def ask_float_with_default(self, dialog: str, default: float):\n inp = input(f\"{dialog} (default: {default})\\n : \")\n if inp is None or inp == \"\" or inp.strip() == \"\":\n choice = default\n else:\n choice = float(inp)\n return choice\n\n def ask_bool(self, dialog: str):\n return \"y\" in input(f\"{dialog} (y/N)\\n: \").lower()\n\n def ask_file_indexed(self, initial_folder, ext):\n logging.debug(f\"initial_folder: {initial_folder}\")\n if not initial_folder:\n initial_folder = os.getcwd()\n\n only_files = [\n join(initial_folder, f)\n for f in listdir(initial_folder)\n if (isfile(join(initial_folder, f)) and f.lower().endswith(ext))\n ]\n logging.debug(f\"FILES: {only_files}\")\n return self.ask_indexed(only_files)\n\n def ask_file_recursive_indexed(self, initial_folder, ext):\n logging.debug(f\"initial_folder: {initial_folder}\")\n if not initial_folder:\n initial_folder = os.getcwd()\n\n only_files = [\n y\n for x in os.walk(initial_folder)\n for y in glob(os.path.join(x[0], f\"*{ext}\"))\n ]\n logging.debug(f\"FILES: {only_files}\")\n return self.ask_file_name_indexed(only_files)\n\n def ask_indexed(self, options_list):\n for index, option in enumerate(options_list):\n print(f\"({index})\".ljust(5, \" \"), \" \", option)\n choice = self.ask_int(\"enter option number (0-n)\")\n return options_list[choice]\n\n def ask_step_indexed(self, steps_list):\n for index, option in enumerate(steps_list):\n print(f\"({index})\".ljust(5, \" \"), \" \", option.__name__)\n choice = self.ask_int(\"enter option number (0-n)\")\n return steps_list[choice]\n\n def ask_file_name_indexed(self, steps_list):\n for index, option in enumerate(steps_list):\n print(f\"({index})\".ljust(5, \" \"), \" \", Path(option).name)\n choice = self.ask_int(\"enter option number (0-n)\")\n return steps_list[choice]\n\n def ask_steps_loop(self, prompt) -> List[Step]:\n step_choices = [\n SoxBassStep,\n SoxDitherStep,\n SoxGainStep,\n PrintStep,\n 
VstStep,\n Vst32Step,\n ExportStep,\n ]\n inp = \"\"\n steps = []\n msg.info(prompt)\n while \"q\" not in inp:\n for index, option in enumerate(step_choices):\n print(f\"({index})\".ljust(5, \" \"), \" \", option.__name__)\n inp = input(\"Choose Step (0-n)! (q to quit choosing steps)\\n: \")\n if isnumeric(inp):\n choice = step_choices[int(inp)]\n if choice == SoxBassStep:\n logging.debug(\"Chose: SoxBassStep\")\n gain_db = self.ask_float_with_default(\"enter bass gain db\", 0)\n frequency = self.ask_float_with_default(\"enter bass frequency\", 100)\n slope = self.ask_float_with_default(\"enter bass slope\", 0.5)\n steps.append(SoxBassStep(gain_db, frequency, slope))\n elif choice == SoxDitherStep:\n logging.debug(\"Chose: SoxDitherStep\")\n steps.append(SoxDitherStep())\n elif choice == SoxGainStep:\n logging.debug(\"Chose: SoxGainStep\")\n gain_db = self.ask_float_with_default(\"enter gain db\", 0)\n normalize = self.ask_bool(\"normalize audio?\")\n limiter = self.ask_bool(\"use limiter?\")\n steps.append(SoxGainStep(gain_db, normalize, limiter))\n elif choice == PrintStep:\n logging.debug(\"Chose: PrintStep\")\n steps.append(PrintStep())\n elif choice == VstStep:\n logging.debug(\"Chose: VstStep\")\n plugin_path = Path(\"./plugins/effects/64bit\").absolute()\n dll = self.ask_file_recursive_indexed(plugin_path, \".dll\")\n fxp = self.ask_file_recursive_indexed(plugin_path, \".fxp\")\n steps.append(VstStep(dll, fxp))\n elif choice == Vst32Step:\n logging.debug(\"Chose: Vst32Step\")\n plugin_path = Path(\"./plugins/effects/32bit\").absolute()\n dll = self.ask_file_recursive_indexed(plugin_path, \".dll\")\n fxp = self.ask_file_recursive_indexed(plugin_path, \".fxp\")\n steps.append(Vst32Step(dll, fxp))\n elif choice == ExportStep:\n logging.debug(\"Chose: ExportStep\")\n out_file = self.ask_string(\"enter file name for output file\")\n if (\n not out_file.endswith(\".mp3\")\n and not out_file.endswith(\".wav\")\n and not out_file.endswith(\".flac\")\n ):\n out_file 
+= self.ask_indexed([\".mp3\", \".wav\", \".flac\"])\n steps.append(ExportStep(out_file))\n\n return steps\n\n def is_present(self, in_file):\n return not (in_file is None or in_file == \"\" or not Path(in_file).exists())\n\n @Memento.undoable\n def init_memento(self):\n pass\n" ]
[ [ "numpy.core.defchararray.isnumeric" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
ktobah/client
[ "e7d6ce75f9ab5158139ceed5d86c7afde8a21009" ]
[ "tests/test_data_types.py" ]
[ "import wandb\nfrom wandb import data_types\nimport numpy as np\nimport pytest\nimport PIL\nimport os\nimport matplotlib\nimport six\nimport sys\n\nfrom wandb.data_types import ImageMask, BoundingBoxes2D\n\nmatplotlib.use(\"Agg\")\nfrom click.testing import CliRunner\nimport matplotlib.pyplot as plt\nfrom click.testing import CliRunner\n\nfrom . import utils\n\ndata = np.random.randint(255, size=(1000))\n\n\ndef test_raw_data():\n wbhist = wandb.Histogram(data)\n assert len(wbhist.histogram) == 64\n\n\ndef test_np_histogram():\n wbhist = wandb.Histogram(np_histogram=np.histogram(data))\n assert len(wbhist.histogram) == 10\n\n\ndef test_manual_histogram():\n wbhist = wandb.Histogram(np_histogram=([1, 2, 4], [3, 10, 20, 0]))\n assert len(wbhist.histogram) == 3\n\n\ndef test_invalid_histogram():\n with pytest.raises(ValueError):\n wbhist = wandb.Histogram(np_histogram=([1, 2, 3], [1]))\n\n\nimage = np.zeros((28, 28))\n\ndef test_captions():\n wbone = wandb.Image(image, caption=\"Cool\")\n wbtwo = wandb.Image(image, caption=\"Nice\")\n assert wandb.Image.all_captions([wbone, wbtwo]) == [\"Cool\", \"Nice\"]\n\n\ndef test_bind_image():\n with CliRunner().isolated_filesystem():\n run = wandb.wandb_run.Run()\n wb_image = wandb.Image(image)\n wb_image.bind_to_run(run, 'stuff', 10)\n assert wb_image.is_bound()\n\n with pytest.raises(RuntimeError):\n wb_image.bind_to_run(run, 'stuff', 10)\n\nfull_box = {\n \"position\": {\n \"middle\" : (0.5,0.5), \"width\" : 0.1, \"height\": 0.2\n },\n \"class_label\": \"car\",\n \"box_caption\": \"This is a big car\",\n \"scores\": {\n \"acc\": 0.3\n }\n}\n\n# Helper function return a new dictionary with the key removed\ndef dissoc(d, key):\n new_d = d.copy()\n new_d.pop(key)\n return new_d\n\noptional_keys = [\"class_label\", \"box_caption\", \"scores\"]\nboxes_with_removed_optional_args = [dissoc(full_box, k) for k in optional_keys]\n\ndef test_image_accepts_bounding_boxes():\n with CliRunner().isolated_filesystem():\n run = 
wandb.wandb_run.Run()\n img = wandb.Image(image, boxes=[full_box])\n img.bind_to_run(run, \"images\", 0)\n img_json = img.to_json(run)\n path = img_json[\"boxes\"][\"path\"]\n assert os.path.exists(os.path.join(run.dir, path))\n\ndef test_image_accepts_bounding_boxes_optional_args():\n with CliRunner().isolated_filesystem():\n run = wandb.wandb_run.Run()\n img = data_types.Image(image, boxes=boxes_with_removed_optional_args)\n img.bind_to_run(run, \"images\", 0)\n img_json = img.to_json(run)\n path = img_json[\"boxes\"][\"path\"]\n assert os.path.exists(os.path.join(run.dir, path))\n\nstandard_mask = {\n \"mask_data\": np.array([[1,2,2,2], [2,3,3,4], [4,4,4,4], [4,4,4,2]]),\n \"class_labels\": { \n 1: \"car\",\n 2: \"pedestrian\", \n 3: \"tractor\", \n 4: \"cthululu\" \n }\n}\n\ndef test_image_accepts_masks():\n with CliRunner().isolated_filesystem():\n run = wandb.wandb_run.Run()\n img = wandb.Image(image, masks={\"overlay\": standard_mask})\n img.bind_to_run(run, \"images\", 0)\n img_json = img.to_json(run)\n path = img_json[\"masks\"][\"overlay\"][\"path\"]\n assert os.path.exists(os.path.join(run.dir, path))\n\ndef test_cant_serialize_to_other_run():\n \"\"\"This isn't implemented yet. 
Should work eventually.\n \"\"\"\n with CliRunner().isolated_filesystem():\n run = wandb.wandb_run.Run()\n other_run = wandb.wandb_run.Run()\n wb_image = wandb.Image(image)\n\n wb_image.bind_to_run(run, 'stuff', 10)\n\n with pytest.raises(AssertionError):\n wb_image.to_json(other_run)\n\n\ndef test_image_seq_to_json():\n with CliRunner().isolated_filesystem():\n run = wandb.wandb_run.Run()\n wb_image = wandb.Image(image)\n meta = wandb.Image.seq_to_json([wb_image], run, \"test\", 'summary')\n assert os.path.exists(os.path.join(run.dir, 'media', 'images', 'test_summary.png'))\n\n meta_expected = {\n '_type': 'images',\n 'count': 1,\n 'height': 28,\n 'width': 28,\n }\n assert utils.subdict(meta, meta_expected) == meta_expected\n\ndef test_transform_caps_at_65500(caplog):\n large_image = np.random.randint(255, size=(10, 1000))\n large_list = [wandb.Image(large_image)] * 100\n with CliRunner().isolated_filesystem():\n run = wandb.wandb_run.Run()\n meta = wandb.Image.seq_to_json(large_list, run, \"test2\", 0)\n expected = {'_type': 'images', 'count': 65, 'height': 10, 'width': 1000}\n assert utils.subdict(meta, expected) == expected\n assert os.path.exists(os.path.join(run.dir, \"media/images/test2_0.png\"))\n assert 'Only 65 images will be uploaded. The maximum total width for a set of thumbnails is 65,500px, or 65 images, each with a width of 1000 pixels.' 
in caplog.text\n\ndef test_audio_sample_rates():\n audio1 = np.random.uniform(-1, 1, 44100)\n audio2 = np.random.uniform(-1, 1, 88200)\n wbaudio1 = wandb.Audio(audio1, sample_rate=44100)\n wbaudio2 = wandb.Audio(audio2, sample_rate=88200)\n assert wandb.Audio.sample_rates([wbaudio1, wbaudio2]) == [44100, 88200]\n # test with missing sample rate\n with pytest.raises(ValueError):\n wbaudio3 = wandb.Audio(audio1)\n\n\ndef test_audio_durations():\n audio1 = np.random.uniform(-1, 1, 44100)\n audio2 = np.random.uniform(-1, 1, 88200)\n wbaudio1 = wandb.Audio(audio1, sample_rate=44100)\n wbaudio2 = wandb.Audio(audio2, sample_rate=44100)\n assert wandb.Audio.durations([wbaudio1, wbaudio2]) == [1.0, 2.0]\n\n\ndef test_audio_captions():\n audio = np.random.uniform(-1, 1, 44100)\n sample_rate = 44100\n caption1 = \"This is what a dog sounds like\"\n caption2 = \"This is what a chicken sounds like\"\n # test with all captions\n wbaudio1 = wandb.Audio(audio, sample_rate=sample_rate, caption=caption1)\n wbaudio2 = wandb.Audio(audio, sample_rate=sample_rate, caption=caption2)\n assert wandb.Audio.captions([wbaudio1, wbaudio2]) == [caption1, caption2]\n # test with no captions\n wbaudio3 = wandb.Audio(audio, sample_rate=sample_rate)\n wbaudio4 = wandb.Audio(audio, sample_rate=sample_rate)\n assert wandb.Audio.captions([wbaudio3, wbaudio4]) == False\n # test with some captions\n wbaudio5 = wandb.Audio(audio, sample_rate=sample_rate)\n wbaudio6 = wandb.Audio(audio, sample_rate=sample_rate, caption=caption2)\n assert wandb.Audio.captions([wbaudio5, wbaudio6]) == ['', caption2]\n\n\ndef test_audio_to_json():\n audio = np.zeros(44100)\n with CliRunner().isolated_filesystem():\n run = wandb.wandb_run.Run()\n meta = wandb.Audio.seq_to_json(\n [wandb.Audio(audio, sample_rate=44100)], run, \"test\", 0)\n assert os.path.exists(os.path.join(run.dir, meta['audio'][0]['path']))\n\n meta_expected = {\n '_type': 'audio',\n 'count': 1,\n 'sampleRates': [44100],\n 'durations': [1.0],\n }\n assert 
utils.subdict(meta, meta_expected) == meta_expected\n\n audio_expected = {\n '_type': 'audio-file',\n 'caption': None,\n 'sample_rate': 44100,\n 'size': 88244,\n }\n assert utils.subdict(meta['audio'][0], audio_expected) == audio_expected\n\n\ndef test_guess_mode():\n image = np.random.randint(255, size=(28, 28, 3))\n wbimg = wandb.Image(image)\n assert wbimg._image.mode == \"RGB\"\n\n\ndef test_pil():\n pil = PIL.Image.new(\"L\", (28, 28))\n img = wandb.Image(pil)\n assert img._image == pil\n\n\ndef test_matplotlib_image():\n plt.plot([1, 2, 2, 4])\n img = wandb.Image(plt)\n assert img._image.width == 640\n\[email protected](sys.version_info < (3, 6), reason=\"No moviepy.editor in py2\")\ndef test_video_numpy():\n with CliRunner().isolated_filesystem():\n run = wandb.wandb_run.Run()\n video = np.random.randint(255, size=(10,3,28,28))\n vid = wandb.Video(video)\n vid.bind_to_run(run, \"videos\", 0)\n assert vid.to_json(run)[\"path\"].endswith(\".gif\")\n\[email protected](sys.version_info < (3, 6), reason=\"No moviepy.editor in py2\")\ndef test_video_numpy_multi():\n with CliRunner().isolated_filesystem():\n run = wandb.wandb_run.Run()\n video = np.random.random(size=(2,10,3,28,28))\n vid = wandb.Video(video)\n vid.bind_to_run(run, \"videos\", 0)\n assert vid.to_json(run)[\"path\"].endswith(\".gif\")\n\[email protected](sys.version_info < (3, 6), reason=\"No moviepy.editor in py2\")\ndef test_video_numpy_invalid():\n run = wandb.wandb_run.Run()\n video = np.random.random(size=(3,28,28))\n with pytest.raises(ValueError):\n vid = wandb.Video(video)\n\ndef test_video_path():\n with CliRunner().isolated_filesystem():\n run = wandb.wandb_run.Run()\n with open(\"video.mp4\", \"w\") as f:\n f.write(\"00000\")\n vid = wandb.Video(\"video.mp4\")\n vid.bind_to_run(run, \"videos\", 0)\n assert vid.to_json(run)[\"path\"].endswith(\".mp4\")\n\ndef test_video_path_invalid():\n run = wandb.wandb_run.Run()\n with CliRunner().isolated_filesystem():\n with open(\"video.avi\", \"w\") 
as f:\n f.write(\"00000\")\n with pytest.raises(ValueError):\n vid = wandb.Video(\"video.avi\")\n\ndef test_html_str():\n with CliRunner().isolated_filesystem():\n run = wandb.wandb_run.Run()\n html = wandb.Html(\"<html><body><h1>Hello</h1></body></html>\")\n wandb.Html.seq_to_json([html], run, \"rad\", \"summary\")\n assert os.path.exists(os.path.join(run.dir, \"media/html/rad_summary_0.html\"))\n\n\ndef test_html_styles():\n with CliRunner().isolated_filesystem():\n pre = '<base target=\"_blank\"><link rel=\"stylesheet\" type=\"text/css\" href=\"https://app.wandb.ai/normalize.css\" />'\n html = wandb.Html(\"<html><body><h1>Hello</h1></body></html>\")\n assert html.html == \"<html><head>\"+pre + \\\n \"</head><body><h1>Hello</h1></body></html>\"\n html = wandb.Html(\n \"<html><head></head><body><h1>Hello</h1></body></html>\")\n assert html.html == \"<html><head>\"+pre + \\\n \"</head><body><h1>Hello</h1></body></html>\"\n html = wandb.Html(\"<h1>Hello</h1>\")\n assert html.html == pre + \"<h1>Hello</h1>\"\n html = wandb.Html(\"<h1>Hello</h1>\", inject=False)\n assert html.html == \"<h1>Hello</h1>\"\n\n\ndef test_html_file():\n with CliRunner().isolated_filesystem():\n run = wandb.wandb_run.Run()\n with open(\"test.html\", \"w\") as f:\n f.write(\"<html><body><h1>Hello</h1></body></html>\")\n html = wandb.Html(open(\"test.html\"))\n wandb.Html.seq_to_json([html, html], run, \"rad\", \"summary\")\n assert os.path.exists(os.path.join(run.dir, \"media/html/rad_summary_0.html\"))\n assert os.path.exists(os.path.join(run.dir, \"media/html/rad_summary_0.html\"))\n\n\ndef test_table_default():\n table = wandb.Table()\n table.add_data(\"Some awesome text\", \"Positive\", \"Negative\")\n assert table._to_table_json() == {\n \"data\": [[\"Some awesome text\", \"Positive\", \"Negative\"]],\n \"columns\": [\"Input\", \"Output\", \"Expected\"]\n }\n\n\ndef test_table_custom():\n table = wandb.Table([\"Foo\", \"Bar\"])\n table.add_data(\"So\", \"Cool\")\n table.add_row(\"&\", 
\"Rad\")\n assert table._to_table_json() == {\n \"data\": [[\"So\", \"Cool\"], [\"&\", \"Rad\"]],\n \"columns\": [\"Foo\", \"Bar\"]\n }\n\n\npoint_cloud_1 = np.array([[0, 0, 0, 1],\n [0, 0, 1, 13],\n [0, 1, 0, 2],\n [0, 1, 0, 4]])\n\npoint_cloud_2 = np.array([[0, 0, 0],\n [0, 0, 1],\n [0, 1, 0],\n [0, 1, 0]])\n\npoint_cloud_3 = np.array([[0, 0, 0, 100, 100, 100],\n [0, 0, 1, 100, 100, 100],\n [0, 1, 0, 100, 100, 100],\n [0, 1, 0, 100, 100, 100]])\n\n\ndef test_object3d_numpy():\n obj = wandb.Object3D(point_cloud_1)\n obj = wandb.Object3D(point_cloud_2)\n obj = wandb.Object3D(point_cloud_3)\n\n\ndef test_object3d_obj():\n obj = wandb.Object3D(open(\"tests/fixtures/cube.obj\"))\n\n\ndef test_object3d_gltf():\n obj = wandb.Object3D(open(\"tests/fixtures/Box.gltf\"))\n\n\ndef test_object3d_io():\n f = open(\"tests/fixtures/Box.gltf\")\n body = f.read()\n\n ioObj = six.StringIO(six.u(body))\n obj = wandb.Object3D(ioObj, file_type=\"obj\")\n\n\ndef test_object3d_unsupported_numpy():\n with pytest.raises(ValueError):\n wandb.Object3D(np.array([1]))\n\n with pytest.raises(ValueError):\n wandb.Object3D(np.array([[1, 2], [3, 4], [1, 2]]))\n\n with pytest.raises(ValueError):\n wandb.Object3D(np.array([1, 3, 4, 5, 6, 7, 8, 8, 3]))\n\n with pytest.raises(ValueError):\n wandb.Object3D(np.array([[1, 3, 4, 5, 6, 7, 8, 8, 3]]))\n\n f = open(\"tests/fixtures/Box.gltf\")\n body = f.read()\n ioObj = six.StringIO(six.u(body))\n\n with pytest.raises(ValueError):\n obj = wandb.Object3D(ioObj)\n\n\ndef test_object3d_seq_to_json():\n cwd = os.getcwd()\n\n with CliRunner().isolated_filesystem():\n run = wandb.wandb_run.Run()\n\n obj = wandb.Object3D.seq_to_json([\n wandb.Object3D(open(os.path.join(cwd, \"tests/fixtures/Box.gltf\"))),\n wandb.Object3D(open(os.path.join(cwd, \"tests/fixtures/cube.obj\"))),\n wandb.Object3D(point_cloud_1)\n ], run, \"pc\", 1)\n\n print(obj)\n\n\n assert os.path.exists(os.path.join(run.dir, \"media/object3D/Box_be115756.gltf\"))\n assert 
os.path.exists(os.path.join(run.dir, \"media/object3D/cube_afff12bc.obj\"))\n assert os.path.exists(os.path.join(run.dir, \n \"media/object3D/pc_1_2.pts.json\"))\n\n assert obj[\"_type\"] == \"object3D\"\n assert obj[\"filenames\"] == [\n \"Box_be115756.gltf\",\n \"cube_afff12bc.obj\",\n \"pc_1_2.pts.json\",\n ]\n\n\ndef test_table_init():\n table = wandb.Table(data=[[\"Some awesome text\", \"Positive\", \"Negative\"]])\n assert table._to_table_json() == {\n \"data\": [[\"Some awesome text\", \"Positive\", \"Negative\"]],\n \"columns\": [\"Input\", \"Output\", \"Expected\"]}\n\ndef test_graph():\n graph = wandb.Graph()\n node_a = data_types.Node('a', 'Node A', size=(4,))\n node_b = data_types.Node('b', 'Node B', size=(16,))\n graph.add_node(node_a)\n graph.add_node(node_b)\n graph.add_edge(node_a, node_b)\n assert graph._to_graph_json() == {\n 'edges': [['a', 'b']],\n 'format': 'keras',\n 'nodes': [{'id': 'a', 'name': 'Node A', 'size': (4,)},\n {'id': 'b', 'name': 'Node B', 'size': (16,)}]}\n\ndef test_numpy_arrays_to_list():\n conv = data_types.numpy_arrays_to_lists\n assert conv(np.array((1,2,))) == [1, 2]\n assert conv([np.array((1,2,))]) == [[1, 2]]\n assert conv(np.array(({'a': [np.array((1,2,))]}, 3))) == [{'a': [[1, 2]]}, 3]\n\n" ]
[ [ "numpy.random.random", "numpy.histogram", "matplotlib.use", "matplotlib.pyplot.plot", "numpy.random.uniform", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AntonFirc/SUR
[ "3173a80731e601cdcc590166a8ba2ef801e60325" ]
[ "speech/speech_keras_predict.py" ]
[ "from pathlib import Path\nfrom tqdm import tqdm\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nimport speech_keras_config as Config\nimport speech_keras_data_man as dm\nimport os\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\nimport collections\n\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\n\nprint(f\"Restoring model {Config.model_save_filename}\")\nmodel = keras.models.load_model(Config.model_save_filename)\n\nprint(f\"Restoring class names from {Config.class_names_savefile}\")\nclass_names = np.load(Config.class_names_savefile)\n\nEVAL_DIR = Path('./dataset/dev')\nLABEL_DIR = Path('./dataset/eval')\n\nattempts = 0\ntrue_accept = 0\n\nfor eval_speaker in tqdm(EVAL_DIR.iterdir(), 'Eval', len(list(EVAL_DIR.iterdir())), unit='speakers'):\n\n speaker_idx = int(str(eval_speaker).split('/').pop())\n\n for speaker_file in eval_speaker.iterdir():\n if not str(speaker_file).endswith('.wav'):\n continue\n\n samples, sampling_rate = tf.audio.decode_wav(\n tf.io.read_file(str(speaker_file)), desired_channels=1\n )\n if sampling_rate == Config.sampling_rate:\n # Number of slices of 16000 each that can be generated from the noise sample\n slices = int(samples.shape[0] / Config.sampling_rate)\n try:\n samples = tf.split(samples[: slices * Config.sampling_rate], slices)\n segment_ffts = dm.audio_to_fft(samples)\n y_pred = model.predict(segment_ffts)\n tot_probs = np.average(y_pred, axis=0)\n pred_class = int(class_names[np.argmax(tot_probs)])\n # print(f\"Speaker: {speaker_idx} - Predicted: {pred_class}\")\n\n if pred_class == speaker_idx:\n true_accept += 1\n except:\n print(str(speaker_file))\n else:\n print(\"Sampling rate for {} is incorrect. 
Ignoring it\".format(str(speaker_file)))\n continue\n\n attempts += 1\n\nacc = true_accept / attempts\nprint(f\"Total accuracy: {acc * 100}%\")\n\nresult_file = open(\"speech_keras.txt\", \"w\")\n\nfor speaker_file in tqdm(LABEL_DIR.iterdir(), 'Label', len(list(LABEL_DIR.iterdir())), unit='speakers'):\n if not str(speaker_file).endswith('.wav'):\n continue\n\n samples, sampling_rate = tf.audio.decode_wav(\n tf.io.read_file(str(speaker_file)), desired_channels=1\n )\n if sampling_rate == Config.sampling_rate:\n # Number of slices of 16000 each that can be generated from the noise sample\n slices = int(samples.shape[0] / Config.sampling_rate)\n try:\n samples = tf.split(samples[: slices * Config.sampling_rate], slices)\n segment_ffts = dm.audio_to_fft(samples)\n y_pred = model.predict(segment_ffts)\n\n tot_probs = np.average(y_pred, axis=0)\n pred_class = int(class_names[np.argmax(tot_probs)])\n log_probs = {}\n\n for i in range(len(tot_probs)):\n log_probs[int(class_names[i])] = np.log(tot_probs[i])\n\n log_probs = collections.OrderedDict(sorted(log_probs.items()))\n\n res_line = '{0} {1} {2}\\n'.format(os.path.basename(speaker_file).replace('.wav', ''), pred_class,\n ' '.join(str(x) for x in log_probs.values()))\n\n result_file.write(res_line)\n\n except:\n print(str(speaker_file))\n else:\n print(\"Sampling rate for {} is incorrect. Ignoring it\".format(str(speaker_file)))\n continue\n\nresult_file.close()\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.log", "tensorflow.compat.v1.ConfigProto", "numpy.argmax", "tensorflow.compat.v1.InteractiveSession", "numpy.load", "tensorflow.split", "numpy.average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
daimon99/distributed
[ "85b3b99bfe25e93cfcaf1d1f9a3f7408fb2e29c1" ]
[ "distributed/dashboard/components/scheduler.py" ]
[ "import logging\nimport math\nimport operator\nimport os\nfrom collections import defaultdict\nfrom numbers import Number\n\nfrom bokeh.core.properties import without_property_validation\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import column, row\nfrom bokeh.models import (\n AdaptiveTicker,\n Arrow,\n BasicTicker,\n BoxSelectTool,\n BoxZoomTool,\n CDSView,\n ColorBar,\n ColumnDataSource,\n DataRange1d,\n GroupFilter,\n HoverTool,\n NumberFormatter,\n NumeralTickFormatter,\n OpenURL,\n Panel,\n PanTool,\n Range1d,\n ResetTool,\n Tabs,\n TapTool,\n Title,\n VeeHead,\n WheelZoomTool,\n value,\n)\nfrom bokeh.models.widgets import DataTable, TableColumn\nfrom bokeh.models.widgets.markups import Div\nfrom bokeh.palettes import Viridis11\nfrom bokeh.plotting import figure\nfrom bokeh.themes import Theme\nfrom bokeh.transform import cumsum, factor_cmap, linear_cmap\nfrom tlz import curry, pipe\nfrom tlz.curried import concat, groupby, map\nfrom tornado import escape\n\nimport dask\nfrom dask import config\nfrom dask.utils import format_bytes, format_time, key_split, parse_timedelta\n\ntry:\n import numpy as np\nexcept ImportError:\n np = False\n\nfrom distributed.dashboard.components import add_periodic_callback\nfrom distributed.dashboard.components.shared import (\n DashboardComponent,\n ProfileServer,\n ProfileTimePlot,\n SystemMonitor,\n)\nfrom distributed.dashboard.utils import BOKEH_VERSION, PROFILING, transpose, update\nfrom distributed.diagnostics.graph_layout import GraphLayout\nfrom distributed.diagnostics.progress_stream import color_of, progress_quads\nfrom distributed.diagnostics.task_stream import TaskStreamPlugin\nfrom distributed.diagnostics.task_stream import color_of as ts_color_of\nfrom distributed.diagnostics.task_stream import colors as ts_color_lookup\nfrom distributed.metrics import time\nfrom distributed.utils import Log, log_errors\n\nif dask.config.get(\"distributed.dashboard.export-tool\"):\n from distributed.dashboard.export_tool 
import ExportTool\nelse:\n ExportTool = None\n\nlogger = logging.getLogger(__name__)\n\nfrom jinja2 import Environment, FileSystemLoader\n\nenv = Environment(\n loader=FileSystemLoader(\n os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"http\", \"templates\")\n )\n)\n\nBOKEH_THEME = Theme(os.path.join(os.path.dirname(__file__), \"..\", \"theme.yaml\"))\nTICKS_1024 = {\"base\": 1024, \"mantissas\": [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]}\nXLABEL_ORIENTATION = -math.pi / 9 # slanted downwards 20 degrees\n\n\nlogos_dict = {\n \"numpy\": \"statics/images/numpy.png\",\n \"pandas\": \"statics/images/pandas.png\",\n \"builtins\": \"statics/images/python.png\",\n}\n\n\nclass Occupancy(DashboardComponent):\n \"\"\"Occupancy (in time) per worker\"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\n \"occupancy\": [0, 0],\n \"worker\": [\"a\", \"b\"],\n \"x\": [0.0, 0.1],\n \"y\": [1, 2],\n \"ms\": [1, 2],\n \"color\": [\"red\", \"blue\"],\n \"escaped_worker\": [\"a\", \"b\"],\n }\n )\n\n self.root = figure(\n title=\"Occupancy\",\n tools=\"\",\n toolbar_location=\"above\",\n id=\"bk-occupancy-plot\",\n x_axis_type=\"datetime\",\n min_border_bottom=50,\n **kwargs,\n )\n rect = self.root.rect(\n source=self.source, x=\"x\", width=\"ms\", y=\"y\", height=0.9, color=\"color\"\n )\n rect.nonselection_glyph = None\n\n self.root.xaxis.minor_tick_line_alpha = 0\n self.root.yaxis.visible = False\n self.root.ygrid.visible = False\n # fig.xaxis[0].formatter = NumeralTickFormatter(format='0.0s')\n self.root.x_range.start = 0\n\n tap = TapTool(callback=OpenURL(url=\"./info/worker/@escaped_worker.html\"))\n\n hover = HoverTool()\n hover.tooltips = \"@worker : @occupancy s.\"\n hover.point_policy = \"follow_mouse\"\n self.root.add_tools(hover, tap)\n\n @without_property_validation\n def update(self):\n with log_errors():\n workers = self.scheduler.workers.values()\n\n y = 
list(range(len(workers)))\n occupancy = [ws.occupancy for ws in workers]\n ms = [occ * 1000 for occ in occupancy]\n x = [occ / 500 for occ in occupancy]\n total = sum(occupancy)\n color = []\n for ws in workers:\n if ws in self.scheduler.idle:\n color.append(\"red\")\n elif ws in self.scheduler.saturated:\n color.append(\"green\")\n else:\n color.append(\"blue\")\n\n if total:\n self.root.title.text = (\n f\"Occupancy -- total time: {format_time(total)} \"\n f\"wall time: {format_time(total / self.scheduler.total_nthreads)}\"\n )\n else:\n self.root.title.text = \"Occupancy\"\n\n if occupancy:\n result = {\n \"occupancy\": occupancy,\n \"worker\": [ws.address for ws in workers],\n \"ms\": ms,\n \"color\": color,\n \"escaped_worker\": [escape.url_escape(ws.address) for ws in workers],\n \"x\": x,\n \"y\": y,\n }\n\n update(self.source, result)\n\n\nclass ProcessingHistogram(DashboardComponent):\n \"\"\"How many tasks are on each worker\"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\"left\": [1, 2], \"right\": [10, 10], \"top\": [0, 0]}\n )\n\n self.root = figure(\n title=\"Tasks Processing (Histogram)\",\n id=\"bk-nprocessing-histogram-plot\",\n name=\"processing\",\n y_axis_label=\"frequency\",\n tools=\"\",\n **kwargs,\n )\n\n self.root.xaxis.minor_tick_line_alpha = 0\n self.root.ygrid.visible = False\n\n self.root.toolbar_location = None\n\n self.root.quad(\n source=self.source,\n left=\"left\",\n right=\"right\",\n bottom=0,\n top=\"top\",\n color=\"deepskyblue\",\n fill_alpha=0.5,\n )\n\n @without_property_validation\n def update(self):\n L = [len(ws.processing) for ws in self.scheduler.workers.values()]\n counts, x = np.histogram(L, bins=40)\n self.source.data.update({\"left\": x[:-1], \"right\": x[1:], \"top\": counts})\n\n\ndef _memory_color(current: int, limit: int) -> str:\n \"\"\"Dynamic color used by WorkersMemory and ClusterMemory\"\"\"\n if limit 
and current > limit:\n return \"red\"\n if limit and current > limit / 2:\n return \"orange\"\n return \"blue\"\n\n\nclass ClusterMemory(DashboardComponent):\n \"\"\"Total memory usage on the cluster\"\"\"\n\n def __init__(self, scheduler, width=600, **kwargs):\n with log_errors():\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\n \"width\": [0] * 4,\n \"x\": [0] * 4,\n \"y\": [0] * 4,\n \"color\": [\"blue\", \"blue\", \"blue\", \"grey\"],\n \"alpha\": [1, 0.7, 0.4, 1],\n \"proc_memory\": [0] * 4,\n \"managed\": [0] * 4,\n \"unmanaged_old\": [0] * 4,\n \"unmanaged_recent\": [0] * 4,\n \"spilled\": [0] * 4,\n }\n )\n\n self.root = figure(\n title=\"Bytes stored on cluster\",\n tools=\"\",\n id=\"bk-cluster-memory-plot\",\n width=int(width / 2),\n name=\"cluster_memory\",\n min_border_bottom=50,\n **kwargs,\n )\n rect = self.root.rect(\n source=self.source,\n x=\"x\",\n y=\"y\",\n width=\"width\",\n height=0.9,\n color=\"color\",\n alpha=\"alpha\",\n )\n rect.nonselection_glyph = None\n\n self.root.axis[0].ticker = BasicTicker(**TICKS_1024)\n self.root.xaxis[0].formatter = NumeralTickFormatter(format=\"0.0 b\")\n self.root.xaxis.major_label_orientation = XLABEL_ORIENTATION\n self.root.xaxis.minor_tick_line_alpha = 0\n self.root.x_range = Range1d(start=0)\n self.root.yaxis.visible = False\n self.root.ygrid.visible = False\n\n self.root.toolbar_location = None\n self.root.yaxis.visible = False\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 12px; font-weight: bold;\">Process memory (RSS):</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@proc_memory{0.00 b}</span>\n </div>\n <div style=\"margin-left: 1em;\">\n <span style=\"font-size: 12px; font-weight: bold;\">Managed:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@managed{0.00 b}</span>\n </div>\n <div style=\"margin-left: 1em;\">\n <span style=\"font-size: 12px; 
font-weight: bold;\">Unmanaged (old):</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@unmanaged_old{0.00 b}</span>\n </div>\n <div style=\"margin-left: 1em;\">\n <span style=\"font-size: 12px; font-weight: bold;\">Unmanaged (recent):</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@unmanaged_recent{0.00 b}</span>\n </div>\n <div>\n <span style=\"font-size: 12px; font-weight: bold;\">Spilled to disk:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@spilled{0.00 b}</span>\n </div>\n \"\"\",\n )\n self.root.add_tools(hover)\n\n @without_property_validation\n def update(self):\n with log_errors():\n limit = sum(ws.memory_limit for ws in self.scheduler.workers.values())\n meminfo = self.scheduler.memory\n color = _memory_color(meminfo.process, limit)\n\n width = [\n meminfo.managed_in_memory,\n meminfo.unmanaged_old,\n meminfo.unmanaged_recent,\n meminfo.managed_spilled,\n ]\n\n result = {\n \"width\": width,\n \"x\": [sum(width[:i]) + w / 2 for i, w in enumerate(width)],\n \"color\": [color, color, color, \"grey\"],\n \"proc_memory\": [meminfo.process] * 4,\n \"managed\": [meminfo.managed_in_memory] * 4,\n \"unmanaged_old\": [meminfo.unmanaged_old] * 4,\n \"unmanaged_recent\": [meminfo.unmanaged_recent] * 4,\n \"spilled\": [meminfo.managed_spilled] * 4,\n }\n\n x_end = max(limit, meminfo.process + meminfo.managed_spilled)\n self.root.x_range.end = x_end\n\n title = f\"Bytes stored: {format_bytes(meminfo.process)}\"\n if meminfo.managed_spilled:\n title += f\" + {format_bytes(meminfo.managed_spilled)} spilled to disk\"\n self.root.title.text = title\n\n update(self.source, result)\n\n\nclass WorkersMemory(DashboardComponent):\n \"\"\"Memory usage for single workers\"\"\"\n\n def __init__(self, scheduler, width=600, **kwargs):\n with log_errors():\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\n \"width\": [],\n \"x\": [],\n \"y\": [],\n \"color\": [],\n 
\"alpha\": [],\n \"worker\": [],\n \"escaped_worker\": [],\n \"proc_memory\": [],\n \"managed\": [],\n \"unmanaged_old\": [],\n \"unmanaged_recent\": [],\n \"spilled\": [],\n }\n )\n\n self.root = figure(\n title=\"Bytes stored per worker\",\n tools=\"\",\n id=\"bk-workers-memory-plot\",\n width=int(width / 2),\n name=\"workers_memory\",\n min_border_bottom=50,\n **kwargs,\n )\n rect = self.root.rect(\n source=self.source,\n x=\"x\",\n y=\"y\",\n width=\"width\",\n height=0.9,\n color=\"color\",\n fill_alpha=\"alpha\",\n line_width=0,\n )\n rect.nonselection_glyph = None\n\n self.root.axis[0].ticker = BasicTicker(**TICKS_1024)\n self.root.xaxis[0].formatter = NumeralTickFormatter(format=\"0.0 b\")\n self.root.xaxis.major_label_orientation = XLABEL_ORIENTATION\n self.root.xaxis.minor_tick_line_alpha = 0\n self.root.x_range = Range1d(start=0)\n self.root.yaxis.visible = False\n self.root.ygrid.visible = False\n\n tap = TapTool(callback=OpenURL(url=\"./info/worker/@escaped_worker.html\"))\n self.root.add_tools(tap)\n\n self.root.toolbar_location = None\n self.root.yaxis.visible = False\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 12px; font-weight: bold;\">Worker:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@worker</span>\n </div>\n <div>\n <span style=\"font-size: 12px; font-weight: bold;\">Process memory (RSS):</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@proc_memory{0.00 b}</span>\n </div>\n <div style=\"margin-left: 1em;\">\n <span style=\"font-size: 12px; font-weight: bold;\">Managed:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@managed{0.00 b}</span>\n </div>\n <div style=\"margin-left: 1em;\">\n <span style=\"font-size: 12px; font-weight: bold;\">Unmanaged (old):</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@unmanaged_old{0.00 b}</span>\n </div>\n <div 
style=\"margin-left: 1em;\">\n <span style=\"font-size: 12px; font-weight: bold;\">Unmanaged (recent):</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@unmanaged_recent{0.00 b}</span>\n </div>\n <div>\n <span style=\"font-size: 12px; font-weight: bold;\">Spilled to disk:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@spilled{0.00 b}</span>\n </div>\n \"\"\",\n )\n self.root.add_tools(hover)\n\n @without_property_validation\n def update(self):\n def quadlist(i) -> list:\n out = []\n for ii in i:\n out += [ii, ii, ii, ii]\n return out\n\n with log_errors():\n workers = self.scheduler.workers.values()\n\n width = []\n x = []\n color = []\n max_limit = 0\n procmemory = []\n managed = []\n spilled = []\n unmanaged_old = []\n unmanaged_recent = []\n\n for ws in workers:\n meminfo = ws.memory\n limit = getattr(ws, \"memory_limit\", 0)\n max_limit = max(\n max_limit, limit, meminfo.process + meminfo.managed_spilled\n )\n color_i = _memory_color(meminfo.process, limit)\n\n width += [\n meminfo.managed_in_memory,\n meminfo.unmanaged_old,\n meminfo.unmanaged_recent,\n meminfo.managed_spilled,\n ]\n x += [sum(width[-4:i]) + width[i] / 2 for i in range(-4, 0)]\n color += [color_i, color_i, color_i, \"grey\"]\n\n # memory info\n procmemory.append(meminfo.process)\n managed.append(meminfo.managed_in_memory)\n unmanaged_old.append(meminfo.unmanaged_old)\n unmanaged_recent.append(meminfo.unmanaged_recent)\n spilled.append(meminfo.managed_spilled)\n\n result = {\n \"width\": width,\n \"x\": x,\n \"color\": color,\n \"alpha\": [1, 0.7, 0.4, 1] * len(workers),\n \"worker\": quadlist(ws.address for ws in workers),\n \"escaped_worker\": quadlist(\n escape.url_escape(ws.address) for ws in workers\n ),\n \"y\": quadlist(range(len(workers))),\n \"proc_memory\": quadlist(procmemory),\n \"managed\": quadlist(managed),\n \"unmanaged_old\": quadlist(unmanaged_old),\n \"unmanaged_recent\": quadlist(unmanaged_recent),\n 
\"spilled\": quadlist(spilled),\n }\n # Remove rectangles with width=0\n result = {\n k: [vi for vi, w in zip(v, width) if w] for k, v in result.items()\n }\n\n self.root.x_range.end = max_limit\n update(self.source, result)\n\n\nclass WorkersMemoryHistogram(DashboardComponent):\n \"\"\"Histogram of memory usage, showing how many workers there are in each bucket of\n usage. Replaces the per-worker graph when there are >= 50 workers.\n \"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\"left\": [1, 2], \"right\": [10, 10], \"top\": [0, 0]}\n )\n\n self.root = figure(\n title=\"Bytes stored per worker (Histogram)\",\n name=\"workers_memory\",\n id=\"bk-workers-memory-histogram-plot\",\n y_axis_label=\"frequency\",\n tools=\"\",\n **kwargs,\n )\n\n self.root.xaxis[0].formatter = NumeralTickFormatter(format=\"0.0 b\")\n self.root.xaxis.ticker = AdaptiveTicker(**TICKS_1024)\n self.root.xaxis.major_label_orientation = XLABEL_ORIENTATION\n\n self.root.xaxis.minor_tick_line_alpha = 0\n self.root.ygrid.visible = False\n\n self.root.toolbar_location = None\n\n self.root.quad(\n source=self.source,\n left=\"left\",\n right=\"right\",\n bottom=0,\n top=\"top\",\n color=\"deepskyblue\",\n fill_alpha=0.5,\n )\n\n @without_property_validation\n def update(self):\n nbytes = np.asarray(\n [ws.metrics[\"memory\"] for ws in self.scheduler.workers.values()]\n )\n counts, x = np.histogram(nbytes, bins=40)\n d = {\"left\": x[:-1], \"right\": x[1:], \"top\": counts}\n update(self.source, d)\n\n\nclass BandwidthTypes(DashboardComponent):\n \"\"\"Bar chart showing bandwidth per type\"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\n \"bandwidth\": [1, 2],\n \"bandwidth-half\": [0.5, 1],\n \"type\": [\"a\", \"b\"],\n \"bandwidth_text\": [\"1\", \"2\"],\n }\n )\n\n self.root = 
figure(\n title=\"Bandwidth by Type\",\n tools=\"\",\n id=\"bk-bandwidth-type-plot\",\n name=\"bandwidth_type_histogram\",\n y_range=[\"a\", \"b\"],\n **kwargs,\n )\n self.root.xaxis.major_label_orientation = -0.5\n rect = self.root.rect(\n source=self.source,\n x=\"bandwidth-half\",\n y=\"type\",\n width=\"bandwidth\",\n height=0.9,\n color=\"blue\",\n )\n self.root.x_range.start = 0\n self.root.xaxis[0].formatter = NumeralTickFormatter(format=\"0.0 b\")\n self.root.xaxis.ticker = AdaptiveTicker(**TICKS_1024)\n rect.nonselection_glyph = None\n\n self.root.xaxis.minor_tick_line_alpha = 0\n self.root.ygrid.visible = False\n\n self.root.toolbar_location = None\n\n hover = HoverTool()\n hover.tooltips = \"@type: @bandwidth_text / s\"\n hover.point_policy = \"follow_mouse\"\n self.root.add_tools(hover)\n\n @without_property_validation\n def update(self):\n with log_errors():\n bw = self.scheduler.bandwidth_types\n self.root.y_range.factors = list(sorted(bw))\n result = {\n \"bandwidth\": list(bw.values()),\n \"bandwidth-half\": [b / 2 for b in bw.values()],\n \"type\": list(bw.keys()),\n \"bandwidth_text\": [format_bytes(x) for x in bw.values()],\n }\n self.root.title.text = \"Bandwidth: \" + format_bytes(\n self.scheduler.bandwidth\n )\n update(self.source, result)\n\n\nclass BandwidthWorkers(DashboardComponent):\n \"\"\"How many tasks are on each worker\"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\n \"bandwidth\": [1, 2],\n \"source\": [\"a\", \"b\"],\n \"destination\": [\"a\", \"b\"],\n \"bandwidth_text\": [\"1\", \"2\"],\n }\n )\n\n values = [hex(x)[2:] for x in range(64, 256)][::-1]\n mapper = linear_cmap(\n field_name=\"bandwidth\",\n palette=[\"#\" + x + x + \"FF\" for x in values],\n low=0,\n high=1,\n )\n\n self.root = figure(\n title=\"Bandwidth by Worker\",\n tools=\"\",\n id=\"bk-bandwidth-worker-plot\",\n name=\"bandwidth_worker_heatmap\",\n 
x_range=[\"a\", \"b\"],\n y_range=[\"a\", \"b\"],\n **kwargs,\n )\n self.root.xaxis.major_label_orientation = XLABEL_ORIENTATION\n self.root.rect(\n source=self.source,\n x=\"source\",\n y=\"destination\",\n color=mapper,\n height=1,\n width=1,\n )\n\n self.color_map = mapper[\"transform\"]\n color_bar = ColorBar(\n color_mapper=self.color_map,\n label_standoff=12,\n border_line_color=None,\n location=(0, 0),\n )\n color_bar.formatter = NumeralTickFormatter(format=\"0.0 b\")\n color_bar.ticker = AdaptiveTicker(**TICKS_1024)\n self.root.add_layout(color_bar, \"right\")\n\n self.root.toolbar_location = None\n\n hover = HoverTool()\n hover.tooltips = \"\"\"\n <div>\n <p><b>Source:</b> @source </p>\n <p><b>Destination:</b> @destination </p>\n <p><b>Bandwidth:</b> @bandwidth_text / s</p>\n </div>\n \"\"\"\n hover.point_policy = \"follow_mouse\"\n self.root.add_tools(hover)\n\n @without_property_validation\n def update(self):\n with log_errors():\n bw = self.scheduler.bandwidth_workers\n if not bw:\n return\n\n def name(address):\n try:\n ws = self.scheduler.workers[address]\n except KeyError:\n return address\n if ws.name is not None:\n return str(ws.name)\n return address\n\n x, y, value = zip(*[(name(a), name(b), c) for (a, b), c in bw.items()])\n\n self.color_map.high = max(value)\n\n factors = list(sorted(set(x + y)))\n self.root.x_range.factors = factors\n self.root.y_range.factors = factors[::-1]\n\n result = {\n \"source\": x,\n \"destination\": y,\n \"bandwidth\": value,\n \"bandwidth_text\": list(map(format_bytes, value)),\n }\n self.root.title.text = \"Bandwidth: \" + format_bytes(\n self.scheduler.bandwidth\n )\n update(self.source, result)\n\n\nclass WorkerNetworkBandwidth(DashboardComponent):\n \"\"\"Worker network bandwidth chart\n\n Plots horizontal bars with the read_bytes and write_bytes worker state\n \"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\n \"y_read\": 
[],\n \"y_write\": [],\n \"x_read\": [],\n \"x_write\": [],\n }\n )\n self.root = figure(\n title=\"Worker Network Bandwidth\",\n tools=\"\",\n id=\"bk-worker-net-bandwidth\",\n name=\"worker_network_bandwidth\",\n **kwargs,\n )\n\n # read_bytes\n self.root.hbar(\n y=\"y_read\",\n right=\"x_read\",\n line_color=None,\n left=0,\n height=0.5,\n fill_color=\"red\",\n legend_label=\"read\",\n source=self.source,\n )\n\n # write_bytes\n self.root.hbar(\n y=\"y_write\",\n right=\"x_write\",\n line_color=None,\n left=0,\n height=0.5,\n fill_color=\"blue\",\n legend_label=\"write\",\n source=self.source,\n )\n\n self.root.axis[0].ticker = BasicTicker(**TICKS_1024)\n self.root.xaxis[0].formatter = NumeralTickFormatter(format=\"0.0 b\")\n self.root.xaxis.major_label_orientation = XLABEL_ORIENTATION\n self.root.xaxis.minor_tick_line_alpha = 0\n self.root.x_range = Range1d(start=0)\n self.root.yaxis.visible = False\n self.root.ygrid.visible = False\n self.root.toolbar_location = None\n self.root.yaxis.visible = False\n\n @without_property_validation\n def update(self):\n with log_errors():\n workers = self.scheduler.workers.values()\n\n h = 0.1\n y_read = [i + 0.75 + i * h for i in range(len(workers))]\n y_write = [i + 0.25 + i * h for i in range(len(workers))]\n\n x_read = []\n x_write = []\n\n for ws in workers:\n x_read.append(ws.metrics[\"read_bytes\"])\n x_write.append(ws.metrics[\"write_bytes\"])\n\n self.root.x_range.end = max(\n max(x_read),\n max(x_write),\n 100_000_000,\n 0.95 * self.root.x_range.end,\n )\n\n result = {\n \"y_read\": y_read,\n \"y_write\": y_write,\n \"x_read\": x_read,\n \"x_write\": x_write,\n }\n\n update(self.source, result)\n\n\nclass ComputePerKey(DashboardComponent):\n \"\"\"Bar chart showing time spend in action by key prefix\"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n\n es = [p for p in self.scheduler.plugins if isinstance(p, TaskStreamPlugin)]\n if not es:\n 
self.plugin = TaskStreamPlugin(self.scheduler)\n else:\n self.plugin = es[0]\n\n compute_data = {\n \"times\": [0.2, 0.1],\n \"formatted_time\": [\"0.2 ms\", \"2.8 us\"],\n \"angles\": [3.14, 0.785],\n \"color\": [ts_color_lookup[\"transfer\"], ts_color_lookup[\"compute\"]],\n \"names\": [\"sum\", \"sum_partial\"],\n }\n\n self.compute_source = ColumnDataSource(data=compute_data)\n\n fig = figure(\n title=\"Compute Time Per Task\",\n tools=\"\",\n id=\"bk-Compute-by-key-plot\",\n name=\"compute_time_per_key\",\n x_range=[\"a\", \"b\"],\n **kwargs,\n )\n\n rect = fig.vbar(\n source=self.compute_source,\n x=\"names\",\n top=\"times\",\n width=0.7,\n color=\"color\",\n )\n\n fig.y_range.start = 0\n fig.yaxis.axis_label = \"Time (s)\"\n fig.yaxis[0].formatter = NumeralTickFormatter(format=\"0\")\n fig.yaxis.ticker = AdaptiveTicker(**TICKS_1024)\n fig.xaxis.major_label_orientation = XLABEL_ORIENTATION\n rect.nonselection_glyph = None\n\n fig.xaxis.minor_tick_line_alpha = 0\n fig.xgrid.visible = False\n\n fig.toolbar_location = None\n\n hover = HoverTool()\n hover.tooltips = \"\"\"\n <div>\n <p><b>Name:</b> @names</p>\n <p><b>Time:</b> @formatted_time</p>\n </div>\n \"\"\"\n hover.point_policy = \"follow_mouse\"\n fig.add_tools(hover)\n\n fig.add_layout(\n Title(\n text=\"Note: tasks less than 2% of max are not displayed\",\n text_font_style=\"italic\",\n ),\n \"below\",\n )\n\n self.fig = fig\n tab1 = Panel(child=fig, title=\"Bar Chart\")\n\n fig2 = figure(\n title=\"Compute Time Per Task\",\n tools=\"\",\n id=\"bk-Compute-by-key-pie\",\n name=\"compute_time_per_key-pie\",\n x_range=(-0.5, 1.0),\n **kwargs,\n )\n\n fig2.wedge(\n x=0,\n y=1,\n radius=0.4,\n start_angle=cumsum(\"angles\", include_zero=True),\n end_angle=cumsum(\"angles\"),\n line_color=\"white\",\n fill_color=\"color\",\n legend_field=\"names\",\n source=self.compute_source,\n )\n\n fig2.axis.axis_label = None\n fig2.axis.visible = False\n fig2.grid.grid_line_color = None\n fig2.add_layout(\n Title(\n 
text=\"Note: tasks less than 2% of max are not displayed\",\n text_font_style=\"italic\",\n ),\n \"below\",\n )\n\n hover = HoverTool()\n hover.tooltips = \"\"\"\n <div>\n <p><b>Name:</b> @names</p>\n <p><b>Time:</b> @formatted_time</p>\n </div>\n \"\"\"\n hover.point_policy = \"follow_mouse\"\n fig2.add_tools(hover)\n self.wedge_fig = fig2\n tab2 = Panel(child=fig2, title=\"Pie Chart\")\n\n self.root = Tabs(tabs=[tab1, tab2])\n\n @without_property_validation\n def update(self):\n with log_errors():\n compute_times = defaultdict(float)\n\n for key, ts in self.scheduler.task_prefixes.items():\n name = key_split(key)\n for action, t in ts.all_durations.items():\n if action == \"compute\":\n compute_times[name] += t\n\n # order by largest time first\n compute_times = sorted(\n compute_times.items(), key=lambda x: x[1], reverse=True\n )\n\n # keep only time which are 2% of max or greater\n if compute_times:\n max_time = compute_times[0][1] * 0.02\n compute_times = [(n, t) for n, t in compute_times if t > max_time]\n compute_colors = list()\n compute_names = list()\n compute_time = list()\n total_time = 0\n for name, t in compute_times:\n compute_names.append(name)\n compute_colors.append(ts_color_of(name))\n compute_time.append(t)\n total_time += t\n\n angles = [t / total_time * 2 * math.pi for t in compute_time]\n\n self.fig.x_range.factors = compute_names\n\n compute_result = dict(\n angles=angles,\n times=compute_time,\n color=compute_colors,\n names=compute_names,\n formatted_time=[format_time(t) for t in compute_time],\n )\n\n update(self.compute_source, compute_result)\n\n\nclass AggregateAction(DashboardComponent):\n \"\"\"Bar chart showing time spend in action by key prefix\"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n\n es = [p for p in self.scheduler.plugins if isinstance(p, TaskStreamPlugin)]\n if not es:\n self.plugin = TaskStreamPlugin(self.scheduler)\n else:\n self.plugin = es[0]\n\n 
action_data = {\n \"times\": [0.2, 0.1],\n \"formatted_time\": [\"0.2 ms\", \"2.8 us\"],\n \"color\": [ts_color_lookup[\"transfer\"], ts_color_lookup[\"compute\"]],\n \"names\": [\"transfer\", \"compute\"],\n }\n\n self.action_source = ColumnDataSource(data=action_data)\n\n self.root = figure(\n title=\"Aggregate Per Action\",\n tools=\"\",\n id=\"bk-aggregate-per-action-plot\",\n name=\"aggregate_per_action\",\n x_range=[\"a\", \"b\"],\n **kwargs,\n )\n\n rect = self.root.vbar(\n source=self.action_source,\n x=\"names\",\n top=\"times\",\n width=0.7,\n color=\"color\",\n )\n\n self.root.y_range.start = 0\n self.root.yaxis[0].formatter = NumeralTickFormatter(format=\"0\")\n self.root.yaxis.axis_label = \"Time (s)\"\n self.root.yaxis.ticker = AdaptiveTicker(**TICKS_1024)\n self.root.xaxis.major_label_orientation = XLABEL_ORIENTATION\n self.root.xaxis.major_label_text_font_size = \"16px\"\n rect.nonselection_glyph = None\n\n self.root.xaxis.minor_tick_line_alpha = 0\n self.root.xgrid.visible = False\n\n self.root.toolbar_location = None\n\n hover = HoverTool()\n hover.tooltips = \"\"\"\n <div>\n <p><b>Name:</b> @names</p>\n <p><b>Time:</b> @formatted_time</p>\n </div>\n \"\"\"\n hover.point_policy = \"follow_mouse\"\n self.root.add_tools(hover)\n\n @without_property_validation\n def update(self):\n with log_errors():\n agg_times = defaultdict(float)\n\n for key, ts in self.scheduler.task_prefixes.items():\n for action, t in ts.all_durations.items():\n agg_times[action] += t\n\n # order by largest time first\n agg_times = sorted(agg_times.items(), key=lambda x: x[1], reverse=True)\n\n agg_colors = list()\n agg_names = list()\n agg_time = list()\n for action, t in agg_times:\n agg_names.append(action)\n if action == \"compute\":\n agg_colors.append(\"purple\")\n else:\n agg_colors.append(ts_color_lookup[action])\n agg_time.append(t)\n\n self.root.x_range.factors = agg_names\n self.root.title.text = \"Aggregate Time Per Action\"\n\n action_result = dict(\n 
times=agg_time,\n color=agg_colors,\n names=agg_names,\n formatted_time=[format_time(t) for t in agg_time],\n )\n\n update(self.action_source, action_result)\n\n\nclass MemoryByKey(DashboardComponent):\n \"\"\"Bar chart showing memory use by key prefix\"\"\"\n\n def __init__(self, scheduler, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\n \"name\": [\"a\", \"b\"],\n \"nbytes\": [100, 1000],\n \"count\": [1, 2],\n \"color\": [\"blue\", \"blue\"],\n }\n )\n\n self.root = figure(\n title=\"Memory Use\",\n tools=\"\",\n id=\"bk-memory-by-key-plot\",\n name=\"memory_by_key\",\n x_range=[\"a\", \"b\"],\n **kwargs,\n )\n rect = self.root.vbar(\n source=self.source, x=\"name\", top=\"nbytes\", width=0.9, color=\"color\"\n )\n self.root.yaxis[0].formatter = NumeralTickFormatter(format=\"0.0 b\")\n self.root.yaxis.ticker = AdaptiveTicker(**TICKS_1024)\n self.root.xaxis.major_label_orientation = XLABEL_ORIENTATION\n rect.nonselection_glyph = None\n\n self.root.xaxis.minor_tick_line_alpha = 0\n self.root.ygrid.visible = False\n\n self.root.toolbar_location = None\n\n hover = HoverTool()\n hover.tooltips = \"@name: @nbytes_text\"\n hover.tooltips = \"\"\"\n <div>\n <p><b>Name:</b> @name</p>\n <p><b>Bytes:</b> @nbytes_text </p>\n <p><b>Count:</b> @count objects </p>\n </div>\n \"\"\"\n hover.point_policy = \"follow_mouse\"\n self.root.add_tools(hover)\n\n @without_property_validation\n def update(self):\n with log_errors():\n counts = defaultdict(int)\n nbytes = defaultdict(int)\n for ws in self.scheduler.workers.values():\n for ts in ws.has_what:\n ks = key_split(ts.key)\n counts[ks] += 1\n nbytes[ks] += ts.nbytes\n\n names = list(sorted(counts))\n self.root.x_range.factors = names\n result = {\n \"name\": names,\n \"count\": [counts[name] for name in names],\n \"nbytes\": [nbytes[name] for name in names],\n \"nbytes_text\": [format_bytes(nbytes[name]) for name in names],\n \"color\": [color_of(name) for name 
in names],\n }\n self.root.title.text = \"Total Use: \" + format_bytes(sum(nbytes.values()))\n\n update(self.source, result)\n\n\nclass CurrentLoad(DashboardComponent):\n \"\"\"Tasks and CPU usage on each worker\"\"\"\n\n def __init__(self, scheduler, width=600, **kwargs):\n with log_errors():\n self.last = 0\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\n \"nprocessing\": [],\n \"nprocessing-half\": [],\n \"nprocessing-color\": [],\n \"cpu\": [],\n \"cpu-half\": [],\n \"y\": [],\n \"worker\": [],\n \"escaped_worker\": [],\n }\n )\n processing = figure(\n title=\"Tasks Processing\",\n tools=\"\",\n id=\"bk-nprocessing-plot\",\n name=\"processing\",\n width=int(width / 2),\n min_border_bottom=50,\n **kwargs,\n )\n rect = processing.rect(\n source=self.source,\n x=\"nprocessing-half\",\n y=\"y\",\n width=\"nprocessing\",\n height=0.9,\n color=\"nprocessing-color\",\n )\n processing.x_range.start = 0\n rect.nonselection_glyph = None\n\n cpu = figure(\n title=\"CPU Utilization\",\n tools=\"\",\n id=\"bk-cpu-worker-plot\",\n width=int(width / 2),\n name=\"cpu_hist\",\n x_range=(0, 100),\n min_border_bottom=50,\n **kwargs,\n )\n rect = cpu.rect(\n source=self.source,\n x=\"cpu-half\",\n y=\"y\",\n width=\"cpu\",\n height=0.9,\n color=\"blue\",\n )\n rect.nonselection_glyph = None\n\n for fig in (processing, cpu):\n fig.xaxis.minor_tick_line_alpha = 0\n fig.yaxis.visible = False\n fig.ygrid.visible = False\n\n tap = TapTool(\n callback=OpenURL(url=\"./info/worker/@escaped_worker.html\")\n )\n fig.add_tools(tap)\n\n fig.toolbar_location = None\n fig.yaxis.visible = False\n\n hover = HoverTool()\n hover.tooltips = \"@worker : @nprocessing tasks\"\n hover.point_policy = \"follow_mouse\"\n processing.add_tools(hover)\n\n hover = HoverTool()\n hover.tooltips = \"@worker : @cpu %\"\n hover.point_policy = \"follow_mouse\"\n cpu.add_tools(hover)\n\n self.processing_figure = processing\n self.cpu_figure = cpu\n\n @without_property_validation\n def 
update(self):\n with log_errors():\n workers = self.scheduler.workers.values()\n now = time()\n if not any(ws.processing for ws in workers) and now < self.last + 1:\n return\n self.last = now\n\n cpu = [int(ws.metrics[\"cpu\"]) for ws in workers]\n nprocessing = [len(ws.processing) for ws in workers]\n\n nprocessing_color = []\n for ws in workers:\n if ws in self.scheduler.idle:\n nprocessing_color.append(\"red\")\n elif ws in self.scheduler.saturated:\n nprocessing_color.append(\"green\")\n else:\n nprocessing_color.append(\"blue\")\n\n result = {\n \"cpu\": cpu,\n \"cpu-half\": [c / 2 for c in cpu],\n \"nprocessing\": nprocessing,\n \"nprocessing-half\": [np / 2 for np in nprocessing],\n \"nprocessing-color\": nprocessing_color,\n \"worker\": [ws.address for ws in workers],\n \"escaped_worker\": [escape.url_escape(ws.address) for ws in workers],\n \"y\": list(range(len(workers))),\n }\n\n if self.scheduler.workers:\n xrange = max(ws.nthreads or 1 for ws in workers)\n else:\n xrange = 1\n self.cpu_figure.x_range.end = xrange * 100\n\n update(self.source, result)\n\n\nclass StealingTimeSeries(DashboardComponent):\n def __init__(self, scheduler, **kwargs):\n self.scheduler = scheduler\n self.source = ColumnDataSource(\n {\n \"time\": [time() * 1000, time() * 1000 + 1],\n \"idle\": [0, 0],\n \"saturated\": [0, 0],\n }\n )\n\n x_range = DataRange1d(follow=\"end\", follow_interval=20000, range_padding=0)\n\n self.root = figure(\n title=\"Idle and Saturated Workers Over Time\",\n x_axis_type=\"datetime\",\n tools=\"\",\n x_range=x_range,\n **kwargs,\n )\n self.root.line(source=self.source, x=\"time\", y=\"idle\", color=\"red\")\n self.root.line(source=self.source, x=\"time\", y=\"saturated\", color=\"green\")\n self.root.yaxis.minor_tick_line_color = None\n\n self.root.add_tools(\n ResetTool(), PanTool(dimensions=\"width\"), WheelZoomTool(dimensions=\"width\")\n )\n\n @without_property_validation\n def update(self):\n with log_errors():\n result = {\n \"time\": [time() 
* 1000],\n \"idle\": [len(self.scheduler.idle)],\n \"saturated\": [len(self.scheduler.saturated)],\n }\n if PROFILING:\n curdoc().add_next_tick_callback(\n lambda: self.source.stream(result, 10000)\n )\n else:\n self.source.stream(result, 10000)\n\n\nclass StealingEvents(DashboardComponent):\n def __init__(self, scheduler, **kwargs):\n self.scheduler = scheduler\n self.steal = scheduler.extensions[\"stealing\"]\n self.last = 0\n self.source = ColumnDataSource(\n {\n \"time\": [time() - 20, time()],\n \"level\": [0, 15],\n \"color\": [\"white\", \"white\"],\n \"duration\": [0, 0],\n \"radius\": [1, 1],\n \"cost_factor\": [0, 10],\n \"count\": [1, 1],\n }\n )\n\n x_range = DataRange1d(follow=\"end\", follow_interval=20000, range_padding=0)\n\n self.root = figure(\n title=\"Stealing Events\",\n x_axis_type=\"datetime\",\n tools=\"\",\n x_range=x_range,\n **kwargs,\n )\n\n self.root.circle(\n source=self.source,\n x=\"time\",\n y=\"level\",\n color=\"color\",\n size=\"radius\",\n alpha=0.5,\n )\n self.root.yaxis.axis_label = \"Level\"\n\n hover = HoverTool()\n hover.tooltips = \"Level: @level, Duration: @duration, Count: @count, Cost factor: @cost_factor\"\n hover.point_policy = \"follow_mouse\"\n\n self.root.add_tools(\n hover,\n ResetTool(),\n PanTool(dimensions=\"width\"),\n WheelZoomTool(dimensions=\"width\"),\n )\n\n def convert(self, msgs):\n \"\"\"Convert a log message to a glyph\"\"\"\n total_duration = 0\n for msg in msgs:\n time, level, key, duration, sat, occ_sat, idl, occ_idl = msg\n total_duration += duration\n\n try:\n color = Viridis11[level]\n except (KeyError, IndexError):\n color = \"black\"\n\n radius = math.sqrt(min(total_duration, 10)) * 30 + 2\n\n d = {\n \"time\": time * 1000,\n \"level\": level,\n \"count\": len(msgs),\n \"color\": color,\n \"duration\": total_duration,\n \"radius\": radius,\n \"cost_factor\": self.steal.cost_multipliers[level],\n }\n\n return d\n\n @without_property_validation\n def update(self):\n with log_errors():\n log = 
self.scheduler.get_events(topic=\"stealing\")\n current = len(self.scheduler.events[\"stealing\"])\n n = current - self.last\n\n log = [log[-i][1] for i in range(1, n + 1) if isinstance(log[-i][1], list)]\n self.last = current\n\n if log:\n new = pipe(\n log,\n map(groupby(1)),\n map(dict.values),\n concat,\n map(self.convert),\n list,\n transpose,\n )\n if PROFILING:\n curdoc().add_next_tick_callback(\n lambda: self.source.stream(new, 10000)\n )\n else:\n self.source.stream(new, 10000)\n\n\nclass Events(DashboardComponent):\n def __init__(self, scheduler, name, height=150, **kwargs):\n self.scheduler = scheduler\n self.action_ys = dict()\n self.last = 0\n self.name = name\n self.source = ColumnDataSource(\n {\"time\": [], \"action\": [], \"hover\": [], \"y\": [], \"color\": []}\n )\n\n x_range = DataRange1d(follow=\"end\", follow_interval=200000)\n\n self.root = figure(\n title=name,\n x_axis_type=\"datetime\",\n height=height,\n tools=\"\",\n x_range=x_range,\n **kwargs,\n )\n\n self.root.circle(\n source=self.source,\n x=\"time\",\n y=\"y\",\n color=\"color\",\n size=50,\n alpha=0.5,\n **{\"legend_field\" if BOKEH_VERSION >= \"1.4\" else \"legend\": \"action\"},\n )\n self.root.yaxis.axis_label = \"Action\"\n self.root.legend.location = \"top_left\"\n\n hover = HoverTool()\n hover.tooltips = \"@action<br>@hover\"\n hover.point_policy = \"follow_mouse\"\n\n self.root.add_tools(\n hover,\n ResetTool(),\n PanTool(dimensions=\"width\"),\n WheelZoomTool(dimensions=\"width\"),\n )\n\n @without_property_validation\n def update(self):\n with log_errors():\n log = self.scheduler.events[self.name]\n n = self.scheduler.event_counts[self.name] - self.last\n if log:\n log = [log[-i] for i in range(1, n + 1)]\n self.last = self.scheduler.event_counts[self.name]\n\n if log:\n actions = []\n times = []\n hovers = []\n ys = []\n colors = []\n for msg_time, msg in log:\n times.append(msg_time * 1000)\n action = msg[\"action\"]\n actions.append(action)\n try:\n 
ys.append(self.action_ys[action])\n except KeyError:\n self.action_ys[action] = len(self.action_ys)\n ys.append(self.action_ys[action])\n colors.append(color_of(action))\n hovers.append(\"TODO\")\n\n new = {\n \"time\": times,\n \"action\": actions,\n \"hover\": hovers,\n \"y\": ys,\n \"color\": colors,\n }\n\n if PROFILING:\n curdoc().add_next_tick_callback(\n lambda: self.source.stream(new, 10000)\n )\n else:\n self.source.stream(new, 10000)\n\n\nclass TaskStream(DashboardComponent):\n def __init__(self, scheduler, n_rectangles=1000, clear_interval=\"20s\", **kwargs):\n self.scheduler = scheduler\n self.offset = 0\n es = [p for p in self.scheduler.plugins if isinstance(p, TaskStreamPlugin)]\n if not es:\n self.plugin = TaskStreamPlugin(self.scheduler)\n else:\n self.plugin = es[0]\n self.index = max(0, self.plugin.index - n_rectangles)\n self.workers = dict()\n self.n_rectangles = n_rectangles\n clear_interval = parse_timedelta(clear_interval, default=\"ms\")\n self.clear_interval = clear_interval\n self.last = 0\n self.last_seen = 0\n\n self.source, self.root = task_stream_figure(clear_interval, **kwargs)\n\n # Required for update callback\n self.task_stream_index = [0]\n\n @without_property_validation\n def update(self):\n if self.index == self.plugin.index:\n return\n with log_errors():\n if self.index and len(self.source.data[\"start\"]):\n start = min(self.source.data[\"start\"])\n duration = max(self.source.data[\"duration\"])\n boundary = (self.offset + start - duration) / 1000\n else:\n boundary = self.offset\n rectangles = self.plugin.rectangles(\n istart=self.index, workers=self.workers, start_boundary=boundary\n )\n n = len(rectangles[\"name\"])\n self.index = self.plugin.index\n\n if not rectangles[\"start\"]:\n return\n\n # If it has been a while since we've updated the plot\n if time() > self.last_seen + self.clear_interval:\n new_start = min(rectangles[\"start\"]) - self.offset\n old_start = min(self.source.data[\"start\"])\n old_end = max(\n 
map(\n operator.add,\n self.source.data[\"start\"],\n self.source.data[\"duration\"],\n )\n )\n\n density = (\n sum(self.source.data[\"duration\"])\n / len(self.workers)\n / (old_end - old_start)\n )\n\n # If whitespace is more than 3x the old width\n if (new_start - old_end) > (old_end - old_start) * 2 or density < 0.05:\n self.source.data.update({k: [] for k in rectangles}) # clear\n self.offset = min(rectangles[\"start\"]) # redefine offset\n\n rectangles[\"start\"] = [x - self.offset for x in rectangles[\"start\"]]\n self.last_seen = time()\n\n # Convert to numpy for serialization speed\n if n >= 10 and np:\n for k, v in rectangles.items():\n if isinstance(v[0], Number):\n rectangles[k] = np.array(v)\n\n if PROFILING:\n curdoc().add_next_tick_callback(\n lambda: self.source.stream(rectangles, self.n_rectangles)\n )\n else:\n self.source.stream(rectangles, self.n_rectangles)\n\n\ndef task_stream_figure(clear_interval=\"20s\", **kwargs):\n \"\"\"\n kwargs are applied to the bokeh.models.plots.Plot constructor\n \"\"\"\n clear_interval = parse_timedelta(clear_interval, default=\"ms\")\n\n source = ColumnDataSource(\n data=dict(\n start=[time() - clear_interval],\n duration=[0.1],\n key=[\"start\"],\n name=[\"start\"],\n color=[\"white\"],\n duration_text=[\"100 ms\"],\n worker=[\"foo\"],\n y=[0],\n worker_thread=[1],\n alpha=[0.0],\n )\n )\n\n x_range = DataRange1d(range_padding=0)\n y_range = DataRange1d(range_padding=0)\n\n root = figure(\n name=\"task_stream\",\n title=\"Task Stream\",\n id=\"bk-task-stream-plot\",\n x_range=x_range,\n y_range=y_range,\n toolbar_location=\"above\",\n x_axis_type=\"datetime\",\n y_axis_location=None,\n tools=\"\",\n min_border_bottom=50,\n **kwargs,\n )\n\n rect = root.rect(\n source=source,\n x=\"start\",\n y=\"y\",\n width=\"duration\",\n height=0.4,\n fill_color=\"color\",\n line_color=\"color\",\n line_alpha=0.6,\n fill_alpha=\"alpha\",\n line_width=3,\n )\n rect.nonselection_glyph = None\n\n 
root.yaxis.major_label_text_alpha = 0\n root.yaxis.minor_tick_line_alpha = 0\n root.yaxis.major_tick_line_alpha = 0\n root.xgrid.visible = False\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 12px; font-weight: bold;\">@name:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@duration_text</span>\n </div>\n \"\"\",\n )\n\n tap = TapTool(callback=OpenURL(url=\"./profile?key=@name\"))\n\n root.add_tools(\n hover,\n tap,\n BoxZoomTool(),\n ResetTool(),\n PanTool(dimensions=\"width\"),\n WheelZoomTool(dimensions=\"width\"),\n )\n if ExportTool:\n export = ExportTool()\n export.register_plot(root)\n root.add_tools(export)\n\n return source, root\n\n\nclass TaskGraph(DashboardComponent):\n \"\"\"\n A dynamic node-link diagram for the task graph on the scheduler\n\n See also the GraphLayout diagnostic at\n distributed/diagnostics/graph_layout.py\n \"\"\"\n\n def __init__(self, scheduler, **kwargs):\n self.scheduler = scheduler\n self.layout = GraphLayout(scheduler)\n self.invisible_count = 0 # number of invisible nodes\n\n self.node_source = ColumnDataSource(\n {\"x\": [], \"y\": [], \"name\": [], \"state\": [], \"visible\": [], \"key\": []}\n )\n self.edge_source = ColumnDataSource({\"x\": [], \"y\": [], \"visible\": []})\n\n node_view = CDSView(\n source=self.node_source,\n filters=[GroupFilter(column_name=\"visible\", group=\"True\")],\n )\n edge_view = CDSView(\n source=self.edge_source,\n filters=[GroupFilter(column_name=\"visible\", group=\"True\")],\n )\n\n node_colors = factor_cmap(\n \"state\",\n factors=[\"waiting\", \"processing\", \"memory\", \"released\", \"erred\"],\n palette=[\"gray\", \"green\", \"red\", \"blue\", \"black\"],\n )\n\n self.root = figure(title=\"Task Graph\", **kwargs)\n self.subtitle = Title(text=\" \", text_font_style=\"italic\")\n self.root.add_layout(self.subtitle, \"above\")\n\n self.root.multi_line(\n xs=\"x\",\n ys=\"y\",\n 
source=self.edge_source,\n line_width=1,\n view=edge_view,\n color=\"black\",\n alpha=0.3,\n )\n rect = self.root.square(\n x=\"x\",\n y=\"y\",\n size=10,\n color=node_colors,\n source=self.node_source,\n view=node_view,\n **{\"legend_field\" if BOKEH_VERSION >= \"1.4\" else \"legend\": \"state\"},\n )\n self.root.xgrid.grid_line_color = None\n self.root.ygrid.grid_line_color = None\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"<b>@name</b>: @state\",\n renderers=[rect],\n )\n tap = TapTool(callback=OpenURL(url=\"info/task/@key.html\"), renderers=[rect])\n rect.nonselection_glyph = None\n self.root.add_tools(hover, tap)\n self.max_items = config.get(\"distributed.dashboard.graph-max-items\", 5000)\n\n @without_property_validation\n def update(self):\n with log_errors():\n # If there are too many tasks in the scheduler we'll disable this\n # compoonents to not overload scheduler or client. Once we drop\n # below the threshold, the data is filled up again as usual\n if len(self.scheduler.tasks) > self.max_items:\n self.subtitle.text = \"Scheduler has too many tasks to display.\"\n for container in [self.node_source, self.edge_source]:\n container.data = {col: [] for col in container.column_names}\n else:\n # occasionally reset the column data source to remove old nodes\n if self.invisible_count > len(self.node_source.data[\"x\"]) / 2:\n self.layout.reset_index()\n self.invisible_count = 0\n update = True\n else:\n update = False\n\n new, self.layout.new = self.layout.new, []\n new_edges = self.layout.new_edges\n self.layout.new_edges = []\n\n self.add_new_nodes_edges(new, new_edges, update=update)\n\n self.patch_updates()\n\n if len(self.scheduler.tasks) == 0:\n self.subtitle.text = \"Scheduler is empty.\"\n else:\n self.subtitle.text = \" \"\n\n @without_property_validation\n def add_new_nodes_edges(self, new, new_edges, update=False):\n if new or update:\n node_key = []\n node_x = []\n node_y = []\n node_state = []\n node_name = []\n edge_x = 
[]\n edge_y = []\n\n x = self.layout.x\n y = self.layout.y\n\n tasks = self.scheduler.tasks\n for key in new:\n try:\n task = tasks[key]\n except KeyError:\n continue\n xx = x[key]\n yy = y[key]\n node_key.append(escape.url_escape(key))\n node_x.append(xx)\n node_y.append(yy)\n node_state.append(task.state)\n node_name.append(task.prefix.name)\n\n for a, b in new_edges:\n try:\n edge_x.append([x[a], x[b]])\n edge_y.append([y[a], y[b]])\n except KeyError:\n pass\n\n node = {\n \"x\": node_x,\n \"y\": node_y,\n \"state\": node_state,\n \"name\": node_name,\n \"key\": node_key,\n \"visible\": [\"True\"] * len(node_x),\n }\n edge = {\"x\": edge_x, \"y\": edge_y, \"visible\": [\"True\"] * len(edge_x)}\n\n if update or not len(self.node_source.data[\"x\"]):\n # see https://github.com/bokeh/bokeh/issues/7523\n self.node_source.data.update(node)\n self.edge_source.data.update(edge)\n else:\n self.node_source.stream(node)\n self.edge_source.stream(edge)\n\n @without_property_validation\n def patch_updates(self):\n \"\"\"\n Small updates like color changes or lost nodes from task transitions\n \"\"\"\n n = len(self.node_source.data[\"x\"])\n m = len(self.edge_source.data[\"x\"])\n\n if self.layout.state_updates:\n state_updates = self.layout.state_updates\n self.layout.state_updates = []\n updates = [(i, c) for i, c in state_updates if i < n]\n self.node_source.patch({\"state\": updates})\n\n if self.layout.visible_updates:\n updates = self.layout.visible_updates\n updates = [(i, c) for i, c in updates if i < n]\n self.layout.visible_updates = []\n self.node_source.patch({\"visible\": updates})\n self.invisible_count += len(updates)\n\n if self.layout.visible_edge_updates:\n updates = self.layout.visible_edge_updates\n updates = [(i, c) for i, c in updates if i < m]\n self.layout.visible_edge_updates = []\n self.edge_source.patch({\"visible\": updates})\n\n def __del__(self):\n self.scheduler.remove_plugin(self.layout)\n\n\nclass TaskGroupGraph(DashboardComponent):\n 
\"\"\"\n Task Group Graph\n\n Creates a graph layout for TaskGroups on the scheduler. It assigns\n (x, y) locations to all the TaskGroups and lays them out by according\n to their dependencies. The layout gets updated every time that new\n TaskGroups are added.\n\n Each task group node incodes information about task progress, memory,\n and output type into glyphs, as well as a hover tooltip with more detailed\n information on name, computation time, memory, and tasks status.\n \"\"\"\n\n def __init__(self, scheduler, **kwargs):\n self.scheduler = scheduler\n\n self.nodes_layout = {}\n self.arrows_layout = {}\n\n self.old_counter = -1\n\n self.nodes_source = ColumnDataSource(\n {\n \"x\": [],\n \"y\": [],\n \"w_box\": [],\n \"h_box\": [],\n \"name\": [],\n \"tot_tasks\": [],\n \"color\": [],\n \"x_start\": [],\n \"x_end\": [],\n \"y_start\": [],\n \"y_end\": [],\n \"x_end_progress\": [],\n \"mem_alpha\": [],\n \"node_line_width\": [],\n \"comp_tasks\": [],\n \"url_logo\": [],\n \"x_logo\": [],\n \"y_logo\": [],\n \"w_logo\": [],\n \"h_logo\": [],\n \"in_processing\": [],\n \"in_memory\": [],\n \"in_released\": [],\n \"in_erred\": [],\n \"compute_time\": [],\n \"memory\": [],\n }\n )\n\n self.arrows_source = ColumnDataSource({\"xs\": [], \"ys\": [], \"xe\": [], \"ye\": []})\n\n self.root = figure(title=\"Task Groups Graph\", match_aspect=True, **kwargs)\n self.root.axis.visible = False\n self.subtitle = Title(text=\" \", text_font_style=\"italic\")\n self.root.add_layout(self.subtitle, \"above\")\n\n rect = self.root.rect(\n x=\"x\",\n y=\"y\",\n width=\"w_box\",\n height=\"h_box\",\n color=\"color\",\n fill_alpha=\"mem_alpha\",\n line_color=\"black\",\n line_width=\"node_line_width\",\n source=self.nodes_source,\n )\n\n ####plot tg log\n self.root.image_url(\n url=\"url_logo\",\n x=\"x_logo\",\n y=\"y_logo\",\n w=\"w_logo\",\n h=\"h_logo\",\n anchor=\"center\",\n source=self.nodes_source,\n )\n\n # progress bar plain box\n self.root.quad(\n left=\"x_start\",\n 
right=\"x_end\",\n bottom=\"y_start\",\n top=\"y_end\",\n color=None,\n line_color=\"black\",\n source=self.nodes_source,\n )\n\n # progress bar\n self.root.quad(\n left=\"x_start\",\n right=\"x_end_progress\",\n bottom=\"y_start\",\n top=\"y_end\",\n color=\"color\",\n line_color=None,\n fill_alpha=0.6,\n source=self.nodes_source,\n )\n\n self.arrows = Arrow(\n end=VeeHead(size=8),\n line_color=\"black\",\n line_alpha=0.5,\n line_width=1,\n x_start=\"xs\",\n y_start=\"ys\",\n x_end=\"xe\",\n y_end=\"ye\",\n source=self.arrows_source,\n )\n self.root.add_layout(self.arrows)\n\n self.root.xgrid.grid_line_color = None\n self.root.ygrid.grid_line_color = None\n self.root.x_range.range_padding = 0.5\n self.root.y_range.range_padding = 0.5\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 12px; font-weight: bold;\">Name:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@name</span>\n </div>\n <div>\n <span style=\"font-size: 12px; font-weight: bold;\">Compute time:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@compute_time</span>\n </div>\n <div>\n <span style=\"font-size: 12px; font-weight: bold;\">Memory:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@memory</span>\n </div>\n <div>\n <span style=\"font-size: 12px; font-weight: bold;\">Tasks:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@tot_tasks</span>\n </div>\n <div style=\"margin-left: 2em;\">\n <span style=\"font-size: 12px; font-weight: bold;\">Completed:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@comp_tasks</span>\n </div>\n <div style=\"margin-left: 2em;\">\n <span style=\"font-size: 12px; font-weight: bold;\">Processing:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@in_processing</span>\n </div>\n <div style=\"margin-left: 2em;\">\n <span 
style=\"font-size: 12px; font-weight: bold;\">In memory:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@in_memory</span>\n </div>\n <div style=\"margin-left: 2em;\">\n <span style=\"font-size: 12px; font-weight: bold;\">Erred:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@in_erred</span>\n </div>\n <div style=\"margin-left: 2em;\">\n <span style=\"font-size: 12px; font-weight: bold;\">Released:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@in_released</span>\n </div>\n \"\"\",\n renderers=[rect],\n )\n\n self.root.add_tools(hover)\n\n @without_property_validation\n def update_layout(self):\n\n with log_errors():\n # get dependecies per task group\n # in some cases there are tg that have themeselves as dependencies, we remove those.\n dependencies = {\n k: {ds.name for ds in ts.dependencies if ds.name != k}\n for k, ts in self.scheduler.task_groups.items()\n }\n\n import dask\n\n order = dask.order.order(\n dsk={group.name: 1 for k, group in self.scheduler.task_groups.items()},\n dependencies=dependencies,\n )\n\n ordered = sorted(self.scheduler.task_groups, key=order.get)\n\n xs = {}\n ys = {}\n locations = set()\n nodes_layout = {}\n arrows_layout = {}\n for tg in ordered:\n if dependencies[tg]:\n x = max(xs[dep] for dep in dependencies[tg]) + 1\n y = max(ys[dep] for dep in dependencies[tg])\n if (\n len(dependencies[tg]) > 1\n and len({ys[dep] for dep in dependencies[tg]}) == 1\n ):\n y += 1\n else:\n x = 0\n y = max(ys.values()) + 1 if ys else 0\n\n while (x, y) in locations: # avoid collisions by moving up\n y += 1\n\n locations.add((x, y))\n\n xs[tg], ys[tg] = x, y\n\n # info neded for node layout to coulmn data source\n nodes_layout[tg] = {\"x\": xs[tg], \"y\": ys[tg]}\n\n # info needed for arrow layout\n arrows_layout[tg] = {\n \"nstart\": dependencies[tg],\n \"nend\": [tg] * len(dependencies[tg]),\n }\n\n return nodes_layout, arrows_layout\n\n def 
compute_size(self, x, min_box, max_box):\n start = 0.4\n end = 0.8\n\n y = (end - start) / (max_box - min_box) * (x - min_box) + start\n\n return y\n\n @without_property_validation\n def update(self):\n\n if self.scheduler.transition_counter == self.old_counter:\n return\n self.old_counter = self.scheduler.transition_counter\n\n if not self.scheduler.task_groups:\n self.subtitle.text = \"Scheduler is empty.\"\n else:\n self.subtitle.text = \" \"\n\n if self.nodes_layout.keys() != self.scheduler.task_groups.keys():\n self.nodes_layout, self.arrows_layout = self.update_layout()\n\n nodes_data = {\n \"x\": [],\n \"y\": [],\n \"w_box\": [],\n \"h_box\": [],\n \"name\": [],\n \"color\": [],\n \"tot_tasks\": [],\n \"x_start\": [],\n \"x_end\": [],\n \"y_start\": [],\n \"y_end\": [],\n \"x_end_progress\": [],\n \"mem_alpha\": [],\n \"node_line_width\": [],\n \"comp_tasks\": [],\n \"url_logo\": [],\n \"x_logo\": [],\n \"y_logo\": [],\n \"w_logo\": [],\n \"h_logo\": [],\n \"in_processing\": [],\n \"in_memory\": [],\n \"in_released\": [],\n \"in_erred\": [],\n \"compute_time\": [],\n \"memory\": [],\n }\n\n arrows_data = {\n \"xs\": [],\n \"ys\": [],\n \"xe\": [],\n \"ye\": [],\n }\n\n durations = set()\n nbytes = set()\n for key, tg in self.scheduler.task_groups.items():\n\n if tg.duration and tg.nbytes_total:\n durations.add(tg.duration)\n nbytes.add(tg.nbytes_total)\n\n durations_min = min(durations, default=0)\n durations_max = max(durations, default=0)\n nbytes_min = min(nbytes, default=0)\n nbytes_max = max(nbytes, default=0)\n\n box_dim = {}\n for key, tg in self.scheduler.task_groups.items():\n\n comp_tasks = (\n tg.states[\"released\"] + tg.states[\"memory\"] + tg.states[\"erred\"]\n )\n tot_tasks = sum(tg.states.values())\n\n # compute width and height of boxes\n if (\n tg.duration\n and tg.nbytes_total\n and comp_tasks\n and len(durations) > 1\n and len(nbytes) > 1\n ):\n\n # scale duration (width)\n width_box = self.compute_size(\n tg.duration / comp_tasks * 
tot_tasks,\n min_box=durations_min / comp_tasks * tot_tasks,\n max_box=durations_max / comp_tasks * tot_tasks,\n )\n\n # need to scale memory (height)\n height_box = self.compute_size(\n tg.nbytes_total / comp_tasks * tot_tasks,\n min_box=nbytes_min / comp_tasks * tot_tasks,\n max_box=nbytes_max / comp_tasks * tot_tasks,\n )\n\n else:\n width_box = 0.6\n height_box = width_box / 2\n\n box_dim[key] = {\"width\": width_box, \"height\": height_box}\n\n for key, tg in self.scheduler.task_groups.items():\n x = self.nodes_layout[key][\"x\"]\n y = self.nodes_layout[key][\"y\"]\n width = box_dim[key][\"width\"]\n height = box_dim[key][\"height\"]\n\n # main boxes layout\n nodes_data[\"x\"].append(x)\n nodes_data[\"y\"].append(y)\n nodes_data[\"w_box\"].append(width)\n nodes_data[\"h_box\"].append(height)\n\n comp_tasks = (\n tg.states[\"released\"] + tg.states[\"memory\"] + tg.states[\"erred\"]\n )\n tot_tasks = sum(tg.states.values())\n\n nodes_data[\"name\"].append(tg.prefix.name)\n\n nodes_data[\"color\"].append(color_of(tg.prefix.name))\n nodes_data[\"tot_tasks\"].append(tot_tasks)\n\n # memory alpha factor by 0.4 if not get's too dark\n nodes_data[\"mem_alpha\"].append(\n (tg.states[\"memory\"] / sum(tg.states.values())) * 0.4\n )\n\n # main box line width\n if tg.states[\"processing\"]:\n nodes_data[\"node_line_width\"].append(5)\n else:\n nodes_data[\"node_line_width\"].append(1)\n\n # progress bar data update\n nodes_data[\"x_start\"].append(x - width / 2)\n nodes_data[\"x_end\"].append(x + width / 2)\n\n nodes_data[\"y_start\"].append(y - height / 2)\n nodes_data[\"y_end\"].append(y - height / 2 + height * 0.4)\n\n nodes_data[\"x_end_progress\"].append(\n x - width / 2 + width * comp_tasks / tot_tasks\n )\n\n # arrows\n arrows_data[\"xs\"] += [\n self.nodes_layout[k][\"x\"] + box_dim[k][\"width\"] / 2\n for k in self.arrows_layout[key][\"nstart\"]\n ]\n arrows_data[\"ys\"] += [\n self.nodes_layout[k][\"y\"] for k in self.arrows_layout[key][\"nstart\"]\n ]\n 
arrows_data[\"xe\"] += [\n self.nodes_layout[k][\"x\"] - box_dim[k][\"width\"] / 2\n for k in self.arrows_layout[key][\"nend\"]\n ]\n arrows_data[\"ye\"] += [\n self.nodes_layout[k][\"y\"] for k in self.arrows_layout[key][\"nend\"]\n ]\n\n # LOGOS\n if len(tg.types) == 1:\n logo_type = next(iter(tg.types)).split(\".\")[0]\n try:\n url_logo = logos_dict[logo_type]\n except KeyError:\n url_logo = \"\"\n else:\n url_logo = \"\"\n\n nodes_data[\"url_logo\"].append(url_logo)\n\n nodes_data[\"x_logo\"].append(x + width / 3)\n nodes_data[\"y_logo\"].append(y + height / 3)\n\n ratio = width / height\n\n if ratio > 1:\n nodes_data[\"h_logo\"].append(height * 0.3)\n nodes_data[\"w_logo\"].append(width * 0.3 / ratio)\n else:\n nodes_data[\"h_logo\"].append(height * 0.3 * ratio)\n nodes_data[\"w_logo\"].append(width * 0.3)\n\n # compute_time and memory\n nodes_data[\"compute_time\"].append(format_time(tg.duration))\n nodes_data[\"memory\"].append(format_bytes(tg.nbytes_total))\n\n # Add some status to hover\n tasks_processing = tg.states[\"processing\"]\n tasks_memory = tg.states[\"memory\"]\n tasks_relased = tg.states[\"released\"]\n tasks_erred = tg.states[\"erred\"]\n\n nodes_data[\"comp_tasks\"].append(\n f\"{comp_tasks} ({comp_tasks / tot_tasks * 100:.0f} %)\"\n )\n nodes_data[\"in_processing\"].append(\n f\"{tasks_processing} ({tasks_processing/ tot_tasks * 100:.0f} %)\"\n )\n nodes_data[\"in_memory\"].append(\n f\"{tasks_memory} ({tasks_memory/ tot_tasks * 100:.0f} %)\"\n )\n nodes_data[\"in_released\"].append(\n f\"{tasks_relased} ({tasks_relased/ tot_tasks * 100:.0f} %)\"\n )\n nodes_data[\"in_erred\"].append(\n f\"{ tasks_erred} ({tasks_erred/ tot_tasks * 100:.0f} %)\"\n )\n\n self.nodes_source.data.update(nodes_data)\n self.arrows_source.data.update(arrows_data)\n\n\nclass TaskProgress(DashboardComponent):\n \"\"\"Progress bars per task type\"\"\"\n\n def __init__(self, scheduler, **kwargs):\n self.scheduler = scheduler\n\n data = progress_quads(\n dict(all={}, 
memory={}, erred={}, released={}, processing={})\n )\n self.source = ColumnDataSource(data=data)\n\n x_range = DataRange1d(range_padding=0)\n y_range = Range1d(-8, 0)\n\n self.root = figure(\n id=\"bk-task-progress-plot\",\n title=\"Progress\",\n name=\"task_progress\",\n x_range=x_range,\n y_range=y_range,\n toolbar_location=None,\n tools=\"\",\n min_border_bottom=50,\n **kwargs,\n )\n self.root.line( # just to define early ranges\n x=[0, 0.9], y=[-1, 0], line_color=\"#FFFFFF\", alpha=0.0\n )\n self.root.quad(\n source=self.source,\n top=\"top\",\n bottom=\"bottom\",\n left=\"left\",\n right=\"right\",\n fill_color=\"#aaaaaa\",\n line_color=\"#aaaaaa\",\n fill_alpha=0.1,\n line_alpha=0.3,\n )\n self.root.quad(\n source=self.source,\n top=\"top\",\n bottom=\"bottom\",\n left=\"left\",\n right=\"released-loc\",\n fill_color=\"color\",\n line_color=\"color\",\n fill_alpha=0.6,\n )\n self.root.quad(\n source=self.source,\n top=\"top\",\n bottom=\"bottom\",\n left=\"released-loc\",\n right=\"memory-loc\",\n fill_color=\"color\",\n line_color=\"color\",\n fill_alpha=1.0,\n )\n self.root.quad(\n source=self.source,\n top=\"top\",\n bottom=\"bottom\",\n left=\"memory-loc\",\n right=\"erred-loc\",\n fill_color=\"black\",\n fill_alpha=0.5,\n line_alpha=0,\n )\n self.root.quad(\n source=self.source,\n top=\"top\",\n bottom=\"bottom\",\n left=\"erred-loc\",\n right=\"processing-loc\",\n fill_color=\"gray\",\n fill_alpha=0.35,\n line_alpha=0,\n )\n self.root.text(\n source=self.source,\n text=\"show-name\",\n y=\"bottom\",\n x=\"left\",\n x_offset=5,\n text_font_size=value(\"10pt\"),\n )\n self.root.text(\n source=self.source,\n text=\"done\",\n y=\"bottom\",\n x=\"right\",\n x_offset=-5,\n text_align=\"right\",\n text_font_size=value(\"10pt\"),\n )\n self.root.ygrid.visible = False\n self.root.yaxis.minor_tick_line_alpha = 0\n self.root.yaxis.visible = False\n self.root.xgrid.visible = False\n self.root.xaxis.minor_tick_line_alpha = 0\n self.root.xaxis.visible = False\n\n 
hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Name:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@name</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">All:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@all</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Memory:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@memory</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Erred:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@erred</span>\n </div>\n <div>\n <span style=\"font-size: 14px; font-weight: bold;\">Ready:</span>&nbsp;\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@processing</span>\n </div>\n \"\"\",\n )\n self.root.add_tools(hover)\n\n @without_property_validation\n def update(self):\n with log_errors():\n state = {\n \"memory\": {},\n \"erred\": {},\n \"released\": {},\n \"processing\": {},\n \"waiting\": {},\n }\n\n for tp in self.scheduler.task_prefixes.values():\n active_states = tp.active_states\n if any(active_states.get(s) for s in state.keys()):\n state[\"memory\"][tp.name] = active_states[\"memory\"]\n state[\"erred\"][tp.name] = active_states[\"erred\"]\n state[\"released\"][tp.name] = active_states[\"released\"]\n state[\"processing\"][tp.name] = active_states[\"processing\"]\n state[\"waiting\"][tp.name] = active_states[\"waiting\"]\n\n state[\"all\"] = {\n k: sum(v[k] for v in state.values()) for k in state[\"memory\"]\n }\n\n if not state[\"all\"] and not len(self.source.data[\"all\"]):\n return\n\n d = progress_quads(state)\n\n update(self.source, d)\n\n totals = {\n k: sum(state[k].values())\n for k in [\"all\", \"memory\", \"erred\", \"released\", \"waiting\"]\n }\n totals[\"processing\"] = totals[\"all\"] - sum(\n v for 
k, v in totals.items() if k != \"all\"\n )\n\n self.root.title.text = (\n \"Progress -- total: %(all)s, \"\n \"in-memory: %(memory)s, processing: %(processing)s, \"\n \"waiting: %(waiting)s, \"\n \"erred: %(erred)s\" % totals\n )\n\n\nclass WorkerTable(DashboardComponent):\n \"\"\"Status of the current workers\n\n This is two plots, a text-based table for each host and a thin horizontal\n plot laying out hosts by their current memory use.\n \"\"\"\n\n excluded_names = {\n \"executing\",\n \"in_flight\",\n \"in_memory\",\n \"ready\",\n \"time\",\n \"spilled_nbytes\",\n }\n\n def __init__(self, scheduler, width=800, **kwargs):\n self.scheduler = scheduler\n self.names = [\n \"name\",\n \"address\",\n \"nthreads\",\n \"cpu\",\n \"memory\",\n \"memory_limit\",\n \"memory_percent\",\n \"memory_managed\",\n \"memory_unmanaged_old\",\n \"memory_unmanaged_recent\",\n \"memory_spilled\",\n \"num_fds\",\n \"read_bytes\",\n \"write_bytes\",\n \"cpu_fraction\",\n ]\n workers = self.scheduler.workers.values()\n self.extra_names = sorted(\n {\n m\n for ws in workers\n for m, v in ws.metrics.items()\n if m not in self.names and isinstance(v, (str, int, float))\n }\n - self.excluded_names\n )\n\n table_names = [\n \"name\",\n \"address\",\n \"nthreads\",\n \"cpu\",\n \"memory\",\n \"memory_limit\",\n \"memory_percent\",\n \"memory_managed\",\n \"memory_unmanaged_old\",\n \"memory_unmanaged_recent\",\n \"memory_spilled\",\n \"num_fds\",\n \"read_bytes\",\n \"write_bytes\",\n ]\n column_title_renames = {\n \"memory_limit\": \"limit\",\n \"memory_percent\": \"memory %\",\n \"memory_managed\": \"managed\",\n \"memory_unmanaged_old\": \"unmanaged old\",\n \"memory_unmanaged_recent\": \"unmanaged recent\",\n \"memory_spilled\": \"spilled\",\n \"num_fds\": \"# fds\",\n \"read_bytes\": \"read\",\n \"write_bytes\": \"write\",\n }\n\n self.source = ColumnDataSource({k: [] for k in self.names})\n\n columns = {\n name: TableColumn(field=name, title=column_title_renames.get(name, name))\n for 
name in table_names\n }\n\n formatters = {\n \"cpu\": NumberFormatter(format=\"0 %\"),\n \"memory_percent\": NumberFormatter(format=\"0.0 %\"),\n \"memory\": NumberFormatter(format=\"0.0 b\"),\n \"memory_limit\": NumberFormatter(format=\"0.0 b\"),\n \"memory_managed\": NumberFormatter(format=\"0.0 b\"),\n \"memory_unmanaged_old\": NumberFormatter(format=\"0.0 b\"),\n \"memory_unmanaged_recent\": NumberFormatter(format=\"0.0 b\"),\n \"memory_spilled\": NumberFormatter(format=\"0.0 b\"),\n \"read_bytes\": NumberFormatter(format=\"0 b\"),\n \"write_bytes\": NumberFormatter(format=\"0 b\"),\n \"num_fds\": NumberFormatter(format=\"0\"),\n \"nthreads\": NumberFormatter(format=\"0\"),\n }\n\n table = DataTable(\n source=self.source,\n columns=[columns[n] for n in table_names],\n reorderable=True,\n sortable=True,\n width=width,\n index_position=None,\n )\n\n for name in table_names:\n if name in formatters:\n table.columns[table_names.index(name)].formatter = formatters[name]\n\n extra_names = [\"name\", \"address\"] + self.extra_names\n extra_columns = {\n name: TableColumn(field=name, title=column_title_renames.get(name, name))\n for name in extra_names\n }\n\n extra_table = DataTable(\n source=self.source,\n columns=[extra_columns[n] for n in extra_names],\n reorderable=True,\n sortable=True,\n width=width,\n index_position=None,\n )\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">Worker (@name): </span>\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@memory_percent{0.0 %}</span>\n </div>\n \"\"\",\n )\n\n mem_plot = figure(\n title=\"Memory Use (%)\",\n toolbar_location=None,\n x_range=(0, 1),\n y_range=(-0.1, 0.1),\n height=60,\n width=width,\n tools=\"\",\n min_border_right=0,\n **kwargs,\n )\n mem_plot.circle(\n source=self.source, x=\"memory_percent\", y=0, size=10, fill_alpha=0.5\n )\n mem_plot.ygrid.visible = False\n 
mem_plot.yaxis.minor_tick_line_alpha = 0\n mem_plot.xaxis.visible = False\n mem_plot.yaxis.visible = False\n mem_plot.add_tools(hover, BoxSelectTool())\n\n hover = HoverTool(\n point_policy=\"follow_mouse\",\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">Worker (@name): </span>\n <span style=\"font-size: 10px; font-family: Monaco, monospace;\">@cpu_fraction{0 %}</span>\n </div>\n \"\"\",\n )\n\n cpu_plot = figure(\n title=\"CPU Use (%)\",\n toolbar_location=None,\n x_range=(0, 1),\n y_range=(-0.1, 0.1),\n height=60,\n width=width,\n tools=\"\",\n min_border_right=0,\n **kwargs,\n )\n cpu_plot.circle(\n source=self.source, x=\"cpu_fraction\", y=0, size=10, fill_alpha=0.5\n )\n cpu_plot.ygrid.visible = False\n cpu_plot.yaxis.minor_tick_line_alpha = 0\n cpu_plot.xaxis.visible = False\n cpu_plot.yaxis.visible = False\n cpu_plot.add_tools(hover, BoxSelectTool())\n self.cpu_plot = cpu_plot\n\n if \"sizing_mode\" in kwargs:\n sizing_mode = {\"sizing_mode\": kwargs[\"sizing_mode\"]}\n else:\n sizing_mode = {}\n\n components = [cpu_plot, mem_plot, table]\n if self.extra_names:\n components.append(extra_table)\n\n self.root = column(*components, id=\"bk-worker-table\", **sizing_mode)\n\n @without_property_validation\n def update(self):\n data = {name: [] for name in self.names + self.extra_names}\n for i, (addr, ws) in enumerate(\n sorted(self.scheduler.workers.items(), key=lambda kv: str(kv[1].name))\n ):\n minfo = ws.memory\n\n for name in self.names + self.extra_names:\n data[name].append(ws.metrics.get(name, None))\n data[\"name\"][-1] = ws.name if ws.name is not None else i\n data[\"address\"][-1] = ws.address\n if ws.memory_limit:\n data[\"memory_percent\"][-1] = ws.metrics[\"memory\"] / ws.memory_limit\n else:\n data[\"memory_percent\"][-1] = \"\"\n data[\"memory_limit\"][-1] = ws.memory_limit\n data[\"memory_managed\"][-1] = minfo.managed_in_memory\n data[\"memory_unmanaged_old\"][-1] = minfo.unmanaged_old\n 
data[\"memory_unmanaged_recent\"][-1] = minfo.unmanaged_recent\n data[\"memory_unmanaged_recent\"][-1] = minfo.unmanaged_recent\n data[\"memory_spilled\"][-1] = minfo.managed_spilled\n data[\"cpu\"][-1] = ws.metrics[\"cpu\"] / 100.0\n data[\"cpu_fraction\"][-1] = ws.metrics[\"cpu\"] / 100.0 / ws.nthreads\n data[\"nthreads\"][-1] = ws.nthreads\n\n for name in self.names + self.extra_names:\n if name == \"name\":\n data[name].insert(0, f\"Total ({len(data[name])})\")\n continue\n try:\n if len(self.scheduler.workers) == 0:\n total_data = None\n elif name == \"memory_percent\":\n total_mem = sum(\n ws.memory_limit for ws in self.scheduler.workers.values()\n )\n total_data = (\n (\n sum(\n ws.metrics[\"memory\"]\n for ws in self.scheduler.workers.values()\n )\n / total_mem\n )\n if total_mem\n else \"\"\n )\n elif name == \"cpu\":\n total_data = (\n sum(ws.metrics[\"cpu\"] for ws in self.scheduler.workers.values())\n / 100\n / len(self.scheduler.workers.values())\n )\n elif name == \"cpu_fraction\":\n total_data = (\n sum(ws.metrics[\"cpu\"] for ws in self.scheduler.workers.values())\n / 100\n / sum(ws.nthreads for ws in self.scheduler.workers.values())\n )\n else:\n total_data = sum(data[name])\n\n data[name].insert(0, total_data)\n except TypeError:\n data[name].insert(0, None)\n\n self.source.data.update(data)\n\n\nclass SchedulerLogs:\n def __init__(self, scheduler):\n logs = Log(\n \"\\n\".join(line for level, line in scheduler.get_logs())\n )._repr_html_()\n\n self.root = Div(text=logs)\n\n\ndef systemmonitor_doc(scheduler, extra, doc):\n with log_errors():\n sysmon = SystemMonitor(scheduler, sizing_mode=\"stretch_both\")\n doc.title = \"Dask: Scheduler System Monitor\"\n add_periodic_callback(doc, sysmon, 500)\n\n doc.add_root(sysmon.root)\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef stealing_doc(scheduler, extra, doc):\n with log_errors():\n occupancy = Occupancy(scheduler)\n 
stealing_ts = StealingTimeSeries(scheduler)\n stealing_events = StealingEvents(scheduler)\n stealing_events.root.x_range = stealing_ts.root.x_range\n doc.title = \"Dask: Work Stealing\"\n add_periodic_callback(doc, occupancy, 500)\n add_periodic_callback(doc, stealing_ts, 500)\n add_periodic_callback(doc, stealing_events, 500)\n\n doc.add_root(\n row(\n occupancy.root,\n column(\n stealing_ts.root,\n stealing_events.root,\n sizing_mode=\"stretch_both\",\n ),\n )\n )\n\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef events_doc(scheduler, extra, doc):\n with log_errors():\n events = Events(scheduler, \"all\", height=250)\n events.update()\n add_periodic_callback(doc, events, 500)\n doc.title = \"Dask: Scheduler Events\"\n doc.add_root(column(events.root, sizing_mode=\"scale_width\"))\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef workers_doc(scheduler, extra, doc):\n with log_errors():\n table = WorkerTable(scheduler)\n table.update()\n add_periodic_callback(doc, table, 500)\n doc.title = \"Dask: Workers\"\n doc.add_root(table.root)\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef tasks_doc(scheduler, extra, doc):\n with log_errors():\n ts = TaskStream(\n scheduler,\n n_rectangles=dask.config.get(\n \"distributed.scheduler.dashboard.tasks.task-stream-length\"\n ),\n clear_interval=\"60s\",\n sizing_mode=\"stretch_both\",\n )\n ts.update()\n add_periodic_callback(doc, ts, 5000)\n doc.title = \"Dask: Task Stream\"\n doc.add_root(ts.root)\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef graph_doc(scheduler, extra, doc):\n with log_errors():\n graph = TaskGraph(scheduler, sizing_mode=\"stretch_both\")\n doc.title = \"Dask: Task Graph\"\n graph.update()\n 
add_periodic_callback(doc, graph, 200)\n doc.add_root(graph.root)\n\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef tg_graph_doc(scheduler, extra, doc):\n with log_errors():\n tg_graph = TaskGroupGraph(scheduler, sizing_mode=\"stretch_both\")\n doc.title = \"Dask: Task Groups Graph\"\n tg_graph.update()\n add_periodic_callback(doc, tg_graph, 200)\n doc.add_root(tg_graph.root)\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n\ndef status_doc(scheduler, extra, doc):\n with log_errors():\n cluster_memory = ClusterMemory(scheduler, sizing_mode=\"stretch_both\")\n cluster_memory.update()\n add_periodic_callback(doc, cluster_memory, 100)\n doc.add_root(cluster_memory.root)\n\n if len(scheduler.workers) < 50:\n workers_memory = WorkersMemory(scheduler, sizing_mode=\"stretch_both\")\n processing = CurrentLoad(scheduler, sizing_mode=\"stretch_both\")\n\n processing_root = processing.processing_figure\n else:\n workers_memory = WorkersMemoryHistogram(\n scheduler, sizing_mode=\"stretch_both\"\n )\n processing = ProcessingHistogram(scheduler, sizing_mode=\"stretch_both\")\n\n processing_root = processing.root\n\n current_load = CurrentLoad(scheduler, sizing_mode=\"stretch_both\")\n occupancy = Occupancy(scheduler, sizing_mode=\"stretch_both\")\n\n cpu_root = current_load.cpu_figure\n occupancy_root = occupancy.root\n\n workers_memory.update()\n processing.update()\n current_load.update()\n occupancy.update()\n\n add_periodic_callback(doc, workers_memory, 100)\n add_periodic_callback(doc, processing, 100)\n add_periodic_callback(doc, current_load, 100)\n add_periodic_callback(doc, occupancy, 100)\n\n doc.add_root(workers_memory.root)\n\n tab1 = Panel(child=processing_root, title=\"Processing\")\n tab2 = Panel(child=cpu_root, title=\"CPU\")\n tab3 = Panel(child=occupancy_root, title=\"Occupancy\")\n\n proc_tabs = Tabs(tabs=[tab1, 
tab2, tab3], name=\"processing_tabs\")\n doc.add_root(proc_tabs)\n\n task_stream = TaskStream(\n scheduler,\n n_rectangles=dask.config.get(\n \"distributed.scheduler.dashboard.status.task-stream-length\"\n ),\n clear_interval=\"5s\",\n sizing_mode=\"stretch_both\",\n )\n task_stream.update()\n add_periodic_callback(doc, task_stream, 100)\n doc.add_root(task_stream.root)\n\n task_progress = TaskProgress(scheduler, sizing_mode=\"stretch_both\")\n task_progress.update()\n add_periodic_callback(doc, task_progress, 100)\n doc.add_root(task_progress.root)\n\n doc.title = \"Dask: Status\"\n doc.theme = BOKEH_THEME\n doc.template = env.get_template(\"status.html\")\n doc.template_variables.update(extra)\n\n\n@curry\ndef individual_doc(cls, interval, scheduler, extra, doc, fig_attr=\"root\", **kwargs):\n with log_errors():\n fig = cls(scheduler, sizing_mode=\"stretch_both\", **kwargs)\n fig.update()\n add_periodic_callback(doc, fig, interval)\n doc.add_root(getattr(fig, fig_attr))\n doc.theme = BOKEH_THEME\n\n\ndef individual_profile_doc(scheduler, extra, doc):\n with log_errors():\n prof = ProfileTimePlot(scheduler, sizing_mode=\"stretch_both\", doc=doc)\n doc.add_root(prof.root)\n prof.trigger_update()\n doc.theme = BOKEH_THEME\n\n\ndef individual_profile_server_doc(scheduler, extra, doc):\n with log_errors():\n prof = ProfileServer(scheduler, sizing_mode=\"stretch_both\", doc=doc)\n doc.add_root(prof.root)\n prof.trigger_update()\n doc.theme = BOKEH_THEME\n\n\ndef profile_doc(scheduler, extra, doc):\n with log_errors():\n doc.title = \"Dask: Profile\"\n prof = ProfileTimePlot(scheduler, sizing_mode=\"stretch_both\", doc=doc)\n doc.add_root(prof.root)\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n prof.trigger_update()\n\n\ndef profile_server_doc(scheduler, extra, doc):\n with log_errors():\n doc.title = \"Dask: Profile of Event Loop\"\n prof = ProfileServer(scheduler, sizing_mode=\"stretch_both\", 
doc=doc)\n doc.add_root(prof.root)\n doc.template = env.get_template(\"simple.html\")\n doc.template_variables.update(extra)\n doc.theme = BOKEH_THEME\n\n prof.trigger_update()\n" ]
[ [ "numpy.array", "numpy.histogram" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jstaker7/espaloma
[ "d80d280acd608dc04c93966afe15cc3cb74f65a8" ]
[ "espaloma/utils/geometry.py" ]
[ "import numpy as np\n\n\ndef _sample_unit_circle(n_samples: int = 1) -> np.ndarray:\n \"\"\"\n >>> np.isclose(np.linalg.norm(_sample_unit_circle(1)), 1)\n True\n\n \"\"\"\n theta = np.random.rand(n_samples) * 2 * np.pi\n x = np.cos(theta)\n y = np.sin(theta)\n xy = np.array([x, y]).T\n assert xy.shape == (n_samples, 2)\n return xy\n\n\ndef _sample_four_particle_torsion_scan(n_samples: int = 1) -> np.ndarray:\n \"\"\"Generate n_samples random configurations of a 4-particle system abcd where\n * distances ab, bc, cd are constant,\n * angles abc, bcd are constant\n * dihedral angle abcd is uniformly distributed in [0, 2pi]\n\n Returns\n -------\n xyz : np.ndarray, shape = (n_samples, 4, 3)\n\n Notes\n -----\n * Positions of a,b,c are constant, and x-coordinate of d is constant.\n To be more exacting, could add random displacements and rotations.\n \"\"\"\n a = (-3, -1, 0)\n b = (-2, 0, 0)\n c = (-1, 0, 0)\n d = (0, 1, 0)\n\n # form one 3D configuration\n conf = np.array([a, b, c, d])\n assert conf.shape == (4, 3)\n\n # make n_samples copies\n xyz = np.array([conf] * n_samples, dtype=float)\n assert xyz.shape == (n_samples, 4, 3)\n\n # assign y and z coordinates of particle d to unit-circle samples\n xyz[:, 3, 1:] = _sample_unit_circle(n_samples)\n\n return xyz\n\n\ndef _timemachine_signed_torsion_angle(ci, cj, ck, cl):\n \"\"\"Reference implementation from Yutong Zhao's timemachine\n\n Copied directly from\n https://github.com/proteneer/timemachine/blob/1a0ab45e605dc1e28c44ea90f38cb0dedce5c4db/timemachine/potentials/bonded.py#L152-L199\n (but with 3 lines of dead code removed, and delta_r inlined)\n \"\"\"\n\n rij = cj - ci\n rkj = cj - ck\n rkl = cl - ck\n\n n1 = np.cross(rij, rkj)\n n2 = np.cross(rkj, rkl)\n\n y = np.sum(\n np.multiply(\n np.cross(n1, n2), rkj / np.linalg.norm(rkj, axis=-1, keepdims=True)\n ),\n axis=-1,\n )\n x = np.sum(np.multiply(n1, n2), -1)\n\n return np.arctan2(y, x)\n" ]
[ [ "numpy.multiply", "numpy.cos", "numpy.linalg.norm", "numpy.sin", "numpy.arctan2", "numpy.random.rand", "numpy.cross", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lubosmj/I2I-GANs
[ "059e3896afc8524825164b612cbe120d72d676e6" ]
[ "examples/travelgan_trainer.py" ]
[ "import os\nimport tensorflow as tf\n\nfrom contextlib import ExitStack\nfrom functools import partial\n\nfrom tensorflow import keras\n\nfrom i2i_gans import parsers, datasets, callbacks, TraVeLGAN\n\n\nclass TraVeLGANParser(parsers.Parser):\n def init_train_subparser(self):\n super().init_train_subparser()\n\n self.train.add_argument(\"--siamese_dim\", type=int, default=1000)\n self.train.add_argument(\"--lambda_travel\", type=float, default=10.0)\n self.train.add_argument(\"--lambda_margin\", type=float, default=10.0)\n self.train.add_argument(\"--lambda_gan\", type=float, default=1.0)\n\n self.train.add_argument(\"--second_domain_B_files\")\n\n\nclass TraVeLGANImageSampler(callbacks.ImageSampler):\n def __init__(self, every_N_epochs, samples_dir, domain_A_dataset, travelgan):\n super().__init__(every_N_epochs, samples_dir)\n\n self.real_A = domain_A_dataset.unbatch().take(self.NUMBER_OF_SAMPLES).batch(1)\n self.travelgan = travelgan\n\n def images_generator(self):\n for inputs in self.real_A:\n outputs = self.travelgan.generator(inputs)\n yield inputs[0], outputs[0]\n\n\ndef preprocessing_model(input_shape=(218, 178, 3), image_size=(128, 128)):\n inputs = keras.layers.Input(shape=input_shape)\n x = keras.layers.ZeroPadding2D(padding=((0,25), (0,0)))(inputs)\n x = keras.layers.experimental.preprocessing.CenterCrop(*image_size)(x)\n x = keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset=-1)(x)\n return keras.Model(inputs=inputs, outputs=x)\n\n\ndef build_cropped_celeba_input_pipeline(domain_files, dataset_size, batch_size, augment):\n d = tf.data.Dataset.list_files(domain_files).take(dataset_size)\n d = d.interleave(datasets.read_image, num_parallel_calls=tf.data.AUTOTUNE)\n\n if augment:\n d = d.map(\n partial(datasets.augment_images, augmentations=augment),\n num_parallel_calls=tf.data.AUTOTUNE,\n )\n\n d = d.batch(batch_size, drop_remainder=True)\n d = d.map(preprocessing_model(), num_parallel_calls=tf.data.AUTOTUNE)\n d = 
d.prefetch(tf.data.AUTOTUNE)\n return d\n\n\nparser = TraVeLGANParser()\nargs = parser.parse_args()\n\ncheckpoint_filepath = os.path.join(args.checkpoints_dir, \"travelgan_checkpoints.{epoch:03d}\")\nevery_N_epochs = (args.dataset_size // args.batch_size) * args.checkpoints_freq\nmodel_checkpoint_callback = keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_filepath, save_freq=every_N_epochs\n)\n\ntrain_A = build_cropped_celeba_input_pipeline(\n args.domain_A_files, args.dataset_size, args.batch_size, args.augment\n)\nif args.second_domain_B_files:\n train_B = datasets.build_input_pipeline(\n args.domain_B_files, args.dataset_size, args.batch_size, args.augment, cache=False\n )\n train_B2 = datasets.build_input_pipeline(\n args.second_domain_B_files, args.dataset_size, args.batch_size, args.augment, cache=False\n )\n train_B = tf.data.experimental.sample_from_datasets([train_B, train_B2])\nelse:\n train_B = datasets.build_input_pipeline(\n args.domain_B_files, args.dataset_size, args.batch_size, args.augment, cache=False\n )\n\ndataset = tf.data.Dataset.zip((train_A, train_B))\n\nstrategy = tf.distribute.MirroredStrategy()\n\nwith ExitStack() as stack:\n if args.parallel:\n stack.enter_context(strategy.scope())\n\n travelgan = TraVeLGAN(**vars(args))\n travelgan.compile()\n\ntravelgan_sampler = TraVeLGANImageSampler(args.samples_freq, args.samples_dir, train_A, travelgan)\n\ntravelgan.fit(\n dataset,\n epochs=args.epochs,\n batch_size=args.batch_size,\n callbacks=[model_checkpoint_callback, travelgan_sampler],\n)\n" ]
[ [ "tensorflow.data.experimental.sample_from_datasets", "tensorflow.keras.callbacks.ModelCheckpoint", "tensorflow.keras.layers.Input", "tensorflow.keras.layers.experimental.preprocessing.CenterCrop", "tensorflow.keras.Model", "tensorflow.data.Dataset.zip", "tensorflow.data.Dataset.list_files", "tensorflow.keras.layers.ZeroPadding2D", "tensorflow.keras.layers.experimental.preprocessing.Rescaling", "tensorflow.distribute.MirroredStrategy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jacobpostman/incubator-tvm
[ "fdef79d317d455eb5c9e9e86feb97416eb594690", "02643d39798c6ec28348235d36d8da626f50d9dd", "02643d39798c6ec28348235d36d8da626f50d9dd" ]
[ "tests/python/unittest/test_tir_buffer.py", "python/tvm/te/hybrid/runtime.py", "tests/python/contrib/test_arm_compute_lib/test_runtime.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport tvm\nfrom tvm import te\nfrom tvm.tir import Buffer\nimport numpy as np\n\ndef test_buffer():\n m = te.size_var('m')\n n = te.size_var('n')\n l = te.size_var('l')\n Ab = tvm.tir.decl_buffer((m, n), \"float32\")\n Bb = tvm.tir.decl_buffer((n, l), \"float32\")\n\n assert isinstance(Ab, tvm.tir.Buffer)\n assert Ab.dtype == \"float32\"\n assert tuple(Ab.shape) == (m, n)\n\n\ndef test_buffer_access_ptr():\n m = te.size_var('m')\n n = te.size_var('n')\n Ab = tvm.tir.decl_buffer((m, n), \"float32\", strides=[n + 1 , 1])\n aptr = Ab.access_ptr(\"rw\")\n assert tvm.ir.structural_equal(aptr.args[3], Ab.strides[0] * m)\n assert aptr.args[0].dtype == Ab.dtype\n assert aptr.args[4].value == Buffer.READ | Buffer.WRITE\n aptr = Ab.access_ptr(\"w\")\n assert aptr.args[4].value == Buffer.WRITE\n\n\ndef test_buffer_access_ptr_offset():\n m = te.size_var('m')\n n = te.size_var('n')\n Ab = tvm.tir.decl_buffer((m, n), \"float32\")\n aptr = Ab.access_ptr(\"rw\", offset=100)\n tvm.testing.assert_prim_expr_equal(aptr.args[2], 100)\n assert aptr.args[4].value == Buffer.READ | Buffer.WRITE\n v = te.size_var('int32')\n aptr = Ab.access_ptr(\"rw\", offset=100 + 100 + v)\n 
tvm.testing.assert_prim_expr_equal(aptr.args[2], 200 + v)\n assert aptr.args[4].value == Buffer.READ | Buffer.WRITE\n aptr = Ab.access_ptr(\"rw\", offset=tvm.tir.call_extern('int32', \"test_call\", 100 + 100 + v))\n tvm.testing.assert_prim_expr_equal(aptr.args[2], tvm.tir.call_extern('int32', \"test_call\", 200 + v))\n assert aptr.args[4].value == Buffer.READ | Buffer.WRITE\n\n\ndef test_buffer_access_ptr_extent():\n m = te.size_var('m')\n n = te.size_var('n')\n Ab = tvm.tir.decl_buffer((m, n), \"float32\")\n aptr = Ab.access_ptr(\"rw\")\n assert tvm.ir.structural_equal(aptr.args[3], m * n)\n aptr = Ab.access_ptr(\"rw\", offset=100)\n assert tvm.ir.structural_equal(aptr.args[3], m * n - 100)\n Ab = tvm.tir.decl_buffer((m, n), \"float32\", strides=[n + 1 , 1])\n aptr = Ab.access_ptr(\"rw\", offset=100)\n assert tvm.ir.structural_equal(aptr.args[3], Ab.strides[0] * m - 100)\n\n\ndef test_buffer_vload():\n m = te.size_var('m')\n n = te.size_var('n')\n Ab = tvm.tir.decl_buffer((m, n), \"float32\", elem_offset=100)\n load = Ab.vload([2, 3])\n tvm.testing.assert_prim_expr_equal(load.index, n * 2 + 103)\n\n\ndef test_buffer_index_merge_mult_mod():\n m = te.size_var('m')\n n = te.size_var('n')\n s = te.size_var('s')\n k0 = te.size_var('k0')\n k1 = te.size_var('k1')\n A = tvm.tir.decl_buffer((m, n), \"float32\")\n A_stride = tvm.tir.decl_buffer((m, n), \"float32\", strides=(s, 1))\n def assert_simplified_equal(index_simplified, index_direct):\n assert tvm.ir.structural_equal(index_simplified, index_direct),\\\n \"index_simplified=%s, index_direct=%s\" %(index_simplified, index_direct)\n idxd = tvm.tir.indexdiv\n idxm = tvm.tir.indexmod\n # Test Case1\n index_simplified = A_stride.vload(\n (idxd(idxm(k0, k1), s), idxm(idxm(k0, k1), s) + idxd(k0, k1) * k1))\n index_direct = A_stride.vload((0, k0))\n assert_simplified_equal(index_simplified, index_direct)\n\n # Test Case2\n index_simplified = A.vload((idxd(idxm(k0, idxd(k1, s)), n),\n idxm(idxm(k0, idxd(k1, s)), n) + idxm(k0, 
k1)))\n index_direct = A.vload((0, idxm(k0, k1) + idxm(k0, idxd(k1, s))))\n assert_simplified_equal(index_simplified, index_direct)\n # Test Case3\n index_simplified = A.vload((idxd((idxd(k0, idxd(k1, s)) * idxd(k1, s)), n) +\n idxd(idxm(k0, idxd(k1, s)), n),\n idxm((idxd(k0, idxd(k1, s)) * idxd(k1, s)), n) +\n idxm(idxm(k0, idxd(k1, s)), n)))\n index_direct = A.vload((0, k0))\n assert_simplified_equal(index_simplified, index_direct)\n # Test Case4 (not able to simplify)\n index_simplified = A.vload((idxd(idxm(k0, idxd(k1, s)), n),\n idxm(idxm(k0, idxd(k1, n)), n) + idxm(k0, k1)))\n index_direct = A.vload((0, idxd(idxm(k0, idxd(k1, s)), n) * n +\n (idxm(idxm(k0, idxd(k1, n)), n) + idxm(k0, k1))))\n assert_simplified_equal(index_simplified, index_direct)\n\n\[email protected]_llvm\ndef test_buffer_broadcast():\n m0, m1, m2 = te.size_var(\"m0\"), te.size_var(\"m1\"), te.size_var(\"m2\")\n n0, n1, n2 = te.size_var(\"n0\"), te.size_var(\"n1\"), te.size_var(\"n2\")\n o0, o1, o2 = te.size_var(\"o0\"), te.size_var(\"o1\"), te.size_var(\"o2\")\n\n A = te.placeholder((m0, m1, m2), name='A')\n B = te.placeholder((n0, n1, n2), name='B')\n\n C = te.compute((o0, o1, o2), lambda i, j, k: A[i, j, k] + B[i, j, k], name='C')\n\n Ab = tvm.tir.decl_buffer(A.shape, A.dtype, name=\"Ab\", buffer_type=\"auto_broadcast\")\n Bb = tvm.tir.decl_buffer(B.shape, B.dtype, name=\"Bb\", buffer_type=\"auto_broadcast\")\n s = te.create_schedule(C.op)\n\n def check():\n fadd = tvm.build(s, [A, B, C], target='llvm', name='bcast_add', binds={A:Ab, B:Bb})\n ctx = tvm.cpu(0)\n a = tvm.nd.array(np.random.uniform(size=(2, 4, 3)).astype(A.dtype), ctx)\n b = tvm.nd.array(np.random.uniform(size=(2, 1, 1)).astype(B.dtype), ctx)\n c = tvm.nd.array(np.zeros((2, 4, 3), dtype=C.dtype), ctx)\n fadd(a, b, c)\n tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())\n\n check()\n\n\[email protected]_llvm\ndef test_buffer_broadcast_expr():\n n0, m0, x = te.size_var('n0'), te.size_var('m0'), 
te.size_var('x')\n n1, m1 = te.size_var('n1'), te.size_var('m1')\n o0, o1 = te.size_var('o0'), te.size_var('o1')\n\n A = te.placeholder((m0, n0), name='A')\n B = te.placeholder((m1, n1), name='B')\n C = te.compute((o0, o1//x), lambda i, j: A[i, j] + B[i, j], name='C')\n\n Ab = tvm.tir.decl_buffer(A.shape, A.dtype, name=\"Ab\", buffer_type=\"auto_broadcast\")\n Bb = tvm.tir.decl_buffer(B.shape, B.dtype, name=\"Bb\", buffer_type=\"auto_broadcast\")\n Cc = tvm.tir.decl_buffer(C.shape, C.dtype, name=\"Cc\", buffer_type=\"auto_broadcast\")\n s = te.create_schedule(C.op)\n\n def check_stride():\n fadd = tvm.build(s, [A, B, C, o1, x], target='llvm', name='bcast_add',\n binds={A:Ab, B:Bb, C:Cc})\n ctx = tvm.cpu(0)\n a = tvm.nd.array(np.random.uniform(size=(2, 4)).astype(A.dtype), ctx)\n b = tvm.nd.array(np.random.uniform(size=(2, 4)).astype(B.dtype), ctx)\n c = tvm.nd.array(np.zeros((2, 4), dtype=C.dtype), ctx)\n fadd(a, b, c, 4, 1)\n tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())\n\n def check_no_stride():\n fadd = tvm.build(s, [A, B, C, o1, x], target='llvm', name='bcast_add',\n binds={A: Ab, B: Bb, C: Cc})\n ctx = tvm.cpu(0)\n a = tvm.nd.array(np.random.uniform(size=(1, 4)).astype(A.dtype), ctx)\n b = tvm.nd.array(np.random.uniform(size=(2, 4)).astype(B.dtype), ctx)\n c = tvm.nd.array(np.zeros((2, 4), dtype=C.dtype), ctx)\n fadd(a, b, c, 4, 1)\n tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())\n\n def check_auto_bind():\n # Let build bind buffers\n fadd = tvm.build(s, [A, B, C, o1, x], target='llvm', name='bcast_add')\n ctx = tvm.cpu(0)\n a = tvm.nd.array(np.random.uniform(size=(1, 4)).astype(A.dtype), ctx)\n b = tvm.nd.array(np.random.uniform(size=(2, 4)).astype(B.dtype), ctx)\n c = tvm.nd.array(np.zeros((2, 4), dtype=C.dtype), ctx)\n fadd(a, b, c, 4, 1)\n tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())\n\n check_stride()\n check_no_stride()\n check_auto_bind()\n\n\nif __name__ == \"__main__\":\n 
test_buffer()\n test_buffer_access_ptr()\n test_buffer_access_ptr_offset()\n test_buffer_access_ptr_extent()\n test_buffer_vload()\n test_buffer_index_merge_mult_mod()\n test_buffer_broadcast()\n test_buffer_broadcast_expr()\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Intrinsics of TVM-Python Hybrid Script for Python emulation runtime\"\"\"\n\nimport numpy\nfrom tvm import target\n\n\nclass bind(object): #pylint: disable=invalid-name\n \"\"\"GPU bind software emulataion runtime.\"\"\"\n def __init__(self, _, ext):\n self.ext = ext\n\n def __iter__(self):\n i = 0\n while i < self.ext:\n yield i\n i += 1\n\n\ndef allocate(shape, dtype='float32', scope='global'): #pylint: disable=unused-argument\n \"\"\"Allocate a buffer with given shape\n\n Parameters\n ----------\n shape: Tuple\n The shape of the tensor to be allocated\n dtype: string\n The data type of the tensor\n scope: string\n The storage scope of the tensor\n\n Returns\n -------\n tensor: numpy.array\n The tensor allocated\n \"\"\"\n return numpy.zeros(shape).astype(dtype)\n\n\ndef rsqrt(x):\n \"\"\"\n Computes reciprocal of square root of x element-wise\n\n Parameters\n ----------\n x: Tensor\n\n Returns\n -------\n res: Tensor\n The result of reciprocal of 
square root of x\n \"\"\"\n return numpy.ones_like(x) / numpy.sqrt(x)\n\n\ndef popcount(x):\n \"\"\"\n Count ones in the binary representation of number x\n\n Parameters\n ----------\n x: Integer\n The number to be counted\n\n Returns\n -------\n cnt: Integer\n The number of ones in the binary representation of number x\n \"\"\"\n cnt = 0\n while x:\n x -= x & -x\n cnt += 1\n return cnt\n\n\ndef sigmoid(x):\n \"\"\"\n Sigmoid function of x, aka 1/(1+exp(-x)).\n\n Parameters\n ----------\n x: a real number\n\n Returns\n -------\n res: a real number\n The result of sigmoid function\n \"\"\"\n return 1 / (1 + numpy.exp(-x))\n\n\ndef max_num_threads(allow_none=True):\n \"\"\"Get max number of threads for GPU targets.\"\"\"\n return target.Target.current(allow_none).max_num_threads\n\n\nHYBRID_GLOBALS = {\n 'unroll' : range,\n 'vectorize' : range,\n 'parallel' : range,\n 'const_range' : range,\n 'bind' : bind,\n 'allocate' : allocate,\n 'output_tensor' : allocate,\n 'sqrt' : numpy.sqrt,\n 'rsqrt' : rsqrt,\n 'log' : numpy.log,\n 'tanh' : numpy.tanh,\n 'power' : numpy.power,\n 'exp' : numpy.exp,\n 'sigmoid' : sigmoid,\n 'popcount' : popcount,\n 'round' : round,\n 'likely' : lambda cond: cond,\n 'uint8' : numpy.uint8,\n 'uint16' : numpy.uint16,\n 'uint32' : numpy.uint32,\n 'uint64' : numpy.uint64,\n 'int8' : numpy.int8,\n 'int16' : numpy.int16,\n 'int32' : numpy.int32,\n 'int64' : numpy.int64,\n 'float16' : numpy.float16,\n 'float32' : numpy.float32,\n 'float64' : numpy.float64,\n 'ceil_div' : lambda a, b: (a + b - 1) // b,\n 'max_num_threads': max_num_threads\n}\n\n\ndef _enter_hybrid_runtime(func):\n \"\"\"Put hybrid runtime variables into the global scope\"\"\"\n _globals = func.__globals__\n intersect = []\n for elem in list(HYBRID_GLOBALS.keys()):\n if elem in _globals.keys():\n intersect.append((elem, _globals[elem]))\n _globals[elem] = HYBRID_GLOBALS[elem]\n return intersect\n\n\ndef _restore_runtime(func, intersect):\n \"\"\"Rollback the modification caused by 
hybrid runtime\"\"\"\n _globals = func.__globals__\n for elem in list(HYBRID_GLOBALS.keys()):\n _globals.pop(elem)\n for k, v in intersect:\n _globals[k] = v\n", "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Arm Compute Library runtime tests.\"\"\"\n\nimport numpy as np\n\nimport tvm\nfrom tvm import relay\n\nfrom .infrastructure import skip_runtime_test, build_and_run, verify\nfrom .infrastructure import Device\n\n\ndef test_multiple_ops():\n \"\"\"\n Test multiple operators destined for ACL.\n The ACL runtime will expect these ops as 2 separate functions for\n the time being.\n \"\"\"\n Device.load(\"test_config.json\")\n\n if skip_runtime_test():\n return\n\n device = Device()\n np.random.seed(0)\n\n def get_model(input_shape, var_names):\n \"\"\"Return a model and any parameters it may have.\"\"\"\n a = relay.var(next(var_names), shape=input_shape, dtype=\"float32\")\n out = relay.reshape(a, (1, 1, 1000))\n out = relay.reshape(out, (1, 1000))\n return out\n\n inputs = {\n \"a\": tvm.nd.array(np.random.uniform(0, 1, (1, 1, 1, 1000)).astype(\"float32\"))\n }\n\n outputs = []\n for acl in [False, True]:\n func = get_model(inputs[\"a\"].shape, iter(inputs))\n outputs.append(build_and_run(func, inputs, 1, None, device,\n 
enable_acl=acl, acl_partitions=2)[0])\n verify(outputs, atol=0.002, rtol=0.01)\n\n\ndef test_heterogeneous():\n \"\"\"\n Test to check if offloading only supported operators works,\n while leaving unsupported operators computed via tvm.\n \"\"\"\n Device.load(\"test_config.json\")\n\n if skip_runtime_test():\n return\n\n device = Device()\n np.random.seed(0)\n\n def get_model(input_shape, var_names):\n \"\"\"Return a model and any parameters it may have.\"\"\"\n a = relay.var(next(var_names), shape=input_shape, dtype=\"float32\")\n out = relay.reshape(a, (1, 1, 1000))\n out = relay.sigmoid(out)\n out = relay.reshape(out, (1, 1000))\n return out\n\n inputs = {\n \"a\": tvm.nd.array(np.random.uniform(-127, 128, (1, 1, 1, 1000)).astype(\"float32\"))\n }\n\n outputs = []\n for acl in [False, True]:\n func = get_model(inputs[\"a\"].shape, iter(inputs))\n outputs.append(build_and_run(func, inputs, 1, None, device,\n enable_acl=acl, tvm_ops=1,\n acl_partitions=2)[0])\n verify(outputs, atol=0.002, rtol=0.01)\n\n\ndef test_multiple_runs():\n \"\"\"\n Test that multiple runs of an operator work.\n \"\"\"\n Device.load(\"test_config.json\")\n\n if skip_runtime_test():\n return\n\n device = Device()\n\n def get_model():\n a = relay.var(\"a\", shape=(1, 28, 28, 512), dtype=\"float32\")\n w = tvm.nd.array(np.ones((256, 1, 1, 512), dtype=\"float32\"))\n weights = relay.const(w, \"float32\")\n conv = relay.nn.conv2d(\n a,\n weights,\n kernel_size=(1, 1),\n data_layout=\"NHWC\",\n kernel_layout=\"OHWI\",\n strides=(1, 1),\n padding=(0, 0),\n dilation=(1, 1)\n )\n params = {\"w\": w}\n return conv, params\n\n inputs = {\n \"a\": tvm.nd.array(np.random.uniform(-127, 128, (1, 28, 28, 512)).astype(\"float32\")),\n }\n\n func, params = get_model()\n outputs = build_and_run(func, inputs, 1,\n params, device,\n enable_acl=True,\n no_runs=3)\n verify(outputs, atol=0.002, rtol=0.01)\n\n\nif __name__ == \"__main__\":\n test_multiple_ops()\n test_heterogeneous()\n test_multiple_runs()\n" ]
[ [ "numpy.random.uniform", "numpy.zeros" ], [ "numpy.exp", "numpy.ones_like", "numpy.sqrt", "numpy.zeros" ], [ "numpy.random.uniform", "numpy.random.seed", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
malteos/aspect-document-embeddings
[ "0836ea54a9192dbc2b01bb212c7521668bb398af" ]
[ "sentence_transformer_cli.py" ]
[ "#!/usr/bin/env python\nimport logging\nimport os\nimport sys\nfrom typing import Union\n\nimport fire\nimport pyarrow\nfrom sentence_transformers.models import Pooling, Transformer\nfrom smart_open import open\nfrom tqdm import tqdm\nfrom sentence_transformers import SentenceTransformer, losses\nimport torch\n\nfrom torch.utils.data import DataLoader\n\nfrom experiments import basic_logger_config\nfrom experiments.environment import get_env\nfrom experiments.sentence_transformers.dataset import DocumentPairSentencesDataset\nfrom experiments.sentence_transformers.nearest_neighbors_evaluator import NearestNeighborsEvaluator\nfrom experiments.utils import get_local_hf_dataset_path\n\n\nfrom datasets import load_dataset, Dataset\nfrom hf_datasets.paperswithcode_aspects import get_test_split, get_train_split\n\n\nlogging.basicConfig(**basic_logger_config)\nlogger = logging.getLogger(__name__)\nenv = get_env()\n\n\ndef train(\n model_name_or_path: str,\n hf_dataset: str,\n aspect: str,\n fold: Union[int, str],\n output_path: str,\n train_epochs: int = 3,\n train_batch_size: int = 25,\n eval_batch_size: int = 32,\n evaluation_steps: int = 5000,\n train_on_test: bool = False,\n loss: str = 'multiple_negatives_ranking',\n override: bool = False):\n \"\"\"\n\n # $MODEL_NAME $HF_DATASET $ASPECT $FOLD $OUTPUT_DIR --train_epochs=3 --train_batch_size=$TRAIN_BATCH_SIZE --eval_batch_size=$EVAL_BATCH_SIZE\n\n Run with:\n $ export CUDA_VISIBLE_DEVICES=1\n $ ./sentence_transformer_cli.py train scibert-scivocab-uncased paperswithcode_task_docs 1 ./output/st_scibert/1 --train_epochs=3 --train_batch_size=25 --eval_batch_size=32\n\n\n :param loss: Training loss function (choices: multiple_negatives_ranking, cosine)\n :param train_on_test: If True, joint training on train and test set (validation disabled)\n :param aspect:\n :param evaluation_steps:\n :param train_epochs:\n :param model_name_or_path:\n :param hf_dataset:\n :param fold:\n :param output_path:\n :param train_batch_size:\n 
:param eval_batch_size:\n :param override:\n :return:\n \"\"\"\n\n top_ks = [5,10,25,50]\n # cuda_device = -1\n\n # hf_dataset = 'paperswithcode_task_docs'\n # model_name_or_path = 'scibert-scivocab-uncased'\n # fold = 1\n max_token_length = 336 # ssee pwc_token_stats.ipynb\n nlp_cache_dir = './data/nlp_cache'\n\n # train_batch_size = 25\n # eval_batch_size = 32\n # override = False\n\n # output_path = './output/pwc_task_st/1/sci-bert'\n # output_path = os.path.join(output_path, str(fold), model_name_or_path) # output/1/sci-bert\n\n if os.path.exists(output_path) and not override:\n logger.error(f'Stop. Output path exists already: {output_path}')\n sys.exit(1)\n\n # if cuda_device >= 0:\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(cuda_device)\n\n # device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # Model path from env\n if not os.path.exists(model_name_or_path) and os.path.exists(\n os.path.join(env['bert_dir'], model_name_or_path)):\n model_name_or_path = os.path.join(env['bert_dir'], model_name_or_path)\n\n word_embedding_model = Transformer(model_name_or_path, max_seq_length=max_token_length)\n pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())\n\n model = SentenceTransformer(modules=[word_embedding_model, pooling_model])\n # tokenizer = BertTokenizer.from_pretrained(model_name_or_path)\n\n # dataset\n docs_ds = load_dataset(get_local_hf_dataset_path(hf_dataset),\n name='docs',\n cache_dir=nlp_cache_dir,\n split='docs')\n train_ds = load_dataset(get_local_hf_dataset_path(hf_dataset),\n name='relations',\n cache_dir=nlp_cache_dir,\n split=get_train_split(aspect, fold))\n test_ds = load_dataset(get_local_hf_dataset_path(hf_dataset),\n name='relations',\n cache_dir=nlp_cache_dir,\n split=get_test_split(aspect, fold))\n\n # filter for positive labels only\n train_ds = train_ds.filter(lambda row: row['label'] == 'y')\n\n logger.info(f'After filtering: {len(train_ds):,}')\n\n # joint training on train and 
test?\n if train_on_test:\n #\n # import pyarrow\n # from datasets.arrow_dataset import Dataset\n #\n # full_ds_table = pyarrow.concat_tables([train_ds.data, test_ds.data])\n # full_ds = Dataset(arrow_table=full_ds_table)\n raise NotImplementedError('TODO Evaluator')\n else:\n # standard training on test only\n train_sds = DocumentPairSentencesDataset(docs_ds, train_ds, model, max_length=max_token_length, forced_length=0)\n train_sds.tokenize_all_docs()\n\n evaluator = NearestNeighborsEvaluator(model, docs_ds, test_ds, top_ks=top_ks, batch_size=eval_batch_size, show_progress_bar=True)\n\n if loss == 'cosine':\n train_loss = losses.CosineSimilarityLoss(model)\n elif loss == 'multiple_negatives_ranking':\n # A nice advantage of MultipleNegativesRankingLoss is that it only requires positive pairs\n # https://github.com/UKPLab/sentence-transformers/tree/master/examples/training/quora_duplicate_questions\n train_loss = losses.MultipleNegativesRankingLoss(model)\n else:\n raise ValueError(f'Unsupported loss function: {loss}')\n\n train_dl = DataLoader(train_sds, shuffle=True, batch_size=train_batch_size)\n\n # Training\n model.fit(\n train_objectives=[(train_dl, train_loss)],\n epochs=train_epochs, # try 1-4\n warmup_steps=100,\n evaluator=evaluator,\n evaluation_steps=evaluation_steps, # increase to 5000 (full dataset => 20k steps)\n output_path=output_path,\n output_path_ignore_not_empty=True\n )\n\n logger.info('Training done')\n\n\ndef build_vectors(\n st_output_path: str,\n hf_dataset: str,\n aspect: str,\n fold: Union[int, str],\n include_all_docs: bool = False,\n override: bool = False\n ):\n \"\"\"\n\n :param override:\n :param include_all_docs: Generate also vectors for samples from training data\n :param st_output_path: Path to Sentence Transformer model\n :param hf_dataset: Huggingface dataset path or name\n :param aspect:\n :param fold:\n :return:\n \"\"\"\n max_token_length = 336 # ssee pwc_token_stats.ipynb\n nlp_cache_dir = './data/nlp_cache'\n\n out_fn = 
'pwc_id2vec__all_docs.w2v.txt' if include_all_docs else 'pwc_id2vec.w2v.txt'\n out_fp = os.path.join(st_output_path, out_fn)\n\n if not os.path.exists(st_output_path):\n logger.error(f'Sentence Transformer directory does not exist: {st_output_path}')\n return\n\n if os.path.exists(out_fp) and not override:\n logger.error(f'Output path exists already and override is disabled: {out_fp}')\n return\n\n # Inference for best model\n best_model = SentenceTransformer(st_output_path)\n best_model.get_sentence_embedding_dimension()\n\n test_ds = load_dataset(get_local_hf_dataset_path(hf_dataset),\n name='relations',\n cache_dir=nlp_cache_dir,\n split=get_test_split(aspect, fold))\n\n docs_ds = load_dataset(get_local_hf_dataset_path(hf_dataset),\n name='docs',\n cache_dir=nlp_cache_dir,\n split='docs')\n test_sds = DocumentPairSentencesDataset(docs_ds, test_ds, best_model)\n\n if include_all_docs:\n # use all document ids\n input_paper_ids = set(docs_ds['paper_id'])\n logger.info(f'All documents in corpus: {len(input_paper_ids):,}')\n\n else:\n # generate vectors from unique test documents only\n input_paper_ids = set(test_ds['from_paper_id']).union(set(test_ds['to_paper_id']))\n\n with open(out_fp, 'w') as f:\n # header\n f.write(f'{len(input_paper_ids)} {best_model.get_sentence_embedding_dimension()}\\n')\n\n # body\n for paper_id in tqdm(input_paper_ids, desc='Inference'):\n vec = [str(v) for v in best_model.encode(test_sds.get_text_from_doc(paper_id), show_progress_bar=False)]\n\n assert len(vec) == best_model.get_sentence_embedding_dimension()\n\n vec_str = ' '.join(vec)\n line = f'{paper_id} {vec_str}\\n'\n f.write(line)\n # break\n logger.info(f'Encoded {len(input_paper_ids):,} into {out_fp}')\n\n\nif __name__ == '__main__':\n fire.Fire()\n sys.exit(0)\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
drunkcoding/huggingface-utils
[ "4baad306857c357d94607076c6ab0cb5d6350cbe" ]
[ "hfutils/monte_carlo.py" ]
[ "import numpy as np\nfrom tqdm import tqdm\n\ndef monte_carlo_execute(func, bounds, dtype, n=100):\n # print(bounds)\n rnd = [np.random.uniform(b_l, b_h+0.01*b_h, n).tolist() for b_l, b_h in bounds]\n rnd_choices = [\n [rnd[i][np.random.randint(0, n)] for i in range(len(bounds))]\n for _ in range(n)\n ]\n\n return np.array(rnd_choices), np.array([func(r) for r in rnd_choices], dtype=dtype)\n\ndef monte_carlo_bounds(func, bounds, dtype, n=100, maxiter=100, tops=10, decay=0.1):\n hist_func_out = None\n hist_func_in = None\n for _ in tqdm(range(maxiter), desc=\"MC Search\"):\n func_in, func_out = monte_carlo_execute(func, bounds, dtype, n)\n # print('func_in', func_in)\n # print('func_out', func_out)\n if hist_func_out is None:\n hist_func_out = func_out\n hist_func_in = func_in\n else:\n hist_func_out = np.append(hist_func_out, func_out, axis=0)\n hist_func_in = np.append(hist_func_in, func_in, axis=0)\n \n idx = np.argpartition(hist_func_out, -tops, order=[d[0] for d in dtype])[-tops:]\n # print(\"idx\", idx)\n bounds_sample = hist_func_in[idx]\n # print(\"bounds_sample\", bounds_sample)\n # print(\"func_out\", hist_func_out[idx])\n \n new_bounds = list(zip(np.min(bounds_sample, axis=0), np.max(bounds_sample, axis=0)))\n # print(new_bounds, func_in)\n # assert len(new_bounds) == len(new_bounds)\n bounds = new_bounds\n\n hist_func_out = hist_func_out[idx]\n hist_func_in = hist_func_in[idx]\n\n # print('hist_func_out', hist_func_out)\n # print('hist_func_in', hist_func_in)\n\n if np.all(np.round(hist_func_in, 3) == np.mean(np.round(hist_func_in, 3), axis=0)):\n break\n\n tops = max(int(tops * (1-decay)), 1)\n\n return bounds\n" ]
[ [ "numpy.min", "numpy.round", "numpy.max", "numpy.append", "numpy.argpartition", "numpy.random.uniform", "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhangyx96/MAPPO
[ "b7535092d5e8f7b0de108191a9229dfa01e1628c" ]
[ "a2c_ppo_acktr/model_sp.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom a2c_ppo_acktr.distributions import Bernoulli, Categorical, DiagGaussian\nfrom a2c_ppo_acktr.utils import init\nimport time\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\nclass Policy(nn.Module):\n def __init__(self, obs_shape, action_space, agent_i, \n agent_num,base=None, dist=None, base_kwargs=None):\n super(Policy, self).__init__()\n self.agent_num = agent_num\n if base_kwargs is None:\n base_kwargs = {}\n if base is None:\n if len(obs_shape) == 3:\n base = CNNBase\n self.base = base(obs_shape[0], agent_num, **base_kwargs)\n elif len(obs_shape) == 1:\n base = MLPBase\n #base = ATTBase\n self.base = base(obs_shape[0], agent_num, **base_kwargs)\n else:\n raise NotImplementedError\n else:\n self.base = base\n # self.base = base(obs_shape[0], **base_kwargs)\n # actor输入维度num_state,critic输入num_state*agent_num\n\n # self.base = base(obs_shape[0], agent_num, agent_i, **base_kwargs)\n\n #import pdb; pdb.set_trace()\n self.agent_i = agent_i\n\n if dist is None:\n if action_space.__class__.__name__ == \"Discrete\":\n num_outputs = action_space.n\n self.dist = Categorical(self.base.output_size, num_outputs)\n elif action_space.__class__.__name__ == \"Box\":\n num_outputs = action_space.shape[0]\n self.dist = DiagGaussian(self.base.output_size, num_outputs)\n elif action_space.__class__.__name__ == \"MultiBinary\":\n num_outputs = action_space.shape[0]\n self.dist = Bernoulli(self.base.output_size, num_outputs)\n else:\n raise NotImplementedError\n else:\n self.dist = dist\n\n @property\n def is_recurrent(self):\n return self.base.is_recurrent\n\n @property\n def recurrent_hidden_state_size(self):\n \"\"\"Size of rnn_hx.\"\"\"\n return self.base.recurrent_hidden_state_size\n\n def forward(self, inputs, rnn_hxs, masks):\n raise NotImplementedError\n\n def act(self, share_inputs, inputs, agent_num, rnn_hxs, masks, deterministic=False):\n 
value, actor_features, rnn_hxs = self.base(share_inputs, inputs, self.agent_i, rnn_hxs, masks, agent_num)\n dist = self.dist(actor_features)\n if deterministic:\n action = dist.mode()\n else:\n action = dist.sample()\n\n action_log_probs = dist.log_probs(action)\n dist_entropy = dist.entropy().mean()\n\n return value, action, action_log_probs, rnn_hxs\n \n def update_num(self,agent_num):\n self.agent_num = agent_num\n\n def get_value(self, share_inputs, inputs, agent_num, rnn_hxs, masks):\n value, _, _ = self.base(share_inputs, inputs, self.agent_i, rnn_hxs, masks,agent_num=self.agent_num)\n return value\n\n def evaluate_actions(self, share_inputs, inputs, agent_num, rnn_hxs, masks, action):\n value, actor_features, rnn_hxs = self.base(share_inputs, inputs, self.agent_i, rnn_hxs, masks,agent_num=self.agent_num)\n dist = self.dist(actor_features)\n\n action_log_probs = dist.log_probs(action) \n dist_entropy = dist.entropy().mean()\n\n return value, action_log_probs, dist_entropy, rnn_hxs\n\n\nclass NNBase(nn.Module):\n def __init__(self, recurrent, recurrent_input_size, hidden_size):\n super(NNBase, self).__init__()\n\n self._hidden_size = hidden_size\n self._recurrent = recurrent\n\n if recurrent:\n self.gru = nn.GRU(recurrent_input_size, hidden_size)\n for name, param in self.gru.named_parameters():\n if 'bias' in name:\n nn.init.constant_(param, 0)\n elif 'weight' in name:\n nn.init.orthogonal_(param)\n\n @property\n def is_recurrent(self):\n return self._recurrent\n\n @property\n def recurrent_hidden_state_size(self):\n if self._recurrent:\n return self._hidden_size\n return 1\n\n @property\n def output_size(self):\n return self._hidden_size\n\n def _forward_gru(self, x, hxs, masks):\n if x.size(0) == hxs.size(0):\n x, hxs = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))\n x = x.squeeze(0)\n hxs = hxs.squeeze(0)\n else:\n # x is a (T, N, -1) tensor that has been flatten to (T * N, -1)\n N = hxs.size(0)\n T = int(x.size(0) / N)\n\n # unflatten\n x = 
x.view(T, N, x.size(1))\n\n # Same deal with masks\n masks = masks.view(T, N)\n\n # Let's figure out which steps in the sequence have a zero for any agent\n # We will always assume t=0 has a zero in it as that makes the logic cleaner\n has_zeros = ((masks[1:] == 0.0) \\\n .any(dim=-1)\n .nonzero()\n .squeeze()\n .cpu())\n\n # +1 to correct the masks[1:]\n if has_zeros.dim() == 0:\n # Deal with scalar\n has_zeros = [has_zeros.item() + 1]\n else:\n has_zeros = (has_zeros + 1).numpy().tolist()\n\n # add t=0 and t=T to the list\n has_zeros = [0] + has_zeros + [T]\n\n hxs = hxs.unsqueeze(0)\n outputs = []\n for i in range(len(has_zeros) - 1):\n # We can now process steps that don't have any zeros in masks together!\n # This is much faster\n start_idx = has_zeros[i]\n end_idx = has_zeros[i + 1]\n\n rnn_scores, hxs = self.gru(\n x[start_idx:end_idx],\n hxs * masks[start_idx].view(1, -1, 1))\n\n outputs.append(rnn_scores)\n\n # assert len(outputs) == T\n # x is a (T, N, -1) tensor\n x = torch.cat(outputs, dim=0)\n # flatten\n x = x.view(T * N, -1)\n hxs = hxs.squeeze(0)\n\n return x, hxs\n\n\nclass CNNBase(NNBase):\n def __init__(self, num_inputs, recurrent=False, hidden_size=512):\n super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)\n\n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0), nn.init.calculate_gain('relu'))\n\n self.main = nn.Sequential(\n init_(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),\n init_(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),\n init_(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(), Flatten(),\n init_(nn.Linear(32 * 7 * 7, hidden_size)), nn.ReLU())\n\n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0))\n\n self.critic_linear = init_(nn.Linear(hidden_size, 1))\n\n self.train()\n\n def forward(self, inputs, rnn_hxs, masks):\n x = self.main(inputs / 255.0)\n\n if self.is_recurrent:\n x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)\n\n return self.critic_linear(x), 
x, rnn_hxs\n\nclass MLPBase(NNBase):\n def __init__(self, num_inputs, agent_num, recurrent=False, assign_id=False, hidden_size=100):\n super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)\n\n if recurrent:\n num_inputs = hidden_size\n\n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0), np.sqrt(2))\n\n if assign_id:\n self.actor = nn.Sequential(\n init_(nn.Linear(num_inputs + agent_num, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())\n\n self.critic = nn.Sequential(\n init_(nn.Linear(num_inputs * agent_num + agent_num, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())\n else:\n self.actor = nn.Sequential(\n init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())\n\n self.critic = nn.Sequential(\n init_(nn.Linear(num_inputs * agent_num, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())\n\n self.critic_linear = init_(nn.Linear(hidden_size, 1))\n\n self.train()\n\n def forward(self, share_inputs, inputs, agent_i, rnn_hxs, masks):\n #import pdb; pdb.set_trace()\n share_obs = share_inputs\n obs = inputs\n\n #if self.is_recurrent:\n # x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)\n\n hidden_critic = self.critic(share_inputs)\n hidden_actor = self.actor(inputs)\n \n return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs\n\n\nclass ObsEncoder(nn.Module):\n def __init__(self, hidden_size=100):\n super(ObsEncoder, self).__init__()\n \n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0), np.sqrt(2))\n ''' \n self.self_encoder = nn.Linear(4, hidden_size)\n self.other_agent_encoder = nn.Linear(2, hidden_size)\n self.landmark_encoder = nn.Linear(2, hidden_size)\n self.agent_correlation_mat = nn.Parameter(torch.FloatTensor(hidden_size,hidden_size),requires_grad=True)\n self.agent_correlation_mat.data.fill_(0.25)\n 
self.landmark_correlation_mat = nn.Parameter(torch.FloatTensor(hidden_size,hidden_size),requires_grad=True)\n self.landmark_correlation_mat.data.fill_(0.25)\n self.fc = nn.Linear(hidden_size, hidden_size)\n self.encoder_linear = nn.Linear(3*hidden_size, hidden_size)\n '''\n self.self_encoder = nn.Sequential(\n init_(nn.Linear(4, hidden_size)), nn.Tanh())\n self.other_agent_encoder = nn.Sequential(\n init_(nn.Linear(2, hidden_size)), nn.Tanh())\n self.landmark_encoder = nn.Sequential(\n init_(nn.Linear(2, hidden_size)), nn.Tanh())\n self.agent_correlation_mat = nn.Parameter(torch.FloatTensor(hidden_size,hidden_size),requires_grad=True)\n nn.init.orthogonal_(self.agent_correlation_mat.data, gain=1)\n self.landmark_correlation_mat = nn.Parameter(torch.FloatTensor(hidden_size,hidden_size),requires_grad=True)\n nn.init.orthogonal_(self.landmark_correlation_mat.data, gain=1)\n self.fc = nn.Sequential(\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())\n #self.encoder_linear = init_(nn.Linear(3*hidden_size, hidden_size))\n # 加上激活函数 效果会有比较大的提升 虽然还是达不到标准\n self.encoder_linear = nn.Sequential(\n init_(nn.Linear(hidden_size * 3, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())\n\n self.layer_norm_1 = nn.LayerNorm(hidden_size)\n self.layer_norm_2 = nn.LayerNorm(hidden_size)\n\n # agent_num需要手动设置一下\n def forward(self, inputs, agent_num):\n batch_size = inputs.shape[0]\n obs_dim = inputs.shape[-1]\n landmark_num = int((obs_dim-4)/2)-2*(agent_num-1)\n #landmark_num = int((obs_dim-4-4*(agent_num-1))/3)\n #import pdb; pdb.set_trace()\n self_emb = self.self_encoder(inputs[:, :4])\n other_agent_emb = []\n beta_agent = []\n landmark_emb = []\n beta_landmark = []\n #start = time.time()\n\n agent_beta_ij = torch.matmul(self_emb.view(batch_size,1,-1), self.agent_correlation_mat)\n landmark_beta_ij = torch.matmul(self_emb.view(batch_size,1,-1), self.landmark_correlation_mat) \n\n for i in range(agent_num - 1):\n other_agent_emb.append(inputs[:, 
4+2*landmark_num+2*i:4+2*landmark_num+2*(i+1)])\n for i in range(landmark_num):\n landmark_emb.append(inputs[:, 4+2*i:4+2*(i+1)])\n other_agent_emb = torch.stack(other_agent_emb,dim = 1) #(batch_size,n_agents-1,eb_dim)\n other_agent_emb = self.other_agent_encoder(other_agent_emb)\n #beta_agent = torch.stack(beta_agent,dim = 1) \n beta_agent = torch.matmul(agent_beta_ij, other_agent_emb.permute(0,2,1)).squeeze(1)\n landmark_emb = torch.stack(landmark_emb,dim = 1) #(batch_size,n_agents-1,eb_dim)\n landmark_emb = self.landmark_encoder(landmark_emb)\n #beta_landmark = torch.stack(beta_landmark,dim = 1) \n beta_landmark = torch.matmul(landmark_beta_ij, landmark_emb.permute(0,2,1)).squeeze(1)\n alpha_agent = F.softmax(beta_agent,dim = 1).unsqueeze(2) \n alpha_landmark = F.softmax(beta_landmark,dim = 1).unsqueeze(2)\n other_agent_vi = torch.mul(alpha_agent,other_agent_emb)\n # other_agent_vi = self.layer_norm_1(other_agent_vi)\n other_agent_vi = torch.sum(other_agent_vi,dim=1)\n landmark_vi = torch.mul(alpha_landmark,landmark_emb)\n # landmark_vi = self.layer_norm_2(landmark_vi)\n landmark_vi = torch.sum(landmark_vi,dim=1)\n #end = time.time()\n #print(\"time: \", end-start)\n #import pdb; pdb.set_trace()\n gi = self.fc(self_emb)\n f = self.encoder_linear(torch.cat([gi, other_agent_vi, landmark_vi], dim=1))\n return f\n\nclass ATTBase(NNBase):\n def __init__(self, num_inputs, recurrent=False, assign_id=False, hidden_size=100):\n super(ATTBase, self).__init__(recurrent, num_inputs, hidden_size)\n if recurrent:\n num_inputs = hidden_size\n init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.\n constant_(x, 0), np.sqrt(2)) \n self.obs_encoder_1 = ObsEncoder(hidden_size=hidden_size)\n #self.encoder = init_(nn.Linear(num_inputs, hidden_size))\n self.obs_encoder_2 = ObsEncoder(hidden_size=hidden_size)\n\n self.correlation_mat = nn.Parameter(torch.FloatTensor(hidden_size,hidden_size),requires_grad=True)\n #self.correlation_mat.data.fill_(0.25)\n 
nn.init.orthogonal_(self.correlation_mat.data, gain=1)\n\n #self.fc = init_(nn.Linear(hidden_size, hidden_size))\n self.fc = nn.Sequential(\n init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())\n\n self.critic_linear = nn.Sequential(\n init_(nn.Linear(hidden_size * 2, hidden_size)), nn.Tanh(),\n init_(nn.Linear(hidden_size, 1)))\n self.train()\n\n def forward(self, share_inputs, inputs, agent_i, rnn_hxs, masks, agent_num):\n \"\"\"\n share_inputs: [batch_size, obs_dim*agent_num]\n inputs: [batch_size, obs_dim]\n \"\"\"\n batch_size = inputs.shape[0]\n obs_dim = inputs.shape[-1]\n #start = time.time()\n hidden_actor = self.obs_encoder_1(inputs, agent_num)\n f_ii = self.obs_encoder_2(inputs, agent_num)\n obs_beta_ij = torch.matmul(f_ii.view(batch_size,1,-1), self.correlation_mat)\n obs_encoder = []\n beta = []\n for i in range(agent_num):\n if i != agent_i:\n f_ij = self.obs_encoder_2(share_inputs[:, i*obs_dim:(i+1)*obs_dim], agent_num) #[batch_size, hidden_size]\n obs_encoder.append(f_ij)\n obs_encoder = torch.stack(obs_encoder,dim = 1) #(batch_size,n_agents-1,eb_dim)\n beta = torch.matmul(obs_beta_ij, obs_encoder.permute(0,2,1)).squeeze(1)\n alpha = F.softmax(beta,dim = 1).unsqueeze(2)\n vi = torch.mul(alpha,obs_encoder)\n vi = torch.sum(vi,dim = 1)\n gi = self.fc(f_ii)\n value = self.critic_linear(torch.cat([gi, vi], dim=1))\n\n return value, hidden_actor, rnn_hxs\n\n " ]
[ [ "torch.nn.init.calculate_gain", "torch.nn.functional.softmax", "numpy.sqrt", "torch.cat", "torch.nn.init.constant_", "torch.nn.GRU", "torch.sum", "torch.nn.Conv2d", "torch.nn.LayerNorm", "torch.nn.Tanh", "torch.nn.Linear", "torch.mul", "torch.FloatTensor", "torch.nn.init.orthogonal_", "torch.stack", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yuehaowang/SoGCN
[ "bd65b2d8667791b79d6174a1dd2ac13b7bd50db5" ]
[ "main_superpixels_graph_classification.py" ]
[ "\n\n\n\n\n\"\"\"\n IMPORTING LIBS\n\"\"\"\nimport dgl\n\nimport numpy as np\nimport os\nimport socket\nimport time\nimport random\nimport glob\nimport argparse, json\nimport pickle\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\n\nclass DotDict(dict):\n def __init__(self, **kwds):\n self.update(kwds)\n self.__dict__ = self\n\n\n\n\n\n\n\"\"\"\n IMPORTING CUSTOM MODULES/METHODS\n\"\"\"\nfrom nets.superpixels_graph_classification.load_net import gnn_model # import all GNNS\nfrom data.data import LoadData # import dataset\nfrom train.train_superpixels_graph_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network # import train functions for GCNs\nfrom utils.result import load_model\n\n\n\n\"\"\"\n GPU Setup\n\"\"\"\ndef gpu_setup(use_gpu, gpu_id):\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id) \n\n if torch.cuda.is_available() and use_gpu:\n print('cuda available with GPU:',torch.cuda.get_device_name(0))\n device = torch.device(\"cuda\")\n else:\n print('cuda not available')\n device = torch.device(\"cpu\")\n return device\n\n\n\"\"\"\n Random Seed Setup\n\"\"\"\ndef set_random_seed(seed, device=None):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if device and device.type == 'cuda':\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n\n\"\"\"\n VIEWING MODEL CONFIG AND PARAMS\n\"\"\"\ndef view_model_param(MODEL_NAME, net_params, verbose=False):\n model = gnn_model(MODEL_NAME, net_params)\n total_param = 0\n print(\"MODEL DETAILS:\\n\")\n #print(model)\n for param in model.parameters():\n # print(param.data.size())\n total_param += np.prod(list(param.data.size()))\n print('MODEL/Total parameters:', MODEL_NAME, 
total_param)\n\n if verbose:\n print('\\n== Net Params:')\n print(net_params)\n print('\\n== Model Structure:')\n print(model)\n \n return total_param\n\n\n\"\"\"\n TESTING CODE\n\"\"\"\n\ndef test_pipeline(MODEL_NAME, dataset, device, verbose, out_dir):\n # Load models\n print('\\n>> Loading models...')\n model_ls = load_model(out_dir, device=device, only_best=False, verbose=verbose,\n filter=lambda df: df[df['model'] == MODEL_NAME][df['dataset'] == dataset.name])\n\n\n # Preparing dataset\n print('\\n>> Preparing data...')\n if MODEL_NAME in ['GCN']:\n if model_ls[0]['net_params']['self_loop']:\n print(\"[!] Adding graph self-loops for GCN/GAT models (central node trick).\")\n dataset._add_self_loops()\n if MODEL_NAME in ['SoGCN']:\n if model_ls[0]['net_params']['undirected']:\n print(\"[!] Converting directed graphs to undirected graphs for SoGCN model.\")\n dataset._to_undirected()\n \n testset = dataset.test\n print(\"Test Graphs: \", len(testset))\n # Batching test data\n test_loader = DataLoader(testset, batch_size=model_ls[0]['net_params']['batch_size'], shuffle=False, drop_last=False, collate_fn=dataset.collate)\n\n\n # Test models\n print('\\n>> Testing models...')\n acc_ls = []\n for i, item in enumerate(model_ls):\n model = item['model']\n net_params = item['net_params']\n\n # Set random seed\n set_random_seed(item['seed'], device)\n\n # Evaluate model\n _, test_acc = evaluate_network(model, device, test_loader, 0)\n acc_ls.append(test_acc)\n\n if verbose:\n print('\\nModel #%s' % i)\n print('Test Accuracy: %s' % acc_ls[-1])\n\n print('\\n')\n print('AVG Test Accuracy: %s, s.d.: %s' % (np.mean(acc_ls), np.std(acc_ls)))\n\n\n\"\"\"\n TRAINING CODE\n\"\"\"\n\ndef train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):\n t0 = time.time()\n per_epoch_time = []\n \n DATASET_NAME = dataset.name\n \n if MODEL_NAME in ['GCN']:\n if net_params['self_loop']:\n print(\"[!] 
Adding graph self-loops for GCN/GAT models (central node trick).\")\n dataset._add_self_loops()\n if MODEL_NAME in ['SoGCN']:\n if net_params['undirected']:\n print(\"[!] Converting directed graphs to undirected graphs for SoGCN model.\")\n dataset._to_undirected()\n \n trainset, valset, testset = dataset.train, dataset.val, dataset.test\n \n root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs\n device = net_params['device']\n \n # Write the network and optimization hyper-parameters in folder config/\n with open(write_config_file + '.txt', 'w') as f:\n f.write(\"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n\\nTotal Parameters: {}\\n\\n\"\"\".format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))\n \n log_dir = os.path.join(root_log_dir, \"RUN_\" + str(0))\n writer = SummaryWriter(log_dir=log_dir)\n \n print(\"Training Graphs: \", len(trainset))\n print(\"Validation Graphs: \", len(valset))\n print(\"Test Graphs: \", len(testset))\n print(\"Number of Classes: \", net_params['n_classes'])\n\n model = gnn_model(MODEL_NAME, net_params)\n model = model.to(device)\n\n optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',\n factor=params['lr_reduce_factor'],\n patience=params['lr_schedule_patience'],\n verbose=True)\n \n epoch_train_losses, epoch_val_losses = [], []\n epoch_train_accs, epoch_val_accs = [], [] \n \n # Data loaders\n train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=False, collate_fn=dataset.collate)\n val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=False, collate_fn=dataset.collate)\n test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=False, collate_fn=dataset.collate)\n\n # At any point you can hit Ctrl + C to break out of training early.\n 
try:\n with tqdm(range(params['epochs']), ascii=True) as t:\n for epoch in t:\n\n t.set_description('Epoch %d' % epoch)\n\n start = time.time()\n\n epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)\n\n epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)\n _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch) \n \n epoch_train_losses.append(epoch_train_loss)\n epoch_val_losses.append(epoch_val_loss)\n epoch_train_accs.append(epoch_train_acc)\n epoch_val_accs.append(epoch_val_acc)\n\n writer.add_scalar('train/_loss', epoch_train_loss, epoch)\n writer.add_scalar('val/_loss', epoch_val_loss, epoch)\n writer.add_scalar('train/_acc', epoch_train_acc, epoch)\n writer.add_scalar('val/_acc', epoch_val_acc, epoch)\n writer.add_scalar('test/_acc', epoch_test_acc, epoch)\n writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)\n\n \n t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],\n train_loss=epoch_train_loss, val_loss=epoch_val_loss,\n train_acc=epoch_train_acc, val_acc=epoch_val_acc,\n test_acc=epoch_test_acc) \n\n per_epoch_time.append(time.time()-start)\n\n # Saving checkpoint\n ckpt_dir = os.path.join(root_ckpt_dir, \"RUN_\")\n if not os.path.exists(ckpt_dir):\n os.makedirs(ckpt_dir)\n torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + \"/epoch_\" + str(epoch)))\n\n files = glob.glob(ckpt_dir + '/*.pkl')\n for file in files:\n epoch_nb = file.split('_')[-1]\n epoch_nb = int(epoch_nb.split('.')[0])\n if epoch_nb < epoch-1:\n os.remove(file)\n\n scheduler.step(epoch_val_loss)\n\n if optimizer.param_groups[0]['lr'] < params['min_lr']:\n print(\"\\n!! 
LR EQUAL TO MIN LR SET.\")\n break\n \n # Stop training after params['max_time'] hours\n if time.time()-t0 > params['max_time']*3600:\n print('-' * 89)\n print(\"Max_time for training elapsed {:.2f} hours, so stopping\".format(params['max_time']))\n break\n \n except KeyboardInterrupt:\n print('-' * 89)\n print('Exiting from training early because of KeyboardInterrupt')\n \n _, test_acc = evaluate_network(model, device, test_loader, epoch)\n _, train_acc = evaluate_network(model, device, train_loader, epoch)\n print(\"Test Accuracy: {:.4f}\".format(test_acc))\n print(\"Train Accuracy: {:.4f}\".format(train_acc))\n print(\"Convergence Time (Epochs): {:.4f}\".format(epoch))\n print(\"TOTAL TIME TAKEN: {:.4f}s\".format(time.time()-t0))\n print(\"AVG TIME PER EPOCH: {:.4f}s\".format(np.mean(per_epoch_time)))\n\n writer.close()\n\n \"\"\"\n Write the results in out_dir/results folder\n \"\"\"\n with open(write_file_name + '.txt', 'w') as f:\n f.write(\"\"\"Dataset: {},\\nModel: {}\\n\\nparams={}\\n\\nnet_params={}\\n\\n{}\\n\\nTotal Parameters: {}\\n\\n\n FINAL RESULTS\\nTEST ACCURACY: {:.4f}\\nTRAIN ACCURACY: {:.4f}\\n\\n\n Convergence Time (Epochs): {:.4f}\\nTotal Time Taken: {:.4f} hrs\\nAverage Time Per Epoch: {:.4f} s\\n\\n\\n\"\"\"\\\n .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],\n np.mean(np.array(test_acc))*100, np.mean(np.array(train_acc))*100, epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))\n \n\n\n\n\ndef main():\n\n start_time_str = time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')\n\n \"\"\"\n USER CONTROLS\n \"\"\"\n \n \n parser = argparse.ArgumentParser()\n parser.add_argument('--config', help=\"Please give a config.json file with training/model/data/param details\")\n parser.add_argument('--gpu_id', help=\"Please give a value for gpu id\")\n parser.add_argument('--model', help=\"Please give a value for model name\")\n parser.add_argument('--dataset', help=\"Please give a value for dataset name\")\n 
parser.add_argument('--out_dir', help=\"Please give a value for out_dir\")\n parser.add_argument('--seed', help=\"Please give a value for seed\")\n parser.add_argument('--epochs', help=\"Please give a value for epochs\")\n parser.add_argument('--batch_size', help=\"Please give a value for batch_size\")\n parser.add_argument('--init_lr', help=\"Please give a value for init_lr\")\n parser.add_argument('--lr_reduce_factor', help=\"Please give a value for lr_reduce_factor\")\n parser.add_argument('--lr_schedule_patience', help=\"Please give a value for lr_schedule_patience\")\n parser.add_argument('--min_lr', help=\"Please give a value for min_lr\")\n parser.add_argument('--weight_decay', help=\"Please give a value for weight_decay\")\n parser.add_argument('--print_epoch_interval', help=\"Please give a value for print_epoch_interval\") \n parser.add_argument('--L', help=\"Please give a value for L\")\n parser.add_argument('--hidden_dim', help=\"Please give a value for hidden_dim\")\n parser.add_argument('--out_dim', help=\"Please give a value for out_dim\")\n parser.add_argument('--residual', help=\"Please give a value for residual\")\n parser.add_argument('--readout', help=\"Please give a value for readout\")\n parser.add_argument('--gated', help=\"Please give a value for gated\")\n parser.add_argument('--in_feat_dropout', help=\"Please give a value for in_feat_dropout\")\n parser.add_argument('--dropout', help=\"Please give a value for dropout\")\n parser.add_argument('--batch_norm', help=\"Please give a value for batch_norm\")\n parser.add_argument('--self_loop', help=\"Please give a value for self_loop\")\n parser.add_argument('--max_time', help=\"Please give a value for max_time\")\n parser.add_argument('--verbose', help=\"Please give a value for verbose\")\n parser.add_argument('--only_view_params', help=\"Please give a value for only_view_params\")\n parser.add_argument('--undirected', help=\"Please give a value for undirected\")\n 
parser.add_argument('--max_order', help=\"Please give a value for max_order\")\n parser.add_argument('--gru', help=\"Please give a value for gru\")\n parser.add_argument('--activation', help=\"Please give a value for activation\")\n parser.add_argument('--test', help=\"Please give a value for test\")\n args = parser.parse_args()\n\n if args.config is not None:\n with open(args.config) as f:\n config = json.load(f)\n else:\n config = {'gpu': {'use': False, 'id': 0}, 'params': {}, 'net_params': {}}\n\n\n only_view_params = False\n if args.only_view_params is not None:\n only_view_params = True if args.only_view_params=='True' else False\n\n test_mode = False\n if args.test is not None:\n test_mode = True if args.test=='True' else False\n\n verbose_mode = False\n if args.verbose is not None:\n verbose_mode = True if args.verbose=='True' else False\n \n # device\n if args.gpu_id is not None:\n config['gpu']['id'] = int(args.gpu_id)\n config['gpu']['use'] = True\n device = gpu_setup(config['gpu']['use'], config['gpu']['id'])\n # Model name\n if args.model is not None:\n MODEL_NAME = args.model\n elif 'model' in config:\n MODEL_NAME = config['model']\n else:\n raise Exception('No specified model (--model)')\n # Dataset name\n if args.dataset is not None:\n DATASET_NAME = args.dataset\n elif 'dataset' in config:\n DATASET_NAME = config['dataset']\n else:\n raise Exception('No specified dataset (--dataset)')\n # Out directory\n if args.out_dir is not None:\n out_dir = args.out_dir\n elif 'out_dir' in config:\n out_dir = config['out_dir']\n else:\n raise Exception('No specified out directory (--out_dir)')\n\n\n '''\n Load dataset\n '''\n # Superpixels dataset\n dataset = LoadData(DATASET_NAME)\n\n\n '''\n TEST model pipeline\n '''\n if test_mode:\n print ('=' * 10 + ' TEST mode ' + '=' * 10)\n test_pipeline(MODEL_NAME, dataset, device, verbose_mode, out_dir)\n\n return\n\n\n '''\n TRAIN model pipeline\n '''\n # parameters\n params = config['params']\n if not 'verbose' in 
params:\n params['verbose'] = False\n if args.seed is not None:\n params['seed'] = int(args.seed)\n if args.epochs is not None:\n params['epochs'] = int(args.epochs)\n if args.batch_size is not None:\n params['batch_size'] = int(args.batch_size)\n if args.init_lr is not None:\n params['init_lr'] = float(args.init_lr)\n if args.lr_reduce_factor is not None:\n params['lr_reduce_factor'] = float(args.lr_reduce_factor)\n if args.lr_schedule_patience is not None:\n params['lr_schedule_patience'] = int(args.lr_schedule_patience)\n if args.min_lr is not None:\n params['min_lr'] = float(args.min_lr)\n if args.weight_decay is not None:\n params['weight_decay'] = float(args.weight_decay)\n if args.print_epoch_interval is not None:\n params['print_epoch_interval'] = int(args.print_epoch_interval)\n if args.max_time is not None:\n params['max_time'] = float(args.max_time)\n if args.verbose is not None:\n params['verbose'] = True if args.verbose=='True' else False\n # network parameters\n net_params = config['net_params']\n net_params['device'] = device\n net_params['gpu_id'] = config['gpu']['id']\n net_params['batch_size'] = params['batch_size']\n if not 'max_order' in net_params:\n net_params['max_order'] = 2\n if not 'gru' in net_params:\n net_params['gru'] = False\n if not 'undirected' in net_params:\n net_params['undirected'] = False\n if not 'activation' in net_params:\n net_params['activation'] = 'relu'\n if args.L is not None:\n net_params['L'] = int(args.L)\n if args.hidden_dim is not None:\n net_params['hidden_dim'] = int(args.hidden_dim)\n if args.out_dim is not None:\n net_params['out_dim'] = int(args.out_dim) \n if args.residual is not None:\n net_params['residual'] = True if args.residual=='True' else False\n if args.readout is not None:\n net_params['readout'] = args.readout\n if args.gated is not None:\n net_params['gated'] = True if args.gated=='True' else False\n if args.in_feat_dropout is not None:\n net_params['in_feat_dropout'] = 
float(args.in_feat_dropout)\n if args.dropout is not None:\n net_params['dropout'] = float(args.dropout)\n if args.batch_norm is not None:\n net_params['batch_norm'] = True if args.batch_norm=='True' else False\n if args.self_loop is not None:\n net_params['self_loop'] = True if args.self_loop=='True' else False\n if args.undirected is not None:\n net_params['undirected'] = True if args.undirected=='True' else False\n if args.max_order is not None:\n net_params['max_order'] = int(args.max_order)\n if args.gru is not None:\n net_params['gru'] = True if args.gru=='True' else False\n if args.activation is not None:\n net_params['activation'] = args.activation\n\n net_params['in_dim'] = dataset.train[0][0].ndata['feat'][0].size(0)\n net_params['in_dim_edge'] = dataset.train[0][0].edata['feat'][0].size(0)\n num_classes = len(np.unique(np.array(dataset.train[:][1])))\n net_params['n_classes'] = num_classes\n\n # Set random seed\n set_random_seed(params['seed'], device)\n\n # View parameters\n net_params['total_param'] = view_model_param(MODEL_NAME, net_params, params['verbose'])\n if only_view_params:\n print('== View Parameters only ==')\n return\n \n root_log_dir = out_dir + 'logs/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + start_time_str\n root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + start_time_str\n write_file_name = out_dir + 'results/result_' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + start_time_str\n write_config_file = out_dir + 'configs/config_' + MODEL_NAME + \"_\" + DATASET_NAME + \"_GPU\" + str(config['gpu']['id']) + \"_\" + start_time_str\n dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file\n\n if not os.path.exists(out_dir + 'results'):\n os.makedirs(out_dir + 'results')\n \n if not os.path.exists(out_dir + 'configs'):\n os.makedirs(out_dir + 'configs')\n\n print('\\nResult 
output:', write_file_name)\n\n train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)\n\n \n \n \nmain() \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "torch.optim.lr_scheduler.ReduceLROnPlateau", "numpy.random.seed", "torch.cuda.manual_seed", "torch.manual_seed", "torch.utils.data.DataLoader", "numpy.std", "numpy.mean", "torch.cuda.is_available", "torch.cuda.get_device_name", "torch.device", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ivychill/reid
[ "6dc8a2ea21dfa8037d26a7184c86e2fb59e3ab9e" ]
[ "test.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, division\n\nimport argparse\nimport torch\nimport torch.nn as nn\n# import torch.optim as optim\n# from torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudnn\nimport numpy as np\n# import torchvision\nfrom torchvision import datasets, models, transforms\n# import time\nimport os\nimport scipy.io\nimport yaml\nimport math\n# from model import ft_net, ft_net_dense, ft_net_NAS, PCB, PCB_test\nfrom model import ft_net, ft_net_dense, ft_net_NAS\nfrom model import PCB_dense as PCB\nfrom model import PCB_dense_test as PCB_test\nimport output\n\n#fp16\ntry:\n from apex.fp16_utils import *\nexcept ImportError: # will be 3.x series\n print('This is not an error. If you want to use low precision, i.e., fp16, please install the apex with cuda support (https://github.com/NVIDIA/apex) and update pytorch to 1.0')\n######################################################################\n# Options\n# --------\n\nparser = argparse.ArgumentParser(description='Training')\nparser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')\nparser.add_argument('--which_epoch',default='last', type=str, help='0,1,2,3...or last')\nparser.add_argument('--test_dir',default='../dataset/match/pytorch',type=str, help='./test_data')\nparser.add_argument('--name', default='ft_ResNet50', type=str, help='save model path')\nparser.add_argument('--batchsize', default=256, type=int, help='batchsize')\nparser.add_argument('--use_dense', action='store_true', help='use densenet121' )\nparser.add_argument('--PCB', action='store_true', help='use PCB' )\nparser.add_argument('--multi', action='store_true', help='use multiple query' )\nparser.add_argument('--fp16', action='store_true', help='use fp16.' )\nparser.add_argument('--ms',default='1', type=str, help='multiple_scale: e.g. 
1 1,1.1 1,1.1,1.2')\n\nopt = parser.parse_args()\n###load config###\n# load the training config\nconfig_path = os.path.join('./model',opt.name,'opts.yaml')\nwith open(config_path, 'r') as stream:\n config = yaml.load(stream)\nopt.fp16 = config['fp16'] \nopt.PCB = config['PCB']\nopt.use_dense = config['use_dense']\nopt.use_NAS = config['use_NAS']\nopt.stride = config['stride']\n\nif 'nclasses' in config: # tp compatible with old config files\n opt.nclasses = config['nclasses']\nelse: \n opt.nclasses = 751 \n\nstr_ids = opt.gpu_ids.split(',')\n#which_epoch = opt.which_epoch\nname = opt.name\ntest_dir = opt.test_dir\n\ngpu_ids = []\nfor str_id in str_ids:\n id = int(str_id)\n if id >=0:\n gpu_ids.append(id)\n\nprint('We use the scale: %s'%opt.ms)\nstr_ms = opt.ms.split(',')\nms = []\nfor s in str_ms:\n s_f = float(s)\n ms.append(math.sqrt(s_f))\n\n# set gpu ids\nif len(gpu_ids)>0:\n torch.cuda.set_device(gpu_ids[0])\n cudnn.benchmark = True\n\n######################################################################\n# Load Data\n# ---------\n#\n# We will use torchvision and torch.utils.data packages for loading the\n# data.\n#\ndata_transforms = transforms.Compose([\n transforms.Resize((256,128), interpolation=3),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\nif opt.PCB:\n data_transforms = transforms.Compose([\n transforms.Resize((384,192), interpolation=3),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) \n ])\n\n\ndata_dir = test_dir\n\nif opt.multi:\n image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['gallery','query','multi-query']}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,\n shuffle=False, num_workers=16) for x in ['gallery','query','multi-query']}\nelse:\n image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in 
['gallery','query']}\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,\n shuffle=False, num_workers=16) for x in ['gallery','query']}\nclass_names = image_datasets['query'].classes\nuse_gpu = torch.cuda.is_available()\n\n######################################################################\n# Load model\n#---------------------------\ndef load_network(network):\n save_path = os.path.join('./model',name,'net_%s.pth'%opt.which_epoch)\n network.load_state_dict(torch.load(save_path))\n return network\n\n\n######################################################################\n# Extract feature\n# ----------------------\n#\n# Extract feature from a trained model.\n#\ndef fliplr(img):\n '''flip horizontal'''\n inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W\n img_flip = img.index_select(3,inv_idx)\n return img_flip\n\ndef extract_feature(model,dataloaders):\n features = torch.FloatTensor()\n count = 0\n for data in dataloaders:\n img, label = data\n n, c, h, w = img.size()\n count += n\n print(count)\n ff = torch.FloatTensor(n,512).zero_().cuda()\n if opt.PCB:\n # ff = torch.FloatTensor(n,2048,6).zero_().cuda() # we have six parts\n ff = torch.FloatTensor(n,1024,6).zero_().cuda() # we have six parts\n\n for i in range(2):\n if(i==1):\n img = fliplr(img)\n input_img = Variable(img.cuda())\n for scale in ms:\n if scale != 1:\n # bicubic is only available in pytorch>= 1.1\n input_img = nn.functional.interpolate(input_img, scale_factor=scale, mode='bicubic', align_corners=False)\n outputs = model(input_img) \n ff += outputs\n # norm feature\n if opt.PCB:\n # feature size (n,2048,6)\n # 1. To treat every part equally, I calculate the norm for every 2048-dim part feature.\n # 2. 
To keep the cosine score==1, sqrt(6) is added to norm the whole feature (2048*6).\n fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6) \n ff = ff.div(fnorm.expand_as(ff))\n ff = ff.view(ff.size(0), -1)\n else:\n fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)\n ff = ff.div(fnorm.expand_as(ff))\n\n features = torch.cat((features,ff.data.cpu()), 0)\n return features\n\ndef get_id(img_path):\n camera_id = []\n labels = []\n for path, v in img_path:\n #filename = path.split('/')[-1]\n filename = os.path.basename(path)\n label = filename[0:4]\n camera = filename.split('c')[1]\n if label[0:2]=='-1':\n labels.append(-1)\n else:\n labels.append(int(label))\n camera_id.append(int(camera[0]))\n return camera_id, labels\n\n\n######################################################################\n# Load Collected data Trained model\nprint('-------test-----------')\nif opt.use_dense:\n model_structure = ft_net_dense(opt.nclasses)\nelif opt.use_NAS:\n model_structure = ft_net_NAS(opt.nclasses)\nelse:\n model_structure = ft_net(opt.nclasses, stride = opt.stride)\n\nif opt.PCB:\n model_structure = PCB(opt.nclasses)\n\n#if opt.fp16:\n# model_structure = network_to_half(model_structure)\n\nmodel = load_network(model_structure)\n\n# Remove the final fc layer and classifier layer\nif opt.PCB:\n #if opt.fp16:\n # model = PCB_test(model[1])\n #else:\n model = PCB_test(model)\nelse:\n #if opt.fp16:\n #model[1].model.fc = nn.Sequential()\n #model[1].classifier = nn.Sequential()\n #else:\n model.classifier.classifier = nn.Sequential()\n\n# Change to test mode\nmodel = model.eval()\nif use_gpu:\n model = model.cuda()\n\n# Extract feature\nwith torch.no_grad():\n gallery_feature = extract_feature(model,dataloaders['gallery'])\n query_feature = extract_feature(model,dataloaders['query'])\n if opt.multi:\n mquery_feature = extract_feature(model,dataloaders['multi-query'])\n \n# Save to Matlab for check\nresult = 
{'gallery_f':gallery_feature.numpy(),'query_f':query_feature.numpy()}\nscipy.io.savemat('pytorch_result.mat',result)\n\nprint(opt.name)\nresult = './model/%s/result.txt'%opt.name\n# os.system('python evaluate_gpu.py | tee -a %s'%result)\n\nif opt.multi:\n result = {'mquery_f':mquery_feature.numpy()}\n scipy.io.savemat('multi_query.mat',result)\n\noutput.gen_submission(image_datasets, query_feature, gallery_feature)" ]
[ [ "torch.nn.Sequential", "torch.norm", "numpy.sqrt", "torch.cuda.set_device", "torch.load", "torch.utils.data.DataLoader", "torch.no_grad", "torch.FloatTensor", "torch.cuda.is_available", "torch.nn.functional.interpolate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JaneliaSciComp/exllsm-synapse-detector
[ "bb2683009fb18dfaea2743e15747f6bd86254940" ]
[ "training/ExLLSM_unet_performance_evaluate.py" ]
[ "from skimage.measure import label, regionprops\nimport numpy as np \nimport nrrd\nfrom skimage import io\n\n\ndef tif_read(file_name):\n \"\"\"\n read tif image in (rows,cols,slices) shape\n \"\"\"\n im = io.imread(file_name)\n im_array = np.zeros((im.shape[1],im.shape[2],im.shape[0]), dtype=im.dtype)\n for i in range(im.shape[0]):\n im_array[:,:,i] = im[i]\n return im_array\n\n\ndef calculate_performance(predict_img, ground_truth_img):\n \"\"\" Calculate sensitivity and precision\n sensitivity = TP/(TP+FN)\n precision = TP/(TP+FP)\n args:\n if predict_img is prediction, and ground_truth_img is ground truth, then this function calculates precision\n if predict_img is ground truth, and ground_truth_img is prediction, then this function calculates sensitivity\n \"\"\"\n TP = 0\n FP = 0\n\n assert predict_img.shape == ground_truth_img.shape, \\\n \"Prediction does not have the same shape as ground truth!\"\n \n predict_img[predict_img!=0] = 1\n label_predict_img = label(predict_img, connectivity=3)\n regionprop_predict_img = regionprops(label_predict_img)\n\n for i in range(len(regionprop_predict_img)):\n curr_region = np.zeros(predict_img.shape, dtype=predict_img.dtype)\n curr_region[label_predict_img==regionprop_predict_img[i].label] = 1\n curr_obj = curr_region * ground_truth_img\n num_nonzero = np.count_nonzero(curr_obj)\n if num_nonzero > 0:\n TP += 1\n else:\n FP += 1\n \n return TP/(TP+FP)\n\n\ndef main():\n data_path = '# /image_directory'\n ground_truth = '# groundtruth.tif'\n predict_img = '# unetresult.tif'\n\n ground_truth_img = tif_read(data_path+ground_truth)\t\n predict_img = tif_read(data_path+predict_img)\n\n precision_unet = calculate_performance(predict_img, ground_truth_img)\n sensitivity_unet = calculate_performance(ground_truth_img, predict_img)\n print(\"Unet\")\n print(\"Recall = {}\".format(sensitivity_unet))\n print(\"Precision = {}\".format(precision_unet))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.zeros", "numpy.count_nonzero" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Namir0806/FETILDA
[ "d4a3e720dccef3ba0221e6d59214e54a11c6fc5b", "d4a3e720dccef3ba0221e6d59214e54a11c6fc5b", "d4a3e720dccef3ba0221e6d59214e54a11c6fc5b", "d4a3e720dccef3ba0221e6d59214e54a11c6fc5b" ]
[ "US-bank-experiments-source-code/unfreeze/finbert-original/max-4-1-hk-finbert-bilstm-hist-1.py", "US-bank-experiments-source-code/unfreeze/finbert-original/finbert-max-3.py", "US-bank-experiments-source-code/freeze/longformer-original/finbert-bilstm-1.py", "FIN10K-experiments-source-code/freeze/finbert-bilstm-2.py" ]
[ "from scipy import stats\nfrom sklearn.svm import SVR\nfrom sklearn.linear_model import LinearRegression\nimport os\nimport random\nimport sys\nimport csv\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport transformers\nfrom transformers import AutoConfig, AutoModel, AutoTokenizer \nfrom transformers import AdamW\nfrom torch.cuda.amp import autocast\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport time\nimport tensorflow as tf\n\nfrom sklearn.svm import SVR\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.linear_model import LinearRegression\n\nstart = time.time()\n\ntorch.cuda.empty_cache()\n\nseed_val = 42\n\nrandom.seed(seed_val)\nnp.random.seed(seed_val)\ntorch.manual_seed(seed_val)\ntorch.cuda.manual_seed_all(seed_val)\ntf.random.set_seed(seed_val)\n\n\nclass BERT_Arch(nn.Module):\n\n def __init__(self, bert):\n\n super(BERT_Arch, self).__init__()\n\n self.bert = bert \n \n # dropout layer\n self.dropout = nn.Dropout(0.1)\n \n # relu activation function\n self.relu = nn.ReLU()\n\n self.leakyrelu = nn.LeakyReLU()\n\n self.elu = nn.ELU()\n\n self.tanh = nn.Tanh()\n\n self.zeros=0\n\n self.totals=0\n\n # dense layer 1\n self.fc1 = nn.Linear(768,600)\n \n # dense layer 2 (Output layer)\n self.fc2 = nn.Linear(601,1)\n\n self.fc3 = nn.Linear(1,1)\n\n #LSTM\n self.hidden_dim = 768 #300\n self.emb_dim = 768\n self.encoder = nn.LSTM(self.emb_dim, self.hidden_dim, num_layers=1, bidirectional=True, dropout=0.1)\n\n\n #Define Attention Network\n def attnetwork(self, encoder_out, final_hidden):\n hidden = final_hidden.squeeze(0)\n attn_weights = torch.bmm(encoder_out, hidden.unsqueeze(2)).squeeze(2)\n soft_attn_weights = F.softmax(attn_weights, 1)\n new_hidden = 
torch.bmm(encoder_out.transpose(1,2), soft_attn_weights.unsqueeze(2)).squeeze(2)\n return new_hidden, soft_attn_weights\n \n\n #define the forward pass\n def forward(self, sent_id, mask, hist):\n\n cls_vec = []\n chunk_max_weights = []\n aggregate_pooled_hidden_states = []\n\n for i in range(len(sent_id)):\n\n if i < 35:\n\n #print(\"chunk i: \", i)\n\n ip_id = torch.tensor(sent_id[i]).unsqueeze(0).to(device)\n attn_mask = torch.tensor(mask[i]).unsqueeze(0).to(device)\n\n #pass the inputs to the model \n model_outputs = self.bert(input_ids=ip_id, attention_mask=attn_mask)\n cls_hs=model_outputs[1]\n hidden_states = model_outputs[2]\n\n hidden_ijma = (hidden_states[-1] + hidden_states[-2] + hidden_states[-3] + hidden_states[-4])/4\n\n #hidden_ijma = hidden_states[-2]\n\n temp = torch.transpose(torch.mul(hidden_ijma.transpose(1,2), attn_mask),1,2)\n\n #temp = torch.tanh(torch.mean(temp[0],0,True))\n\n temp, ind = torch.max(temp[0],0,True)\n\n temp = torch.tanh(temp)\n\n #temp = torch.mean(temp[0],0,True)\n\n cls_vec.append(cls_hs)\n aggregate_pooled_hidden_states.append(temp)\n\n '''\n col_sum = np.sort(atten[0][0][11].sum(0)[1:-1].detach().cpu().numpy())\n col_sum = col_sum[::-1]\n max_col_sum = max(col_sum) \n top_word_mean = col_sum[:5].mean()\n chunk_max_weights.append(top_word_mean)\n '''\n\n #cls_vec_ = torch.mean(torch.stack(cls_vec, dim=0), dim=0)\n \n cls_vec = torch.stack(aggregate_pooled_hidden_states, dim=0)\n cls_vec = cls_vec.to(torch.float32) #LSTM\n #print(\"cls_vec shape: \", cls_vec.shape, type(cls_vec), cls_vec.dtype)\n\n '''\n x = self.fc1(cls_vec_)\n x = self.relu(x)\n x = self.dropout(x)\n \n\n chunk_weights = (torch.tensor(chunk_max_weights)).unsqueeze(0)\n chunk_weights = chunk_weights.cuda() \n prod1 = torch.bmm(cls_vec.transpose(1,2), chunk_weights.transpose(0,1).unsqueeze(1)) \n prod1 = prod1.transpose(1,2) \n prod1 = prod1.to(torch.float32) \n '''\n\n emb_input = cls_vec\n inputx = self.dropout(emb_input)\n output, (hn, cn) = 
self.encoder(inputx) #emb_input)\n fbout = output[:, :, :self.hidden_dim]+ output[:, :, self.hidden_dim:] #sum bidir outputs F+B\n fbout = fbout.permute(1,0,2)\n fbhn = (hn[-2,:,:]+hn[-1,:,:]).unsqueeze(0)\n attn_out, attn_weights = self.attnetwork(fbout, fbhn)\n\n '''\n chunk_weights = (torch.tensor(chunk_max_weights)).unsqueeze(0)\n chunk_weights = chunk_weights.cuda() \n prod1 = torch.bmm(cls_vec.transpose(1,2), chunk_weights.transpose(0,1).unsqueeze(1)) \n '''\n\n prod = torch.bmm(cls_vec.transpose(1,2), attn_weights.transpose(0,1).unsqueeze(1)) \n prod_sum = torch.mean(prod, 0).transpose(0,1) \n\n x = prod_sum #attn_out\n \n x = self.fc1(x)\n x =self.leakyrelu(x)\n x = self.dropout(x) \n\n hist = hist.unsqueeze(0)\n\n hist = self.fc3(hist)\n x = torch.cat((x, hist.unsqueeze(0)), dim=1)\n #x = self.dropout(x)\n\n # output layer\n y = self.fc2(x)\n y = self.leakyrelu(y)\n\n\n return x, y\n\n\n# function to train the model\ndef train(epoch):\n\n memory_file = open('memory_max-4-1-hk-finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(epoch)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.txt', 'a+')\n model.train()\n\n total_loss, total_accuracy = 0, 0\n \n # empty list to save model predictions\n total_preds = []\n\n total_hist = []\n\n xs = []\n\n\n # iterate over list of documents\n for i in range(len(train_seq)):\n\n memory_file.write(\"doc num: \"+str(i)+\" before train: \"+str(int(torch.cuda.memory_allocated()/1024/1024))+' mem alloced\\n')\n memory_file.write(\"doc num: \"+str(i)+\" before train: \"+str(int(torch.cuda.memory_reserved()/1024/1024))+' mem reserved\\n')\n\n sent_id = train_seq[i]\n mask = train_mask[i]\n hist = train_hist[i] \n labels = train_y[i].unsqueeze(0).unsqueeze(0)\n\n # clear previously calculated gradients \n model.zero_grad() \n\n with autocast():\n # get model predictions for the current batch\n x, preds = model(sent_id, mask, hist)\n\n # compute the loss between actual and predicted values\n #loss = huber_loss(preds, labels)\n 
loss = mse_loss(preds, labels)\n\n # model predictions are stored on GPU. So, push it to CPU\n preds = preds.detach().cpu().numpy()\n x = x.detach().cpu().numpy().ravel()\n\n # add on to the total loss\n total_loss = total_loss + loss.item()\n\n xs.append(x)\n\n # backward pass to calculate the gradients\n loss.backward()\n\n # clip the the gradients to 1.0. It helps in preventing the exploding gradient problem\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n\n # update parameters\n optimizer.step()\n\n # append the model predictions\n total_preds.append(preds)\n\n loss.detach().cpu()\n\n memory_file.write(\"doc num: \"+str(i)+\" after train: \"+str(int(torch.cuda.memory_allocated()/1024/1024))+' mem alloced\\n')\n memory_file.write(\"doc num: \"+str(i)+\" after train: \"+str(int(torch.cuda.memory_reserved()/1024/1024))+' mem reserved\\n')\n memory_file.flush()\n\n \n \n # compute the training loss of the epoch\n avg_loss = total_loss / len(train_seq)\n\n xs = np.array(xs)\n\n # predictions are in the form of (no. of batches, size of batch, no. of classes).\n # reshape the predictions in form of (number of samples, no. 
of classes)\n total_preds = np.concatenate(total_preds, axis=0)\n #total_hist = np.concatenate(total_hist, axis=0)\n memory_file.close()\n #returns the loss and predictions\n return avg_loss, total_preds , xs\n\n# function for evaluating the model\ndef evaluate():\n\n print(\"\\nEvaluating...\")\n \n # deactivate dropout layers\n model.eval()\n\n total_loss, total_accuracy = 0.0, 0.0\n \n # empty list to save the model predictions\n total_preds = []\n\n total_xs = []\n\n # iterate over list of documents\n for i in range(len(valid_seq)):\n\n sent_id = valid_seq[i]\n mask = valid_mask[i]\n hist = valid_hist[i]\n labels = valid_y[i].unsqueeze(0).unsqueeze(0)\n\n # deactivate autograd\n with torch.no_grad():\n \n with autocast():\n # model predictions\n x, preds = model(sent_id, mask, hist)\n \n # compute the validation loss between actual and predicted values\n loss = mse_loss(preds,labels)\n\n total_loss = total_loss + loss.item()\n\n preds = preds.detach().cpu().numpy()\n\n total_preds.append(preds)\n\n x = x.detach().cpu().numpy().ravel()\n\n total_xs.append(x)\n\n loss.detach().cpu()\n\n # compute the validation loss of the epoch\n avg_loss = total_loss / len(valid_seq) \n\n total_xs = np.array(total_xs)\n\n # reshape the predictions in form of (number of samples, no. of classes)\n total_preds = np.concatenate(total_preds, axis=0)\n\n return avg_loss, total_preds, total_xs\n\ndef test():\n\n # empty list to save the model predictions\n total_xs = []\n\n total_preds=[]\n \n\n for i in range(len(test_seq)):\n\n sent_id = test_seq[i]\n mask = test_mask[i]\n hist = test_hist[i]\n #labels = test_y[i].unsqueeze(0).unsqueeze(0)\n\n with torch.no_grad():\n with autocast():\n x, preds = model(sent_id, mask, hist)\n \n preds = preds.detach().cpu().numpy()\n\n total_preds.append(preds)\n\n x = x.detach().cpu().numpy().ravel()\n\n total_xs.append(x)\n\n \n \n # reshape the predictions in form of (number of samples, no. 
of classes)\n total_xs = np.array(total_xs)\n\n total_preds = np.concatenate(total_preds, axis=0)\n \n return total_xs, total_preds\n\ndef train_x():\n\n # empty list to save the model predictions\n total_xs = []\n\n total_preds=[]\n \n\n for i in range(len(train_seq)):\n\n sent_id = train_seq[i]\n mask = train_mask[i]\n hist = train_hist[i]\n #labels = test_y[i].unsqueeze(0).unsqueeze(0)\n\n with torch.no_grad():\n with autocast():\n x, preds = model(sent_id, mask, hist)\n \n preds = preds.detach().cpu().numpy()\n\n total_preds.append(preds)\n\n x = x.detach().cpu().numpy().ravel()\n\n total_xs.append(x)\n\n \n \n # reshape the predictions in form of (number of samples, no. of classes)\n total_xs = np.array(total_xs)\n\n total_preds = np.concatenate(total_preds, axis=0)\n \n return total_xs, total_preds\n\n\n# specify GPU\ndevice = torch.device(\"cuda\")\n\nmax_length = int(sys.argv[1]) #append two [CLS] and [SEP] tokens to make 512\nsec = sys.argv[2]\nbv = sys.argv[3]\n\nfname = \"sorted_\"+ sec + \".csv\"\n\n#end_year = int(sys.argv[1])\n#train_years_list = list(range(end_year-5, end_year))\n#print(\"train_years: \", train_years_list)\n\ndf = pd.read_csv(fname)\n#df = df[:10]\n\ntrain_text, rem_text, train_hist, rem_hist, train_labels, rem_labels = train_test_split(df['mda'],\n df['prev_'+bv], \n df[bv],\n shuffle=False,\n train_size=0.8) \n\nvalid_text, test_text, valid_hist, test_hist, valid_labels, test_labels = train_test_split(\n\trem_text,\n\trem_hist,\n\trem_labels,\n shuffle=False,\n\ttest_size=0.5\n)\n\n'''\nval_text, test_text, val_hist, test_hist, val_labels, test_labels = train_test_split(temp_text, \n temp_hist,\n temp_labels,\n shuffle=False,\n test_size=0.2) \n\nval_text = val_text.astype(str)\n'''\n\ntrain_text = train_text.astype(str) \nvalid_text = valid_text.astype(str)\ntest_text = test_text.astype(str)\n\n'''\ndf_train = pd.DataFrame()\ndf_test = pd.DataFrame()\n\nfor y in train_years_list:\n df_train = pd.concat([df_train, 
pd.read_csv(str(y) + \"_tok.csv\")])\n'''\n\n#bert_path = \"/gpfs/u/home/HPDM/HPDMrawt/scratch/npl_env/sdm21-exps/long_document_fin/\"\n\nbert_path = \"/gpfs/u/home/DLTM/DLTMboxi/scratch/env/hk-finbert/\"\n\nconfig = AutoConfig.from_pretrained(bert_path, output_attentions=True, output_hidden_states = True) \n\n# import BERT-base pretrained model\nbert = AutoModel.from_pretrained(bert_path, config=config) #longformer-base-4096/') \n\n# Load the BERT tokenizer\ntokenizer = AutoTokenizer.from_pretrained(bert_path) #longformer-base-4096/') \n\n#TRAIN\n# tokenize and encode sequences in the training set\ntokens_train = tokenizer.batch_encode_plus(\n train_text.tolist(),\n add_special_tokens=False\n)\n\n#Extract input ids\ntrain_seq_ = tokens_train['input_ids']\n#Split each document into 510 tokens\ntrain_seq = [[train_seq_[j][i:i + max_length] for i in range(0, len(train_seq_[j]), max_length)] for j in range(len(train_seq_))]\n#print(train_seq[0][0])\n#Add [CLS], [SEP] and [PAD] tokens\ntrain_seq = [[[tokenizer.cls_token_id] + train_seq[j][i] + [tokenizer.sep_token_id] if len(train_seq[j][i]) == max_length else [tokenizer.cls_token_id] + train_seq[j][i] +[tokenizer.sep_token_id] + [tokenizer.pad_token_id] * (max_length-len(train_seq[j][i])) for i in range(len(train_seq[j]))] for j in range(len(train_seq))]\n#print(train_seq[0][0])\n#df_train_seq=pd.DataFrame()\n#df_train_seq[\"train_seq\"]=train_seq\n#df_train_seq.to_csv(sec+ \"-train_seq.csv\")\n\n#Extract attention masks\ntrain_mask_ = tokens_train['attention_mask']\n#Split each document into 510 tokens\ntrain_mask = [[train_mask_[j][i:i + max_length] for i in range(0, len(train_mask_[j]), max_length)] for j in range(len(train_mask_))]\n#Add [1] for attention and [0] for [PAD]\ntrain_mask = [[[1] + train_mask[j][i] + [1] if len(train_mask[j][i]) == max_length else [1]+train_mask[j][i]+[1] + [0] * (max_length-len(train_mask[j][i])) for i in range(len(train_mask[j]))] for j in range(len(train_mask))]\n\n#VALID\n# 
tokenize and encode sequences in the training set\ntokens_valid = tokenizer.batch_encode_plus(\n valid_text.tolist(),\n add_special_tokens=False\n)\n\n#Extract input ids\nvalid_seq_ = tokens_valid['input_ids']\n#Split each document into 510 tokens\nvalid_seq = [[valid_seq_[j][i:i + max_length] for i in range(0, len(valid_seq_[j]), max_length)] for j in range(len(valid_seq_))]\n#print(valid_seq[0][0])\n#Add [CLS], [SEP] and [PAD] tokens\nvalid_seq = [[[tokenizer.cls_token_id] + valid_seq[j][i] + [tokenizer.sep_token_id] if len(valid_seq[j][i]) == max_length else [tokenizer.cls_token_id] + valid_seq[j][i] +[tokenizer.sep_token_id] + [tokenizer.pad_token_id] * (max_length-len(valid_seq[j][i])) for i in range(len(valid_seq[j]))] for j in range(len(valid_seq))]\n#print(valid_seq[0][0])\n#df_valid_seq=pd.DataFrame()\n#df_valid_seq[\"valid_seq\"]=valid_seq\n#df_valid_seq.to_csv(sec+ \"-valid_seq.csv\")\n\n#Extract attention masks\nvalid_mask_ = tokens_valid['attention_mask']\n#Split each document into 510 tokens\nvalid_mask = [[valid_mask_[j][i:i + max_length] for i in range(0, len(valid_mask_[j]), max_length)] for j in range(len(valid_mask_))]\n#Add [1] for attention and [0] for [PAD]\nvalid_mask = [[[1] + valid_mask[j][i] + [1] if len(valid_mask[j][i]) == max_length else [1]+valid_mask[j][i]+[1] + [0] * (max_length-len(valid_mask[j][i])) for i in range(len(valid_mask[j]))] for j in range(len(valid_mask))]\n\n#TEST\n# tokenize and encode sequences in the test set\ntokens_test = tokenizer.batch_encode_plus(\n test_text.tolist(),\n add_special_tokens=False\n)\n\n#Extract input ids\ntest_seq_ = tokens_test['input_ids']\n#Split each document into 510 tokens\ntest_seq = [[test_seq_[j][i:i + max_length] for i in range(0, len(test_seq_[j]), max_length)] for j in range(len(test_seq_))]\n#Add [CLS], [SEP] and [PAD] tokens\ntest_seq = [[[tokenizer.cls_token_id] + test_seq[j][i] + [tokenizer.sep_token_id] if len(test_seq[j][i]) == max_length else 
[tokenizer.cls_token_id]+test_seq[j][i] + [tokenizer.sep_token_id]+ [tokenizer.pad_token_id] * (max_length-len(test_seq[j][i])) for i in range(len(test_seq[j]))] for j in range(len(test_seq))]\n\n\n#Extract attention masks\ntest_mask_ = tokens_test['attention_mask']\n#Split each document into 510 tokens\ntest_mask = [[test_mask_[j][i:i + max_length] for i in range(0, len(test_mask_[j]), max_length)] for j in range(len(test_mask_))]\n#Add [1] for attention and [0] for [PAD]\ntest_mask = [[[1] + test_mask[j][i] + [1] if len(test_mask[j][i]) == max_length else [1]+test_mask[j][i]+[1] + [0] * (max_length-len(test_mask[j][i])) for i in range(len(test_mask[j]))] for j in range(len(test_mask))]\n\n\ntrain_hist = torch.tensor(train_hist.tolist()).to(device)\ntrain_y = torch.tensor(train_labels.tolist()).to(device)\n\nvalid_hist = torch.tensor(valid_hist.tolist()).to(device)\nvalid_y = torch.tensor(valid_labels.tolist()).to(device)\n\ntest_hist = torch.tensor(test_hist.tolist()).to(device)\ntest_y = torch.tensor(test_labels.tolist()).to(device)\n\n#val_hist = torch.tensor(val_hist.tolist()).to(device)\n#val_y = torch.tensor(val_labels.tolist()).to(device)\n\n# freeze all the parameters\nfor name, param in bert.named_parameters():\n param.requires_grad = True #True\n\n# pass the pre-trained BERT to our define architecture\nmodel = BERT_Arch(bert)\n\n# push the model to GPU\nmodel = model.to(device)\n\n# define the loss function\nmse_loss = nn.MSELoss() \nhuber_loss = nn.L1Loss()\n\n# number of training epochs\ntotal_epochs = int(sys.argv[4])\nstart_epoch = int(sys.argv[5])\nend_epoch = int(sys.argv[6])\nepochs = end_epoch - start_epoch + 1\n#plus = int(sys.argv[5])\n\n# different learning rates\nlearning_rate = float(sys.argv[7])\n\n# set initial loss to previous best\nbest_valid_loss = float('inf')\nbest_epoch = 0\n# empty lists to store training and validation loss of each epoch\ntrain_losses=[]\nvalid_losses=[]\n\n#for each epoch\nfor epoch in range(epochs):\n\n 
#print('\\n Epoch {:} / {:}'.format(epoch + 1, epochs))\n torch.cuda.empty_cache()\n # define the optimizer\n optimizer = AdamW(model.parameters(),\n lr = learning_rate, eps = 1e-8) # learning rate\n \n \n #train model\n train_loss, _ , xs_final= train(start_epoch+epoch)\n \n #evaluate model\n valid_loss, _ , _ = evaluate()\n\n #save the best model\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n best_epoch = start_epoch + epoch\n #print(f'\\nTraining Loss: {train_loss:.3f}')\n #xs_train = xs_final\n model_to_save = model.module if hasattr(model, 'module') else model\n torch.save(model_to_save.state_dict(), 'saved_weights_max-4-1-hk-finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.pt')\n #torch.save(model_to_save.state_dict(), 'saved_weights_max-4-1-hk-finbert_'+str(max_length)+'_'+sec+'_'+bv+'_epoch'+str(start_epoch+epoch)+'_of_'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.pt')\n\n \n # append training and validation loss\n train_losses.append(train_loss)\n valid_losses.append(valid_loss)\n \n print(f'\\nTraining Loss: {train_loss:.10f}')\n print(f'Validation Loss: {valid_loss:.10f}')\n\nvalid_loss_file = open('best_valid_loss_max-4-1-hk-finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.txt', 'w')\nvalid_loss_file.write(str(best_valid_loss)+\"\\n\")\nvalid_loss_file.write(str(best_epoch))\nvalid_loss_file.close()\n'''\n# pass the pre-trained BERT to our define architecture\nmodel = BERT_Arch(bert)\n\n# push the model to GPU\nmodel = model.to(device)\n\n#load weights of best model\npath = 'saved_weights_max-4-1-hk-finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.pt'\nmodel.load_state_dict(torch.load(path))\n\nxs_train , _ = train_x()\n\n# get predictions for test data\nvalid_mses = []\ntest_mses = []\n\nmethods = 
[\"bare\", \"svr\", \"kr\", \"lr\"]\n\n_ , preds, xs_valid = evaluate()\npreds = np.asarray(preds)\nvalid_y = valid_y.cpu().data.numpy()\nvalid_mse = mean_squared_error(valid_y, preds)\nvalid_mses.append(valid_mse)\n\nxs_test, preds = test()\npreds = np.asarray(preds)\ntest_y = test_y.cpu().data.numpy()\ntest_mse = mean_squared_error(test_y, preds)\ntest_mses.append(test_mse)\n\nprint(\"bert mse: \",test_mse)\nlr = LinearRegression()\nkr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)\nsvr = SVR(kernel='rbf', C=0.1, epsilon=0.0001) #linear')\n\nmodels_list = [svr, kr, lr]\n\nfor m in models_list:\n m.fit(xs_train, train_labels.to_numpy())\n\n preds = m.predict(xs_valid)\n valid_mse = mean_squared_error(valid_labels.to_numpy(), preds)\n valid_mses.append(valid_mse)\n\n preds = m.predict(xs_test)\n test_mse = mean_squared_error(test_labels.to_numpy(), preds)\n test_mses.append(test_mse)\n print(m, test_mse,'---',valid_mse)\n\n\nmse = str(test_mses[valid_mses.index(min(valid_mses))])+\"---\"+methods[valid_mses.index(min(valid_mses))]+\"---\"+str(min(valid_mses))\n\n\nspearmanr = (stats.spearmanr(preds, test_y))[0] \nkendallr = (stats.kendalltau(preds, test_y))[0] \n\nprint(\"max-4-1-hk-finbert mse: \", mse)\n\nmse_file = open('mse_max-4-1-hk-finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.txt', \"w\")\nmse_file.write(mse + \"\\n\")\nmse_file.write(str(best_valid_loss)+\"\\n\")\nmse_file.write(str(spearmanr) + \"\\n\") \nmse_file.write(str(kendallr) + \"\\n\") \n#mse_file.close()\n\ntest_error = pd.DataFrame() \ntest_error['cik_year'] = test_cik.tolist() \ntest_error['test_y'] = test_y.tolist() \ntest_error['preds'] = [p[0] for p in preds.tolist()] \ntest_error['error'] = test_error['test_y'] - test_error['preds'] \ntest_error.to_csv('error_max-4-1-hk-finbert_'+str(max_length)+'_'+sec+'_'+bv+'_mean_hist.csv', index=False) \n\n\n#Linear Baseline\nlr = 
LinearRegression().fit(train_hist.cpu().data.numpy().reshape(-1, 1),\n train_y.cpu().data.numpy().reshape(-1, 1))\npreds = lr.predict(test_hist.cpu().data.numpy().reshape(-1, 1))\nlr_mse = mean_squared_error(test_y.reshape(-1, 1), preds)\n\nprint(\"LR mse\", lr_mse)\nmse_file.write(\"Linear mse: \" + str(lr_mse))\nmse_file.close()\n'''\n\nprint(\"Total execution time: \", time.time() - start)\n", "from sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import r2_score\nfrom scipy import stats\nfrom sklearn.svm import SVR\nfrom sklearn.linear_model import LinearRegression\nimport os\nimport random\nimport sys\nimport csv\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport transformers\nfrom transformers import AutoConfig, AutoModel, AutoTokenizer \nfrom transformers import AdamW\nfrom torch.cuda.amp import autocast\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport time\nimport tensorflow as tf\n\nfrom sklearn.svm import SVR\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.linear_model import LinearRegression\n\nstart = time.time()\n\ntorch.cuda.empty_cache()\n\nseed_val = 42\n\nrandom.seed(seed_val)\nnp.random.seed(seed_val)\ntorch.manual_seed(seed_val)\ntorch.cuda.manual_seed_all(seed_val)\ntf.random.set_seed(seed_val)\n\n\nclass BERT_Arch(nn.Module):\n\n def __init__(self, bert):\n\n super(BERT_Arch, self).__init__()\n\n self.bert = bert \n \n # dropout layer\n self.dropout = nn.Dropout(0.1)\n \n # relu activation function\n self.relu = nn.ReLU()\n\n self.leakyrelu = nn.LeakyReLU()\n\n self.elu = nn.ELU()\n\n self.tanh = nn.Tanh()\n\n self.zeros=0\n\n self.totals=0\n\n # dense layer 1\n self.fc1 = nn.Linear(768,600)\n \n # dense layer 2 (Output 
layer)\n self.fc2 = nn.Linear(600,1)\n\n self.fc3 = nn.Linear(1,1)\n\n #LSTM\n self.hidden_dim = 300\n self.emb_dim = 768\n self.encoder = nn.LSTM(self.emb_dim, self.hidden_dim, num_layers=1, bidirectional=True, dropout=0.1)\n\n\n #Define Attention Network\n def attnetwork(self, encoder_out, final_hidden):\n hidden = final_hidden.squeeze(0)\n attn_weights = torch.bmm(encoder_out, hidden.unsqueeze(2)).squeeze(2)\n soft_attn_weights = F.softmax(attn_weights, 1)\n new_hidden = torch.bmm(encoder_out.transpose(1,2), soft_attn_weights.unsqueeze(2)).squeeze(2)\n return new_hidden\n \n\n #define the forward pass\n def forward(self, sent_id, mask, hist):\n\n cls_vec = []\n chunk_max_weights = [] \n\n for i in range(len(sent_id)):\n\n if i < 35:\n\n #print(\"chunk i: \", i)\n\n ip_id = torch.tensor(sent_id[i]).unsqueeze(0).to(device)\n attn_mask = torch.tensor(mask[i]).unsqueeze(0).to(device)\n\n #pass the inputs to the model \n model_outputs = self.bert(input_ids=ip_id, attention_mask=attn_mask)\n cls_hs=model_outputs[1]\n atten=model_outputs[2]\n cls_vec.append(cls_hs)\n\n del ip_id\n del attn_mask\n\n '''\n col_sum = np.sort(atten[0][0][11].sum(0)[1:-1].detach().cpu().numpy()) \n col_sum = col_sum[::-1] \n max_col_sum = max(col_sum) \n top_word_mean = col_sum[:15].mean()\n chunk_max_weights.append(top_word_mean)\n '''\n\n cls_vec_ = torch.max(torch.stack(cls_vec, dim=0), dim=0) \n\n '''\n cls_vec = torch.stack(cls_vec, dim=0)\n chunk_weights = (torch.tensor(chunk_max_weights)).unsqueeze(0)\n chunk_weights = chunk_weights.cuda() \n prod1 = torch.bmm(cls_vec.transpose(1,2), chunk_weights.transpose(0,1).unsqueeze(1)) \n prod1 = prod1.transpose(1,2) \n \n cls_vec_ = torch.max(prod1, 0) #torch.stack(cls_vec, dim=0), dim=0)\n '''\n\n #cls_vec = torch.stack(cls_vec, dim=0)\n #cls_vec = cls_vec.to(torch.float32) #LSTM\n #print(\"cls_vec shape: \", cls_vec.shape, type(cls_vec), cls_vec.dtype)\n\n \n x = self.fc1(cls_vec_[0])\n x =self.leakyrelu(x)\n x = self.dropout(x)\n\n 
self.zeros += torch.numel(x) - torch.count_nonzero(x).item()\n\n self.totals += torch.numel(x)\n \n '''\n\n emb_input = cls_vec\n inputx = self.dropout(emb_input)\n output, (hn, cn) = self.encoder(inputx) #emb_input)\n fbout = output[:, :, :self.hidden_dim]+ output[:, :, self.hidden_dim:] #sum bidir outputs F+B\n fbout = fbout.permute(1,0,2)\n fbhn = (hn[-2,:,:]+hn[-1,:,:]).unsqueeze(0)\n attn_out = self.attnetwork(fbout, fbhn)\n\n x = attn_out\n \n \n hist = hist.unsqueeze(0)\n\n hist = self.fc3(hist)\n x = torch.cat((x, hist.unsqueeze(0)), dim=1)\n #x = self.dropout(x)\n '''\n # output layer\n y = self.fc2(x)\n\n return x, y\n\n\n# function to train the model\ndef train(epoch):\n\n #memory_file = open('memory_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(epoch)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.txt', 'a+')\n model.train()\n\n total_loss, total_accuracy = 0, 0\n \n # empty list to save model predictions\n total_preds = []\n\n total_hist = []\n\n xs = []\n\n\n # iterate over list of documents\n for i in range(len(train_seq)):\n\n #memory_file.write(\"doc num: \"+str(i)+\" before train: \"+str(int(torch.cuda.memory_allocated()/1024/1024))+' mem alloced\\n')\n #memory_file.write(\"doc num: \"+str(i)+\" before train: \"+str(int(torch.cuda.memory_reserved()/1024/1024))+' mem reserved\\n')\n\n sent_id = train_seq[i]\n mask = train_mask[i]\n hist = train_hist[i] \n labels = train_y[i].unsqueeze(0).unsqueeze(0)\n\n # clear previously calculated gradients \n model.zero_grad() \n\n with autocast():\n # get model predictions for the current batch\n x, preds = model(sent_id, mask, hist)\n\n # compute the loss between actual and predicted values\n loss = mse_loss(preds, labels)\n\n # model predictions are stored on GPU. 
So, push it to CPU\n preds = preds.detach().cpu().numpy()\n x = x.detach().cpu().numpy().ravel()\n\n # add on to the total loss\n total_loss = total_loss + loss.item()\n\n xs.append(x)\n\n # backward pass to calculate the gradients\n loss.backward()\n\n # clip the the gradients to 1.0. It helps in preventing the exploding gradient problem\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n\n # update parameters\n optimizer.step()\n\n # append the model predictions\n total_preds.append(preds)\n\n loss.detach().cpu()\n\n #memory_file.write(\"doc num: \"+str(i)+\" after train: \"+str(int(torch.cuda.memory_allocated()/1024/1024))+' mem alloced\\n')\n #memory_file.write(\"doc num: \"+str(i)+\" after train: \"+str(int(torch.cuda.memory_reserved()/1024/1024))+' mem reserved\\n')\n #memory_file.flush()\n\n \n \n # compute the training loss of the epoch\n avg_loss = total_loss / len(train_seq)\n\n xs = np.array(xs)\n\n # predictions are in the form of (no. of batches, size of batch, no. of classes).\n # reshape the predictions in form of (number of samples, no. 
of classes)\n total_preds = np.concatenate(total_preds, axis=0)\n #total_hist = np.concatenate(total_hist, axis=0)\n #memory_file.close()\n #returns the loss and predictions\n return avg_loss, total_preds , xs\n\n# function for evaluating the model\ndef evaluate():\n\n print(\"\\nEvaluating...\")\n \n # deactivate dropout layers\n model.eval()\n\n total_loss, total_accuracy = 0, 0\n \n # empty list to save the model predictions\n total_preds = []\n\n # iterate over list of documents\n for i in range(len(valid_seq)):\n\n sent_id = valid_seq[i]\n mask = valid_mask[i]\n hist = valid_hist[i]\n labels = valid_y[i].unsqueeze(0).unsqueeze(0)\n\n # deactivate autograd\n with torch.no_grad():\n \n with autocast():\n # model predictions\n x, preds = model(sent_id, mask, hist)\n\n # compute the validation loss between actual and predicted values\n loss = mse_loss(preds,labels)\n\n total_loss = total_loss + loss.item()\n\n preds = preds.detach().cpu().numpy()\n\n total_preds.append(preds)\n\n \n\n # compute the validation loss of the epoch\n avg_loss = total_loss / len(valid_seq) \n\n # reshape the predictions in form of (number of samples, no. of classes)\n total_preds = np.concatenate(total_preds, axis=0)\n\n return avg_loss, total_preds\n\ndef test():\n\n # empty list to save the model predictions\n total_xs = []\n\n total_preds=[]\n \n\n for i in range(len(test_seq)):\n\n sent_id = test_seq[i]\n mask = test_mask[i]\n hist = test_hist[i]\n #labels = test_y[i].unsqueeze(0).unsqueeze(0)\n\n with torch.no_grad():\n with autocast():\n x, preds = model(sent_id, mask, hist)\n x = x.detach().cpu().numpy().ravel()\n preds = preds.detach().cpu().numpy()\n # append the model predictions\n total_xs.append(x)\n total_preds.append(preds)\n\n \n \n # reshape the predictions in form of (number of samples, no. 
of classes)\n total_xs = np.array(total_xs)\n\n total_preds = np.concatenate(total_preds, axis=0)\n \n return total_xs, total_preds\n\n# specify GPU\ndevice = torch.device(\"cuda\")\n\nmax_length = int(sys.argv[1]) #append two [CLS] and [SEP] tokens to make 512\nsec = sys.argv[2]\nbv = sys.argv[3]\n\n#fname = \"words_\"+ sec + \".csv\"\nfname = \"sorted_\" + sec + \".csv\"\n\n#end_year = int(sys.argv[1])\n#train_years_list = list(range(end_year-5, end_year))\n#print(\"train_years: \", train_years_list)\n\ndf = pd.read_csv(fname)\n#df = df[:10]\n\ntrain_text, rem_text, train_hist, rem_hist, train_labels, rem_labels = train_test_split(df['mda'],\n df['prev_'+bv], \n df[bv],\n shuffle=False,\n train_size=0.8) \n\nvalid_text, test_text, valid_hist, test_hist, valid_labels, test_labels = train_test_split(\n\trem_text,\n\trem_hist,\n\trem_labels,\n shuffle=True,\n\ttest_size=0.5\n)\n\n'''\nval_text, test_text, val_hist, test_hist, val_labels, test_labels = train_test_split(temp_text, \n temp_hist,\n temp_labels,\n shuffle=False,\n test_size=0.2) \n\nval_text = val_text.astype(str)\n'''\n\ntrain_text = train_text.astype(str) \nvalid_text = valid_text.astype(str)\ntest_text = test_text.astype(str)\n\n'''\ndf_train = pd.DataFrame()\ndf_test = pd.DataFrame()\n\nfor y in train_years_list:\n df_train = pd.concat([df_train, pd.read_csv(str(y) + \"_tok.csv\")])\n'''\n\n#bert_path = \"/gpfs/u/home/HPDM/HPDMrawt/scratch/npl_env/sdm21-exps/long_document_fin/\"\n\nbert_path = \"/gpfs/u/home/DLTM/DLTMboxi/scratch/env/hk-finbert/\"\n\nconfig = AutoConfig.from_pretrained(bert_path, output_attentions=True) \n\n# import BERT-base pretrained model\nbert = AutoModel.from_pretrained(bert_path, config=config) #longformer-base-4096/') \n\n# Load the BERT tokenizer\ntokenizer = AutoTokenizer.from_pretrained(bert_path) #longformer-base-4096/') \n\n#TRAIN\n# tokenize and encode sequences in the training set\ntokens_train = tokenizer.batch_encode_plus(\n train_text.tolist(),\n 
add_special_tokens=False\n)\n\n#Extract input ids\ntrain_seq_ = tokens_train['input_ids']\n#Split each document into 510 tokens\ntrain_seq = [[train_seq_[j][i:i + max_length] for i in range(0, len(train_seq_[j]), max_length)] for j in range(len(train_seq_))]\n#print(train_seq[0][0])\n#Add [CLS], [SEP] and [PAD] tokens\ntrain_seq = [[[tokenizer.cls_token_id] + train_seq[j][i] + [tokenizer.sep_token_id] if len(train_seq[j][i]) == max_length else [tokenizer.cls_token_id] + train_seq[j][i] +[tokenizer.sep_token_id] + [tokenizer.pad_token_id] * (max_length-len(train_seq[j][i])) for i in range(len(train_seq[j]))] for j in range(len(train_seq))]\n#print(train_seq[0][0])\n#df_train_seq=pd.DataFrame()\n#df_train_seq[\"train_seq\"]=train_seq\n#df_train_seq.to_csv(sec+ \"-train_seq.csv\")\n\n#Extract attention masks\ntrain_mask_ = tokens_train['attention_mask']\n#Split each document into 510 tokens\ntrain_mask = [[train_mask_[j][i:i + max_length] for i in range(0, len(train_mask_[j]), max_length)] for j in range(len(train_mask_))]\n#Add [1] for attention and [0] for [PAD]\ntrain_mask = [[[1] + train_mask[j][i] + [1] if len(train_mask[j][i]) == max_length else [1]+train_mask[j][i]+[1] + [0] * (max_length-len(train_mask[j][i])) for i in range(len(train_mask[j]))] for j in range(len(train_mask))]\n\n#VALID\n# tokenize and encode sequences in the training set\ntokens_valid = tokenizer.batch_encode_plus(\n valid_text.tolist(),\n add_special_tokens=False\n)\n\n#Extract input ids\nvalid_seq_ = tokens_valid['input_ids']\n#Split each document into 510 tokens\nvalid_seq = [[valid_seq_[j][i:i + max_length] for i in range(0, len(valid_seq_[j]), max_length)] for j in range(len(valid_seq_))]\n#print(valid_seq[0][0])\n#Add [CLS], [SEP] and [PAD] tokens\nvalid_seq = [[[tokenizer.cls_token_id] + valid_seq[j][i] + [tokenizer.sep_token_id] if len(valid_seq[j][i]) == max_length else [tokenizer.cls_token_id] + valid_seq[j][i] +[tokenizer.sep_token_id] + [tokenizer.pad_token_id] * 
(max_length-len(valid_seq[j][i])) for i in range(len(valid_seq[j]))] for j in range(len(valid_seq))]\n#print(valid_seq[0][0])\n#df_valid_seq=pd.DataFrame()\n#df_valid_seq[\"valid_seq\"]=valid_seq\n#df_valid_seq.to_csv(sec+ \"-valid_seq.csv\")\n\n#Extract attention masks\nvalid_mask_ = tokens_valid['attention_mask']\n#Split each document into 510 tokens\nvalid_mask = [[valid_mask_[j][i:i + max_length] for i in range(0, len(valid_mask_[j]), max_length)] for j in range(len(valid_mask_))]\n#Add [1] for attention and [0] for [PAD]\nvalid_mask = [[[1] + valid_mask[j][i] + [1] if len(valid_mask[j][i]) == max_length else [1]+valid_mask[j][i]+[1] + [0] * (max_length-len(valid_mask[j][i])) for i in range(len(valid_mask[j]))] for j in range(len(valid_mask))]\n\n#TEST\n# tokenize and encode sequences in the test set\ntokens_test = tokenizer.batch_encode_plus(\n test_text.tolist(),\n add_special_tokens=False\n)\n\n#Extract input ids\ntest_seq_ = tokens_test['input_ids']\n#Split each document into 510 tokens\ntest_seq = [[test_seq_[j][i:i + max_length] for i in range(0, len(test_seq_[j]), max_length)] for j in range(len(test_seq_))]\n#Add [CLS], [SEP] and [PAD] tokens\ntest_seq = [[[tokenizer.cls_token_id] + test_seq[j][i] + [tokenizer.sep_token_id] if len(test_seq[j][i]) == max_length else [tokenizer.cls_token_id]+test_seq[j][i] + [tokenizer.sep_token_id]+ [tokenizer.pad_token_id] * (max_length-len(test_seq[j][i])) for i in range(len(test_seq[j]))] for j in range(len(test_seq))]\n\n\n#Extract attention masks\ntest_mask_ = tokens_test['attention_mask']\n#Split each document into 510 tokens\ntest_mask = [[test_mask_[j][i:i + max_length] for i in range(0, len(test_mask_[j]), max_length)] for j in range(len(test_mask_))]\n#Add [1] for attention and [0] for [PAD]\ntest_mask = [[[1] + test_mask[j][i] + [1] if len(test_mask[j][i]) == max_length else [1]+test_mask[j][i]+[1] + [0] * (max_length-len(test_mask[j][i])) for i in range(len(test_mask[j]))] for j in 
range(len(test_mask))]\n\n\ntrain_hist = torch.tensor(train_hist.tolist()).to(device)\ntrain_y = torch.tensor(train_labels.tolist()).to(device)\n\nvalid_hist = torch.tensor(valid_hist.tolist()).to(device)\nvalid_y = torch.tensor(valid_labels.tolist()).to(device)\n\ntest_hist = torch.tensor(test_hist.tolist()).to(device)\ntest_y = torch.tensor(test_labels.tolist()).to(device)\n\n#val_hist = torch.tensor(val_hist.tolist()).to(device)\n#val_y = torch.tensor(val_labels.tolist()).to(device)\n\n# freeze all the parameters\nfor name, param in bert.named_parameters():\n param.requires_grad = True #True\n\n# pass the pre-trained BERT to our define architecture\nmodel = BERT_Arch(bert)\n\n# push the model to GPU\nmodel = model.to(device)\n\n#path = 'saved_weights_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_bilstm_ep4.pt' \n#model.load_state_dict(torch.load(path))\n\n# define the loss function\nmse_loss = nn.MSELoss() \n\n# number of training epochs\nepochs = int(sys.argv[4])\n\n# different learning rates\nlearning_rate = float(sys.argv[5])\n\n# set initial loss to infinite\nbest_valid_loss = float('inf')\n\n# empty lists to store training and validation loss of each epoch\ntrain_losses=[]\nvalid_losses=[]\n\n#for each epoch\nfor epoch in range(epochs):\n\n #print('\\n Epoch {:} / {:}'.format(epoch + 1, epochs))\n # define the optimizer\n optimizer = AdamW(model.parameters(),\n lr = learning_rate, eps = 1e-8) # learning rate\n \n #train model\n train_loss, _ , xs_final= train()\n \n #evaluate model\n valid_loss, _ = evaluate()\n\n #save the best model\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n \n #print(f'\\nTraining Loss: {train_loss:.3f}')\n \n model_to_save = model.module if hasattr(model, 'module') else model\n torch.save(model_to_save.state_dict(), 'saved_weights_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_max.pt')\n\n \n # append training and validation loss\n 
train_losses.append(train_loss)\n valid_losses.append(valid_loss)\n \n print(f'\\nTraining Loss: {train_loss:.10f}')\n print(f'Validation Loss: {valid_loss:.10f}')\n\n# pass the pre-trained BERT to our define architecture\nmodel = BERT_Arch(bert)\n\n# push the model to GPU\nmodel = model.to(device)\n\n#load weights of best model\npath = 'saved_weights_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_max.pt'\nmodel.load_state_dict(torch.load(path))\n\n\n\n# get predictions for test data\n\nmses = []\n\nmethods = [\"bare\", \"svr\", \"kr\", \"lr\"]\n\nxs_test,preds = test()\n\npreds = np.asarray(preds)\n\ntest_y = test_y.cpu().data.numpy()\n\nmse = mean_squared_error(test_y, preds)\nmses.append(mse)\nprint(\"bert mse: \",mse)\nlr = LinearRegression()\nkr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)\nsvr = SVR(kernel='rbf', C=0.1, epsilon=0.0001) #linear')\n\nmodels_list = [svr, kr, lr]\n\nfor model in models_list:\n model.fit(xs_final, train_labels.to_numpy())\n preds = model.predict(xs_test)\n mse = mean_squared_error(test_labels.to_numpy(), preds)\n mses.append(mse)\n print(model, mse)\n\nmse = str(min(mses))+\"---\"+methods[mses.index(min(mses))]+str(epochs)+\"_\"+'{:.1e}'.format(learning_rate)\n\n\nspearmanr = (stats.spearmanr(preds, test_y))[0] \nkendallr = (stats.kendalltau(preds, test_y))[0] \n\nprint(\"finbert mse: \", mse)\n\nmse_file = open('mse_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_max.txt', \"w\")\nmse_file.write(mse + \"\\n\")\nmse_file.write(str(best_valid_loss)+\"\\n\")\nmse_file.write(str(spearmanr) + \"\\n\") \nmse_file.write(str(kendallr) + \"\\n\") \n#mse_file.close()\n'''\ntest_error = pd.DataFrame() \ntest_error['cik_year'] = test_cik.tolist() \ntest_error['test_y'] = test_y.tolist() \ntest_error['preds'] = [p[0] for p in preds.tolist()] \ntest_error['error'] = test_error['test_y'] - test_error['preds'] 
\ntest_error.to_csv('error_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_mean_hist.csv', index=False) \n'''\n\n#Linear Baseline\nlr = LinearRegression().fit(train_hist.cpu().data.numpy().reshape(-1, 1),\n train_y.cpu().data.numpy().reshape(-1, 1))\npreds = lr.predict(test_hist.cpu().data.numpy().reshape(-1, 1))\nlr_mse = mean_squared_error(test_y.reshape(-1, 1), preds)\n\nprint(\"LR mse\", lr_mse)\nmse_file.write(\"Linear mse: \" + str(lr_mse))\nmse_file.close()\n\n\nprint(\"Total execution time: \", time.time() - start)\n", "from scipy import stats\nfrom sklearn.svm import SVR\nfrom sklearn.linear_model import LinearRegression\nimport os\nimport random\nimport sys\nimport csv\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport transformers\nfrom transformers import AutoConfig, AutoModel, AutoTokenizer \nfrom transformers import AdamW\nfrom torch.cuda.amp import autocast\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport time\nimport tensorflow as tf\n\nfrom sklearn.svm import SVR\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.linear_model import LinearRegression\n\nstart = time.time()\n\ntorch.cuda.empty_cache()\n\nseed_val = 42\n\nrandom.seed(seed_val)\nnp.random.seed(seed_val)\ntorch.manual_seed(seed_val)\ntorch.cuda.manual_seed_all(seed_val)\ntf.random.set_seed(seed_val)\n\n\nclass BERT_Arch(nn.Module):\n\n def __init__(self, bert):\n\n super(BERT_Arch, self).__init__()\n\n self.bert = bert \n \n # dropout layer\n self.dropout = nn.Dropout(0.1)\n \n # relu activation function\n self.relu = nn.ReLU()\n\n self.leakyrelu = nn.LeakyReLU()\n\n self.elu = nn.ELU()\n\n self.tanh = nn.Tanh()\n\n self.zeros=0\n\n self.totals=0\n\n # dense layer 1\n self.fc1 = 
nn.Linear(768,600)\n \n # dense layer 2 (Output layer)\n self.fc2 = nn.Linear(600,1)\n\n self.fc3 = nn.Linear(1,1)\n\n #LSTM\n self.hidden_dim = 768 #300\n self.emb_dim = 768\n self.encoder = nn.LSTM(self.emb_dim, self.hidden_dim, num_layers=1, bidirectional=True, dropout=0.1)\n\n\n #Define Attention Network\n def attnetwork(self, encoder_out, final_hidden):\n hidden = final_hidden.squeeze(0)\n attn_weights = torch.bmm(encoder_out, hidden.unsqueeze(2)).squeeze(2)\n soft_attn_weights = F.softmax(attn_weights, 1)\n new_hidden = torch.bmm(encoder_out.transpose(1,2), soft_attn_weights.unsqueeze(2)).squeeze(2)\n return new_hidden, soft_attn_weights\n \n\n #define the forward pass\n def forward(self, sent_id, mask, hist):\n\n cls_vec = []\n chunk_max_weights = [] \n\n for i in range(len(sent_id)):\n\n if i < 5:\n\n #print(\"chunk i: \", i)\n\n ip_id = torch.tensor(sent_id[i]).unsqueeze(0).to(device)\n attn_mask = torch.tensor(mask[i]).unsqueeze(0).to(device)\n\n #pass the inputs to the model \n model_outputs = self.bert(input_ids=ip_id, attention_mask=attn_mask)\n cls_hs=model_outputs[1]\n atten=model_outputs[2]\n cls_vec.append(cls_hs)\n\n del ip_id\n del attn_mask\n \n '''\n col_sum = np.sort(atten[0][0][11].sum(0)[1:-1].detach().cpu().numpy()) \n col_sum = col_sum[::-1] \n max_col_sum = max(col_sum) \n top_word_mean = col_sum[:5].mean()\n chunk_max_weights.append(top_word_mean)\n '''\n\n #cls_vec_ = torch.mean(torch.stack(cls_vec, dim=0), dim=0)\n \n cls_vec = torch.stack(cls_vec, dim=0)\n cls_vec = cls_vec.to(torch.float32) #LSTM\n #print(\"cls_vec shape: \", cls_vec.shape, type(cls_vec), cls_vec.dtype)\n\n '''\n x = self.fc1(cls_vec_)\n x = self.relu(x)\n x = self.dropout(x)\n \n\n chunk_weights = (torch.tensor(chunk_max_weights)).unsqueeze(0)\n chunk_weights = chunk_weights.cuda() \n prod1 = torch.bmm(cls_vec.transpose(1,2), chunk_weights.transpose(0,1).unsqueeze(1)) \n prod1 = prod1.transpose(1,2) \n prod1 = prod1.to(torch.float32) \n '''\n\n emb_input = cls_vec\n 
inputx = self.dropout(emb_input)\n output, (hn, cn) = self.encoder(inputx) #emb_input)\n fbout = output[:, :, :self.hidden_dim]+ output[:, :, self.hidden_dim:] #sum bidir outputs F+B\n fbout = fbout.permute(1,0,2)\n fbhn = (hn[-2,:,:]+hn[-1,:,:]).unsqueeze(0)\n attn_out, attn_weights = self.attnetwork(fbout, fbhn)\n\n '''\n chunk_weights = (torch.tensor(chunk_max_weights)).unsqueeze(0)\n chunk_weights = chunk_weights.cuda() \n prod1 = torch.bmm(cls_vec.transpose(1,2), chunk_weights.transpose(0,1).unsqueeze(1)) \n '''\n\n prod = torch.bmm(cls_vec.transpose(1,2), attn_weights.transpose(0,1).unsqueeze(1)) \n prod_sum = torch.mean(prod, 0).transpose(0,1) \n\n x = prod_sum #attn_out\n \n x = self.fc1(x)\n x =self.leakyrelu(x)\n x = self.dropout(x) \n\n #hist = hist.unsqueeze(0)\n\n #hist = self.fc3(hist)\n #x = torch.cat((x, hist.unsqueeze(0)), dim=1)\n #x = self.dropout(x)\n\n # output layer\n y = self.fc2(x)\n y = self.leakyrelu(y)\n\n\n return x, y\n\n\n# function to train the model\ndef train(epoch):\n\n memory_file = open('memory_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(epoch)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm_hist.txt', 'a+')\n model.train()\n\n total_loss, total_accuracy = 0, 0\n \n # empty list to save model predictions\n total_preds = []\n\n total_hist = []\n\n xs = []\n\n\n # iterate over list of documents\n for i in range(len(train_seq)):\n\n memory_file.write(\"doc num: \"+str(i)+\" before train: \"+str(int(torch.cuda.memory_allocated()/1024/1024))+' mem alloced\\n')\n memory_file.write(\"doc num: \"+str(i)+\" before train: \"+str(int(torch.cuda.memory_reserved()/1024/1024))+' mem reserved\\n')\n\n sent_id = train_seq[i]\n mask = train_mask[i]\n hist = train_hist[i] \n labels = train_y[i].unsqueeze(0).unsqueeze(0)\n\n # clear previously calculated gradients \n model.zero_grad()\n\n memory_file.write(\"doc num: \"+str(i)+\" len(sent_id): \"+str(len(sent_id))+\" \\n\")\n\n with autocast():\n # get model predictions for the current 
batch\n x, preds = model(sent_id, mask, hist)\n\n # compute the loss between actual and predicted values\n #loss = huber_loss(preds, labels)\n loss = mse_loss(preds, labels)\n\n # model predictions are stored on GPU. So, push it to CPU\n preds = preds.detach().cpu().numpy()\n x = x.detach().cpu().numpy().ravel()\n\n # add on to the total loss\n total_loss = total_loss + loss.item()\n\n xs.append(x)\n\n # backward pass to calculate the gradients\n loss.backward()\n\n # clip the the gradients to 1.0. It helps in preventing the exploding gradient problem\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n\n # update parameters\n optimizer.step()\n\n # append the model predictions\n total_preds.append(preds)\n\n loss.detach().cpu()\n\n memory_file.write(\"doc num: \"+str(i)+\" after train: \"+str(int(torch.cuda.memory_allocated()/1024/1024))+' mem alloced\\n')\n memory_file.write(\"doc num: \"+str(i)+\" after train: \"+str(int(torch.cuda.memory_reserved()/1024/1024))+' mem reserved\\n')\n memory_file.flush()\n\n \n \n # compute the training loss of the epoch\n avg_loss = total_loss / len(train_seq)\n\n xs = np.array(xs)\n\n # predictions are in the form of (no. of batches, size of batch, no. of classes).\n # reshape the predictions in form of (number of samples, no. 
of classes)\n total_preds = np.concatenate(total_preds, axis=0)\n #total_hist = np.concatenate(total_hist, axis=0)\n memory_file.close()\n #returns the loss and predictions\n return avg_loss, total_preds , xs\n\n# function for evaluating the model\ndef evaluate():\n\n print(\"\\nEvaluating...\")\n \n # deactivate dropout layers\n model.eval()\n\n total_loss, total_accuracy = 0.0, 0.0\n \n # empty list to save the model predictions\n total_preds = []\n\n total_xs = []\n\n # iterate over list of documents\n for i in range(len(valid_seq)):\n\n sent_id = valid_seq[i]\n mask = valid_mask[i]\n hist = valid_hist[i]\n labels = valid_y[i].unsqueeze(0).unsqueeze(0)\n\n # deactivate autograd\n with torch.no_grad():\n \n with autocast():\n # model predictions\n x, preds = model(sent_id, mask, hist)\n \n # compute the validation loss between actual and predicted values\n loss = mse_loss(preds,labels)\n\n total_loss = total_loss + loss.item()\n\n preds = preds.detach().cpu().numpy()\n\n total_preds.append(preds)\n\n x = x.detach().cpu().numpy().ravel()\n\n total_xs.append(x)\n\n loss.detach().cpu()\n\n # compute the validation loss of the epoch\n avg_loss = total_loss / len(valid_seq) \n\n total_xs = np.array(total_xs)\n\n # reshape the predictions in form of (number of samples, no. of classes)\n total_preds = np.concatenate(total_preds, axis=0)\n\n return avg_loss, total_preds, total_xs\n\ndef test():\n\n # empty list to save the model predictions\n total_xs = []\n\n total_preds=[]\n \n\n for i in range(len(test_seq)):\n\n sent_id = test_seq[i]\n mask = test_mask[i]\n hist = test_hist[i]\n #labels = test_y[i].unsqueeze(0).unsqueeze(0)\n\n with torch.no_grad():\n with autocast():\n x, preds = model(sent_id, mask, hist)\n \n preds = preds.detach().cpu().numpy()\n\n total_preds.append(preds)\n\n x = x.detach().cpu().numpy().ravel()\n\n total_xs.append(x)\n\n \n \n # reshape the predictions in form of (number of samples, no. 
of classes)\n total_xs = np.array(total_xs)\n\n total_preds = np.concatenate(total_preds, axis=0)\n \n return total_xs, total_preds\n\ndef train_x():\n\n # empty list to save the model predictions\n total_xs = []\n\n total_preds=[]\n \n\n for i in range(len(train_seq)):\n\n sent_id = train_seq[i]\n mask = train_mask[i]\n hist = train_hist[i]\n #labels = test_y[i].unsqueeze(0).unsqueeze(0)\n\n with torch.no_grad():\n with autocast():\n x, preds = model(sent_id, mask, hist)\n \n preds = preds.detach().cpu().numpy()\n\n total_preds.append(preds)\n\n x = x.detach().cpu().numpy().ravel()\n\n total_xs.append(x)\n\n \n \n # reshape the predictions in form of (number of samples, no. of classes)\n total_xs = np.array(total_xs)\n\n total_preds = np.concatenate(total_preds, axis=0)\n \n return total_xs, total_preds\n\n# specify GPU\ndevice = torch.device(\"cuda\")\n\nmax_length = int(sys.argv[1]) #append two [CLS] and [SEP] tokens to make 512\nsec = sys.argv[2]\nbv = sys.argv[3]\n\nfname = \"sorted_\"+ sec + \".csv\"\n\n#end_year = int(sys.argv[1])\n#train_years_list = list(range(end_year-5, end_year))\n#print(\"train_years: \", train_years_list)\n\ndf = pd.read_csv(fname)\n#df = df[:10]\n\ntrain_text, rem_text, train_hist, rem_hist, train_labels, rem_labels = train_test_split(df['mda'],\n df['prev_'+bv], \n df[bv],\n shuffle=False,\n train_size=0.8) \n\nvalid_text, test_text, valid_hist, test_hist, valid_labels, test_labels = train_test_split(\n\trem_text,\n\trem_hist,\n\trem_labels,\n shuffle=False,\n\ttest_size=0.5\n)\n\n'''\nval_text, test_text, val_hist, test_hist, val_labels, test_labels = train_test_split(temp_text, \n temp_hist,\n temp_labels,\n shuffle=False,\n test_size=0.2) \n\nval_text = val_text.astype(str)\n'''\n\ntrain_text = train_text.astype(str) \nvalid_text = valid_text.astype(str)\ntest_text = test_text.astype(str)\n\n'''\ndf_train = pd.DataFrame()\ndf_test = pd.DataFrame()\n\nfor y in train_years_list:\n df_train = pd.concat([df_train, pd.read_csv(str(y) 
+ \"_tok.csv\")])\n'''\n\n#bert_path = \"/gpfs/u/home/HPDM/HPDMrawt/scratch/npl_env/sdm21-exps/long_document_fin/\"\n\nbert_path = \"/gpfs/u/home/DLTM/DLTMboxi/scratch/env/longformer-base-4096/\"\n\nconfig = AutoConfig.from_pretrained(bert_path, output_attentions=True) \n\n# import BERT-base pretrained model\nbert = AutoModel.from_pretrained(bert_path, config=config) #longformer-base-4096/') \n\n# Load the BERT tokenizer\ntokenizer = AutoTokenizer.from_pretrained(bert_path) #longformer-base-4096/') \n\n#TRAIN\n# tokenize and encode sequences in the training set\ntokens_train = tokenizer.batch_encode_plus(\n train_text.tolist(),\n add_special_tokens=False\n)\n\n#Extract input ids\ntrain_seq_ = tokens_train['input_ids']\n#Split each document into 510 tokens\ntrain_seq = [[train_seq_[j][i:i + max_length] for i in range(0, len(train_seq_[j]), max_length)] for j in range(len(train_seq_))]\n#print(train_seq[0][0])\n#Add [CLS], [SEP] and [PAD] tokens\ntrain_seq = [[[tokenizer.cls_token_id] + train_seq[j][i] + [tokenizer.sep_token_id] if len(train_seq[j][i]) == max_length else [tokenizer.cls_token_id] + train_seq[j][i] +[tokenizer.sep_token_id] + [tokenizer.pad_token_id] * (max_length-len(train_seq[j][i])) for i in range(len(train_seq[j]))] for j in range(len(train_seq))]\n#print(train_seq[0][0])\n#df_train_seq=pd.DataFrame()\n#df_train_seq[\"train_seq\"]=train_seq\n#df_train_seq.to_csv(sec+ \"-train_seq.csv\")\n\n#Extract attention masks\ntrain_mask_ = tokens_train['attention_mask']\n#Split each document into 510 tokens\ntrain_mask = [[train_mask_[j][i:i + max_length] for i in range(0, len(train_mask_[j]), max_length)] for j in range(len(train_mask_))]\n#Add [1] for attention and [0] for [PAD]\ntrain_mask = [[[1] + train_mask[j][i] + [1] if len(train_mask[j][i]) == max_length else [1]+train_mask[j][i]+[1] + [0] * (max_length-len(train_mask[j][i])) for i in range(len(train_mask[j]))] for j in range(len(train_mask))]\n\n#VALID\n# tokenize and encode sequences in the 
training set\ntokens_valid = tokenizer.batch_encode_plus(\n valid_text.tolist(),\n add_special_tokens=False\n)\n\n#Extract input ids\nvalid_seq_ = tokens_valid['input_ids']\n#Split each document into 510 tokens\nvalid_seq = [[valid_seq_[j][i:i + max_length] for i in range(0, len(valid_seq_[j]), max_length)] for j in range(len(valid_seq_))]\n#print(valid_seq[0][0])\n#Add [CLS], [SEP] and [PAD] tokens\nvalid_seq = [[[tokenizer.cls_token_id] + valid_seq[j][i] + [tokenizer.sep_token_id] if len(valid_seq[j][i]) == max_length else [tokenizer.cls_token_id] + valid_seq[j][i] +[tokenizer.sep_token_id] + [tokenizer.pad_token_id] * (max_length-len(valid_seq[j][i])) for i in range(len(valid_seq[j]))] for j in range(len(valid_seq))]\n#print(valid_seq[0][0])\n#df_valid_seq=pd.DataFrame()\n#df_valid_seq[\"valid_seq\"]=valid_seq\n#df_valid_seq.to_csv(sec+ \"-valid_seq.csv\")\n\n#Extract attention masks\nvalid_mask_ = tokens_valid['attention_mask']\n#Split each document into 510 tokens\nvalid_mask = [[valid_mask_[j][i:i + max_length] for i in range(0, len(valid_mask_[j]), max_length)] for j in range(len(valid_mask_))]\n#Add [1] for attention and [0] for [PAD]\nvalid_mask = [[[1] + valid_mask[j][i] + [1] if len(valid_mask[j][i]) == max_length else [1]+valid_mask[j][i]+[1] + [0] * (max_length-len(valid_mask[j][i])) for i in range(len(valid_mask[j]))] for j in range(len(valid_mask))]\n\n#TEST\n# tokenize and encode sequences in the test set\ntokens_test = tokenizer.batch_encode_plus(\n test_text.tolist(),\n add_special_tokens=False\n)\n\n#Extract input ids\ntest_seq_ = tokens_test['input_ids']\n#Split each document into 510 tokens\ntest_seq = [[test_seq_[j][i:i + max_length] for i in range(0, len(test_seq_[j]), max_length)] for j in range(len(test_seq_))]\n#Add [CLS], [SEP] and [PAD] tokens\ntest_seq = [[[tokenizer.cls_token_id] + test_seq[j][i] + [tokenizer.sep_token_id] if len(test_seq[j][i]) == max_length else [tokenizer.cls_token_id]+test_seq[j][i] + [tokenizer.sep_token_id]+ 
[tokenizer.pad_token_id] * (max_length-len(test_seq[j][i])) for i in range(len(test_seq[j]))] for j in range(len(test_seq))]\n\n\n#Extract attention masks\ntest_mask_ = tokens_test['attention_mask']\n#Split each document into 510 tokens\ntest_mask = [[test_mask_[j][i:i + max_length] for i in range(0, len(test_mask_[j]), max_length)] for j in range(len(test_mask_))]\n#Add [1] for attention and [0] for [PAD]\ntest_mask = [[[1] + test_mask[j][i] + [1] if len(test_mask[j][i]) == max_length else [1]+test_mask[j][i]+[1] + [0] * (max_length-len(test_mask[j][i])) for i in range(len(test_mask[j]))] for j in range(len(test_mask))]\n\n\ntrain_hist = torch.tensor(train_hist.tolist()).to(device)\ntrain_y = torch.tensor(train_labels.tolist()).to(device)\n\nvalid_hist = torch.tensor(valid_hist.tolist()).to(device)\nvalid_y = torch.tensor(valid_labels.tolist()).to(device)\n\ntest_hist = torch.tensor(test_hist.tolist()).to(device)\ntest_y = torch.tensor(test_labels.tolist()).to(device)\n\n#val_hist = torch.tensor(val_hist.tolist()).to(device)\n#val_y = torch.tensor(val_labels.tolist()).to(device)\n\n# freeze all the parameters\nfor name, param in bert.named_parameters():\n if \"encoder.layer.11\" in name or \"pooler\" in name:\n param.requires_grad = False #True\n\n# pass the pre-trained BERT to our define architecture\nmodel = BERT_Arch(bert)\n\n# push the model to GPU\nmodel = model.to(device)\n\n# define the loss function\nmse_loss = nn.MSELoss() \nhuber_loss = nn.L1Loss()\n\n# number of training epochs\ntotal_epochs = int(sys.argv[4])\nstart_epoch = int(sys.argv[5])\nend_epoch = int(sys.argv[6])\nepochs = end_epoch - start_epoch + 1\n#plus = int(sys.argv[5])\n\n# different learning rates\nlearning_rate = float(sys.argv[7])\n\n# set initial loss to previous best\nbest_valid_loss = float('inf')\nbest_epoch = 0\n# empty lists to store training and validation loss of each epoch\ntrain_losses=[]\nvalid_losses=[]\n\n#for each epoch\nfor epoch in range(epochs):\n\n #print('\\n Epoch 
{:} / {:}'.format(epoch + 1, epochs))\n torch.cuda.empty_cache()\n # define the optimizer\n optimizer = AdamW(model.parameters(),\n lr = learning_rate, eps = 1e-8) # learning rate\n \n \n #train model\n train_loss, _ , xs_final= train(start_epoch+epoch)\n \n #evaluate model\n valid_loss, _ , _ = evaluate()\n\n #save the best model\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n best_epoch = start_epoch + epoch\n #print(f'\\nTraining Loss: {train_loss:.3f}')\n #xs_train = xs_final\n model_to_save = model.module if hasattr(model, 'module') else model\n torch.save(model_to_save.state_dict(), 'saved_weights_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.pt')\n #torch.save(model_to_save.state_dict(), 'saved_weights_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_epoch'+str(start_epoch+epoch)+'_of_'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.pt')\n\n \n # append training and validation loss\n train_losses.append(train_loss)\n valid_losses.append(valid_loss)\n \n print(f'\\nTraining Loss: {train_loss:.10f}')\n print(f'Validation Loss: {valid_loss:.10f}')\n\nvalid_loss_file = open('best_valid_loss_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.txt', 'w')\nvalid_loss_file.write(str(best_valid_loss)+\"\\n\")\nvalid_loss_file.write(str(best_epoch))\nvalid_loss_file.close()\n'''\n# pass the pre-trained BERT to our define architecture\nmodel = BERT_Arch(bert)\n\n# push the model to GPU\nmodel = model.to(device)\n\n#load weights of best model\npath = 'saved_weights_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.pt'\nmodel.load_state_dict(torch.load(path))\n\nxs_train , _ = train_x()\n\n# get predictions for test data\nvalid_mses = []\ntest_mses = []\n\nmethods = [\"bare\", \"svr\", \"kr\", \"lr\"]\n\n_ , preds, xs_valid = evaluate()\npreds 
= np.asarray(preds)\nvalid_y = valid_y.cpu().data.numpy()\nvalid_mse = mean_squared_error(valid_y, preds)\nvalid_mses.append(valid_mse)\n\nxs_test, preds = test()\npreds = np.asarray(preds)\ntest_y = test_y.cpu().data.numpy()\ntest_mse = mean_squared_error(test_y, preds)\ntest_mses.append(test_mse)\n\nprint(\"bert mse: \",test_mse)\nlr = LinearRegression()\nkr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)\nsvr = SVR(kernel='rbf', C=0.1, epsilon=0.0001) #linear')\n\nmodels_list = [svr, kr, lr]\n\nfor m in models_list:\n m.fit(xs_train, train_labels.to_numpy())\n\n preds = m.predict(xs_valid)\n valid_mse = mean_squared_error(valid_labels.to_numpy(), preds)\n valid_mses.append(valid_mse)\n\n preds = m.predict(xs_test)\n test_mse = mean_squared_error(test_labels.to_numpy(), preds)\n test_mses.append(test_mse)\n print(m, test_mse,'---',valid_mse)\n\n\nmse = str(test_mses[valid_mses.index(min(valid_mses))])+\"---\"+methods[valid_mses.index(min(valid_mses))]+\"---\"+str(min(valid_mses))\n\n\nspearmanr = (stats.spearmanr(preds, test_y))[0] \nkendallr = (stats.kendalltau(preds, test_y))[0] \n\nprint(\"longformer mse: \", mse)\n\nmse_file = open('mse_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.txt', \"w\")\nmse_file.write(mse + \"\\n\")\nmse_file.write(str(best_valid_loss)+\"\\n\")\nmse_file.write(str(spearmanr) + \"\\n\") \nmse_file.write(str(kendallr) + \"\\n\") \n#mse_file.close()\n\ntest_error = pd.DataFrame() \ntest_error['cik_year'] = test_cik.tolist() \ntest_error['test_y'] = test_y.tolist() \ntest_error['preds'] = [p[0] for p in preds.tolist()] \ntest_error['error'] = test_error['test_y'] - test_error['preds'] \ntest_error.to_csv('error_longformer_'+str(max_length)+'_'+sec+'_'+bv+'_mean_hist.csv', index=False) \n\n\n#Linear Baseline\nlr = LinearRegression().fit(train_hist.cpu().data.numpy().reshape(-1, 1),\n train_y.cpu().data.numpy().reshape(-1, 1))\npreds = 
lr.predict(test_hist.cpu().data.numpy().reshape(-1, 1))\nlr_mse = mean_squared_error(test_y.reshape(-1, 1), preds)\n\nprint(\"LR mse\", lr_mse)\nmse_file.write(\"Linear mse: \" + str(lr_mse))\nmse_file.close()\n'''\n\nprint(\"Total execution time: \", time.time() - start)\n", "from scipy import stats\nfrom sklearn.svm import SVR\nfrom sklearn.linear_model import LinearRegression\nimport os\nimport random\nimport sys\nimport csv\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport transformers\nfrom transformers import AutoConfig, AutoModel, AutoTokenizer \nfrom transformers import AdamW\nfrom torch.cuda.amp import autocast\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport time\nimport tensorflow as tf\n\nfrom sklearn.svm import SVR\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.linear_model import LinearRegression\n\nstart = time.time()\n\ntorch.cuda.empty_cache()\n\nseed_val = 42\n\nrandom.seed(seed_val)\nnp.random.seed(seed_val)\ntorch.manual_seed(seed_val)\ntorch.cuda.manual_seed_all(seed_val)\ntf.random.set_seed(seed_val)\n\n\nclass BERT_Arch(nn.Module):\n\n def __init__(self, bert):\n\n super(BERT_Arch, self).__init__()\n\n self.bert = bert \n \n # dropout layer\n self.dropout = nn.Dropout(0.1)\n \n # relu activation function\n self.relu = nn.ReLU()\n\n self.leakyrelu = nn.LeakyReLU()\n\n self.elu = nn.ELU()\n\n self.tanh = nn.Tanh()\n\n self.zeros=0\n\n self.totals=0\n\n # dense layer 1\n self.fc1 = nn.Linear(768,600)\n \n # dense layer 2 (Output layer)\n self.fc2 = nn.Linear(600,1)\n\n self.fc3 = nn.Linear(1,1)\n\n #LSTM\n self.hidden_dim = 768 #300\n self.emb_dim = 768\n self.encoder = nn.LSTM(self.emb_dim, self.hidden_dim, num_layers=1, 
bidirectional=True, dropout=0.1)\n\n\n #Define Attention Network\n def attnetwork(self, encoder_out, final_hidden):\n hidden = final_hidden.squeeze(0)\n attn_weights = torch.bmm(encoder_out, hidden.unsqueeze(2)).squeeze(2)\n soft_attn_weights = F.softmax(attn_weights, 1)\n new_hidden = torch.bmm(encoder_out.transpose(1,2), soft_attn_weights.unsqueeze(2)).squeeze(2)\n return new_hidden, soft_attn_weights\n \n\n #define the forward pass\n def forward(self, sent_id, mask, hist):\n\n cls_vec = []\n chunk_max_weights = [] \n\n for i in range(len(sent_id)):\n\n if i < 40:\n\n #print(\"chunk i: \", i)\n\n ip_id = torch.tensor(sent_id[i]).unsqueeze(0).to(device)\n attn_mask = torch.tensor(mask[i]).unsqueeze(0).to(device)\n\n #pass the inputs to the model \n model_outputs = self.bert(input_ids=ip_id, attention_mask=attn_mask)\n cls_hs=model_outputs[1]\n atten=model_outputs[2]\n cls_vec.append(cls_hs)\n\n del ip_id\n del attn_mask\n \n '''\n col_sum = np.sort(atten[0][0][11].sum(0)[1:-1].detach().cpu().numpy()) \n col_sum = col_sum[::-1] \n max_col_sum = max(col_sum) \n top_word_mean = col_sum[:5].mean()\n chunk_max_weights.append(top_word_mean)\n '''\n\n #cls_vec_ = torch.mean(torch.stack(cls_vec, dim=0), dim=0)\n \n cls_vec = torch.stack(cls_vec, dim=0)\n cls_vec = cls_vec.to(torch.float32) #LSTM\n #print(\"cls_vec shape: \", cls_vec.shape, type(cls_vec), cls_vec.dtype)\n\n '''\n x = self.fc1(cls_vec_)\n x = self.relu(x)\n x = self.dropout(x)\n \n\n chunk_weights = (torch.tensor(chunk_max_weights)).unsqueeze(0)\n chunk_weights = chunk_weights.cuda() \n prod1 = torch.bmm(cls_vec.transpose(1,2), chunk_weights.transpose(0,1).unsqueeze(1)) \n prod1 = prod1.transpose(1,2) \n prod1 = prod1.to(torch.float32) \n '''\n\n emb_input = cls_vec\n inputx = self.dropout(emb_input)\n output, (hn, cn) = self.encoder(inputx) #emb_input)\n fbout = output[:, :, :self.hidden_dim]+ output[:, :, self.hidden_dim:] #sum bidir outputs F+B\n fbout = fbout.permute(1,0,2)\n fbhn = 
(hn[-2,:,:]+hn[-1,:,:]).unsqueeze(0)\n attn_out, attn_weights = self.attnetwork(fbout, fbhn)\n\n '''\n chunk_weights = (torch.tensor(chunk_max_weights)).unsqueeze(0)\n chunk_weights = chunk_weights.cuda() \n prod1 = torch.bmm(cls_vec.transpose(1,2), chunk_weights.transpose(0,1).unsqueeze(1)) \n '''\n\n prod = torch.bmm(cls_vec.transpose(1,2), attn_weights.transpose(0,1).unsqueeze(1)) \n prod_sum = torch.mean(prod, 0).transpose(0,1) \n\n x = prod_sum #attn_out\n \n x = self.fc1(x)\n x =self.leakyrelu(x)\n x = self.dropout(x) \n\n #hist = hist.unsqueeze(0)\n\n #hist = self.fc3(hist)\n #x = torch.cat((x, hist.unsqueeze(0)), dim=1)\n #x = self.dropout(x)\n\n # output layer\n y = self.fc2(x)\n y = self.leakyrelu(y)\n\n return x, y\n\n\n# function to train the model\ndef train(epoch):\n\n memory_file = open('memory_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(epoch)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.txt', 'a+')\n model.train()\n\n total_loss, total_accuracy = 0, 0\n \n # empty list to save model predictions\n total_preds = []\n\n total_hist = []\n\n xs = []\n\n\n # iterate over list of documents\n for i in range(len(train_seq)):\n\n memory_file.write(\"doc num: \"+str(i)+\" before train: \"+str(int(torch.cuda.memory_allocated()/1024/1024))+' mem alloced\\n')\n memory_file.write(\"doc num: \"+str(i)+\" before train: \"+str(int(torch.cuda.memory_reserved()/1024/1024))+' mem reserved\\n')\n\n sent_id = train_seq[i]\n mask = train_mask[i]\n hist = train_hist[i] \n labels = train_y[i].unsqueeze(0).unsqueeze(0)\n\n # clear previously calculated gradients \n model.zero_grad() \n\n with autocast():\n # get model predictions for the current batch\n x, preds = model(sent_id, mask, hist)\n\n # compute the loss between actual and predicted values\n #loss = huber_loss(preds, labels)\n loss = mse_loss(preds, labels)\n\n # model predictions are stored on GPU. 
So, push it to CPU\n preds = preds.detach().cpu().numpy()\n x = x.detach().cpu().numpy().ravel()\n\n # add on to the total loss\n total_loss = total_loss + loss.item()\n\n xs.append(x)\n\n # backward pass to calculate the gradients\n loss.backward()\n\n # clip the the gradients to 1.0. It helps in preventing the exploding gradient problem\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n\n # update parameters\n optimizer.step()\n\n # append the model predictions\n total_preds.append(preds)\n\n loss.detach().cpu()\n\n memory_file.write(\"doc num: \"+str(i)+\" after train: \"+str(int(torch.cuda.memory_allocated()/1024/1024))+' mem alloced\\n')\n memory_file.write(\"doc num: \"+str(i)+\" after train: \"+str(int(torch.cuda.memory_reserved()/1024/1024))+' mem reserved\\n')\n memory_file.flush()\n\n \n \n # compute the training loss of the epoch\n avg_loss = total_loss / len(train_seq)\n\n xs = np.array(xs)\n\n # predictions are in the form of (no. of batches, size of batch, no. of classes).\n # reshape the predictions in form of (number of samples, no. 
of classes)\n total_preds = np.concatenate(total_preds, axis=0)\n #total_hist = np.concatenate(total_hist, axis=0)\n memory_file.close()\n #returns the loss and predictions\n return avg_loss, total_preds , xs\n\n# function for evaluating the model\ndef evaluate():\n\n print(\"\\nEvaluating...\")\n \n # deactivate dropout layers\n model.eval()\n\n total_loss, total_accuracy = 0.0, 0.0\n \n # empty list to save the model predictions\n total_preds = []\n\n total_xs = []\n\n # iterate over list of documents\n for i in range(len(valid_seq)):\n\n sent_id = valid_seq[i]\n mask = valid_mask[i]\n hist = valid_hist[i]\n labels = valid_y[i].unsqueeze(0).unsqueeze(0)\n\n # deactivate autograd\n with torch.no_grad():\n \n with autocast():\n # model predictions\n x, preds = model(sent_id, mask, hist)\n \n # compute the validation loss between actual and predicted values\n loss = mse_loss(preds,labels)\n\n total_loss = total_loss + loss.item()\n\n preds = preds.detach().cpu().numpy()\n\n total_preds.append(preds)\n\n x = x.detach().cpu().numpy().ravel()\n\n total_xs.append(x)\n loss.detach().cpu()\n \n\n # compute the validation loss of the epoch\n avg_loss = total_loss / len(valid_seq) \n\n total_xs = np.array(total_xs)\n\n # reshape the predictions in form of (number of samples, no. of classes)\n total_preds = np.concatenate(total_preds, axis=0)\n\n return avg_loss, total_preds, total_xs\n\ndef test():\n\n # empty list to save the model predictions\n total_xs = []\n\n total_preds=[]\n \n\n for i in range(len(test_seq)):\n\n sent_id = test_seq[i]\n mask = test_mask[i]\n hist = test_hist[i]\n #labels = test_y[i].unsqueeze(0).unsqueeze(0)\n\n with torch.no_grad():\n with autocast():\n x, preds = model(sent_id, mask, hist)\n \n preds = preds.detach().cpu().numpy()\n\n total_preds.append(preds)\n\n x = x.detach().cpu().numpy().ravel()\n\n total_xs.append(x)\n\n \n \n # reshape the predictions in form of (number of samples, no. 
of classes)\n total_xs = np.array(total_xs)\n\n total_preds = np.concatenate(total_preds, axis=0)\n \n return total_xs, total_preds\n\ndef train_x():\n\n # empty list to save the model predictions\n total_xs = []\n\n total_preds=[]\n \n\n for i in range(len(train_seq)):\n\n sent_id = train_seq[i]\n mask = train_mask[i]\n hist = train_hist[i]\n #labels = test_y[i].unsqueeze(0).unsqueeze(0)\n\n with torch.no_grad():\n with autocast():\n x, preds = model(sent_id, mask, hist)\n \n preds = preds.detach().cpu().numpy()\n\n total_preds.append(preds)\n\n x = x.detach().cpu().numpy().ravel()\n\n total_xs.append(x)\n\n \n \n # reshape the predictions in form of (number of samples, no. of classes)\n total_xs = np.array(total_xs)\n\n total_preds = np.concatenate(total_preds, axis=0)\n \n return total_xs, total_preds\n\n\n# specify GPU\ndevice = torch.device(\"cuda\")\n\nmax_length = int(sys.argv[1]) #append two [CLS] and [SEP] tokens to make 512\nsec = sys.argv[2]\nbv = sys.argv[3]\n\ntrain_fname = \"train-results.csv\"\ntest_fname = \"test-results.csv\"\n\n#end_year = int(sys.argv[1])\n#train_years_list = list(range(end_year-5, end_year))\n#print(\"train_years: \", train_years_list)\n\ndf_train = pd.read_csv(train_fname)\ndf_test = pd.read_csv(test_fname)\n#df = df[:10]\n\ntrain_text, valid_text, train_hist, valid_hist, train_labels, valid_labels = train_test_split(df_train['mda'],\n df_train['prev_'+bv], \n df_train[bv],\n shuffle=False,\n train_size=0.8) \n\ntest_text = df_test['mda']\ntest_hist = df_test['prev_'+bv]\ntest_labels = df_test[bv]\n'''\nval_text, test_text, val_hist, test_hist, val_labels, test_labels = train_test_split(temp_text, \n temp_hist,\n temp_labels,\n shuffle=False,\n test_size=0.2) \n\nval_text = val_text.astype(str)\n'''\n\ntrain_text = train_text.astype(str) \nvalid_text = valid_text.astype(str)\ntest_text = test_text.astype(str)\n\n'''\ndf_train = pd.DataFrame()\ndf_test = pd.DataFrame()\n\nfor y in train_years_list:\n df_train = 
pd.concat([df_train, pd.read_csv(str(y) + \"_tok.csv\")])\n'''\n\n#bert_path = \"/gpfs/u/home/HPDM/HPDMrawt/scratch/npl_env/sdm21-exps/long_document_fin/\"\n\nbert_path = \"/gpfs/u/home/DLTM/DLTMboxi/scratch/env/hk-finbert/\"\n\nconfig = AutoConfig.from_pretrained(bert_path, output_attentions=True) \n\n# import BERT-base pretrained model\nbert = AutoModel.from_pretrained(bert_path, config=config) #longformer-base-4096/') \n\n# Load the BERT tokenizer\ntokenizer = AutoTokenizer.from_pretrained(bert_path) #longformer-base-4096/') \n\n#TRAIN\n# tokenize and encode sequences in the training set\ntokens_train = tokenizer.batch_encode_plus(\n train_text.tolist(),\n add_special_tokens=False\n)\n\n#Extract input ids\ntrain_seq_ = tokens_train['input_ids']\n#Split each document into 510 tokens\ntrain_seq = [[train_seq_[j][i:i + max_length] for i in range(0, len(train_seq_[j]), max_length)] for j in range(len(train_seq_))]\n#print(train_seq[0][0])\n#Add [CLS], [SEP] and [PAD] tokens\ntrain_seq = [[[tokenizer.cls_token_id] + train_seq[j][i] + [tokenizer.sep_token_id] if len(train_seq[j][i]) == max_length else [tokenizer.cls_token_id] + train_seq[j][i] +[tokenizer.sep_token_id] + [tokenizer.pad_token_id] * (max_length-len(train_seq[j][i])) for i in range(len(train_seq[j]))] for j in range(len(train_seq))]\n#print(train_seq[0][0])\n#df_train_seq=pd.DataFrame()\n#df_train_seq[\"train_seq\"]=train_seq\n#df_train_seq.to_csv(sec+ \"-train_seq.csv\")\n\n#Extract attention masks\ntrain_mask_ = tokens_train['attention_mask']\n#Split each document into 510 tokens\ntrain_mask = [[train_mask_[j][i:i + max_length] for i in range(0, len(train_mask_[j]), max_length)] for j in range(len(train_mask_))]\n#Add [1] for attention and [0] for [PAD]\ntrain_mask = [[[1] + train_mask[j][i] + [1] if len(train_mask[j][i]) == max_length else [1]+train_mask[j][i]+[1] + [0] * (max_length-len(train_mask[j][i])) for i in range(len(train_mask[j]))] for j in range(len(train_mask))]\n\n#VALID\n# tokenize and 
encode sequences in the training set\ntokens_valid = tokenizer.batch_encode_plus(\n valid_text.tolist(),\n add_special_tokens=False\n)\n\n#Extract input ids\nvalid_seq_ = tokens_valid['input_ids']\n#Split each document into 510 tokens\nvalid_seq = [[valid_seq_[j][i:i + max_length] for i in range(0, len(valid_seq_[j]), max_length)] for j in range(len(valid_seq_))]\n#print(valid_seq[0][0])\n#Add [CLS], [SEP] and [PAD] tokens\nvalid_seq = [[[tokenizer.cls_token_id] + valid_seq[j][i] + [tokenizer.sep_token_id] if len(valid_seq[j][i]) == max_length else [tokenizer.cls_token_id] + valid_seq[j][i] +[tokenizer.sep_token_id] + [tokenizer.pad_token_id] * (max_length-len(valid_seq[j][i])) for i in range(len(valid_seq[j]))] for j in range(len(valid_seq))]\n#print(valid_seq[0][0])\n#df_valid_seq=pd.DataFrame()\n#df_valid_seq[\"valid_seq\"]=valid_seq\n#df_valid_seq.to_csv(sec+ \"-valid_seq.csv\")\n\n#Extract attention masks\nvalid_mask_ = tokens_valid['attention_mask']\n#Split each document into 510 tokens\nvalid_mask = [[valid_mask_[j][i:i + max_length] for i in range(0, len(valid_mask_[j]), max_length)] for j in range(len(valid_mask_))]\n#Add [1] for attention and [0] for [PAD]\nvalid_mask = [[[1] + valid_mask[j][i] + [1] if len(valid_mask[j][i]) == max_length else [1]+valid_mask[j][i]+[1] + [0] * (max_length-len(valid_mask[j][i])) for i in range(len(valid_mask[j]))] for j in range(len(valid_mask))]\n\n#TEST\n# tokenize and encode sequences in the test set\ntokens_test = tokenizer.batch_encode_plus(\n test_text.tolist(),\n add_special_tokens=False\n)\n\n#Extract input ids\ntest_seq_ = tokens_test['input_ids']\n#Split each document into 510 tokens\ntest_seq = [[test_seq_[j][i:i + max_length] for i in range(0, len(test_seq_[j]), max_length)] for j in range(len(test_seq_))]\n#Add [CLS], [SEP] and [PAD] tokens\ntest_seq = [[[tokenizer.cls_token_id] + test_seq[j][i] + [tokenizer.sep_token_id] if len(test_seq[j][i]) == max_length else [tokenizer.cls_token_id]+test_seq[j][i] + 
[tokenizer.sep_token_id]+ [tokenizer.pad_token_id] * (max_length-len(test_seq[j][i])) for i in range(len(test_seq[j]))] for j in range(len(test_seq))]\n\n\n#Extract attention masks\ntest_mask_ = tokens_test['attention_mask']\n#Split each document into 510 tokens\ntest_mask = [[test_mask_[j][i:i + max_length] for i in range(0, len(test_mask_[j]), max_length)] for j in range(len(test_mask_))]\n#Add [1] for attention and [0] for [PAD]\ntest_mask = [[[1] + test_mask[j][i] + [1] if len(test_mask[j][i]) == max_length else [1]+test_mask[j][i]+[1] + [0] * (max_length-len(test_mask[j][i])) for i in range(len(test_mask[j]))] for j in range(len(test_mask))]\n\n\ntrain_hist = torch.tensor(train_hist.tolist()).to(device)\ntrain_y = torch.tensor(train_labels.tolist()).to(device)\n\nvalid_hist = torch.tensor(valid_hist.tolist()).to(device)\nvalid_y = torch.tensor(valid_labels.tolist()).to(device)\n\ntest_hist = torch.tensor(test_hist.tolist()).to(device)\ntest_y = torch.tensor(test_labels.tolist()).to(device)\n\n#val_hist = torch.tensor(val_hist.tolist()).to(device)\n#val_y = torch.tensor(val_labels.tolist()).to(device)\n\n# freeze all the parameters\nfor name, param in bert.named_parameters():\n if \"encoder.layer.11\" in name or \"pooler\" in name:\n param.requires_grad = False #True\n\n# pass the pre-trained BERT to our define architecture\nmodel = BERT_Arch(bert)\n\n# push the model to GPU\nmodel = model.to(device)\n\n# define the loss function\nmse_loss = nn.MSELoss() \nhuber_loss = nn.L1Loss()\n\n# number of training epochs\ntotal_epochs = int(sys.argv[4])\nstart_epoch = int(sys.argv[5])\nend_epoch = int(sys.argv[6])\nepochs = end_epoch - start_epoch + 1\n#plus = int(sys.argv[5])\n\n# different learning rates\nlearning_rate = float(sys.argv[7])\n\npath = 'saved_weights_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.pt'\nmodel.load_state_dict(torch.load(path))\n\n# set initial loss to previous best\nwith 
open('best_valid_loss_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.txt') as f:\n lines = f.readlines()\nbest_valid_loss = float(lines[0])\nbest_epoch = int(lines[1])\n\n# empty lists to store training and validation loss of each epoch\ntrain_losses=[]\nvalid_losses=[]\n\n#for each epoch\nfor epoch in range(epochs):\n\n #print('\\n Epoch {:} / {:}'.format(epoch + 1, epochs))\n torch.cuda.empty_cache()\n # define the optimizer\n optimizer = AdamW(model.parameters(),\n lr = learning_rate, eps = 1e-8) # learning rate\n \n \n #train model\n train_loss, _ , xs_final= train(start_epoch+epoch)\n \n #evaluate model\n valid_loss, _ , _ = evaluate()\n\n #save the best model\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n best_epoch = start_epoch + epoch\n #print(f'\\nTraining Loss: {train_loss:.3f}')\n #xs_train = xs_final\n model_to_save = model.module if hasattr(model, 'module') else model\n torch.save(model_to_save.state_dict(), 'saved_weights_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.pt')\n torch.save(model_to_save.state_dict(), 'saved_weights_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_epoch'+str(start_epoch+epoch)+'_of_'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.pt')\n\n \n # append training and validation loss\n train_losses.append(train_loss)\n valid_losses.append(valid_loss)\n \n print(f'\\nTraining Loss: {train_loss:.10f}')\n print(f'Validation Loss: {valid_loss:.10f}')\n\nvalid_loss_file = open('best_valid_loss_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.txt', 'w')\nvalid_loss_file.write(str(best_valid_loss)+\"\\n\")\nvalid_loss_file.write(str(best_epoch))\nvalid_loss_file.close()\n'''\n# pass the pre-trained BERT to our define architecture\nmodel = BERT_Arch(bert)\n\n# push the model to GPU\nmodel = 
model.to(device)\n\n#load weights of best model\npath = 'saved_weights_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(total_epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.pt'\nmodel.load_state_dict(torch.load(path))\n\nxs_train , _ = train_x()\n\n# get predictions for test data\nvalid_mses = []\ntest_mses = []\n\nmethods = [\"bare\", \"svr\", \"kr\", \"lr\"]\n\n_ , preds, xs_valid = evaluate()\npreds = np.asarray(preds)\nvalid_y = valid_y.cpu().data.numpy()\nvalid_mse = mean_squared_error(valid_y, preds)\nvalid_mses.append(valid_mse)\n\nxs_test, preds = test()\npreds = np.asarray(preds)\ntest_y = test_y.cpu().data.numpy()\ntest_mse = mean_squared_error(test_y, preds)\ntest_mses.append(test_mse)\n\nprint(\"bert mse: \",test_mse)\nlr = LinearRegression()\nkr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)\nsvr = SVR(kernel='rbf', C=0.1, epsilon=0.0001) #linear')\n\nmodels_list = [svr, kr, lr]\n\nfor m in models_list:\n m.fit(xs_train, train_labels.to_numpy())\n\n preds = m.predict(xs_valid)\n valid_mse = mean_squared_error(valid_labels.to_numpy(), preds)\n valid_mses.append(valid_mse)\n\n preds = m.predict(xs_test)\n test_mse = mean_squared_error(test_labels.to_numpy(), preds)\n test_mses.append(test_mse)\n print(m, test_mse,'---',valid_mse)\n\n\nmse = str(test_mses[valid_mses.index(min(valid_mses))])+\"---\"+methods[valid_mses.index(min(valid_mses))]+\"---\"+str(min(valid_mses))\n\n\nspearmanr = (stats.spearmanr(preds, test_y))[0] \nkendallr = (stats.kendalltau(preds, test_y))[0] \n\nprint(\"finbert mse: \", mse)\n\nmse_file = open('mse_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_ep'+str(epochs)+'_lr='+'{:.1e}'.format(learning_rate)+'_bilstm.txt', \"w\")\nmse_file.write(mse + \"\\n\")\nmse_file.write(str(best_valid_loss)+\"\\n\")\nmse_file.write(str(spearmanr) + \"\\n\") \nmse_file.write(str(kendallr) + \"\\n\") \n#mse_file.close()\n\ntest_error = pd.DataFrame() \ntest_error['cik_year'] = test_cik.tolist() \ntest_error['test_y'] = test_y.tolist() 
\ntest_error['preds'] = [p[0] for p in preds.tolist()] \ntest_error['error'] = test_error['test_y'] - test_error['preds'] \ntest_error.to_csv('error_finbert_'+str(max_length)+'_'+sec+'_'+bv+'_mean_hist.csv', index=False) \n\n\n#Linear Baseline\nlr = LinearRegression().fit(train_hist.cpu().data.numpy().reshape(-1, 1),\n train_y.cpu().data.numpy().reshape(-1, 1))\npreds = lr.predict(test_hist.cpu().data.numpy().reshape(-1, 1))\nlr_mse = mean_squared_error(test_y.reshape(-1, 1), preds)\n\nprint(\"LR mse\", lr_mse)\nmse_file.write(\"Linear mse: \" + str(lr_mse))\nmse_file.close()\n'''\n\nprint(\"Total execution time: \", time.time() - start)\n" ]
[ [ "torch.mean", "torch.nn.functional.softmax", "torch.max", "torch.nn.ELU", "torch.cuda.amp.autocast", "numpy.concatenate", "torch.tanh", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.device", "torch.nn.L1Loss", "tensorflow.random.set_seed", "torch.nn.Dropout", "pandas.read_csv", "torch.cuda.memory_reserved", "torch.tensor", "torch.cuda.empty_cache", "sklearn.model_selection.train_test_split", "torch.nn.Linear", "torch.nn.LeakyReLU", "torch.stack", "numpy.array", "numpy.random.seed", "torch.nn.LSTM", "torch.manual_seed", "torch.nn.Tanh", "torch.nn.ReLU", "torch.nn.MSELoss", "torch.cuda.memory_allocated" ], [ "torch.nn.functional.softmax", "torch.load", "numpy.asarray", "torch.nn.ELU", "sklearn.metrics.mean_squared_error", "numpy.concatenate", "torch.numel", "torch.cuda.amp.autocast", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.device", "scipy.stats.spearmanr", "tensorflow.random.set_seed", "torch.nn.Dropout", "pandas.read_csv", "torch.tensor", "torch.cuda.empty_cache", "sklearn.model_selection.train_test_split", "sklearn.kernel_ridge.KernelRidge", "sklearn.svm.SVR", "torch.nn.Linear", "torch.nn.LeakyReLU", "torch.stack", "numpy.array", "torch.count_nonzero", "numpy.random.seed", "torch.nn.LSTM", "torch.manual_seed", "torch.nn.Tanh", "sklearn.linear_model.LinearRegression", "scipy.stats.kendalltau", "torch.nn.ReLU", "torch.nn.MSELoss" ], [ "torch.mean", "torch.nn.functional.softmax", "torch.nn.ELU", "torch.cuda.amp.autocast", "numpy.concatenate", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.device", "torch.nn.L1Loss", "tensorflow.random.set_seed", "torch.nn.Dropout", "pandas.read_csv", "torch.cuda.memory_reserved", "torch.tensor", "torch.cuda.empty_cache", "sklearn.model_selection.train_test_split", "torch.nn.Linear", "torch.nn.LeakyReLU", "torch.stack", "numpy.array", "numpy.random.seed", "torch.nn.LSTM", "torch.manual_seed", "torch.nn.Tanh", "torch.nn.ReLU", "torch.nn.MSELoss", "torch.cuda.memory_allocated" ], [ "torch.mean", 
"torch.nn.functional.softmax", "torch.load", "torch.nn.ELU", "torch.cuda.amp.autocast", "numpy.concatenate", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.device", "torch.nn.L1Loss", "tensorflow.random.set_seed", "torch.nn.Dropout", "pandas.read_csv", "torch.cuda.memory_reserved", "torch.tensor", "torch.cuda.empty_cache", "sklearn.model_selection.train_test_split", "torch.nn.Linear", "torch.nn.LeakyReLU", "torch.stack", "numpy.array", "numpy.random.seed", "torch.nn.LSTM", "torch.manual_seed", "torch.nn.Tanh", "torch.nn.ReLU", "torch.nn.MSELoss", "torch.cuda.memory_allocated" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
AaratiAkkapeddi/nnabla-examples
[ "db9e5ad850303c158773aeb275e5c3821b4a3935", "db9e5ad850303c158773aeb275e5c3821b4a3935", "db9e5ad850303c158773aeb275e5c3821b4a3935", "db9e5ad850303c158773aeb275e5c3821b4a3935" ]
[ "reinforcement_learning/dqn/atari_utils.py", "image-translation/stargan/train.py", "video-superresolution/jsigan/inference.py", "image-generation/sagan/helpers.py" ]
[ "# Copyright 2019,2020,2021 Sony Corporation.\n# Copyright 2021 Sony Group Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gym\nimport numpy as np\n\n\nclass Squeeze(gym.ObservationWrapper):\n '''Assume wrap_deepmind with scale=True'''\n\n def __init__(self, env):\n from gym import spaces\n gym.ObservationWrapper.__init__(self, env)\n self.observation_space = spaces.Box(\n low=0, high=1.0,\n shape=(84, 84), dtype=np.float32)\n\n def observation(self, observation):\n return np.squeeze(observation)\n\n\ndef make_atari_deepmind(rom_name, valid=False):\n from external.atari_wrappers import make_atari, wrap_deepmind\n env = make_atari(rom_name)\n # framestack is handled by sampler.py\n env = wrap_deepmind(env, episode_life=not valid,\n frame_stack=False, scale=True)\n env = Squeeze(env)\n return env\n", "# Copyright 2019,2020,2021 Sony Corporation.\n# Copyright 2021 Sony Group Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\n\nimport os\nimport nnabla as nn\nimport nnabla.functions as F\nimport nnabla.parametric_functions as PF\nimport nnabla.solvers as S\nimport numpy as np\nimport functools\nimport random\nimport datetime\nimport json\nimport model\nimport loss\n\nfrom nnabla.ext_utils import get_extension_context\nfrom args import get_args\nfrom dataloader import stargan_load_func, get_data_dict\nfrom nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed\nfrom nnabla.utils.image_utils import imsave\nfrom nnabla.utils.data_iterator import data_iterator_simple\n\n\ndef saveimage(path, img):\n img = (img * 0.5) + 0.5 # Normalize.\n imsave(path, img, channel_first=True)\n\n\ndef save_results(i, args, img_src, img_trg, lbl_src, lbl_trg, img_rec=None, is_training=True):\n if is_training:\n filenamebase = \"Train_at_iter\"\n else:\n filenamebase = \"Test_at_iter\"\n chosen_idx = np.random.randint(0, args.batch_size)\n target_attr_flags = lbl_trg.d[chosen_idx].reshape(\n lbl_trg.d[chosen_idx].size)\n target_domain = \"_\".join([attr for idx, attr in zip(\n target_attr_flags, args.selected_attrs) if bool(idx) is True])\n source_attr_flags = lbl_src.d[chosen_idx].reshape(\n lbl_src.d[chosen_idx].size)\n source_domain = \"_\".join([attr for idx, attr in zip(\n source_attr_flags, args.selected_attrs) if bool(idx) is True])\n source_x = img_src.d[chosen_idx]\n result_x = img_trg.d[chosen_idx]\n saveimage(\"{}/{}_{}_result_{}.png\".format(args.monitor_path,\n filenamebase, i, target_domain), result_x)\n saveimage(\"{}/{}_{}_source_{}.png\".format(args.monitor_path,\n filenamebase, i, source_domain), source_x)\n\n if img_rec:\n recon_x = img_rec.d[chosen_idx]\n saveimage(\"{}/{}_{}_recon_{}.png\".format(args.monitor_path,\n filenamebase, i, source_domain), recon_x)\n return\n\n\ndef train(args):\n if args.c_dim != len(args.selected_attrs):\n print(\"c_dim must be the same as the num of selected attributes. 
Modified c_dim.\")\n args.c_dim = len(args.selected_attrs)\n\n # Dump the config information.\n config = dict()\n print(\"Used config:\")\n for k in args.__dir__():\n if not k.startswith(\"_\"):\n config[k] = getattr(args, k)\n print(\"'{}' : {}\".format(k, getattr(args, k)))\n\n # Prepare Generator and Discriminator based on user config.\n generator = functools.partial(\n model.generator, conv_dim=args.g_conv_dim, c_dim=args.c_dim, num_downsample=args.num_downsample, num_upsample=args.num_upsample, repeat_num=args.g_repeat_num)\n discriminator = functools.partial(model.discriminator, image_size=args.image_size,\n conv_dim=args.d_conv_dim, c_dim=args.c_dim, repeat_num=args.d_repeat_num)\n\n x_real = nn.Variable(\n [args.batch_size, 3, args.image_size, args.image_size])\n label_org = nn.Variable([args.batch_size, args.c_dim, 1, 1])\n label_trg = nn.Variable([args.batch_size, args.c_dim, 1, 1])\n\n with nn.parameter_scope(\"dis\"):\n dis_real_img, dis_real_cls = discriminator(x_real)\n\n with nn.parameter_scope(\"gen\"):\n x_fake = generator(x_real, label_trg)\n x_fake.persistent = True # to retain its value during computation.\n\n # get an unlinked_variable of x_fake\n x_fake_unlinked = x_fake.get_unlinked_variable()\n\n with nn.parameter_scope(\"dis\"):\n dis_fake_img, dis_fake_cls = discriminator(x_fake_unlinked)\n\n # ---------------- Define Loss for Discriminator -----------------\n d_loss_real = (-1) * loss.gan_loss(dis_real_img)\n d_loss_fake = loss.gan_loss(dis_fake_img)\n d_loss_cls = loss.classification_loss(dis_real_cls, label_org)\n d_loss_cls.persistent = True\n\n # Gradient Penalty.\n alpha = F.rand(shape=(args.batch_size, 1, 1, 1))\n x_hat = F.mul2(alpha, x_real) + \\\n F.mul2(F.r_sub_scalar(alpha, 1), x_fake_unlinked)\n\n with nn.parameter_scope(\"dis\"):\n dis_for_gp, _ = discriminator(x_hat)\n grads = nn.grad([dis_for_gp], [x_hat])\n\n l2norm = F.sum(grads[0] ** 2.0, axis=(1, 2, 3)) ** 0.5\n d_loss_gp = F.mean((l2norm - 1.0) ** 2.0)\n\n # total 
discriminator loss.\n d_loss = d_loss_real + d_loss_fake + args.lambda_cls * \\\n d_loss_cls + args.lambda_gp * d_loss_gp\n\n # ---------------- Define Loss for Generator -----------------\n g_loss_fake = (-1) * loss.gan_loss(dis_fake_img)\n g_loss_cls = loss.classification_loss(dis_fake_cls, label_trg)\n g_loss_cls.persistent = True\n\n # Reconstruct Images.\n with nn.parameter_scope(\"gen\"):\n x_recon = generator(x_fake_unlinked, label_org)\n x_recon.persistent = True\n\n g_loss_rec = loss.recon_loss(x_real, x_recon)\n g_loss_rec.persistent = True\n\n # total generator loss.\n g_loss = g_loss_fake + args.lambda_rec * \\\n g_loss_rec + args.lambda_cls * g_loss_cls\n\n # -------------------- Solver Setup ---------------------\n d_lr = args.d_lr # initial learning rate for Discriminator\n g_lr = args.g_lr # initial learning rate for Generator\n solver_dis = S.Adam(alpha=args.d_lr, beta1=args.beta1, beta2=args.beta2)\n solver_gen = S.Adam(alpha=args.g_lr, beta1=args.beta1, beta2=args.beta2)\n\n # register parameters to each solver.\n with nn.parameter_scope(\"dis\"):\n solver_dis.set_parameters(nn.get_parameters())\n\n with nn.parameter_scope(\"gen\"):\n solver_gen.set_parameters(nn.get_parameters())\n\n # -------------------- Create Monitors --------------------\n monitor = Monitor(args.monitor_path)\n monitor_d_cls_loss = MonitorSeries(\n 'real_classification_loss', monitor, args.log_step)\n monitor_g_cls_loss = MonitorSeries(\n 'fake_classification_loss', monitor, args.log_step)\n monitor_loss_dis = MonitorSeries(\n 'discriminator_loss', monitor, args.log_step)\n monitor_recon_loss = MonitorSeries(\n 'reconstruction_loss', monitor, args.log_step)\n monitor_loss_gen = MonitorSeries('generator_loss', monitor, args.log_step)\n monitor_time = MonitorTimeElapsed(\"Training_time\", monitor, args.log_step)\n\n # -------------------- Prepare / Split Dataset --------------------\n using_attr = args.selected_attrs\n dataset, attr2idx, idx2attr = 
get_data_dict(args.attr_path, using_attr)\n random.seed(313) # use fixed seed.\n random.shuffle(dataset) # shuffle dataset.\n test_dataset = dataset[-2000:] # extract 2000 images for test\n\n if args.num_data:\n # Use training data partially.\n training_dataset = dataset[:min(args.num_data, len(dataset) - 2000)]\n else:\n training_dataset = dataset[:-2000]\n print(\"Use {} images for training.\".format(len(training_dataset)))\n\n # create data iterators.\n load_func = functools.partial(stargan_load_func, dataset=training_dataset,\n image_dir=args.celeba_image_dir, image_size=args.image_size, crop_size=args.celeba_crop_size)\n data_iterator = data_iterator_simple(load_func, len(\n training_dataset), args.batch_size, with_file_cache=False, with_memory_cache=False)\n\n load_func_test = functools.partial(stargan_load_func, dataset=test_dataset,\n image_dir=args.celeba_image_dir, image_size=args.image_size, crop_size=args.celeba_crop_size)\n test_data_iterator = data_iterator_simple(load_func_test, len(\n test_dataset), args.batch_size, with_file_cache=False, with_memory_cache=False)\n\n # Keep fixed test images for intermediate translation visualization.\n test_real_ndarray, test_label_ndarray = test_data_iterator.next()\n test_label_ndarray = test_label_ndarray.reshape(\n test_label_ndarray.shape + (1, 1))\n\n # -------------------- Training Loop --------------------\n one_epoch = data_iterator.size // args.batch_size\n num_max_iter = args.max_epoch * one_epoch\n\n for i in range(num_max_iter):\n # Get real images and labels.\n real_ndarray, label_ndarray = data_iterator.next()\n label_ndarray = label_ndarray.reshape(label_ndarray.shape + (1, 1))\n label_ndarray = label_ndarray.astype(float)\n x_real.d, label_org.d = real_ndarray, label_ndarray\n\n # Generate target domain labels randomly.\n rand_idx = np.random.permutation(label_org.shape[0])\n label_trg.d = label_ndarray[rand_idx]\n\n # ---------------- Train Discriminator -----------------\n # generate fake 
image.\n x_fake.forward(clear_no_need_grad=True)\n d_loss.forward(clear_no_need_grad=True)\n solver_dis.zero_grad()\n d_loss.backward(clear_buffer=True)\n solver_dis.update()\n\n monitor_loss_dis.add(i, d_loss.d.item())\n monitor_d_cls_loss.add(i, d_loss_cls.d.item())\n monitor_time.add(i)\n\n # -------------- Train Generator --------------\n if (i + 1) % args.n_critic == 0:\n g_loss.forward(clear_no_need_grad=True)\n solver_dis.zero_grad()\n solver_gen.zero_grad()\n x_fake_unlinked.grad.zero()\n g_loss.backward(clear_buffer=True)\n x_fake.backward(grad=None)\n solver_gen.update()\n monitor_loss_gen.add(i, g_loss.d.item())\n monitor_g_cls_loss.add(i, g_loss_cls.d.item())\n monitor_recon_loss.add(i, g_loss_rec.d.item())\n monitor_time.add(i)\n\n if (i + 1) % args.sample_step == 0:\n # save image.\n save_results(i, args, x_real, x_fake,\n label_org, label_trg, x_recon)\n if args.test_during_training:\n # translate images from test dataset.\n x_real.d, label_org.d = test_real_ndarray, test_label_ndarray\n label_trg.d = test_label_ndarray[rand_idx]\n x_fake.forward(clear_no_need_grad=True)\n save_results(i, args, x_real, x_fake, label_org,\n label_trg, None, is_training=False)\n\n # Learning rates get decayed\n if (i + 1) > int(0.5 * num_max_iter) and (i + 1) % args.lr_update_step == 0:\n g_lr = max(0, g_lr - (args.lr_update_step *\n args.g_lr / float(0.5 * num_max_iter)))\n d_lr = max(0, d_lr - (args.lr_update_step *\n args.d_lr / float(0.5 * num_max_iter)))\n solver_gen.set_learning_rate(g_lr)\n solver_dis.set_learning_rate(d_lr)\n print('learning rates decayed, g_lr: {}, d_lr: {}.'.format(g_lr, d_lr))\n\n # Save parameters and training config.\n param_name = 'trained_params_{}.h5'.format(\n datetime.datetime.today().strftime(\"%m%d%H%M\"))\n param_path = os.path.join(args.model_save_path, param_name)\n nn.save_parameters(param_path)\n config[\"pretrained_params\"] = param_name\n\n with open(os.path.join(args.model_save_path, 
\"training_conf_{}.json\".format(datetime.datetime.today().strftime(\"%m%d%H%M\"))), \"w\") as f:\n json.dump(config, f)\n\n # -------------------- Translation on test dataset --------------------\n for i in range(args.num_test):\n real_ndarray, label_ndarray = test_data_iterator.next()\n label_ndarray = label_ndarray.reshape(label_ndarray.shape + (1, 1))\n label_ndarray = label_ndarray.astype(float)\n x_real.d, label_org.d = real_ndarray, label_ndarray\n\n rand_idx = np.random.permutation(label_org.shape[0])\n label_trg.d = label_ndarray[rand_idx]\n\n x_fake.forward(clear_no_need_grad=True)\n save_results(i, args, x_real, x_fake, label_org,\n label_trg, None, is_training=False)\n\n\ndef main():\n args = get_args()\n ctx = get_extension_context(\n args.context, device_id=args.device_id, type_config=args.type_config)\n nn.set_default_context(ctx)\n train(args)\n\n\nif __name__ == '__main__':\n main()\n", "# Copyright 2021 Sony Corporation.\n# Copyright 2021 Sony Group Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport os\nimport nnabla as nn\nimport nnabla.functions as F\nfrom nnabla.ext_utils import get_extension_context\nimport numpy as np\nfrom utils import get_hw_boundary, trim_patch_boundary, compute_psnr, save_results_yuv\nfrom args import get_config\nfrom ops import model\nfrom data_loader import read_mat_file\n\n\ndef inference():\n \"\"\"\n Inference function to generate high resolution hdr images\n \"\"\"\n conf = 
get_config()\n ctx = get_extension_context(\n conf.nnabla_context.context, device_id=conf.nnabla_context.device_id)\n nn.set_default_context(ctx)\n\n data, target = read_mat_file(conf.data.lr_sdr_test, conf.data.hr_hdr_test, conf.data.d_name_test,\n conf.data.l_name_test, train=False)\n\n if not os.path.exists(conf.test_img_dir):\n os.makedirs(conf.test_img_dir)\n\n data_sz = data.shape\n target_sz = target.shape\n PATCH_BOUNDARY = 10 # set patch boundary to reduce edge effect around patch edges\n test_loss_PSNR_list_for_epoch = []\n inf_time = []\n start_time = time.time()\n\n test_pred_full = np.zeros((target_sz[1], target_sz[2], target_sz[3]))\n\n print(\"Loading pre trained model.........\", conf.pre_trained_model)\n nn.load_parameters(conf.pre_trained_model)\n\n for index in range(data_sz[0]):\n ###======== Divide Into Patches ========###\n for p in range(conf.test_patch ** 2):\n pH = p // conf.test_patch\n pW = p % conf.test_patch\n sH = data_sz[1] // conf.test_patch\n sW = data_sz[2] // conf.test_patch\n H_low_ind, H_high_ind, W_low_ind, W_high_ind = \\\n get_hw_boundary(\n PATCH_BOUNDARY, data_sz[1], data_sz[2], pH, sH, pW, sW)\n data_test_p = nn.Variable.from_numpy_array(\n data.d[index, H_low_ind: H_high_ind, W_low_ind: W_high_ind, :])\n data_test_sz = data_test_p.shape\n data_test_p = F.reshape(\n data_test_p, (1, data_test_sz[0], data_test_sz[1], data_test_sz[2]))\n st = time.time()\n net = model(data_test_p, conf.scaling_factor)\n net.pred.forward()\n test_pred_temp = net.pred.d\n inf_time.append(time.time() - st)\n test_pred_t = trim_patch_boundary(test_pred_temp, PATCH_BOUNDARY, data_sz[1], data_sz[2],\n pH, sH, pW, sW, conf.scaling_factor)\n #pred_sz = test_pred_t.shape\n test_pred_t = np.squeeze(test_pred_t)\n test_pred_full[pH * sH * conf.scaling_factor: (pH + 1) * sH * conf.scaling_factor,\n pW * sW * conf.scaling_factor: (pW + 1) * sW * conf.scaling_factor, :] = test_pred_t\n\n ###======== Compute PSNR & Print Results========###\n test_GT = 
np.squeeze(target.d[index, :, :, :])\n test_PSNR = compute_psnr(test_pred_full, test_GT, 1.)\n test_loss_PSNR_list_for_epoch.append(test_PSNR)\n print(\" <Test> [%4d/%4d]-th images, time: %4.4f(minutes), test_PSNR: %.8f[dB] \"\n % (int(index), int(data_sz[0]), (time.time() - start_time) / 60, test_PSNR))\n if conf.save_images:\n # comment for faster testing\n save_results_yuv(test_pred_full, index, conf.test_img_dir)\n test_PSNR_per_epoch = np.mean(test_loss_PSNR_list_for_epoch)\n\n print(\"######### Average Test PSNR: %.8f[dB] #########\" % (\n test_PSNR_per_epoch))\n print(\"######### Estimated Inference Time (per 4K frame): %.8f[s] #########\" %\n (np.mean(inf_time) * conf.test_patch * conf.test_patch))\n\n\nif __name__ == '__main__':\n inference()\n", "# Copyright 2018,2019,2020,2021 Sony Corporation.\n# Copyright 2021 Sony Group Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport argparse\nfrom nnabla.contrib.context import extension_context\nfrom nnabla.monitor import Monitor, MonitorImage, MonitorImageTile, MonitorSeries, tile_images\nfrom nnabla.utils.data_iterator import data_iterator\nimport os\n\nimport nnabla as nn\nimport nnabla.functions as F\nimport nnabla.logger as logger\nimport nnabla.parametric_functions as PF\nimport nnabla.solvers as S\nimport numpy as np\nfrom scipy import linalg\n\n\ndef generate_random_class(n_classes, batch_size):\n return np.random.choice(np.arange(n_classes),\n batch_size,\n replace=False)\n\n\ndef 
generate_one_class(class_id, batch_size):\n return np.repeat(class_id, batch_size)\n\n\ndef get_input_and_output(nnp, batch_size, name=\"\"):\n network_name = nnp.get_network_names()[0]\n net = nnp.get_network(network_name, batch_size=batch_size)\n x = list(net.inputs.values())[0]\n y = list(net.outputs.values())[0]\n if name != \"\":\n h = net.variables[name]\n return x, h\n return x, y\n\n\ndef denormalize(x):\n x = (x + 1.0) / 2.0 * 255.0\n return x\n\n\ndef normalize_method(x):\n x = ((x + 1.0) / 2.0 * 255.0).astype(np.uint8)\n return x\n\n\ndef nnp_preprocess(x, a=0.01735, b=-1.99):\n x = a * x + b\n return x\n\n\ndef resize_images(images, oshape=(320, 320)):\n import cv2\n images = images.transpose((0, 2, 3, 1))\n images_ = []\n for img in images:\n # others than bilinear get pretty better\n img = cv2.resize(img, oshape, interpolation=cv2.INTER_CUBIC)\n images_.append(img)\n images_ = np.asarray(images_).transpose(0, 3, 1, 2)\n return images_\n\n\ndef preprocess(x_d, oshape, use_nnp_preprocess):\n x_d = denormalize(x_d)\n x_d = nnp_preprocess(x_d) if use_nnp_preprocess else x_d\n x_d = resize_images(x_d, oshape=oshape)\n return x_d\n\n\ndef resample(batch_size, latent, threshold=np.inf, count=100):\n i = 0\n z_fixded = np.random.randn(batch_size * latent)\n idx_fixed = set()\n while i < count:\n z_data = np.random.randn(batch_size * latent)\n idx_candidate = np.where(np.abs(z_data) < threshold)[0]\n idx_update = np.asarray(list(set(idx_candidate.tolist()) - idx_fixed))\n if len(idx_update) == 0:\n i += 1\n continue\n z_fixded[idx_update] = z_data[idx_update]\n idx_fixed = idx_fixed | set(idx_update.tolist())\n i += 1\n if len(idx_fixed) == batch_size * latent:\n break\n return z_fixded.reshape((batch_size, latent))\n" ]
[ [ "numpy.squeeze" ], [ "numpy.random.permutation", "numpy.random.randint" ], [ "numpy.squeeze", "numpy.zeros", "numpy.mean" ], [ "numpy.abs", "numpy.asarray", "numpy.arange", "numpy.random.randn", "numpy.repeat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ahheo/climi
[ "d51d8faedb9bf1b6554733af469d15e1cffdc4e2" ]
[ "climi/pppp/plt_hwmi_map_evolution_obsVSmodel.py" ]
[ "import numpy as np\nimport matplotlib as mpl\nmpl.use('Agg', warn=False, force=True)\nimport matplotlib.pyplot as plt\nimport iris\nimport iris.plot as iplt\nimport os\nimport warnings\nimport logging\nfrom ffff import rPeriod_, schF_keys_\nfrom cccc import extract_period_cube, guessBnds_cube, load_res_, en_mm_, \\\n en_mean_, en_iqr_\nfrom pppp import getAggrArg_, pch_swe_, aligned_cb_, aligned_tx_\n\n\ndef main():\n import argparse\n import yaml\n parser = argparse.ArgumentParser('map plot (hwmi)')\n parser.add_argument(\"controlfile\",\n help=\"yaml file with metadata\")\n args = parser.parse_args()\n with open(args.controlfile, 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n warnings.filterwarnings(\"ignore\",\n message=\"Collapsing a non-contiguous coordinate.\")\n\n #map options\n cmap = mpl.colors.ListedColormap(cfg['cm'])\n cmap.set_over(cfg['cmo'])\n colorb = np.array(cfg['cbd_' + cfg['v'][0]])\n norm = mpl.colors.BoundaryNorm(colorb, cmap.N)\n\n #periods\n p0s = np.arange(1951, 1977, 5)\n\n #rg_dict\n rg_dict = {'lon': cfg['sub_r']['lon'][cfg['rn']],\n 'lat': cfg['sub_r']['lat'][cfg['rn']]}\n\n #pch_dict\n pch_dict = {'cmap': cmap,\n 'norm': norm}\n\n #directory options\n odir = cfg['root'] + cfg['experiment'] + '/' + cfg['fig']\n os.makedirs(odir, exist_ok=True)\n idir = cfg['root'] + cfg['experiment'] + '/' + cfg['res']\n\n ##############################hist\n\n for i in cfg['aggr_' + cfg['v'][0]]:\n ti = 'evl_CMIP5vsCORDEXvsEOBS-' + i\n fig = plt.figure(figsize = (16, 10))\n fig.subplots_adjust(hspace=0.1, wspace=0.075,\n top=0.95, bottom=0.05,\n left=0.05, right=0.95)\n fnf = odir + cfg['v'] + '_' + ti + cfg['fn_map']\n\n #data analysis options\n arg0, karg0 = getAggrArg_(i)\n\n ##obs\n ddir = idir + 'obs/'\n dn = cfg['dn_obs'][0]\n cube = load_res_(ddir, cfg['v'], dn, cfg['rn'], cfg['sub_r'])\n guessBnds_cube(cube)\n ax = []\n pch = []\n for i, p in enumerate(p0s):\n c1 = extract_period_cube(cube, p, p + 29)\n c0 = c1.collapsed('time', *arg0, 
**karg0)\n ax0, pch0 = pch_swe_(fig, 3, 7, i + 1, c0, rg_dict, pch_dict,\n ti=rPeriod_([p, p+29], True))\n ax.append(ax0)\n pch.append(pch0)\n c0 = extract_period_cube(cube, 2018, 2018)\n pch_swe_(fig, 3, 7, 7, c0, rg_dict, pch_dict, ti='2018')\n\n ##cmip5\n ddir = idir + 'cmip5/hist/'\n tmp = en_mm_(ddir, cfg['dn_cmp'], cfg, ref='NorESM1-M')\n guessBnds_cube(tmp)\n for i, p in enumerate(p0s):\n c1 = extract_period_cube(tmp, p, p + 29)\n ec = c1.collapsed('time', *arg0, **karg0)\n c0 = en_mean_(ec)\n ax0, pch0 = pch_swe_(fig, 3, 7, i + 8, c0, rg_dict, pch_dict)\n ax.append(ax0)\n pch.append(pch0)\n\n ##cordex\n ddir = idir + 'cordex/hist/'\n tmp = en_mm_(ddir, cfg['dn_cdx'], cfg)\n guessBnds_cube(tmp)\n for i, p in enumerate(p0s):\n c1 = extract_period_cube(tmp, p, p + 29)\n ec = c1.collapsed('time', *arg0, **karg0)\n c0 = en_mean_(ec)\n ax0, pch0 = pch_swe_(fig, 3, 7, i + 15, c0, rg_dict, pch_dict)\n ax.append(ax0)\n pch.append(pch0)\n\n ##cb\n cb = aligned_cb_(fig, ax[6:], pch[0], [.05, .025],\n orientation='vertical', ticks=colorb, shrink=0.75,\n extend='max')\n cb.set_label(cfg['cbti_' + cfg['v'][0]])\n\n ##ylabel_subplot_array\n aligned_tx_(fig, ax[0], 'EOBS', 'lc')\n aligned_tx_(fig, ax[6], 'CMIP5 (ensemble mean)', 'lc')\n aligned_tx_(fig, ax[12], 'CORDEX (ensemble mean)', 'lc')\n\n #fig.tight_layout()\n plt.savefig(fnf, **cfg['sv_opts'])\n plt.close()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.colors.BoundaryNorm", "numpy.arange", "matplotlib.use", "matplotlib.pyplot.savefig", "matplotlib.colors.ListedColormap", "matplotlib.pyplot.close", "numpy.array", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iqbal-lab-org/paper_pandora2020_analyses
[ "952e348107c3fec60482bb30a91620ee2ce32cb5" ]
[ "scripts/csv_to_paper_plots/precision_recall/preprocess_20_way_nanopore_ROC.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\n\n\n# In[2]:\n\nimport sys\ndf = pd.read_csv(sys.argv[1], sep=\"\\t\")\ndf\n\n\n# In[3]:\n\n\n# add some custom columns\ndf[\"tool_long_name\"] = df[\"tool\"]\n\ndef get_tool_category(tool):\n if tool == \"pandora_nanopore_nodenovo\":\n return \"pandora no denovo\"\n elif tool == \"pandora_nanopore_withdenovo\":\n return \"pandora with denovo\"\n else:\n return tool.split(\"_\")[0]\n\ndf[\"tool\"] = df[\"tool_long_name\"].apply(get_tool_category)\ndf\n\n\n# In[4]:\n\n\n# save csv\ndf.to_csv(sys.argv[2])\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
amarczew/stacked-denoising-autoencoder
[ "64e4f62bffc9eb805f8569df0f92d14a7473a9bf" ]
[ "SdA/logistic_sgd.py" ]
[ "\"\"\"\nThis tutorial introduces logistic regression using Theano and stochastic\ngradient descent.\n\nLogistic regression is a probabilistic, linear classifier. It is parametrized\nby a weight matrix :math:`W` and a bias vector :math:`b`. Classification is\ndone by projecting data points onto a set of hyperplanes, the distance to\nwhich is used to determine a class membership probability.\n\nMathematically, this can be written as:\n\n.. math::\n P(Y=i|x, W,b) &= softmax_i(W x + b) \\\\\n &= \\frac {e^{W_i x + b_i}} {\\sum_j e^{W_j x + b_j}}\n\n\nThe output of the model or prediction is then done by taking the argmax of\nthe vector whose i'th element is P(Y=i|x).\n\n.. math::\n\n y_{pred} = argmax_i P(Y=i|x,W,b)\n\n\nThis tutorial presents a stochastic gradient descent optimization method\nsuitable for large datasets.\n\n\nReferences:\n\n - textbooks: \"Pattern Recognition and Machine Learning\" -\n Christopher M. Bishop, section 4.3.2\n\n\"\"\"\n\nfrom __future__ import print_function\n\n__docformat__ = 'restructedtext en'\n\nimport six.moves.cPickle as pickle\nimport gzip\nimport os\nimport sys\nimport timeit\n\nimport numpy\n\nimport theano\nimport theano.tensor as T\n\n\nclass LogisticRegression(object):\n \"\"\"Multi-class Logistic Regression Class\n\n The logistic regression is fully described by a weight matrix :math:`W`\n and bias vector :math:`b`. 
Classification is done by projecting data\n points onto a set of hyperplanes, the distance to which is used to\n determine a class membership probability.\n \"\"\"\n\n def __init__(self, input, n_in, n_out):\n \"\"\" Initialize the parameters of the logistic regression\n\n :type input: theano.tensor.TensorType\n :param input: symbolic variable that describes the input of the\n architecture (one minibatch)\n\n :type n_in: int\n :param n_in: number of input units, the dimension of the space in\n which the datapoints lie\n\n :type n_out: int\n :param n_out: number of output units, the dimension of the space in\n which the labels lie\n\n \"\"\"\n # start-snippet-1\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\n self.W = theano.shared(\n value=numpy.zeros(\n (n_in, n_out),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n # initialize the biases b as a vector of n_out 0s\n self.b = theano.shared(\n value=numpy.zeros(\n (n_out,),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # symbolic expression for computing the matrix of class-membership\n # probabilities\n # Where:\n # W is a matrix where column-k represent the separation hyperplane for\n # class-k\n # x is a matrix where row-j represents input training sample-j\n # b is a vector where element-k represent the free parameter of\n # hyperplane-k\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)\n\n # symbolic description of how to compute prediction as class whose\n # probability is maximal\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\n # end-snippet-1\n\n # parameters of the model\n self.params = [self.W, self.b]\n\n # keep track of model input\n self.input = input\n\n def negative_log_likelihood(self, y):\n \"\"\"Return the mean of the negative log-likelihood of the prediction\n of this model under a given target distribution.\n\n .. 
math::\n\n \\frac{1}{|\\mathcal{D}|} \\mathcal{L} (\\theta=\\{W,b\\}, \\mathcal{D}) =\n \\frac{1}{|\\mathcal{D}|} \\sum_{i=0}^{|\\mathcal{D}|}\n \\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\\\\n \\ell (\\theta=\\{W,b\\}, \\mathcal{D})\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n\n Note: we use the mean instead of the sum so that\n the learning rate is less dependent on the batch size\n \"\"\"\n # start-snippet-2\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\n # number of examples (call it n) in the minibatch\n # T.arange(y.shape[0]) is a symbolic vector which will contain\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\n # Log-Probabilities (call it LP) with one row per example and\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\n # LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is\n # the mean (across minibatch examples) of the elements in v,\n # i.e., the mean log-likelihood across the minibatch.\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\n # end-snippet-2\n\n def errors(self, y):\n \"\"\"Return a float representing the number of errors in the minibatch\n over the total number of examples of the minibatch ; zero one\n loss over the size of the minibatch\n\n :type y: theano.tensor.TensorType\n :param y: corresponds to a vector that gives for each example the\n correct label\n \"\"\"\n\n # check if y has same dimension of y_pred\n if y.ndim != self.y_pred.ndim:\n raise TypeError(\n 'y should have the same shape as self.y_pred',\n ('y', y.type, 'y_pred', self.y_pred.type)\n )\n # check if y is of the correctdatatype\n if y.dtype.startswith('int'):\n # the T.neq operator returns a vector of 0s and 1s, where 1\n # represents a mistake in prediction\n return T.mean(T.neq(self.y_pred, y))\n else:\n raise NotImplementedError()\n\n\ndef load_data(dataset):\n ''' Loads 
the dataset\n\n :type dataset: string\n :param dataset: the path to the dataset (here MNIST)\n '''\n\n #############\n # LOAD DATA #\n #############\n\n # Download the MNIST dataset if it is not present\n data_dir, data_file = os.path.split(dataset)\n if data_dir == \"\" and not os.path.isfile(dataset):\n # Check if dataset is in the data directory.\n new_path = os.path.join(\n os.path.split(__file__)[0],\n \"data\",\n dataset\n )\n if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':\n dataset = new_path\n\n if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':\n from six.moves import urllib\n origin = (\n 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'\n )\n print('Downloading data from %s' % origin)\n urllib.request.urlretrieve(origin, dataset)\n\n print('... loading data')\n\n # Load the dataset\n with gzip.open(dataset, 'rb') as f:\n try:\n train_set, valid_set, test_set = pickle.load(f, encoding='latin1')\n except:\n train_set, valid_set, test_set = pickle.load(f)\n # train_set, valid_set, test_set format: tuple(input, target)\n # input is a numpy.ndarray of 2 dimensions (a matrix)\n # where each row corresponds to an example. target is a\n # numpy.ndarray of 1 dimension (vector) that has the same length as\n # the number of rows in the input. 
It should give the target\n # to the example with the same index in the input.\n\n def shared_dataset(data_xy, borrow=True):\n \"\"\" Function that loads the dataset into shared variables\n\n The reason we store our dataset in shared variables is to allow\n Theano to copy it into the GPU memory (when code is run on GPU).\n Since copying data into the GPU is slow, copying a minibatch everytime\n is needed (the default behaviour if the data is not in a shared\n variable) would lead to a large decrease in performance.\n \"\"\"\n data_x, data_y = data_xy\n shared_x = theano.shared(numpy.asarray(data_x,\n dtype=theano.config.floatX),\n borrow=borrow)\n shared_y = theano.shared(numpy.asarray(data_y,\n dtype=theano.config.floatX),\n borrow=borrow)\n # When storing data on the GPU it has to be stored as floats\n # therefore we will store the labels as ``floatX`` as well\n # (``shared_y`` does exactly that). But during our computations\n # we need them as ints (we use labels as index, and if they are\n # floats it doesn't make sense) therefore instead of returning\n # ``shared_y`` we will have to cast it to int. 
This little hack\n # lets ous get around this issue\n return shared_x, T.cast(shared_y, 'int32')\n\n test_set_x, test_set_y = shared_dataset(test_set)\n valid_set_x, valid_set_y = shared_dataset(valid_set)\n train_set_x, train_set_y = shared_dataset(train_set)\n\n rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),\n (test_set_x, test_set_y)]\n return rval\n\n\ndef sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,\n dataset='mnist.pkl.gz',\n batch_size=600):\n \"\"\"\n Demonstrate stochastic gradient descent optimization of a log-linear\n model\n\n This is demonstrated on MNIST.\n\n :type learning_rate: float\n :param learning_rate: learning rate used (factor for the stochastic\n gradient)\n\n :type n_epochs: int\n :param n_epochs: maximal number of epochs to run the optimizer\n\n :type dataset: string\n :param dataset: the path of the MNIST dataset file from\n http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz\n\n \"\"\"\n datasets = load_data(dataset)\n\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n # compute number of minibatches for training, validation and testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size\n\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print('... 
building the model')\n\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n\n # generate symbolic variables for input (x and y represent a\n # minibatch)\n x = T.matrix('x') # data, presented as rasterized images\n y = T.ivector('y') # labels, presented as 1D vector of [int] labels\n\n # construct the logistic regression class\n # Each MNIST image has size 28*28\n classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)\n\n # the cost we minimize during training is the negative log likelihood of\n # the model in symbolic format\n cost = classifier.negative_log_likelihood(y)\n\n # compiling a Theano function that computes the mistakes that are made by\n # the model on a minibatch\n test_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\n y: test_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n validate_model = theano.function(\n inputs=[index],\n outputs=classifier.errors(y),\n givens={\n x: valid_set_x[index * batch_size: (index + 1) * batch_size],\n y: valid_set_y[index * batch_size: (index + 1) * batch_size]\n }\n )\n\n # compute the gradient of cost with respect to theta = (W,b)\n g_W = T.grad(cost=cost, wrt=classifier.W)\n g_b = T.grad(cost=cost, wrt=classifier.b)\n\n # start-snippet-3\n # specify how to update the parameters of the model as a list of\n # (variable, update expression) pairs.\n updates = [(classifier.W, classifier.W - learning_rate * g_W),\n (classifier.b, classifier.b - learning_rate * g_b)]\n\n # compiling a Theano function `train_model` that returns the cost, but in\n # the same time updates the parameter of the model based on the rules\n # defined in `updates`\n train_model = theano.function(\n inputs=[index],\n outputs=cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\n 
}\n )\n # end-snippet-3\n\n ###############\n # TRAIN MODEL #\n ###############\n print('... training the model')\n # early-stopping parameters\n patience = 5000 # look as this many examples regardless\n patience_increase = 2 # wait this much longer when a new best is\n # found\n improvement_threshold = 0.995 # a relative improvement of this much is\n # considered significant\n validation_frequency = min(n_train_batches, patience // 2)\n # go through this many\n # minibatche before checking the network\n # on the validation set; in this case we\n # check every epoch\n\n best_validation_loss = numpy.inf\n test_score = 0.\n start_time = timeit.default_timer()\n\n done_looping = False\n epoch = 0\n while (epoch < n_epochs) and (not done_looping):\n epoch = epoch + 1\n for minibatch_index in range(n_train_batches):\n\n minibatch_avg_cost = train_model(minibatch_index)\n # iteration number\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if (iter + 1) % validation_frequency == 0:\n # compute zero-one loss on validation set\n validation_losses = [validate_model(i)\n for i in range(n_valid_batches)]\n this_validation_loss = numpy.mean(validation_losses)\n\n print(\n 'epoch %i, minibatch %i/%i, validation error %f %%' %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n this_validation_loss * 100.\n )\n )\n\n # if we got the best validation score until now\n if this_validation_loss < best_validation_loss:\n #improve patience if loss improvement is good enough\n if this_validation_loss < best_validation_loss * \\\n improvement_threshold:\n patience = max(patience, iter * patience_increase)\n\n best_validation_loss = this_validation_loss\n # test it on the test set\n\n test_losses = [test_model(i)\n for i in range(n_test_batches)]\n test_score = numpy.mean(test_losses)\n\n print(\n (\n ' epoch %i, minibatch %i/%i, test error of'\n ' best model %f %%'\n ) %\n (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n test_score * 100.\n )\n )\n\n # save the best 
model\n with open('best_model.pkl', 'wb') as f:\n pickle.dump(classifier, f)\n\n if patience <= iter:\n done_looping = True\n break\n\n end_time = timeit.default_timer()\n print(\n (\n 'Optimization complete with best validation score of %f %%,'\n 'with test performance %f %%'\n )\n % (best_validation_loss * 100., test_score * 100.)\n )\n print('The code run for %d epochs, with %f epochs/sec' % (\n epoch, 1. * epoch / (end_time - start_time)))\n print(('The code for file ' +\n os.path.split(__file__)[1] +\n ' ran for %.1fs' % ((end_time - start_time))), file=sys.stderr)\n\n\ndef predict():\n \"\"\"\n An example of how to load a trained model and use it\n to predict labels.\n \"\"\"\n\n # load the saved model\n classifier = pickle.load(open('best_model.pkl'))\n\n # compile a predictor function\n predict_model = theano.function(\n inputs=[classifier.input],\n outputs=classifier.y_pred)\n\n # We can test it on some examples from test test\n dataset='mnist.pkl.gz'\n datasets = load_data(dataset)\n test_set_x, test_set_y = datasets[2]\n test_set_x = test_set_x.get_value()\n\n predicted_values = predict_model(test_set_x[:10])\n print(\"Predicted values for the first 10 examples in test set:\")\n print(predicted_values)\n\n\nif __name__ == '__main__':\n sgd_optimization_mnist()\n" ]
[ [ "numpy.asarray", "numpy.zeros", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bigboyabhisthi/reaction-network
[ "b84f16b7261ecd62d7aa8e2681907f6ea0c35565" ]
[ "src/rxn_network/costs/softplus.py" ]
[ "\" Implementation of the softplus cost function\"\nfrom typing import List\n\nimport numpy as np\n\nfrom rxn_network.core import CostFunction\nfrom rxn_network.reactions import ComputedReaction\n\n\nclass Softplus(CostFunction):\n \"\"\"\n The softplus cost function is a smooth version of the Rectified Linear Unit (\n ReLU) function commonly used in neural networks. It has the property that the\n output goes to 0 as the input goes to negative infinity, but the output\n approaches a linear scaling as the input goes to positive infinity. This is an\n especially useful mapping for applying it to determine costs in reaction networks.\n \"\"\"\n\n def __init__(\n self,\n temp: float = 300,\n params: List[str] = [\"energy_per_atom\"],\n weights: List[float] = [1.0],\n ):\n \"\"\"\n Args:\n temp: Temperature [K].\n params: List of data dictionary keys for function parameters used in the\n softplus function. Defaults to [\"energy_per_atom\"]\n weights: List of corresponding values by which to weight the\n function parameters. 
Defaults to [1.0].\n \"\"\"\n self.temp = temp\n self.params = params\n self.weights = np.array(weights)\n\n def evaluate(self, rxn: ComputedReaction) -> float:\n \"\"\"\n Calculates the ost of reaction based on the initialized parameters and weights.\n\n Args:\n rxn: A computed reaction.\n\n Returns:\n The cost of the reaction.\n \"\"\"\n values = []\n for p in self.params:\n if rxn.data and p in rxn.data:\n value = rxn.data[p]\n elif hasattr(rxn, p):\n value = getattr(rxn, p)\n else:\n raise ValueError(f\"Reaction is missing parameter {p}!\")\n values.append(value)\n\n values = np.array(values)\n total = np.dot(values, self.weights)\n\n return self._softplus(total, self.temp)\n\n @staticmethod\n def _softplus(x: float, t: float) -> float:\n \"The mathematical formula for the softplus function\"\n return np.log(1 + (273 / t) * np.exp(x))\n\n def __repr__(self):\n return (\n f\"Softplus with parameters: \"\n f\"{' '.join([(f'{k} ({v})') for k, v in zip(self.params, self.weights)])}\"\n )\n" ]
[ [ "numpy.exp", "numpy.dot", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ML-PSE/Machine_Learning_for_PSE
[ "b53578d7cc0e0eca4907527b188a60de06d6710e", "b53578d7cc0e0eca4907527b188a60de06d6710e", "b53578d7cc0e0eca4907527b188a60de06d6710e", "b53578d7cc0e0eca4907527b188a60de06d6710e", "b53578d7cc0e0eca4907527b188a60de06d6710e" ]
[ "Chapter_SupportVectorMachines/polymerPlantData_Softsensing_SVR.py", "Chapter_LatentVariable2/DimensionalityReduction_FDA.py", "Chapter_DecisionTrees_EnsembleLearning/SoftSensing_WastewaterPlant_XGBoost.py", "Chapter_LatentVariable2/FaultClassification_FDA.py", "Chapter_Clustering_GMM/ProcessMonitoring_GMM.py" ]
[ "##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n## SVR model with polymer plant data\r\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n#%% import required packages\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#%% read data\r\ndata = np.loadtxt('polymer.dat')\r\nX = data[:,0:10]\r\nY = data[:,10:]\r\ny = Y[:,2]\r\n\r\n#%% fit SVR model\r\nfrom sklearn.svm import SVR\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\nmodel = SVR(epsilon=0.01) # default epsilon = 0.1\r\nparam_grid = [{'gamma': np.linspace(0.1e-05,5,100), 'C': np.linspace(0.01,5000,100)}]\r\ngs = GridSearchCV(model, param_grid, scoring='neg_mean_squared_error', cv=10, verbose=2)\r\n\r\ngs.fit(X, y)\r\nprint('Optimal hyperparameter:', gs.best_params_)\r\n\r\n#%% predict using the best model\r\ny_predicted_SVR = gs.predict(X) \r\n\r\n#%% plots of raw and predicted data\r\nplt.figure()\r\nplt.plot(y, y_predicted_SVR, '.', markeredgecolor='k', markeredgewidth=0.5, ms=9)\r\nplt.plot(y, y, '-r', linewidth=0.5)\r\nplt.xlabel('measured data'), plt.ylabel('predicted data ')\r\n\r\n#%% metrics\r\nfrom sklearn.metrics import r2_score\r\nprint('R2:', r2_score(y, y_predicted_SVR))", "##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n## Dimensionality reduction via FDA for TE data\r\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n#%% import required packages\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#%% fetch TEP data for faults 5,10,19 \r\nTEdata_Fault5_train = np.loadtxt('d05.dat')\r\nTEdata_Fault10_train = np.loadtxt('d10.dat')\r\nTEdata_Fault19_train = np.loadtxt('d19.dat')\r\nTEdata_Faulty_train = np.vstack((TEdata_Fault5_train, TEdata_Fault10_train, TEdata_Fault19_train))\r\n\r\n# select variables as done in Lee et al.\r\nxmeas = TEdata_Faulty_train[:,0:22]\r\nxmv = 
TEdata_Faulty_train[:,41:52]\r\ndata_Faulty_train = np.hstack((xmeas, xmv))\r\n\r\n# generate sample labels\r\nn_rows_train = TEdata_Fault5_train.shape[0]\r\ny_train = np.concatenate((5*np.ones(n_rows_train,), 10*np.ones(n_rows_train,), 19*np.ones(n_rows_train,)))\r\n \r\n#%% scale data\r\nfrom sklearn.preprocessing import StandardScaler\r\nscaler = StandardScaler()\r\nFaultydata_train_scaled = scaler.fit_transform(data_Faulty_train)\r\n\r\n#%% visualize all scaled variables\r\nplt.figure()\r\nplt.plot(Faultydata_train_scaled)\r\nplt.show()\r\n \r\n#%% fit LDA model\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nlda = LinearDiscriminantAnalysis()\r\nscores_train_lda = lda.fit_transform(Faultydata_train_scaled, y_train)\r\n\r\n#%% visualize LDA scores\r\nplt.figure()\r\nplt.plot(scores_train_lda[0:n_rows_train,0], scores_train_lda[0:n_rows_train,1], 'b.', label='Fault 5')\r\nplt.plot(scores_train_lda[n_rows_train:2*n_rows_train,0], scores_train_lda[n_rows_train:2*n_rows_train,1], 'r.', label='Fault 10')\r\nplt.plot(scores_train_lda[2*n_rows_train:3*n_rows_train,0], scores_train_lda[2*n_rows_train:3*n_rows_train,1], 'm.', label='Fault 19')\r\nplt.legend()\r\nplt.xlabel('FD1 (training data)')\r\nplt.ylabel('FD2 (training data)')\r\n\r\n#%% fit PCA model\r\nfrom sklearn.decomposition import PCA\r\npca = PCA(n_components=2)\r\nscores_train_pca = pca.fit_transform(Faultydata_train_scaled)\r\n\r\n#%% visualize PCA scores\r\nplt.figure()\r\nplt.plot(scores_train_pca[0:n_rows_train,0], scores_train_pca[0:n_rows_train,1], 'b.', label='Fault 5')\r\nplt.plot(scores_train_pca[n_rows_train:2*n_rows_train,0], scores_train_pca[n_rows_train:2*n_rows_train,1], 'r.', label='Fault 10')\r\nplt.plot(scores_train_pca[2*n_rows_train:3*n_rows_train,0], scores_train_pca[2*n_rows_train:3*n_rows_train,1], 'm.', label='Fault 19')\r\nplt.legend()\r\nplt.xlabel('PC1 (training data)')\r\nplt.ylabel('PC2 (training 
data)')\r\n\r\n##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n## Visualize test data \r\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n#%% fetch TE data\r\nTEdata_Fault5_test = np.loadtxt('d05_te.dat')\r\nTEdata_Fault5_test = TEdata_Fault5_test[160:,:]\r\nTEdata_Fault10_test = np.loadtxt('d10_te.dat')\r\nTEdata_Fault10_test = TEdata_Fault10_test[160:,:]\r\nTEdata_Fault19_test = np.loadtxt('d19_te.dat')\r\nTEdata_Fault19_test = TEdata_Fault19_test[160:,:]\r\nTEdata_Faulty_test = np.vstack((TEdata_Fault5_test, TEdata_Fault10_test, TEdata_Fault19_test))\r\n\r\n# select variables as done in Lee et al.\r\nxmeas = TEdata_Faulty_test[:,0:22]\r\nxmv = TEdata_Faulty_test[:,41:52]\r\ndata_Faulty_test = np.hstack((xmeas, xmv))\r\n\r\n# generate sample labels\r\nn_rows_test = TEdata_Fault5_test.shape[0]\r\ny_test = np.concatenate((5*np.ones(n_rows_test,), 10*np.ones(n_rows_test,), 19*np.ones(n_rows_test,)))\r\n\r\n#%% scale data, transform via LDA & PCA models\r\nFaultydata_test_scaled = scaler.transform(data_Faulty_test)\r\nscores_test_lda = lda.transform(Faultydata_test_scaled)\r\nscores_test_pca = pca.transform(Faultydata_test_scaled)\r\n\r\n#%% visualize LDA & PCA scores\r\nplt.figure()\r\nplt.plot(scores_test_lda[0:n_rows_test,0], scores_test_lda[0:n_rows_test,1], 'b.', label='Fault 5')\r\nplt.plot(scores_test_lda[n_rows_test:2*n_rows_test,0], scores_test_lda[n_rows_test:2*n_rows_test,1], 'r.', label='Fault 10')\r\nplt.plot(scores_test_lda[2*n_rows_test:3*n_rows_test,0], scores_test_lda[2*n_rows_test:3*n_rows_test,1], 'm.', label='Fault 19')\r\nplt.legend()\r\nplt.xlabel('FD1 (test data)')\r\nplt.ylabel('FD2 (test data)')\r\n\r\nplt.figure()\r\nplt.plot(scores_test_pca[0:n_rows_test,0], scores_test_pca[0:n_rows_test,1], 'b.', label='Fault 5')\r\nplt.plot(scores_test_pca[n_rows_test:2*n_rows_test,0], scores_test_pca[n_rows_test:2*n_rows_test,1], 'r.', label='Fault 
10')\r\nplt.plot(scores_test_pca[2*n_rows_test:3*n_rows_test,0], scores_test_pca[2*n_rows_test:3*n_rows_test,1], 'm.', label='Fault 19')\r\nplt.legend()\r\nplt.xlabel('PC1 (test data)')\r\nplt.ylabel('PC2 (test data)')\r\n\r\n\r\n", "##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n## Soft sensing via XGBoost on UCI Wastewater Treatment Plant data\r\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n#%% read data\r\nimport pandas as pd\r\n\r\ndata_raw = pd.read_csv('water-treatment.data', header=None,na_values=\"?\" ) # dataset uses '?' to denote missing value\r\nX_raw = data_raw.iloc[:,1:23]\r\ny_raw = data_raw.iloc[:,29]\r\n\r\n#%% handle missing data\r\n# generate a dataframe from inputs dataframe and output series\r\ndata = pd.concat([X_raw, y_raw], axis=1)\r\n\r\n# check for presence of missing values\r\nprint(data.info())\r\n\r\n# remove rows with missing data\r\ndata.dropna(axis=0, how='any', inplace=True)\r\n\r\nprint('Number of samples remaining:', data.shape[0])\r\n\r\n#%% separate inputs and output\r\nX = data.iloc[:,:-1]\r\ny = data.iloc[:,-1]\r\n\r\n#%% plot 1st input and output to show variability\r\nfrom matplotlib import pyplot as plt\r\nplt.figure()\r\nplt.plot(X.iloc[:,0].values, color='brown', linestyle = ':', marker='.', linewidth=0.5, markeredgecolor = 'k')\r\nplt.xlabel('Sample #')\r\nplt.ylabel('Input flow to plant')\r\n\r\nplt.figure()\r\nplt.plot(X.iloc[:,8].values, color='brown', linestyle = ':', marker='.', linewidth=0.5, markeredgecolor = 'k')\r\nplt.xlabel('Sample #')\r\nplt.ylabel('Input conductivity to plant')\r\n\r\nplt.figure()\r\nplt.plot(y.values, color='navy', linestyle = ':', marker='.', linewidth=0.5, markeredgecolor = 'k')\r\nplt.xlabel('Sample #')\r\nplt.ylabel('Output Conductivity')\r\n\r\n#%% separate fitting, validation, and test data\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, 
test_size = 0.2, random_state = 100)\r\nX_fit, X_val, y_fit, y_val = train_test_split(X_train, y_train, test_size = 0.3, random_state = 100)\r\n\r\n#%% fit XGBoost model\r\nimport xgboost \r\nmodel = xgboost.XGBRegressor(max_depth=3, learning_rate=0.1, random_state=100)\r\nmodel.fit(X_fit, y_fit, eval_set=[(X_val, y_val)], early_stopping_rounds=2)\r\n\r\n#%% predict and plot\r\ny_train_predicted = model.predict(X_train)\r\ny_test_predicted = model.predict(X_test)\r\n\r\nplt.figure()\r\nplt.plot(y_train, y_train_predicted, '.', markeredgecolor='k', markeredgewidth=0.5, ms=9)\r\nplt.plot(y_train, y_train, '-r', linewidth=0.5)\r\nplt.xlabel('raw training data')\r\nplt.ylabel('prediction')\r\n\r\nplt.figure()\r\nplt.plot(y_test, y_test_predicted, '.', markeredgecolor='k', markeredgewidth=0.5, ms=9)\r\nplt.plot(y_test, y_test, '-r', linewidth=0.5)\r\nplt.xlabel('raw test data')\r\nplt.ylabel('prediction')\r\n\r\n#%% check training vs test accuracy\r\nfrom sklearn.metrics import r2_score\r\nprint('Accuracy over training data: ', r2_score(y_train, y_train_predicted))\r\nprint('Accuracy over test data: ', r2_score(y_test, y_test_predicted))\r\n\r\n\r\n\r\n", "##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n## Fault classification via FDA\r\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n#%% import required packages\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#%% fetch TEP data for faults 5,10,19 \r\nTEdata_Fault5_train = np.loadtxt('d05.dat')\r\nTEdata_Fault10_train = np.loadtxt('d10.dat')\r\nTEdata_Fault19_train = np.loadtxt('d19.dat')\r\nTEdata_Faulty_train = np.vstack((TEdata_Fault5_train, TEdata_Fault10_train, TEdata_Fault19_train))\r\n\r\n# select variables as done in Lee et al.\r\nxmeas = TEdata_Faulty_train[:,0:22]\r\nxmv = TEdata_Faulty_train[:,41:52]\r\ndata_Faulty_train = np.hstack((xmeas, xmv))\r\n\r\n# generate sample labels\r\nn_rows_train = 
TEdata_Fault5_train.shape[0]\r\ny_train = np.concatenate((5*np.ones(n_rows_train,), 10*np.ones(n_rows_train,), 19*np.ones(n_rows_train,)))\r\n \r\n#%% scale data\r\nfrom sklearn.preprocessing import StandardScaler\r\nscaler = StandardScaler()\r\nFaultydata_train_scaled = scaler.fit_transform(data_Faulty_train)\r\n\r\n#%% visualize all scaled variables\r\nplt.figure()\r\nplt.plot(Faultydata_train_scaled)\r\nplt.show()\r\n \r\n#%% fit LDA model\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nlda = LinearDiscriminantAnalysis()\r\nscores_train_lda = lda.fit_transform(Faultydata_train_scaled, y_train)\r\n\r\n#%% visualize LDA scores\r\nplt.figure()\r\nplt.plot(scores_train_lda[0:n_rows_train,0], scores_train_lda[0:n_rows_train,1], 'b.', label='Fault 5')\r\nplt.plot(scores_train_lda[n_rows_train:2*n_rows_train,0], scores_train_lda[n_rows_train:2*n_rows_train,1], 'r.', label='Fault 10')\r\nplt.plot(scores_train_lda[2*n_rows_train:3*n_rows_train,0], scores_train_lda[2*n_rows_train:3*n_rows_train,1], 'm.', label='Fault 19')\r\nplt.legend()\r\nplt.xlabel('FD1 (training data)')\r\nplt.ylabel('FD2 (training data)')\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n## Control limit determination for fault5 class\r\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\nimport scipy.stats\r\nNj = n_rows_train\r\nk = 2\r\n\r\nalpha = 0.01# 99% control limit\r\nT2_CL = k*(Nj**2-1)*scipy.stats.f.ppf(1-alpha,k,Nj-k)/(Nj*(Nj-k))\r\n\r\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n## Fault classification with fault 5 test data\r\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n# mean and covariance for Fault 5 class\r\nscores_train_lda_Fault5 = scores_train_lda[0:n_rows_train,:]\r\ncov_scores_train_Fault5 = np.cov(scores_train_lda_Fault5.T)\r\nmean_scores_train_Fault5 = np.mean(scores_train_lda_Fault5, axis = 
0)\r\n\r\n#%% fetch TE test dta for fault 5\r\nTEdata_Fault5_test = np.loadtxt('d05_te.dat')\r\nTEdata_Fault5_test = TEdata_Fault5_test[160:,:]\r\nn_rows_test = TEdata_Fault5_test.shape[0]\r\n\r\n# select variables as done in Lee et al.\r\nxmeas = TEdata_Fault5_test[:,0:22]\r\nxmv = TEdata_Fault5_test[:,41:52]\r\ndata_Faulty_test = np.hstack((xmeas, xmv))\r\n\r\n#%% scale data and transform\r\nFaultydata_test_scaled = scaler.transform(data_Faulty_test)\r\nscores_test_lda = lda.transform(Faultydata_test_scaled)\r\n\r\n#%% compute T2 statistic for test data for Fault 5 class\r\nT2_test = np.zeros((n_rows_test,))\r\nfor sample in range(n_rows_test):\r\n score_sample = scores_test_lda[sample,:]\r\n score_sample_centered = score_sample - mean_scores_train_Fault5\r\n T2_test[sample] = np.dot(np.dot(score_sample_centered[np.newaxis,:],np.linalg.inv(cov_scores_train_Fault5)),score_sample_centered[np.newaxis,:].T)\r\n\r\n#%% plot test prediction\r\noutsideCL_flag = T2_test > T2_CL\r\ninsideCL_flag = T2_test <= T2_CL\r\nplt.figure()\r\nplt.plot(scores_test_lda[outsideCL_flag,0], scores_test_lda[outsideCL_flag,1], 'k.', label='outside Fault 5 boundary')\r\nplt.plot(scores_test_lda[insideCL_flag,0], scores_test_lda[insideCL_flag,1], 'b.', label='inside Fault 5 boundary')\r\nplt.xlabel('FD1 (test data)')\r\nplt.ylabel('FD2 (test data)')\r\nplt.legend()\r\n\r\nprint('Percentage of samples correctly diagnosed as Fault 5: ', 100*np.sum(T2_test < T2_CL)/n_rows_test)\r\n", "##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n## Process Monitoring of Etch data\r\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n#%% import required packages\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.mixture import GaussianMixture\r\n\r\n#%% fetch data\r\nimport scipy.io\r\n\r\nmatlab_data = scipy.io.loadmat('MACHINE_Data.mat', struct_as_record = False)\r\nEtch_data = 
matlab_data['LAMDATA']\r\ncalibration_dataAll = Etch_data[0,0].calibration # calibration_dataAll[i,0] corresponds to a 2D data from ith batch where columns correspond to different variables \r\n\r\nvariable_names = Etch_data[0,0].variables\r\n\r\n##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n## perform Multiway PCA\r\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n#%% generate unfolded data matrix\r\nn_vars = variable_names.size - 2 # first 2 columns are not process variables\r\nn_samples = 85 # following the work of He et al.\r\n\r\nunfolded_dataMatrix = np.empty((1,n_vars*n_samples))\r\nfor expt in range(calibration_dataAll.size):\r\n calibration_expt = calibration_dataAll[expt,0][5:90,2:] # removing first 5 measurements as done in He et al.\r\n \r\n if calibration_expt.shape[0] < 85:\r\n continue\r\n \r\n unfolded_row = np.ravel(calibration_expt, order='F')[np.newaxis,:]\r\n unfolded_dataMatrix = np.vstack((unfolded_dataMatrix, unfolded_row))\r\n\r\nunfolded_dataMatrix = unfolded_dataMatrix[1:,:]\r\n\r\n#%% scale data\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nscaler = StandardScaler()\r\ndata_train_normal = scaler.fit_transform(unfolded_dataMatrix)\r\n \r\n#%% fit PCA model to calibration data\r\nfrom sklearn.decomposition import PCA\r\n\r\npca = PCA(n_components = 3) # following the work of He et al.\r\nscore_train = pca.fit_transform(data_train_normal)\r\n\r\n#%% visualize in 2D\r\nplt.figure()\r\nplt.scatter(score_train[:,0],score_train[:,1])\r\nplt.xlabel('PC1 scores')\r\nplt.ylabel('PC2 scores')\r\n\r\n##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n## GMM on PCA scores\r\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n#%% finding # of components via BIC \r\nBICs = []\r\nlowestBIC = np.inf\r\nfor n_cluster in range(1, 10):\r\n gmm = GaussianMixture(n_components = n_cluster, random_state = 
100)\r\n gmm.fit(score_train)\r\n BIC = gmm.bic(score_train)\r\n BICs.append(BIC)\r\n \r\n if BIC < lowestBIC:\r\n optimal_n_cluster = n_cluster \r\n lowestBIC = BIC\r\n\r\nplt.figure()\r\nplt.plot(range(1,10), BICs, marker='o')\r\nplt.xlabel('Number of components')\r\nplt.ylabel('BIC')\r\nplt.show()\r\n\r\n#%% fit GMM model to metal-etch data\r\ngmm = GaussianMixture(n_components = optimal_n_cluster, random_state = 100)\r\ncluster_label = gmm.fit_predict(score_train)\r\n\r\nplt.figure()\r\nplt.scatter(score_train[:, 0], score_train[:, 1], c = cluster_label, s=20, cmap='viridis')\r\nplt.xlabel('PC1 scores')\r\nplt.ylabel('PC2 scores')\r\n\r\ncluster_centers = gmm.means_\r\ncluster_plot_labels = ['Cluster ' + str(i+1) for i in range(optimal_n_cluster)]\r\nfor i in range(optimal_n_cluster):\r\n plt.scatter(cluster_centers[i, 0], cluster_centers[i, 1], c='red', s=20, marker = '*', alpha=0.5)\r\n plt.annotate(cluster_plot_labels[i], (cluster_centers[i,0], cluster_centers[i,1]))\r\n\r\n##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n## Fault detection metric for training data\r\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n#%% global mahalonobis distance metric\r\nDglobal_train = np.zeros((score_train.shape[0],))\r\n\r\nfor i in range(score_train.shape[0]):\r\n x = score_train[i,:,np.newaxis]\r\n probs = gmm.predict_proba(x.T)\r\n \r\n for component in range(3):\r\n Dlocal = np.dot(np.dot((x-gmm.means_[component,:,np.newaxis]).T,np.linalg.inv(gmm.covariances_[component,:])),(x-gmm.means_[component,:,np.newaxis]))\r\n Dglobal_train[i] = Dglobal_train[i] + probs[0,component]*Dlocal\r\n\r\n#%% Dglobal control limit\r\nN = score_train.shape[0]\r\nr = 3\r\n\r\nalpha = 0.05 # 95% control limit\r\nDglobal_CL = r*(N**2-1)*scipy.stats.f.ppf(1-alpha,r,N-r)/(N*(N-r))\r\n\r\n#%% Dglobal plot with CL\r\nplt.figure()\r\nplt.plot(Dglobal_train)\r\nplt.plot([1,len(Dglobal_train)],[Dglobal_CL, Dglobal_CL], 
color='red')\r\nplt.xlabel('Sample #')\r\nplt.ylabel('D_global for training data')\r\nplt.show()\r\n\r\n##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n## test data\r\n## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\r\n\r\n#%% fetch test data and unfold\r\ntest_dataAll = Etch_data[0,0].test\r\n\r\nunfolded_TestdataMatrix = np.empty((1,n_vars*n_samples))\r\nfor expt in range(test_dataAll.size):\r\n test_expt = test_dataAll[expt,0][5:90,2:]\r\n \r\n if test_expt.shape[0] < 85:\r\n continue\r\n \r\n unfolded_row = np.ravel(test_expt, order='F')[np.newaxis,:]\r\n unfolded_TestdataMatrix = np.vstack((unfolded_TestdataMatrix, unfolded_row))\r\n\r\nunfolded_TestdataMatrix = unfolded_TestdataMatrix[1:,:]\r\n\r\n#%% PCA on fault data\r\ndata_test_normal = scaler.transform(unfolded_TestdataMatrix)\r\nscore_test = pca.transform(data_test_normal)\r\n\r\n#%% visualize in 2D (both test and calibration data)\r\nplt.figure()\r\nplt.scatter(score_train[:,0],score_train[:,1], c='blue', alpha=0.1)\r\nplt.scatter(score_test[:,0],score_test[:,1], c='red', marker = '*')\r\nplt.xlabel('PC1 scores')\r\nplt.ylabel('PC2 scores')\r\n\r\n#%% compute Dglobal_test\r\nDglobal_test = np.zeros((score_test.shape[0],))\r\n\r\nfor i in range(score_test.shape[0]):\r\n x = score_test[i,:,np.newaxis]\r\n probs = gmm.predict_proba(x.T)\r\n \r\n for component in range(3):\r\n Dlocal = np.dot(np.dot((x-gmm.means_[component,:,np.newaxis]).T,np.linalg.inv(gmm.covariances_[component,:])),(x-gmm.means_[component,:,np.newaxis]))\r\n Dglobal_test[i] = Dglobal_test[i] + probs[0,component]*Dlocal\r\n\r\n#%% Dglobal plot with CL\r\nplt.figure()\r\nplt.plot(Dglobal_test, marker = '*')\r\nplt.plot([1,len(Dglobal_test)],[Dglobal_CL,Dglobal_CL], color='red')\r\nplt.xlabel('Sample #')\r\nplt.ylabel('D_global for test data')\r\nplt.show()\r\n\r\nprint('Number of faults identified: ', np.sum(Dglobal_test > Dglobal_CL), ' out of ', len(Dglobal_test))\r\n" ]
[ [ "sklearn.model_selection.GridSearchCV", "sklearn.metrics.r2_score", "numpy.linspace", "sklearn.svm.SVR", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "numpy.loadtxt", "matplotlib.pyplot.figure" ], [ "numpy.hstack", "matplotlib.pyplot.legend", "numpy.ones", "matplotlib.pyplot.plot", "numpy.loadtxt", "matplotlib.pyplot.ylabel", "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "matplotlib.pyplot.xlabel", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.show", "sklearn.decomposition.PCA", "numpy.vstack", "matplotlib.pyplot.figure" ], [ "pandas.concat", "pandas.read_csv", "sklearn.metrics.r2_score", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.figure" ], [ "numpy.hstack", "matplotlib.pyplot.legend", "numpy.linalg.inv", "numpy.ones", "matplotlib.pyplot.plot", "numpy.loadtxt", "matplotlib.pyplot.ylabel", "numpy.cov", "numpy.mean", "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "matplotlib.pyplot.xlabel", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.show", "numpy.zeros", "numpy.sum", "numpy.vstack", "matplotlib.pyplot.figure" ], [ "sklearn.mixture.GaussianMixture", "numpy.sum", "matplotlib.pyplot.scatter", "numpy.linalg.inv", "numpy.vstack", "matplotlib.pyplot.annotate", "numpy.ravel", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.show", "numpy.zeros", "sklearn.decomposition.PCA", "numpy.empty", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nutalk/Image-coregistration-translation-rotation
[ "3aee41120f80b9ed17f996dd38ff7a828887fdba" ]
[ "demo_image_coregestration.py" ]
[ "'''\ndemo_image_coregestration.py\n\nCoregister two 2D (single channel) same size images differing by translation and rotation\n'''\n\nimport numpy as np\nimport matplotlib.image as mpimg \nimport matplotlib.pyplot as plt \nfrom utils.coreg_utils import ImageTranslate, ImageRotate\nfrom utils.translation_coreg_mutual_info import MutualInfoDiffEvolutionTranslation\nfrom utils.rotation_coreg_mutual_info import MutualInfoDiffEvolutionRotationCartesian\nimport random\n\n# range for taking a random shift (pixels) and rotation angle (degrees)\nlimit_left = -10.00;\nlimit_right = 10.00;\n\nmu, sigma = 0, 4 # mean and standard deviation of added Gaussian noise\nNiter = 50; # number of iterations\n\n# load test image\nimage_ref = np.array(mpimg.imread('test_image.gif'));\nimage_ref = image_ref.astype(np.float64);\nNrows, Ncols = image_ref.shape;\n\n# generate random independent row and column pixel shift, and rotation angle\nshift_row_rand = round(random.uniform(limit_left, limit_right), 2);\nshift_col_rand = round(random.uniform(limit_left, limit_right), 2);\nrot_angle_rand = round(random.uniform(limit_left, limit_right), 2);\n\n# generated dummy image, shifted and rotated\nimage_shifted = ImageTranslate(image_ref.copy(), shift_row_rand, shift_col_rand);\nimage_rotated = ImageRotate(image_shifted.copy(), rot_angle_rand);\n\n# add independent Gaussian noise for reference image (image_ref) and rotated image (image_rotated)\nimage_ref += np.random.normal(mu, sigma, size = (Nrows, Ncols));\nimage_rotated += np.random.normal(mu, sigma, size = (Nrows, Ncols));\n\n# determine rotation angle \nbounds = [(limit_left*2, limit_right*2)]; # bounds for sub-pixel level coregestration \nrot_angle = MutualInfoDiffEvolutionRotationCartesian(image_ref.copy(), image_rotated.copy(), bounds, Niter);\n\n# apply rotation correction\nimage_coreg = ImageRotate(image_rotated.copy(), -rot_angle);\n\n# determine translation\nbounds = [(limit_left*2, limit_right*2), (limit_left*2, limit_right*2)]; 
# bounds for sub-pixel level coregestration \nshift_row, shift_col = MutualInfoDiffEvolutionTranslation(image_ref.copy(), image_coreg.copy(), bounds, Niter);\n\n# apply translation correction\nimage_coreg = ImageTranslate(image_coreg.copy(), -shift_row, -shift_col);\n\nprint('Expected rotation angle: ' + str(rot_angle_rand))\nprint('Determined rotation angle: ' + str(rot_angle))\nprint('Expected translation Row: ' + str(shift_row_rand) + ' Col: ' + str(shift_col_rand))\nprint('Determined translation Row: ' + str(shift_row) + ' Col: ' + str(shift_col))\n\nfig_width, fig_height = 10, 5\nplt.figure(figsize=(fig_width, fig_height))\n\n# plot results\nplt.subplot(1,3,1)\nplt.imshow(image_ref)\nplt.title('Ref Image')\nplt.subplot(1,3,2)\nplt.imshow(image_rotated)\nplt.title('Dummy Image')\nplt.subplot(1,3,3)\nplt.imshow(image_coreg)\nplt.title('Coreg Image')\n\n\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.title", "matplotlib.image.imread", "numpy.random.normal", "matplotlib.pyplot.subplot", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Liang813/zhusuan
[ "4386b2a12ae4f4ed8e694e504e51d7dcdfd6f22a", "4386b2a12ae4f4ed8e694e504e51d7dcdfd6f22a" ]
[ "zhusuan/distributions/utils.py", "examples/topic_models/lntm_mcem.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport tensorflow as tf\nimport numpy as np\n\n\n__all__ = [\n 'log_combination',\n 'explicit_broadcast',\n 'maybe_explicit_broadcast',\n 'is_same_dynamic_shape',\n]\n\n\ndef log_combination(n, ks):\n \"\"\"\n Compute the log combination function.\n\n .. math::\n\n \\\\log \\\\binom{n}{k_1, k_2, \\\\dots} = \\\\log n! - \\\\sum_{i}\\\\log k_i!\n\n :param n: A N-D `float` Tensor. Can broadcast to match `tf.shape(ks)[:-1]`.\n :param ks: A (N + 1)-D `float` Tensor. Each slice `[i, j, ..., k, :]` is\n a vector of `[k_1, k_2, ...]`.\n\n :return: A N-D Tensor of type same as `n`.\n \"\"\"\n return tf.lgamma(n + 1) - tf.reduce_sum(tf.lgamma(ks + 1), axis=-1)\n\n\ndef explicit_broadcast(x, y, x_name, y_name):\n \"\"\"\n Explicit broadcast two Tensors to have the same shape.\n\n :return: x, y after broadcast.\n \"\"\"\n try:\n x *= tf.ones_like(y, dtype=x.dtype)\n y *= tf.ones_like(x, dtype=y.dtype)\n except ValueError:\n raise ValueError(\n \"{} and {} cannot broadcast to match. ({} vs. {})\".format(\n x_name, y_name, x.get_shape(), y.get_shape()))\n return x, y\n\n\ndef maybe_explicit_broadcast(x, y, x_name, y_name):\n \"\"\"\n Explicit broadcast two Tensors to have the same shape if necessary.\n\n :return: x, y after broadcast.\n \"\"\"\n if not (x.get_shape() and y.get_shape()):\n x, y = explicit_broadcast(x, y, x_name, y_name)\n else:\n if x.get_shape().ndims != y.get_shape().ndims:\n x, y = explicit_broadcast(x, y, x_name, y_name)\n elif x.get_shape().is_fully_defined() and \\\n y.get_shape().is_fully_defined():\n if x.get_shape() != y.get_shape():\n x, y = explicit_broadcast(x, y, x_name, y_name)\n else:\n # Below code seems to induce a BUG when this function is\n # called in HMC. 
Probably due to tensorflow's not supporting\n # control flow edge from an op inside the body to outside.\n # We should further fix this.\n #\n # x, y = tf.cond(\n # is_same_dynamic_shape(x, y),\n # lambda: (x, y),\n # lambda: explicit_broadcast(x, y, x_name, y_name))\n x, y = explicit_broadcast(x, y, x_name, y_name)\n return x, y\n\n\ndef is_same_dynamic_shape(x, y):\n \"\"\"\n Whether `x` and `y` has the same dynamic shape.\n\n :param x: A Tensor.\n :param y: A Tensor.\n\n :return: A scalar Tensor of `bool`.\n \"\"\"\n # There is a BUG of Tensorflow for not doing static shape inference\n # right in nested tf.cond()'s, so we are not comparing x and y's\n # shape directly but working with their concatenations.\n return tf.cond(\n tf.equal(tf.rank(x), tf.rank(y)),\n lambda: tf.reduce_all(tf.equal(\n tf.concat([tf.shape(x), tf.shape(y)], 0),\n tf.concat([tf.shape(y), tf.shape(x)], 0))),\n lambda: tf.convert_to_tensor(False, tf.bool))\n\n\ndef floating_dtypes():\n \"\"\"Return a list of supported floating dtypes.\"\"\"\n return [tf.float16, tf.float32, tf.float64]\n\n\ndef integer_dtypes():\n \"\"\"Return a list of supported integer dtypes.\"\"\"\n return [tf.int16, tf.int32, tf.int64]\n\n\ndef assert_same_dtype_in(tensors_with_name, dtypes=None):\n \"\"\"\n Whether all types of tensors in `tensors_with_name` are the same and in the\n allowed `dtypes`.\n\n :param tensors_with_name: A list of (tensor, tensor_name).\n :param dtypes: A list of allowed dtypes. 
If `None`, then all dtypes are\n allowed.\n\n :return: The dtype of `tensors`.\n \"\"\"\n dtypes_set = set(dtypes) if dtypes else None\n expected_dtype = None\n for tensor, tensor_name in tensors_with_name:\n if dtypes_set and (tensor.dtype not in dtypes_set):\n if len(dtypes) == 1:\n raise TypeError(\n '{}({}) must have dtype {}.'.format(\n tensor_name, tensor.dtype, dtypes[0]))\n else:\n raise TypeError(\n '{}({}) must have a dtype in {}.'.format(\n tensor_name, tensor.dtype, dtypes))\n if not expected_dtype:\n expected_dtype = tensor.dtype\n elif expected_dtype != tensor.dtype:\n tensor0, tensor0_name = tensors_with_name[0]\n raise TypeError(\n '{}({}) must have the same dtype as {}({}).'.format(\n tensor_name, tensor.dtype,\n tensor0_name, tensor0.dtype))\n\n return expected_dtype\n\n\ndef assert_same_float_dtype(tensors_with_name):\n \"\"\"\n Whether all tensors in `tensors_with_name` have the same floating type.\n\n :param tensors_with_name: A list of (tensor, tensor_name).\n :return: The type of `tensors`.\n \"\"\"\n return assert_same_dtype_in(tensors_with_name, floating_dtypes())\n\n\ndef assert_same_float_or_int_dtype(tensors_with_name):\n \"\"\"\n Whether all tensors in `tensors_with_name` have the same floating or\n integer type.\n\n :param tensors_with_name: A list of (tensor, tensor_name).\n :return: The type of `tensors`.\n \"\"\"\n available_dtypes = floating_dtypes() + integer_dtypes()\n return assert_same_dtype_in(tensors_with_name, available_dtypes)\n\n\ndef assert_dtype_in_dtypes(dtype, dtypes):\n \"\"\"Assert a dtype is in a list of dtypes.\"\"\"\n if not dtype in dtypes:\n raise TypeError(\"`dtype`({}) not in {}\".format(dtype, dtypes))\n\n\ndef assert_dtype_is_float(dtype):\n \"\"\"Assert a dtype is in [`tf.float16`, `tf.float32`, `tf.float64`]\"\"\"\n assert_dtype_in_dtypes(dtype, floating_dtypes())\n\n\ndef assert_dtype_is_int_or_float(dtype):\n \"\"\"\n Assert a dtype is int (`tf.int16`, `tf.int32`, `tf.int64`) or float (\n `tf.float16`, 
`tf.float32`, `tf.float64`).\n \"\"\"\n assert_dtype_in_dtypes(dtype, integer_dtypes() + floating_dtypes())\n\n\ndef get_shape_list(tensor):\n \"\"\"\n When the rank of `tensor` is known from the static shape, return a list\n where each item is either an `int` (known from the static shape) or a\n scalar `int32` Tensor (picked from the dynamic shape).\n\n When the rank of `tensor` is unknown, return `None`.\n\n :param tensor: A `tf.Tensor`.\n :return: A list or `None`.\n \"\"\"\n static_shape = tensor.get_shape()\n if not static_shape:\n return None\n dynamic_shape = tf.shape(tensor)\n ret = [(val or dynamic_shape[i])\n for i, val in enumerate(static_shape.as_list())]\n return ret\n\n\ndef get_shape_at(tensor, axis):\n \"\"\"\n Similar to `tf.shape(tensor)[axis]`, but return a constant when possible.\n\n :param tensor: A Tensor.\n :param axis: `int`.\n\n :return: The shape along the axis specified.\n \"\"\"\n sizes_of_axes = get_shape_list(tensor)\n if sizes_of_axes:\n return sizes_of_axes[axis]\n return tf.shape(tensor)[axis]\n\n\ndef assert_rank_at_least(tensor, k, name):\n \"\"\"\n Whether the rank of `tensor` is at least k.\n\n :param tensor: A Tensor to be checked.\n :param k: The least rank allowed.\n :param name: The name of `tensor` for error message.\n\n :return: The checked tensor.\n \"\"\"\n static_shape = tensor.get_shape()\n shape_err_msg = '{} should have rank >= {}.'.format(name, k)\n if static_shape and (static_shape.ndims < k):\n raise ValueError(shape_err_msg)\n if not static_shape:\n _assert_shape_op = tf.assert_rank_at_least(\n tensor, k, message=shape_err_msg)\n with tf.control_dependencies([_assert_shape_op]):\n tensor = tf.identity(tensor)\n return tensor\n\n\ndef assert_rank_at_least_one(tensor, name):\n \"\"\"\n Whether the rank of `tensor` is at least one.\n\n :param tensor: A Tensor to be checked.\n :param name: The name of `tensor` for error message.\n\n :return: The checked tensor.\n \"\"\"\n return assert_rank_at_least(tensor, 1, 
name)\n\n\ndef assert_scalar(tensor, name):\n \"\"\"\n Whether the `tensor` is a scalar (0-D tensor).\n\n :param tensor: A Tensor to be checked.\n :param name: The name of `tensor` for error message.\n\n :return: The checked tensor.\n \"\"\"\n static_shape = tensor.get_shape()\n shape_err_msg = name + \" should be a scalar (0-D tensor).\"\n if static_shape and (static_shape.ndims >= 1):\n raise ValueError(shape_err_msg)\n else:\n _assert_shape_op = tf.assert_rank(tensor, 0, message=shape_err_msg)\n with tf.control_dependencies([_assert_shape_op]):\n tensor = tf.identity(tensor)\n return tensor\n\n\ndef assert_positive_int32_scalar(value, name):\n \"\"\"\n Whether `value` is a integer(or 0-D `tf.int32` tensor) and positive.\n If `value` is the instance of built-in type, it will be checked directly.\n Otherwise, it will be converted to a `tf.int32` tensor and checked.\n\n :param value: The value to be checked.\n :param name: The name of `value` used in error message.\n\n :return: The checked value.\n \"\"\"\n if isinstance(value, (int, float)):\n if isinstance(value, int) and value > 0:\n return value\n elif isinstance(value, float):\n raise TypeError(name + \" must be integer\")\n elif value <= 0:\n raise ValueError(name + \" must be positive\")\n else:\n try:\n tensor = tf.convert_to_tensor(value, tf.int32)\n except (TypeError, ValueError):\n raise TypeError(name + ' must be (convertible to) tf.int32')\n _assert_rank_op = tf.assert_rank(\n tensor, 0,\n message=name + \" should be a scalar (0-D Tensor).\")\n _assert_positive_op = tf.assert_greater(\n tensor, tf.constant(0, tf.int32),\n message=name + \" must be positive\")\n with tf.control_dependencies([_assert_rank_op,\n _assert_positive_op]):\n tensor = tf.identity(tensor)\n return tensor\n\n\ndef open_interval_standard_uniform(shape, dtype):\n \"\"\"\n Return samples from uniform distribution in unit open interval (0, 1).\n\n :param shape: The shape of generated samples.\n :param dtype: The dtype of generated 
samples.\n\n :return: A Tensor of samples.\n \"\"\"\n return tf.random_uniform(\n shape=shape,\n minval=np.finfo(dtype.as_numpy_dtype).tiny,\n maxval=1.,\n dtype=dtype)\n\n\ndef ensure_logstd_std_order_change(name, sentinel):\n \"\"\"Make sure the order of logstd/std has changed to std/logstd.\"\"\"\n if sentinel is not None:\n raise ValueError(\n \"The order of logstd/std has changed to std/logstd since 0.3.1. \"\n \"Please use named arguments: {}(mean, std=..., ...) or \"\n \"{}(mean, logstd=..., ...).\".format(name, name))\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nLogistic-normal topic models using Monte-Carlo EM\nDense implementation, O(n_docs*n_topics*n_vocab)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\nimport sys\nimport os\nimport time\n\nimport tensorflow as tf\nfrom six.moves import range, zip\nfrom copy import copy\nimport numpy as np\nimport zhusuan as zs\nfrom zhusuan.evaluation import AIS\n\nfrom examples import conf\nfrom examples.utils import dataset\n\n\n# Delta in LNTM is corresponding to eta in LDA(Blei et al., 2003),\n# which governs the prior of parameter in topic->word categorical distribution.\n# Larger log_delta leads to sparser topics.\nlog_delta = 10.0\n\n\[email protected]_bayesian_net(scope='lntm')\ndef lntm(n_chains, n_docs, n_topics, n_vocab, eta_mean, eta_logstd):\n bn = zs.BayesianNet()\n eta_mean = tf.tile(tf.expand_dims(eta_mean, 0), [n_docs, 1])\n eta = bn.normal('eta', eta_mean, logstd=eta_logstd, n_samples=n_chains,\n group_ndims=1)\n theta = tf.nn.softmax(eta)\n beta = bn.normal('beta', tf.zeros([n_topics, n_vocab]),\n logstd=log_delta, group_ndims=1)\n phi = tf.nn.softmax(beta)\n # doc_word: Document-word matrix\n doc_word = tf.matmul(tf.reshape(theta, [-1, n_topics]), phi)\n doc_word = tf.reshape(doc_word, [n_chains, n_docs, n_vocab])\n bn.unnormalized_multinomial('x', tf.log(doc_word), normalize_logits=False,\n 
dtype=tf.float32)\n return bn\n\n\nif __name__ == \"__main__\":\n tf.set_random_seed(1237)\n\n # Load nips dataset\n data_name = 'nips'\n data_path = os.path.join(conf.data_dir, data_name + '.pkl.gz')\n X, vocab = dataset.load_uci_bow(data_name, data_path)\n training_size = 1200\n X_train = X[:training_size, :]\n X_test = X[training_size:, :]\n\n # Define model training parameters\n batch_size = 100\n n_topics = 100\n n_vocab = X_train.shape[1]\n n_chains = 1\n\n num_e_steps = 5\n hmc = zs.HMC(step_size=1e-3, n_leapfrogs=20, adapt_step_size=True,\n target_acceptance_rate=0.6)\n epochs = 100\n learning_rate_0 = 1.0\n t0 = 10\n\n # Padding\n rem = batch_size - X_train.shape[0] % batch_size\n if rem < batch_size:\n X_train = np.vstack((X_train, np.zeros((rem, n_vocab))))\n\n iters = X_train.shape[0] // batch_size\n Eta = np.zeros((n_chains, X_train.shape[0], n_topics), dtype=np.float32)\n Eta_mean = np.zeros(n_topics, dtype=np.float32)\n Eta_logstd = np.zeros(n_topics, dtype=np.float32)\n\n # Build the computation graph\n x = tf.placeholder(tf.float32, shape=[batch_size, n_vocab], name='x')\n eta_mean = tf.placeholder(tf.float32, shape=[n_topics], name='eta_mean')\n eta_logstd = tf.placeholder(tf.float32, shape=[n_topics],\n name='eta_logstd')\n eta = tf.Variable(tf.zeros([n_chains, batch_size, n_topics]), name='eta')\n eta_ph = tf.placeholder(tf.float32, shape=[n_chains, batch_size, n_topics],\n name='eta_ph')\n beta = tf.Variable(tf.zeros([n_topics, n_vocab]), name='beta')\n phi = tf.nn.softmax(beta)\n init_eta_ph = tf.assign(eta, eta_ph)\n\n def e_obj(bn):\n return bn.cond_log_prob('eta') + bn.cond_log_prob('x')\n\n # E step: sample eta using HMC\n model = lntm(n_chains, batch_size, n_topics, n_vocab, eta_mean, eta_logstd)\n model.log_joint = e_obj\n sample_op, hmc_info = hmc.sample(model,\n observed={'x': x, 'beta': beta},\n latent={'eta': eta})\n # M step: optimize beta\n bn = model.observe(eta=eta, x=x, beta=beta)\n log_p_beta, log_px = bn.cond_log_prob(['beta', 
'x'])\n log_p_beta = tf.reduce_sum(log_p_beta)\n log_px = tf.reduce_sum(tf.reduce_mean(log_px, axis=0))\n log_joint_beta = log_p_beta + log_px\n learning_rate_ph = tf.placeholder(tf.float32, shape=[], name='lr')\n optimizer = tf.train.AdamOptimizer(learning_rate_ph)\n infer = optimizer.minimize(-log_joint_beta, var_list=[beta])\n\n # Below is the evaluation part.\n # Variables whose name starts with '_' is only used in the evaluation part,\n # to be distinguished from those variables used in the training part above.\n\n n_docs_test = X_test.shape[0]\n _n_chains = 25\n _n_temperatures = 1000\n\n _x = tf.placeholder(tf.float32, shape=[n_docs_test, n_vocab], name='x')\n _eta = tf.Variable(tf.zeros([_n_chains, n_docs_test, n_topics]),\n name='eta')\n\n _model = lntm(_n_chains, n_docs_test, n_topics, n_vocab,\n eta_mean, eta_logstd)\n _model.log_joint = e_obj\n proposal_model = copy(_model)\n\n def log_prior(bn):\n return bn.cond_log_prob('eta')\n\n proposal_model.log_joint = log_prior\n _hmc = zs.HMC(step_size=0.01, n_leapfrogs=20, adapt_step_size=True,\n target_acceptance_rate=0.6)\n ais = AIS(_model, proposal_model, _hmc,\n observed={'x': _x, 'beta': beta},\n latent={'eta': _eta},\n n_temperatures=_n_temperatures)\n\n # Run the inference\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n for epoch in range(1, epochs + 1):\n time_epoch = -time.time()\n learning_rate = learning_rate_0 * (t0 / (t0 + epoch))**2\n perm = list(range(X_train.shape[0]))\n np.random.shuffle(perm)\n X_train = X_train[perm, :]\n Eta = Eta[:, perm, :]\n lls = []\n accs = []\n for t in range(iters):\n x_batch = X_train[t*batch_size: (t+1)*batch_size]\n old_eta = Eta[:, t*batch_size: (t+1)*batch_size, :]\n\n # E step\n sess.run(init_eta_ph, feed_dict={eta_ph: old_eta})\n for j in range(num_e_steps):\n _, new_eta, acc = sess.run(\n [sample_op, hmc_info.samples['eta'],\n hmc_info.acceptance_rate],\n feed_dict={x: x_batch,\n eta_mean: Eta_mean,\n eta_logstd: Eta_logstd})\n 
accs.append(acc)\n # Store eta for the persistent chain\n if j + 1 == num_e_steps:\n Eta[:, t*batch_size: (t+1)*batch_size, :] = new_eta\n\n # M step\n _, ll = sess.run(\n [infer, log_px],\n feed_dict={x: x_batch,\n eta_mean: Eta_mean,\n eta_logstd: Eta_logstd,\n learning_rate_ph: learning_rate})\n lls.append(ll)\n\n # Update hyper-parameters\n Eta_mean = np.mean(Eta, axis=(0, 1))\n Eta_logstd = np.log(np.std(Eta, axis=(0, 1)) + 1e-6)\n\n time_epoch += time.time()\n print('Epoch {} ({:.1f}s): Perplexity = {:.2f}, acc = {:.3f}, '\n 'eta mean = {:.2f}, logstd = {:.2f}'\n .format(epoch, time_epoch,\n np.exp(-np.sum(lls) / np.sum(X_train)),\n np.mean(accs), np.mean(Eta_mean),\n np.mean(Eta_logstd)))\n\n # Output topics\n p = sess.run(phi)\n for k in range(n_topics):\n rank = list(zip(list(p[k, :]), range(n_vocab)))\n rank.sort()\n rank.reverse()\n sys.stdout.write('Topic {}, eta mean = {:.2f} stdev = {:.2f}: '\n .format(k, Eta_mean[k], np.exp(Eta_logstd[k])))\n for i in range(10):\n sys.stdout.write(vocab[rank[i][1]] + ' ')\n sys.stdout.write('\\n')\n\n # Run AIS\n print(\"Evaluating test perplexity using AIS...\")\n time_ais = -time.time()\n ll_lb = ais.run(sess, feed_dict={_x: X_test,\n eta_mean: Eta_mean,\n eta_logstd: Eta_logstd})\n time_ais += time.time()\n print('>> Test (AIS) ({:.1f}s)\\n'\n '>> log likelihood lower bound = {}\\n'\n '>> perplexity upper bound = {}'\n .format(time_ais, ll_lb,\n np.exp(-ll_lb * n_docs_test / np.sum(X_test))))\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.constant", "tensorflow.assert_rank_at_least", "tensorflow.control_dependencies", "tensorflow.shape", "tensorflow.assert_rank", "tensorflow.ones_like", "tensorflow.identity", "numpy.finfo", "tensorflow.lgamma", "tensorflow.rank" ], [ "tensorflow.zeros", "tensorflow.reduce_sum", "numpy.mean", "tensorflow.train.AdamOptimizer", "numpy.exp", "numpy.std", "tensorflow.Session", "numpy.zeros", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.set_random_seed", "numpy.sum", "tensorflow.nn.softmax", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.assign", "tensorflow.expand_dims", "numpy.random.shuffle", "tensorflow.log" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
dvoram/open_spiel
[ "aaff14f482acdcbe834e962abaebceca934e8095", "aaff14f482acdcbe834e962abaebceca934e8095" ]
[ "open_spiel/python/algorithms/cfr.py", "open_spiel/python/rl_environment.py" ]
[ "# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Python implementation of the counterfactual regret minimization algorithm.\n\nOne iteration of CFR consists of:\n1) Compute current strategy from regrets (e.g. using Regret Matching).\n2) Compute values using the current strategy\n3) Compute regrets from these values\n\nThe average policy is what converges to a Nash Equilibrium.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport numpy as np\n\nfrom open_spiel.python import policy\n\n_INITIAL_POSITIVE_VALUE = 1e-15\n\n\ndef _initialize_uniform_policy(state, cumulative_regret, cumulative_policy):\n \"\"\"Initializes `cumulative_regret` and `cumulative_policy`.\n\n Set `cumulative_regret` and `cumulative_policy` to _INITIAL_POSITIVE_VALUE\n for all (infostate, action).\n\n Args:\n state: The current state in the tree walk. 
This should be the root node when\n we call this function from a CFR solver.\n cumulative_regret: The dictionary mapping infostates, to\n {action: cumulative regrets}.\n cumulative_policy: Save as above for the cumulative policy.\n \"\"\"\n if state.is_terminal():\n return\n\n if state.is_chance_node():\n for action, unused_action_prob in state.chance_outcomes():\n _initialize_uniform_policy(\n state.child(action), cumulative_regret, cumulative_policy)\n return\n\n current_player = state.current_player()\n info_state = state.information_state(current_player)\n legal_actions = state.legal_actions(current_player)\n\n for action in legal_actions:\n cumulative_policy[info_state][action] = _INITIAL_POSITIVE_VALUE\n cumulative_regret[info_state][action] = _INITIAL_POSITIVE_VALUE\n _initialize_uniform_policy(\n state.child(action), cumulative_regret, cumulative_policy)\n\n\nclass _CFRSolver(object):\n \"\"\"Implements the Counterfactual Regret Minimization (CFR) algorithm.\n\n The algorithm computes an approximate Nash policy for 2 player zero-sum games.\n\n CFR can be view as a policy iteration algorithm. 
Importantly, the policies\n themselves do not converge to a Nash policy, but their average does.\n\n The main iteration loop is implemented in `evaluate_and_update_policy`:\n\n ```python\n game = pyspiel.load_game(\"game_name\")\n initial_state = game.new_initial_state()\n\n cfr_solver = CFRSolver(game)\n\n for i in range(num_iterations):\n cfr.evaluate_and_update_policy()\n ```\n\n Once the policy has converged, the average policy (which converges to the Nash\n policy) can be computed:\n ```python\n average_policy = cfr_solver.ComputeAveragePolicy()\n ```\n \"\"\"\n\n def __init__(self, game, initialize_cumulative_values, alternating_updates,\n linear_averaging, regret_matching_plus):\n # pyformat: disable\n \"\"\"Initializer.\n\n Args:\n game: The `pyspiel.Game` to run on.\n initialize_cumulative_values: Whether to initialize the average policy to\n the uniform policy (and the initial cumulative regret to an epsilon\n value). This is independent of the first CFR iteration, which, when the\n policy is fixed during traversal and we perform non alternating updates,\n will also compute the uniform policy and add it to the average of\n policies.\n alternating_updates: If `True`, alternating updates are performed: for\n each player, we compute and update the cumulative regrets and policies.\n In that case, and when the policy is frozen during tree traversal, the\n cache is reset after each update for one player.\n Otherwise, the update is simultaneous.\n linear_averaging: Whether to use linear averaging, i.e.\n cumulative_policy[info_state][action] += (\n iteration_number * reach_prob * action_prob)\n\n or not:\n\n cumulative_policy[info_state][action] += reach_prob * action_prob\n regret_matching_plus: Whether to use Regret Matching+:\n cumulative_regrets = max(cumulative_regrets + regrets, 0)\n or simply regret matching:\n cumulative_regrets = cumulative_regrets + regrets\n \"\"\"\n # pyformat: enable\n self._game = game\n self._num_players = game.num_players()\n 
self._root_node = self._game.new_initial_state()\n\n # Map from information states string representations and actions to the\n # counterfactual regrets, accumulated over the policy iterations\n self._cumulative_regret = collections.defaultdict(\n lambda: collections.defaultdict(float))\n # Same as above for the cumulative of the policy probabilities computed\n # during the policy iterations\n self._cumulative_policy = collections.defaultdict(\n lambda: collections.defaultdict(float))\n if initialize_cumulative_values:\n _initialize_uniform_policy(self._root_node, self._cumulative_regret,\n self._cumulative_policy)\n\n self._policy = {}\n\n self._root_node = self._game.new_initial_state()\n\n # This is for returning the current average policy to a caller\n self._average_policy = policy.TabularPolicy(game)\n\n self._linear_averaging = linear_averaging\n self._iteration = 0 # For possible linear-averaging.\n\n self._alternating_updates = alternating_updates\n self._regret_matching_plus = regret_matching_plus\n\n def _apply_regret_matching_plus_reset(self):\n \"\"\"Resets negative cumulative regrets to 0.\n\n Regret Matching+ corresponds to the following cumulative regrets update:\n cumulative_regrets = max(cumulative_regrets + regrets, 0)\n\n This must be done at the level of the information set, and thus cannot be\n done during the tree traversal (which is done on histories). 
It is thus\n performed as an additional step.\n \"\"\"\n for action_to_cum_regret in self._cumulative_regret.values():\n for action, cumulative_regret in action_to_cum_regret.items():\n if cumulative_regret < 0:\n action_to_cum_regret[action] = 0\n\n def evaluate_and_update_policy(self):\n \"\"\"Performs a single step of policy evaluation and policy improvement.\"\"\"\n self._iteration += 1\n if self._alternating_updates:\n for player in range(self._game.num_players()):\n self._compute_counterfactual_regret_for_player(\n self._root_node, np.ones(self._game.num_players() + 1), player)\n if self._regret_matching_plus:\n self._apply_regret_matching_plus_reset()\n self._policy.clear()\n else:\n self._compute_counterfactual_regret_for_player(\n self._root_node, np.ones(self._game.num_players() + 1), player=None)\n if self._regret_matching_plus:\n self._apply_regret_matching_plus_reset()\n\n self._policy.clear()\n\n def average_policy(self):\n \"\"\"Returns the average of all policies iterated.\n\n This average policy converges to a Nash policy as the number of iterations\n increases.\n\n The policy is computed using the accumulated policy probabilities computed\n using `evaluate_and_update_policy`.\n\n Returns:\n A `policy.TabularPolicy` object, giving the policy for both players.\n \"\"\"\n for info_state, info_state_policies_sum in self._cumulative_policy.items():\n state_policy = self._average_policy.policy_for_key(info_state)\n probabilities_sum = sum(info_state_policies_sum.values())\n for action, action_prob_sum in info_state_policies_sum.items():\n state_policy[action] = action_prob_sum / probabilities_sum\n return self._average_policy\n\n def _compute_counterfactual_regret_for_player(self, state,\n reach_probabilities, player):\n \"\"\"Increments the cumulative regrets and policy for `player`.\n\n Args:\n state: The initial game state to analyze from.\n reach_probabilities: The probability for each player of reaching `state`\n as a numpy array [prob for player 
0, for player 1,..., for chance].\n `player_reach_probabilities[player]` will work in all cases.\n player: The 0-indexed player to update the values for. If `None`, the\n update for all players will be performed.\n\n Returns:\n The utility of `state` for all players, assuming all players follow the\n current policy defined by `self.Policy`.\n \"\"\"\n if state.is_terminal():\n return np.asarray(state.returns())\n\n if state.is_chance_node():\n state_value = 0.0\n for action, action_prob in state.chance_outcomes():\n assert action_prob > 0\n new_state = state.child(action)\n new_reach_probabilities = reach_probabilities.copy()\n new_reach_probabilities[-1] *= action_prob\n state_value += action_prob * self._compute_counterfactual_regret_for_player(\n new_state, new_reach_probabilities, player)\n return state_value\n\n current_player = state.current_player()\n info_state = state.information_state(current_player)\n legal_actions = state.legal_actions(current_player)\n\n # No need to continue on this history branch as no update will be performed\n # for any player.\n # The value we return here is not used in practice. If the conditional\n # statement is True, then the last taken action has probability 0 of\n # occurring, so the returned value is not impacting the parent node value.\n if all(reach_probabilities[:-1] == 0):\n return np.zeros(self._num_players)\n\n state_value = np.zeros(self._num_players)\n\n # The utilities of the children states are computed recursively. As the\n # regrets are added to the information state regrets for each state in that\n # information state, the recursive call can only be made once per child\n # state. 
Therefore, the utilities are cached.\n children_utilities = {}\n\n info_state_policy = self._compute_policy_or_get_it_from_cache(\n info_state, legal_actions)\n for action, action_prob in info_state_policy.items():\n new_state = state.child(action)\n new_reach_probabilities = reach_probabilities.copy()\n new_reach_probabilities[current_player] *= action_prob\n child_utility = self._compute_counterfactual_regret_for_player(\n new_state, reach_probabilities=new_reach_probabilities, player=player)\n\n state_value += action_prob * child_utility\n children_utilities[action] = child_utility\n\n # If we are performing alternating updates, and the current player is not\n # the current_player, we skip the cumulative values update.\n # If we are performing simultaneous updates, we do update the cumulative\n # values.\n simulatenous_updates = player is None\n if not simulatenous_updates and current_player != player:\n return state_value\n\n reach_prob = reach_probabilities[current_player]\n counterfactual_reach_prob = (\n np.prod(reach_probabilities[:current_player]) *\n np.prod(reach_probabilities[current_player + 1:]))\n state_value_for_player = state_value[current_player]\n\n for action, action_prob in info_state_policy.items():\n cfr_regret = counterfactual_reach_prob * (\n children_utilities[action][current_player] - state_value_for_player)\n\n self._cumulative_regret[info_state][action] += cfr_regret\n if self._linear_averaging:\n self._cumulative_policy[info_state][\n action] += self._iteration * reach_prob * action_prob\n else:\n self._cumulative_policy[info_state][action] += reach_prob * action_prob\n\n return state_value\n\n def _compute_policy_or_get_it_from_cache(self, info_state, legal_actions):\n \"\"\"Returns an {action: prob} dictionary for the policy on `info_state`.\"\"\"\n retrieved_state = self._policy.get(info_state)\n\n if retrieved_state is not None:\n return self._policy[info_state]\n\n policy_for_state = self._regret_matching(info_state, 
legal_actions)\n self._policy[info_state] = policy_for_state\n return policy_for_state\n\n def _regret_matching(self, info_state, legal_actions):\n \"\"\"Returns an info state policy by applying regret-matching.\n\n Args:\n info_state: a string key for the information set.\n legal_actions: the list of legal actions at this state.\n\n Returns:\n info_state_policy: a dict of action -> prob for all legal actions.\n \"\"\"\n regrets = self._cumulative_regret[info_state].values()\n sum_positive_regrets = sum((regret for regret in regrets if regret > 0))\n\n info_state_policy = {}\n if sum_positive_regrets > 0:\n for action in legal_actions:\n positive_action_regret = max(\n 0.0, self._cumulative_regret[info_state][action])\n info_state_policy[action] = (\n positive_action_regret / sum_positive_regrets)\n else:\n for action in legal_actions:\n info_state_policy[action] = 1.0 / len(legal_actions)\n return info_state_policy\n\n\nclass CFRPlusSolver(_CFRSolver):\n \"\"\"CFR+ implementation.\n\n The algorithm computes an approximate Nash policy for 2 player zero-sum games.\n More generally, it should approach a no-regret set, which corresponds to the\n set of coarse-correlated equilibria. See https://arxiv.org/abs/1305.0034\n\n CFR can be view as a policy iteration algorithm. 
Importantly, the policies\n themselves do not converge to a Nash policy, but their average does.\n\n See https://poker.cs.ualberta.ca/publications/2015-ijcai-cfrplus.pdf\n\n CFR+ is CFR with the following modifications:\n - use Regret Matching+ instead of Regret Matching.\n - use alternating updates instead of simultaneous updates.\n - use linear averaging.\n\n Usage:\n\n ```python\n game = pyspiel.load_game(\"game_name\")\n initial_state = game.new_initial_state()\n\n cfr_solver = CFRSolver(game)\n\n for i in range(num_iterations):\n cfr.evaluate_and_update_policy()\n ```\n\n Once the policy has converged, the average policy (which converges to the Nash\n policy) can be computed:\n ```python\n average_policy = cfr_solver.ComputeAveragePolicy()\n ```\n \"\"\"\n\n def __init__(self, game):\n super(CFRPlusSolver, self).__init__(\n game,\n initialize_cumulative_values=True,\n regret_matching_plus=True,\n alternating_updates=True,\n linear_averaging=True)\n\n\nclass CFRSolver(_CFRSolver):\n \"\"\"Implements the Counterfactual Regret Minimization (CFR) algorithm.\n\n See https://poker.cs.ualberta.ca/publications/NIPS07-cfr.pdf\n\n NOTE: We use alternating updates (which was not the case in the original\n paper) because it has been proved to be far more efficient.\n \"\"\"\n\n def __init__(self, game):\n super(CFRSolver, self).__init__(\n game,\n initialize_cumulative_values=False,\n regret_matching_plus=False,\n alternating_updates=True,\n linear_averaging=False)\n", "# Copyright 2019 DeepMind Technologies Ltd. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Reinforcement Learning (RL) Environment for Open Spiel.\n\nThis module wraps Open Spiel Python interface providing an RL-friendly API. It\ncovers both turn-based and simultaneous move games. Interactions between agents\nand the underlying game occur mostly through the `reset` and `step` methods,\nwhich return a `TimeStep` structure (see its docstrings for more info).\n\nThe following example illustrates the interaction dynamics. Consider a 2-player\nKuhn Poker (turn-based game). Agents have access to the `observations` (a dict)\nfield from `TimeSpec`, containing the following members:\n * `info_state`: list containing the game information state for each player. The\n size of the list always correspond to the number of players. E.g.:\n [[0, 1, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]].\n * `legal_actions`: list containing legal action ID lists (one for each player).\n E.g.: [[0, 1], [0]], which corresponds to actions 0 and 1 being valid for\n player 0 (the 1st player) and action 0 being valid for player 1 (2nd player).\n * `current_player`: zero-based integer representing the player to make a move.\n\nAt each `step` call, the environment expects a singleton list with the action\n(as it's a turn-based game), e.g.: [1]. This (zero-based) action must correspond\nto the player specified at `current_player`. 
The game (which is at decision\nnode) will process the action and take as many steps necessary to cover chance\nnodes, halting at a new decision or final node. Finally, a new `TimeStep`is\nreturned to the agent.\n\nSimultaneous-move games follow analogous dynamics. The only differences is the\nenvironment expects a list of actions, one per player. Note the `current_player`\nfield is \"irrelevant\" here, admitting a constant value defined in spiel.h, which\ndefaults to -2 (module level constant `SIMULTANEOUS_PLAYER_ID`).\n\nSee open_spiel/python/examples/rl_example.py for example usages.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport logging\nimport enum\nimport numpy as np\n\nimport pyspiel\n\nSIMULTANEOUS_PLAYER_ID = pyspiel.PlayerId.SIMULTANEOUS\n\n\nclass TimeStep(\n collections.namedtuple(\n \"TimeStep\", [\"observations\", \"rewards\", \"discounts\", \"step_type\"])):\n \"\"\"Returned with every call to `step` and `reset`.\n\n A `TimeStep` contains the data emitted by a game at each step of interaction.\n A `TimeStep` holds an `observation` (list of dicts, one per player),\n associated lists of `rewards`, `discounts` and a `step_type`.\n\n The first `TimeStep` in a sequence will have `StepType.FIRST`. The final\n `TimeStep` will have `StepType.LAST`. All other `TimeStep`s in a sequence will\n have `StepType.MID.\n\n Attributes:\n observations: a list of dicts containing observations per player.\n rewards: A list of scalars (one per player), or `None` if `step_type` is\n `StepType.FIRST`, i.e. 
at the start of a sequence.\n discounts: A list of discount values in the range `[0, 1]` (one per player),\n or `None` if `step_type` is `StepType.FIRST`.\n step_type: A `StepType` enum value.\n \"\"\"\n __slots__ = ()\n\n def first(self):\n return self.step_type == StepType.FIRST\n\n def mid(self):\n return self.step_type == StepType.MID\n\n def last(self):\n return self.step_type == StepType.LAST\n\n def is_simultaneous_move(self):\n return self.observations[\"current_player\"] == SIMULTANEOUS_PLAYER_ID\n\n\nclass StepType(enum.Enum):\n \"\"\"Defines the status of a `TimeStep` within a sequence.\"\"\"\n\n FIRST = 0 # Denotes the first `TimeStep` in a sequence.\n MID = 1 # Denotes any `TimeStep` in a sequence that is not FIRST or LAST.\n LAST = 2 # Denotes the last `TimeStep` in a sequence.\n\n def first(self):\n return self is StepType.FIRST\n\n def mid(self):\n return self is StepType.MID\n\n def last(self):\n return self is StepType.LAST\n\n\n# Global pyspiel members\ndef registered_games():\n return pyspiel.registered_games()\n\n\nclass Environment(object):\n \"\"\"Open Spiel reinforcement learning environment class.\"\"\"\n\n def __init__(self, game_name, discount=1.0, seed=None, **kwargs):\n \"\"\"Constructor.\n\n Args:\n game_name: string, Open Spiel game name.\n discount: float, discount used in non-initial steps. Defaults to 1.0.\n seed: int, random number generator seed. 
Defaults to None.\n **kwargs: dict, additional settings passed to the Open Spiel game.\n \"\"\"\n self._rng = np.random.RandomState(seed)\n\n game_settings = {\n key: pyspiel.GameParameter(val) for (key, val) in kwargs.items()\n }\n logging.info(\"Using game settings: %s\", game_settings)\n self._game = pyspiel.load_game(game_name, game_settings)\n self._num_players = self._game.num_players()\n self._state = None\n self._should_reset = True\n\n # Discount returned at non-initial steps.\n self._discounts = [discount] * self._num_players\n\n # Decide whether to use observation or information_state\n if self._game.get_type().provides_information_state_as_normalized_vector:\n self._use_observation = False\n elif self._game.get_type().provides_observation_as_normalized_vector:\n self._use_observation = True\n else:\n raise ValueError(\"Game must provide either information state or \"\n \"observation as a normalized vector\")\n\n def step(self, actions):\n \"\"\"Updates the environment according to `actions` and returns a `TimeStep`.\n\n If the environment returned a `TimeStep` with `StepType.LAST` at the\n previous step, this call to `step` will start a new sequence and `actions`\n will be ignored.\n\n This method will also start a new sequence if called after the environment\n has been constructed and `reset` has not been called. Again, in this case\n `actions` will be ignored.\n\n Args:\n actions: a list containing one action per player, following specifications\n defined in `action_spec()`.\n\n Returns:\n A `TimeStep` namedtuple containing:\n observation: list of dicts containing one observations per player, each\n corresponding to `observation_spec()`.\n reward: list of rewards at this timestep, or None if step_type is\n `StepType.FIRST`.\n discount: list of discounts in the range [0, 1], or None if step_type is\n `StepType.FIRST`.\n step_type: A `StepType` value.\n \"\"\"\n assert len(actions) == self.num_actions_per_step, (\n \"Invalid number of actions! 
Expected {}\".format(self.num_players))\n if self._should_reset:\n return self.reset()\n\n if self.is_turn_based:\n self._state.apply_action(actions[0])\n else:\n self._state.apply_actions(actions)\n self._sample_external_events()\n\n observations = {\"info_state\": [], \"legal_actions\": [], \"current_player\": []}\n rewards = []\n step_type = StepType.LAST if self._state.is_terminal() else StepType.MID\n self._should_reset = step_type == StepType.LAST\n\n cur_rewards = self._state.rewards()\n for player_id in range(self.num_players):\n rewards.append(cur_rewards[player_id])\n observations[\"info_state\"].append(\n self._state.observation_as_normalized_vector(player_id) if self\n ._use_observation else self._state\n .information_state_as_normalized_vector(player_id))\n\n observations[\"legal_actions\"].append(self._state.legal_actions(player_id))\n observations[\"current_player\"] = self._state.current_player()\n\n return TimeStep(\n observations=observations,\n rewards=rewards,\n discounts=self._discounts,\n step_type=step_type)\n\n def reset(self):\n \"\"\"Starts a new sequence and returns the first `TimeStep` of this sequence.\n\n Returns:\n A `TimeStep` namedtuple containing:\n observations: list of dicts containing one observations per player, each\n corresponding to `observation_spec()`.\n rewards: list of rewards at this timestep, or None if step_type is\n `StepType.FIRST`.\n discounts: list of discounts in the range [0, 1], or None if step_type\n is `StepType.FIRST`.\n step_type: A `StepType` value.\n \"\"\"\n self._should_reset = False\n self._state = self._game.new_initial_state()\n self._sample_external_events()\n\n observations = {\"info_state\": [], \"legal_actions\": [], \"current_player\": []}\n for player_id in range(self.num_players):\n observations[\"info_state\"].append(\n self._state.observation_as_normalized_vector(player_id) if self\n ._use_observation else self._state\n .information_state_as_normalized_vector(player_id))\n 
observations[\"legal_actions\"].append(self._state.legal_actions(player_id))\n observations[\"current_player\"] = self._state.current_player()\n\n return TimeStep(\n observations=observations,\n rewards=None,\n discounts=None,\n step_type=StepType.FIRST)\n\n def _sample_external_events(self):\n \"\"\"Sample chance events until we get to a decision node.\"\"\"\n while self._state.is_chance_node():\n if self._state.is_terminal():\n return\n actions, probs = zip(*self._state.chance_outcomes())\n action = self._rng.choice(actions, p=probs)\n self._state.apply_action(action)\n\n def observation_spec(self):\n \"\"\"Defines the observation per player provided by the environment.\n\n Each dict member will contain its expected structure and shape. E.g.: for\n Kuhn Poker {\"info_state\": (6,), \"legal_actions\": (2,), \"current_player\": ()}\n\n Returns:\n A specification dict describing the observation fields and shapes.\n \"\"\"\n return dict(\n info_state=tuple([\n self._game.observation_normalized_vector_size()\n if self._use_observation else\n self._game.information_state_normalized_vector_size()\n ]),\n legal_actions=(self._game.num_distinct_actions(),),\n current_player=(),\n )\n\n def action_spec(self):\n \"\"\"Defines per player action specifications.\n\n Specifications include action boundaries and their data type.\n E.g.: for Kuhn Poker {\"num_actions\": 2, \"min\": 0, \"max\":1, \"dtype\": int}\n\n Returns:\n A specification dict containing per player action properties.\n \"\"\"\n return dict(\n num_actions=self._game.num_distinct_actions(),\n min=0,\n max=self._game.num_distinct_actions() - 1,\n dtype=int,\n )\n\n # Game properties\n @property\n def name(self):\n return self._game.get_type().short_name\n\n @property\n def num_players(self):\n return self._game.num_players()\n\n @property\n def num_actions_per_step(self):\n return 1 if self.is_turn_based else self.num_players\n\n # New RL calls for more advanced use cases (e.g. 
search + RL).\n @property\n def is_turn_based(self):\n return self._game.get_type(\n ).dynamics == pyspiel.GameType.Dynamics.SEQUENTIAL\n\n @property\n def max_game_length(self):\n return self._game.max_game_length()\n\n @property\n def is_chance_node(self):\n return self._state.is_chance_node()\n\n @property\n def game(self):\n return self._game\n\n def set_state(self, unused_new_state):\n \"\"\"Updates the game state.\"\"\"\n # TODO(author3): add set/get state methods\n pass\n\n @property\n def get_state(self):\n # TODO(author3): add set/get state methods\n pass\n" ]
[ [ "numpy.zeros", "numpy.prod" ], [ "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhangzylogo/arrow
[ "e095ca5748e20cf81b6b8ddc128a916976e4cdea" ]
[ "python/pyarrow/tests/test_parquet.py" ]
[ "# -*- coding: utf-8 -*-\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom collections import OrderedDict\nimport datetime\nimport decimal\nimport io\nimport json\nimport os\nimport six\nimport pickle\nimport pytest\n\nimport numpy as np\n\nimport pyarrow as pa\nfrom pyarrow.compat import guid, u, BytesIO, unichar, PY2\nfrom pyarrow.pandas_compat import _pandas_api\nfrom pyarrow.tests import util\nfrom pyarrow.filesystem import LocalFileSystem, FileSystem\n\ntry:\n import pyarrow.parquet as pq\nexcept ImportError:\n pq = None\n\n\ntry:\n import pandas as pd\n import pandas.util.testing as tm\n from .pandas_examples import dataframe_with_arrays, dataframe_with_lists\nexcept ImportError:\n pd = tm = None\n\n\n# Marks all of the tests in this module\n# Ignore these with pytest ... 
-m 'not parquet'\npytestmark = pytest.mark.parquet\n\n\[email protected](scope='module')\ndef datadir(datadir):\n return datadir / 'parquet'\n\n\ndef _write_table(table, path, **kwargs):\n # So we see the ImportError somewhere\n import pyarrow.parquet as pq\n\n if _pandas_api.is_data_frame(table):\n table = pa.Table.from_pandas(table)\n\n pq.write_table(table, path, **kwargs)\n return table\n\n\ndef _read_table(*args, **kwargs):\n return pq.read_table(*args, **kwargs)\n\n\ndef _roundtrip_table(table, read_table_kwargs=None,\n write_table_kwargs=None):\n read_table_kwargs = read_table_kwargs or {}\n write_table_kwargs = write_table_kwargs or {}\n\n buf = io.BytesIO()\n _write_table(table, buf, **write_table_kwargs)\n buf.seek(0)\n return _read_table(buf, **read_table_kwargs)\n\n\ndef _check_roundtrip(table, expected=None, read_table_kwargs=None,\n **write_table_kwargs):\n if expected is None:\n expected = table\n\n read_table_kwargs = read_table_kwargs or {}\n\n # intentionally check twice\n result = _roundtrip_table(table, read_table_kwargs=read_table_kwargs,\n write_table_kwargs=write_table_kwargs)\n assert result.equals(expected)\n result = _roundtrip_table(result, read_table_kwargs=read_table_kwargs,\n write_table_kwargs=write_table_kwargs)\n assert result.equals(expected)\n\n\ndef _roundtrip_pandas_dataframe(df, write_kwargs):\n table = pa.Table.from_pandas(df)\n\n buf = io.BytesIO()\n _write_table(table, buf, **write_kwargs)\n\n buf.seek(0)\n table1 = _read_table(buf)\n return table1.to_pandas()\n\n\[email protected]('dtype', [int, float])\ndef test_single_pylist_column_roundtrip(tempdir, dtype):\n filename = tempdir / 'single_{}_column.parquet'.format(dtype.__name__)\n data = [pa.array(list(map(dtype, range(5))))]\n table = pa.Table.from_arrays(data, names=['a'])\n _write_table(table, filename)\n table_read = _read_table(filename)\n for i in range(table.num_columns):\n col_written = table[i]\n col_read = table_read[i]\n assert table.field(i).name == 
table_read.field(i).name\n assert col_read.num_chunks == 1\n data_written = col_written.chunk(0)\n data_read = col_read.chunk(0)\n assert data_written.equals(data_read)\n\n\ndef alltypes_sample(size=10000, seed=0, categorical=False):\n np.random.seed(seed)\n arrays = {\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n # TODO(wesm): Test other timestamp resolutions now that arrow supports\n # them\n 'datetime': np.arange(\"2016-01-01T00:00:00.001\", size,\n dtype='datetime64[ms]'),\n 'str': pd.Series([str(x) for x in range(size)]),\n 'empty_str': [''] * size,\n 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],\n 'null': [None] * size,\n 'null_list': [None] * 2 + [[None] * (x % 4) for x in range(size - 2)],\n }\n if categorical:\n arrays['str_category'] = arrays['str'].astype('category')\n return pd.DataFrame(arrays)\n\n\[email protected]\[email protected]('chunk_size', [None, 1000])\ndef test_pandas_parquet_2_0_roundtrip(tempdir, chunk_size):\n df = alltypes_sample(size=10000, categorical=True)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n assert arrow_table.schema.pandas_metadata is not None\n\n _write_table(arrow_table, filename, version=\"2.0\",\n coerce_timestamps='ms', chunk_size=chunk_size)\n table_read = pq.read_pandas(filename)\n assert table_read.schema.pandas_metadata is not None\n\n assert arrow_table.schema.metadata == table_read.schema.metadata\n\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\ndef 
test_set_data_page_size():\n arr = pa.array([1, 2, 3] * 1000000)\n t = pa.Table.from_arrays([arr], names=['f0'])\n\n # 128K, 256K, 512K\n page_sizes = [2 << 16, 2 << 17, 2 << 18]\n for target_page_size in page_sizes:\n _check_roundtrip(t, data_page_size=target_page_size)\n\n\[email protected]\ndef test_chunked_table_write():\n # ARROW-232\n df = alltypes_sample(size=10)\n\n batch = pa.RecordBatch.from_pandas(df)\n table = pa.Table.from_batches([batch] * 3)\n _check_roundtrip(table, version='2.0')\n\n df, _ = dataframe_with_lists()\n batch = pa.RecordBatch.from_pandas(df)\n table = pa.Table.from_batches([batch] * 3)\n _check_roundtrip(table, version='2.0')\n\n\[email protected]\ndef test_no_memory_map(tempdir):\n df = alltypes_sample(size=10)\n\n table = pa.Table.from_pandas(df)\n _check_roundtrip(table, read_table_kwargs={'memory_map': False},\n version='2.0')\n\n filename = str(tempdir / 'tmp_file')\n with open(filename, 'wb') as f:\n _write_table(table, f, version='2.0')\n table_read = pq.read_pandas(filename, memory_map=False)\n assert table_read.equals(table)\n\n\ndef test_special_chars_filename(tempdir):\n table = pa.Table.from_arrays([pa.array([42])], [\"ints\"])\n filename = \"foo # bar\"\n path = tempdir / filename\n assert not path.exists()\n _write_table(table, str(path))\n assert path.exists()\n table_read = _read_table(str(path))\n assert table_read.equals(table)\n\n\[email protected]\ndef test_empty_table_roundtrip():\n df = alltypes_sample(size=10)\n\n # Create a non-empty table to infer the types correctly, then slice to 0\n table = pa.Table.from_pandas(df)\n table = pa.Table.from_arrays(\n [col.chunk(0)[:0] for col in table.itercolumns()],\n names=table.schema.names)\n\n assert table.schema.field('null').type == pa.null()\n assert table.schema.field('null_list').type == pa.list_(pa.null())\n _check_roundtrip(table, version='2.0')\n\n\[email protected]\ndef test_empty_table_no_columns():\n df = pd.DataFrame()\n empty = pa.Table.from_pandas(df, 
preserve_index=False)\n _check_roundtrip(empty)\n\n\ndef test_empty_lists_table_roundtrip():\n # ARROW-2744: Shouldn't crash when writing an array of empty lists\n arr = pa.array([[], []], type=pa.list_(pa.int32()))\n table = pa.Table.from_arrays([arr], [\"A\"])\n _check_roundtrip(table)\n\n\[email protected]\ndef test_pandas_parquet_datetime_tz():\n s = pd.Series([datetime.datetime(2017, 9, 6)])\n s = s.dt.tz_localize('utc')\n\n s.index = s\n\n # Both a column and an index to hit both use cases\n df = pd.DataFrame({'tz_aware': s,\n 'tz_eastern': s.dt.tz_convert('US/Eastern')},\n index=s)\n\n f = BytesIO()\n\n arrow_table = pa.Table.from_pandas(df)\n\n _write_table(arrow_table, f, coerce_timestamps='ms')\n f.seek(0)\n\n table_read = pq.read_pandas(f)\n\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\[email protected](six.PY2, reason='datetime.timezone is available since '\n 'python version 3.2')\ndef test_datetime_timezone_tzinfo():\n value = datetime.datetime(2018, 1, 1, 1, 23, 45,\n tzinfo=datetime.timezone.utc)\n df = pd.DataFrame({'foo': [value]})\n\n _roundtrip_pandas_dataframe(df, write_kwargs={})\n\n\[email protected]\ndef test_pandas_parquet_custom_metadata(tempdir):\n df = alltypes_sample(size=10000)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n assert b'pandas' in arrow_table.schema.metadata\n\n _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')\n\n metadata = pq.read_metadata(filename).metadata\n assert b'pandas' in metadata\n\n js = json.loads(metadata[b'pandas'].decode('utf8'))\n assert js['index_columns'] == [{'kind': 'range',\n 'name': None,\n 'start': 0, 'stop': 10000,\n 'step': 1}]\n\n\[email protected]\ndef test_pandas_parquet_column_multiindex(tempdir):\n df = alltypes_sample(size=10)\n df.columns = pd.MultiIndex.from_tuples(\n list(zip(df.columns, df.columns[::-1])),\n names=['level_1', 'level_2']\n )\n\n filename = tempdir / 
'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n assert arrow_table.schema.pandas_metadata is not None\n\n _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')\n\n table_read = pq.read_pandas(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(tempdir):\n df = alltypes_sample(size=10000)\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n js = arrow_table.schema.pandas_metadata\n assert not js['index_columns']\n # ARROW-2170\n # While index_columns should be empty, columns needs to be filled still.\n assert js['columns']\n\n _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')\n table_read = pq.read_pandas(filename)\n\n js = table_read.schema.pandas_metadata\n assert not js['index_columns']\n\n assert arrow_table.schema.metadata == table_read.schema.metadata\n\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_parquet_1_0_roundtrip(tempdir):\n size = 10000\n np.random.seed(0)\n df = pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n 'str': [str(x) for x in range(size)],\n 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],\n 'empty_str': [''] * size\n })\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n _write_table(arrow_table, filename, 
version='1.0')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n\n # We pass uint32_t as int64_t if we write Parquet version 1.0\n df['uint32'] = df['uint32'].values.astype(np.int64)\n\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_multiple_path_types(tempdir):\n # Test compatibility with PEP 519 path-like objects\n path = tempdir / 'zzz.parquet'\n df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})\n _write_table(df, path)\n table_read = _read_table(path)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n # Test compatibility with plain string paths\n path = str(tempdir) + 'zzz.parquet'\n df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})\n _write_table(df, path)\n table_read = _read_table(path)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_column_selection(tempdir):\n size = 10000\n np.random.seed(0)\n df = pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16)\n })\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n _write_table(arrow_table, filename)\n table_read = _read_table(filename, columns=['uint8'])\n df_read = table_read.to_pandas()\n\n tm.assert_frame_equal(df[['uint8']], df_read)\n\n # ARROW-4267: Selection of duplicate columns still leads to these columns\n # being read uniquely.\n table_read = _read_table(filename, columns=['uint8', 'uint8'])\n df_read = table_read.to_pandas()\n\n tm.assert_frame_equal(df[['uint8']], df_read)\n\n\ndef _random_integers(size, dtype):\n # We do not generate integers outside the int64 range\n platform_int_info = np.iinfo('int_')\n iinfo = np.iinfo(dtype)\n return np.random.randint(max(iinfo.min, platform_int_info.min),\n min(iinfo.max, platform_int_info.max),\n size=size).astype(dtype)\n\n\ndef _test_dataframe(size=10000, seed=0):\n np.random.seed(seed)\n df = pd.DataFrame({\n 'uint8': 
_random_integers(size, np.uint8),\n 'uint16': _random_integers(size, np.uint16),\n 'uint32': _random_integers(size, np.uint32),\n 'uint64': _random_integers(size, np.uint64),\n 'int8': _random_integers(size, np.int8),\n 'int16': _random_integers(size, np.int16),\n 'int32': _random_integers(size, np.int32),\n 'int64': _random_integers(size, np.int64),\n 'float32': np.random.randn(size).astype(np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n 'strings': [tm.rands(10) for i in range(size)],\n 'all_none': [None] * size,\n 'all_none_category': [None] * size\n })\n # TODO(PARQUET-1015)\n # df['all_none_category'] = df['all_none_category'].astype('category')\n return df\n\n\[email protected]\ndef test_pandas_parquet_native_file_roundtrip(tempdir):\n df = _test_dataframe(10000)\n arrow_table = pa.Table.from_pandas(df)\n imos = pa.BufferOutputStream()\n _write_table(arrow_table, imos, version=\"2.0\")\n buf = imos.getvalue()\n reader = pa.BufferReader(buf)\n df_read = _read_table(reader).to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_parquet_incremental_file_build(tempdir):\n df = _test_dataframe(100)\n df['unique_id'] = 0\n\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n out = pa.BufferOutputStream()\n\n writer = pq.ParquetWriter(out, arrow_table.schema, version='2.0')\n\n frames = []\n for i in range(10):\n df['unique_id'] = i\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n writer.write_table(arrow_table)\n\n frames.append(df.copy())\n\n writer.close()\n\n buf = out.getvalue()\n result = _read_table(pa.BufferReader(buf))\n\n expected = pd.concat(frames, ignore_index=True)\n tm.assert_frame_equal(result.to_pandas(), expected)\n\n\[email protected]\ndef test_read_pandas_column_subset(tempdir):\n df = _test_dataframe(10000)\n arrow_table = pa.Table.from_pandas(df)\n imos = pa.BufferOutputStream()\n _write_table(arrow_table, imos, version=\"2.0\")\n buf = 
imos.getvalue()\n reader = pa.BufferReader(buf)\n df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas()\n tm.assert_frame_equal(df[['strings', 'uint8']], df_read)\n\n\[email protected]\ndef test_pandas_parquet_empty_roundtrip(tempdir):\n df = _test_dataframe(0)\n arrow_table = pa.Table.from_pandas(df)\n imos = pa.BufferOutputStream()\n _write_table(arrow_table, imos, version=\"2.0\")\n buf = imos.getvalue()\n reader = pa.BufferReader(buf)\n df_read = _read_table(reader).to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_parquet_pyfile_roundtrip(tempdir):\n filename = tempdir / 'pandas_pyfile_roundtrip.parquet'\n size = 5\n df = pd.DataFrame({\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0,\n 'strings': ['foo', 'bar', None, 'baz', 'qux']\n })\n\n arrow_table = pa.Table.from_pandas(df)\n\n with filename.open('wb') as f:\n _write_table(arrow_table, f, version=\"1.0\")\n\n data = io.BytesIO(filename.read_bytes())\n\n table_read = _read_table(data)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_pandas_parquet_configuration_options(tempdir):\n size = 10000\n np.random.seed(0)\n df = pd.DataFrame({\n 'uint8': np.arange(size, dtype=np.uint8),\n 'uint16': np.arange(size, dtype=np.uint16),\n 'uint32': np.arange(size, dtype=np.uint32),\n 'uint64': np.arange(size, dtype=np.uint64),\n 'int8': np.arange(size, dtype=np.int16),\n 'int16': np.arange(size, dtype=np.int16),\n 'int32': np.arange(size, dtype=np.int32),\n 'int64': np.arange(size, dtype=np.int64),\n 'float32': np.arange(size, dtype=np.float32),\n 'float64': np.arange(size, dtype=np.float64),\n 'bool': np.random.randn(size) > 0\n })\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df)\n\n for use_dictionary in [True, False]:\n 
_write_table(arrow_table, filename, version='2.0',\n use_dictionary=use_dictionary)\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n for write_statistics in [True, False]:\n _write_table(arrow_table, filename, version='2.0',\n write_statistics=write_statistics)\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']:\n _write_table(arrow_table, filename, version='2.0',\n compression=compression)\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\ndef make_sample_file(table_or_df):\n if isinstance(table_or_df, pa.Table):\n a_table = table_or_df\n else:\n a_table = pa.Table.from_pandas(table_or_df)\n\n buf = io.BytesIO()\n _write_table(a_table, buf, compression='SNAPPY', version='2.0',\n coerce_timestamps='ms')\n\n buf.seek(0)\n return pq.ParquetFile(buf)\n\n\[email protected]\ndef test_parquet_metadata_api():\n df = alltypes_sample(size=10000)\n df = df.reindex(columns=sorted(df.columns))\n df.index = np.random.randint(0, 1000000, size=len(df))\n\n fileh = make_sample_file(df)\n ncols = len(df.columns)\n\n # Series of sniff tests\n meta = fileh.metadata\n repr(meta)\n assert meta.num_rows == len(df)\n assert meta.num_columns == ncols + 1 # +1 for index\n assert meta.num_row_groups == 1\n assert meta.format_version == '2.0'\n assert 'parquet-cpp' in meta.created_by\n assert isinstance(meta.serialized_size, int)\n assert isinstance(meta.metadata, dict)\n\n # Schema\n schema = fileh.schema\n assert meta.schema is schema\n assert len(schema) == ncols + 1 # +1 for index\n repr(schema)\n\n col = schema[0]\n repr(col)\n assert col.name == df.columns[0]\n assert col.max_definition_level == 1\n assert col.max_repetition_level == 0\n assert col.max_repetition_level == 0\n\n assert col.physical_type == 'BOOLEAN'\n assert 
col.converted_type == 'NONE'\n\n with pytest.raises(IndexError):\n schema[ncols + 1] # +1 for index\n\n with pytest.raises(IndexError):\n schema[-1]\n\n # Row group\n for rg in range(meta.num_row_groups):\n rg_meta = meta.row_group(rg)\n assert isinstance(rg_meta, pq.RowGroupMetaData)\n repr(rg_meta)\n\n for col in range(rg_meta.num_columns):\n col_meta = rg_meta.column(col)\n assert isinstance(col_meta, pq.ColumnChunkMetaData)\n repr(col_meta)\n\n with pytest.raises(IndexError):\n meta.row_group(-1)\n\n with pytest.raises(IndexError):\n meta.row_group(meta.num_row_groups + 1)\n\n rg_meta = meta.row_group(0)\n assert rg_meta.num_rows == len(df)\n assert rg_meta.num_columns == ncols + 1 # +1 for index\n assert rg_meta.total_byte_size > 0\n\n with pytest.raises(IndexError):\n col_meta = rg_meta.column(-1)\n\n with pytest.raises(IndexError):\n col_meta = rg_meta.column(ncols + 2)\n\n col_meta = rg_meta.column(0)\n assert col_meta.file_offset > 0\n assert col_meta.file_path == '' # created from BytesIO\n assert col_meta.physical_type == 'BOOLEAN'\n assert col_meta.num_values == 10000\n assert col_meta.path_in_schema == 'bool'\n assert col_meta.is_stats_set is True\n assert isinstance(col_meta.statistics, pq.Statistics)\n assert col_meta.compression == 'SNAPPY'\n assert col_meta.encodings == ('PLAIN', 'RLE')\n assert col_meta.has_dictionary_page is False\n assert col_meta.dictionary_page_offset is None\n assert col_meta.data_page_offset > 0\n assert col_meta.total_compressed_size > 0\n assert col_meta.total_uncompressed_size > 0\n with pytest.raises(NotImplementedError):\n col_meta.has_index_page\n with pytest.raises(NotImplementedError):\n col_meta.index_page_offset\n\n\[email protected]\[email protected](\n (\n 'data',\n 'type',\n 'physical_type',\n 'min_value',\n 'max_value',\n 'null_count',\n 'num_values',\n 'distinct_count'\n ),\n [\n ([1, 2, 2, None, 4], pa.uint8(), 'INT32', 1, 4, 1, 4, 0),\n ([1, 2, 2, None, 4], pa.uint16(), 'INT32', 1, 4, 1, 4, 0),\n ([1, 2, 2, 
None, 4], pa.uint32(), 'INT32', 1, 4, 1, 4, 0),\n ([1, 2, 2, None, 4], pa.uint64(), 'INT64', 1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int8(), 'INT32', -1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, 0),\n ([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, 0),\n (\n [-1.1, 2.2, 2.3, None, 4.4], pa.float32(),\n 'FLOAT', -1.1, 4.4, 1, 4, 0\n ),\n (\n [-1.1, 2.2, 2.3, None, 4.4], pa.float64(),\n 'DOUBLE', -1.1, 4.4, 1, 4, 0\n ),\n (\n [u'', u'b', unichar(1000), None, u'aaa'], pa.binary(),\n 'BYTE_ARRAY', b'', unichar(1000).encode('utf-8'), 1, 4, 0\n ),\n (\n [True, False, False, True, True], pa.bool_(),\n 'BOOLEAN', False, True, 0, 5, 0\n ),\n (\n [b'\\x00', b'b', b'12', None, b'aaa'], pa.binary(),\n 'BYTE_ARRAY', b'\\x00', b'b', 1, 4, 0\n ),\n ]\n)\ndef test_parquet_column_statistics_api(data, type, physical_type, min_value,\n max_value, null_count, num_values,\n distinct_count):\n df = pd.DataFrame({'data': data})\n schema = pa.schema([pa.field('data', type)])\n table = pa.Table.from_pandas(df, schema=schema, safe=False)\n fileh = make_sample_file(table)\n\n meta = fileh.metadata\n\n rg_meta = meta.row_group(0)\n col_meta = rg_meta.column(0)\n\n stat = col_meta.statistics\n assert stat.has_min_max\n assert _close(type, stat.min, min_value)\n assert _close(type, stat.max, max_value)\n assert stat.null_count == null_count\n assert stat.num_values == num_values\n # TODO(kszucs) until parquet-cpp API doesn't expose HasDistinctCount\n # method, missing distinct_count is represented as zero instead of None\n assert stat.distinct_count == distinct_count\n assert stat.physical_type == physical_type\n\n\ndef _close(type, left, right):\n if type == pa.float32():\n return abs(left - right) < 1E-7\n elif type == pa.float64():\n return abs(left - right) < 1E-13\n else:\n return left == right\n\n\ndef test_statistics_convert_logical_types(tempdir):\n # ARROW-5166, ARROW-4139\n\n # (min, 
max, type)\n cases = [(10, 11164359321221007157, pa.uint64()),\n (10, 4294967295, pa.uint32()),\n (u\"ähnlich\", u\"öffentlich\", pa.utf8()),\n (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),\n pa.time32('ms')),\n (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),\n pa.time64('us')),\n (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),\n datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),\n pa.timestamp('ms')),\n (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),\n datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),\n pa.timestamp('us'))]\n\n for i, (min_val, max_val, typ) in enumerate(cases):\n t = pa.Table.from_arrays([pa.array([min_val, max_val], type=typ)],\n ['col'])\n path = str(tempdir / ('example{}.parquet'.format(i)))\n pq.write_table(t, path, version='2.0')\n pf = pq.ParquetFile(path)\n stats = pf.metadata.row_group(0).column(0).statistics\n assert stats.min == min_val\n assert stats.max == max_val\n\n\ndef test_parquet_write_disable_statistics(tempdir):\n table = pa.Table.from_pydict(\n {'a': pa.array([1, 2, 3]), 'b': pa.array(['a', 'b', 'c'])})\n _write_table(table, tempdir / 'data.parquet')\n meta = pq.read_metadata(tempdir / 'data.parquet')\n for col in [0, 1]:\n cc = meta.row_group(0).column(col)\n assert cc.is_stats_set is True\n assert cc.statistics is not None\n\n _write_table(table, tempdir / 'data2.parquet', write_statistics=False)\n meta = pq.read_metadata(tempdir / 'data2.parquet')\n for col in [0, 1]:\n cc = meta.row_group(0).column(col)\n assert cc.is_stats_set is False\n assert cc.statistics is None\n\n _write_table(table, tempdir / 'data3.parquet', write_statistics=['a'])\n meta = pq.read_metadata(tempdir / 'data3.parquet')\n cc_a = meta.row_group(0).column(0)\n assert cc_a.is_stats_set is True\n assert cc_a.statistics is not None\n cc_b = meta.row_group(0).column(1)\n assert cc_b.is_stats_set is False\n assert cc_b.statistics is None\n\n\[email protected]\ndef test_compare_schemas():\n df = alltypes_sample(size=10000)\n\n 
fileh = make_sample_file(df)\n fileh2 = make_sample_file(df)\n fileh3 = make_sample_file(df[df.columns[::2]])\n\n # ParquetSchema\n assert isinstance(fileh.schema, pq.ParquetSchema)\n assert fileh.schema.equals(fileh.schema)\n assert fileh.schema == fileh.schema\n assert fileh.schema.equals(fileh2.schema)\n assert fileh.schema == fileh2.schema\n assert fileh.schema != 'arbitrary object'\n assert not fileh.schema.equals(fileh3.schema)\n assert fileh.schema != fileh3.schema\n\n # ColumnSchema\n assert isinstance(fileh.schema[0], pq.ColumnSchema)\n assert fileh.schema[0].equals(fileh.schema[0])\n assert fileh.schema[0] == fileh.schema[0]\n assert not fileh.schema[0].equals(fileh.schema[1])\n assert fileh.schema[0] != fileh.schema[1]\n assert fileh.schema[0] != 'arbitrary object'\n\n\ndef test_validate_schema_write_table(tempdir):\n # ARROW-2926\n simple_fields = [\n pa.field('POS', pa.uint32()),\n pa.field('desc', pa.string())\n ]\n\n simple_schema = pa.schema(simple_fields)\n\n # simple_table schema does not match simple_schema\n simple_from_array = [pa.array([1]), pa.array(['bla'])]\n simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc'])\n\n path = tempdir / 'simple_validate_schema.parquet'\n\n with pq.ParquetWriter(path, simple_schema,\n version='2.0',\n compression='snappy', flavor='spark') as w:\n with pytest.raises(ValueError):\n w.write_table(simple_table)\n\n\[email protected]\ndef test_column_of_arrays(tempdir):\n df, schema = dataframe_with_arrays()\n\n filename = tempdir / 'pandas_roundtrip.parquet'\n arrow_table = pa.Table.from_pandas(df, schema=schema)\n _write_table(arrow_table, filename, version=\"2.0\", coerce_timestamps='ms')\n table_read = _read_table(filename)\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df, df_read)\n\n\[email protected]\ndef test_coerce_timestamps(tempdir):\n from collections import OrderedDict\n # ARROW-622\n arrays = OrderedDict()\n fields = [pa.field('datetime64',\n 
                       pa.list_(pa.timestamp('ms')))]
    arrays['datetime64'] = [
        np.array(['2007-07-13T01:23:34.123456789',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
        None,
        None,
        np.array(['2007-07-13T02',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
    ]

    df = pd.DataFrame(arrays)
    schema = pa.schema(fields)

    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)

    _write_table(arrow_table, filename, version="2.0", coerce_timestamps='us')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()

    # The ms data written with coerce_timestamps='us' should read back
    # as microsecond-resolution values.
    df_expected = df.copy()
    for i, x in enumerate(df_expected['datetime64']):
        if isinstance(x, np.ndarray):
            df_expected['datetime64'][i] = x.astype('M8[us]')

    tm.assert_frame_equal(df_expected, df_read)

    # An unrecognized coercion unit is rejected.
    with pytest.raises(ValueError):
        _write_table(arrow_table, filename, version='2.0',
                     coerce_timestamps='unknown')


@pytest.mark.pandas
def test_coerce_timestamps_truncated(tempdir):
    """
    ARROW-2555: Test that we can truncate timestamps when coercing if
    explicitly allowed.
    """
    dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1, microsecond=1)
    dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1)

    fields_us = [pa.field('datetime64', pa.timestamp('us'))]
    arrays_us = {'datetime64': [dt_us, dt_ms]}

    df_us = pd.DataFrame(arrays_us)
    schema_us = pa.schema(fields_us)

    filename = tempdir / 'pandas_truncated.parquet'
    table_us = pa.Table.from_pandas(df_us, schema=schema_us)

    # Coercing us -> ms drops the microsecond component; only legal with
    # allow_truncated_timestamps=True.
    _write_table(table_us, filename, version="2.0", coerce_timestamps='ms',
                 allow_truncated_timestamps=True)
    table_ms = _read_table(filename)
    df_ms = table_ms.to_pandas()

    arrays_expected = {'datetime64': [dt_ms, dt_ms]}
    df_expected = pd.DataFrame(arrays_expected)
    tm.assert_frame_equal(df_expected, df_ms)


@pytest.mark.pandas
def test_column_of_lists(tempdir):
    """Round-trip a DataFrame with list-valued columns."""
    df, schema = 
dataframe_with_lists(parquet_compatible=True)

    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    _write_table(arrow_table, filename, version='2.0')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()

    if PY2:
        # assert_frame_equal fails when comparing datetime.date and
        # np.datetime64, even with check_datetimelike_compat=True so
        # convert the values to np.datetime64 instead
        for col in ['date32[day]_list', 'date64[ms]_list']:
            df[col] = df[col].apply(
                lambda x: list(map(np.datetime64, x)) if x else x
            )

    tm.assert_frame_equal(df, df_read)


@pytest.mark.pandas
def test_date_time_types(tempdir):
    """Round-trip each date/time/timestamp logical type, checking the
    expected storage conversions (date64 -> date32, time32[s] -> time32[ms],
    and int64 vs. int96 timestamp encodings)."""
    t1 = pa.date32()
    data1 = np.array([17259, 17260, 17261], dtype='int32')
    a1 = pa.array(data1, type=t1)

    t2 = pa.date64()
    # date64 stores milliseconds; 86400000 ms per day.
    data2 = data1.astype('int64') * 86400000
    a2 = pa.array(data2, type=t2)

    t3 = pa.timestamp('us')
    start = pd.Timestamp('2001-01-01').value / 1000
    data3 = np.array([start, start + 1, start + 2], dtype='int64')
    a3 = pa.array(data3, type=t3)

    t4 = pa.time32('ms')
    data4 = np.arange(3, dtype='i4')
    a4 = pa.array(data4, type=t4)

    t5 = pa.time64('us')
    a5 = pa.array(data4.astype('int64'), type=t5)

    t6 = pa.time32('s')
    a6 = pa.array(data4, type=t6)

    # time32[s] has no direct Parquet representation; expect ms on read.
    ex_t6 = pa.time32('ms')
    ex_a6 = pa.array(data4 * 1000, type=ex_t6)

    t7 = pa.timestamp('ns')
    start = pd.Timestamp('2001-01-01').value
    data7 = np.array([start, start + 1000, start + 2000],
                     dtype='int64')
    a7 = pa.array(data7, type=t7)

    table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7],
                                 ['date32', 'date64', 'timestamp[us]',
                                  'time32[s]', 'time64[us]',
                                  'time32_from64[s]',
                                  'timestamp[ns]'])

    # date64 as date32
    # time32[s] to time32[ms]
    expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7],
                                    ['date32', 'date64', 'timestamp[us]',
                                     'time32[s]', 'time64[us]',
                                     'time32_from64[s]',
                                     'timestamp[ns]'])

    _check_roundtrip(table, expected=expected, 
version='2.0')

    # Fresh set of pure-timestamp columns, one per resolution.
    t0 = pa.timestamp('ms')
    data0 = np.arange(4, dtype='int64')
    a0 = pa.array(data0, type=t0)

    t1 = pa.timestamp('us')
    data1 = np.arange(4, dtype='int64')
    a1 = pa.array(data1, type=t1)

    t2 = pa.timestamp('ns')
    data2 = np.arange(4, dtype='int64')
    a2 = pa.array(data2, type=t2)

    table = pa.Table.from_arrays([a0, a1, a2],
                                 ['ts[ms]', 'ts[us]', 'ts[ns]'])
    expected = pa.Table.from_arrays([a0, a1, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])

    # int64 for all timestamps supported by default
    filename = tempdir / 'int64_timestamps.parquet'
    _write_table(table, filename, version='2.0')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT64'
    read_table = _read_table(filename)
    assert read_table.equals(expected)

    # With int96 storage everything is read back as nanoseconds, so the
    # expected values are rescaled accordingly.
    t0_ns = pa.timestamp('ns')
    data0_ns = np.array(data0 * 1000000, dtype='int64')
    a0_ns = pa.array(data0_ns, type=t0_ns)

    t1_ns = pa.timestamp('ns')
    data1_ns = np.array(data1 * 1000, dtype='int64')
    a1_ns = pa.array(data1_ns, type=t1_ns)

    expected = pa.Table.from_arrays([a0_ns, a1_ns, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])

    # int96 nanosecond timestamps produced upon request
    filename = tempdir / 'explicit_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 use_deprecated_int96_timestamps=True)
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)

    # int96 nanosecond timestamps implied by flavor 'spark'
    filename = tempdir / 'spark_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 flavor='spark')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)


def 
test_timestamp_restore_timezone():
    """A timestamp column's timezone survives a Parquet round-trip."""
    # ARROW-5888, restore timezone from serialized metadata
    ty = pa.timestamp('ms', tz='America/New_York')
    arr = pa.array([1, 2, 3], type=ty)
    t = pa.table([arr], names=['f0'])
    _check_roundtrip(t)


@pytest.mark.pandas
def test_list_of_datetime_time_roundtrip():
    """A column holding a list of datetime.time values round-trips."""
    # ARROW-4135
    times = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00',
                            '11:30', '12:00'])
    df = pd.DataFrame({'time': [times.time]})
    _roundtrip_pandas_dataframe(df, write_kwargs={})


@pytest.mark.pandas
def test_parquet_version_timestamp_differences():
    """Default and explicit timestamp coercion differ between Parquet
    format versions 1.0 and 2.0; int96 storage preserves nanoseconds."""
    i_s = pd.Timestamp('2010-01-01').value / 1000000000  # := 1262304000

    d_s = np.arange(i_s, i_s + 10, 1, dtype='int64')
    d_ms = d_s * 1000
    d_us = d_ms * 1000
    d_ns = d_us * 1000

    a_s = pa.array(d_s, type=pa.timestamp('s'))
    a_ms = pa.array(d_ms, type=pa.timestamp('ms'))
    a_us = pa.array(d_us, type=pa.timestamp('us'))
    a_ns = pa.array(d_ns, type=pa.timestamp('ns'))

    names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns']
    table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names)

    # Using Parquet version 1.0, seconds should be coerced to milliseconds
    # and nanoseconds should be coerced to microseconds by default
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names)
    _check_roundtrip(table, expected)

    # Using Parquet version 2.0, seconds should be coerced to milliseconds
    # and nanoseconds should be retained by default
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names)
    _check_roundtrip(table, expected, version='2.0')

    # Using Parquet version 1.0, coercing to milliseconds or microseconds
    # is allowed
    expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names)
    _check_roundtrip(table, expected, coerce_timestamps='ms')

    # Using Parquet version 2.0, coercing to milliseconds or microseconds
    # is allowed
    expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names)
    _check_roundtrip(table, expected, version='2.0', 
coerce_timestamps='us')

    # TODO: after pyarrow allows coerce_timestamps='ns', tests like the
    # following should pass ...

    # Using Parquet version 1.0, coercing to nanoseconds is not allowed
    # expected = None
    # with pytest.raises(NotImplementedError):
    #     _roundtrip_table(table, coerce_timestamps='ns')

    # Using Parquet version 2.0, coercing to nanoseconds is allowed
    # expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)
    # _check_roundtrip(table, expected, version='2.0', coerce_timestamps='ns')

    # For either Parquet version, coercing to nanoseconds is allowed
    # if Int96 storage is used
    expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)
    _check_roundtrip(table, expected,
                     use_deprecated_int96_timestamps=True)
    _check_roundtrip(table, expected, version='2.0',
                     use_deprecated_int96_timestamps=True)


def test_large_list_records():
    """Round-trip lists with large/zero lengths and interspersed nulls."""
    # This was fixed in PARQUET-1100
    list_lengths = np.random.randint(0, 500, size=50)
    list_lengths[::10] = 0

    # Every 8th entry is null; the rest are lists of random ints.
    list_values = [list(map(int, np.random.randint(0, 100, size=x)))
                   if i % 8 else None
                   for i, x in enumerate(list_lengths)]

    a1 = pa.array(list_values)

    table = pa.Table.from_arrays([a1], ['int_lists'])
    _check_roundtrip(table)


def test_sanitized_spark_field_names():
    """flavor='spark' replaces characters Spark forbids in field names."""
    a0 = pa.array([0, 1, 2, 3, 4])
    name = 'prohib; ,\t{}'
    table = pa.Table.from_arrays([a0], [name])

    result = _roundtrip_table(table, write_table_kwargs={'flavor': 'spark'})

    expected_name = 'prohib______'
    assert result.schema[0].name == expected_name


@pytest.mark.pandas
def test_spark_flavor_preserves_pandas_metadata():
    """The 'spark' flavor still round-trips the pandas index metadata."""
    df = _test_dataframe(size=100)
    df.index = np.arange(0, 10 * len(df), 10)
    df.index.name = 'foo'

    result = _roundtrip_pandas_dataframe(df, {'version': '2.0',
                                              'flavor': 'spark'})
    tm.assert_frame_equal(result, df)


def test_fixed_size_binary():
    """Round-trip a fixed-size binary column (with a null)."""
    t0 = pa.binary(10)
    data = [b'fooooooooo', None, b'barooooooo', b'quxooooooo']
    a0 = pa.array(data, 
type=t0)

    table = pa.Table.from_arrays([a0],
                                 ['binary[10]'])
    _check_roundtrip(table)


@pytest.mark.pandas
def test_multithreaded_read():
    """Threaded and single-threaded reads return identical tables."""
    df = alltypes_sample(size=10000)

    table = pa.Table.from_pandas(df)

    buf = io.BytesIO()
    _write_table(table, buf, compression='SNAPPY', version='2.0')

    buf.seek(0)
    table1 = _read_table(buf, use_threads=True)

    buf.seek(0)
    table2 = _read_table(buf, use_threads=False)

    assert table1.equals(table2)


@pytest.mark.pandas
def test_min_chunksize():
    """chunk_size=-1 is accepted; chunk_size=0 raises ValueError."""
    data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D'])
    table = pa.Table.from_pandas(data.reset_index())

    buf = io.BytesIO()
    _write_table(table, buf, chunk_size=-1)

    buf.seek(0)
    result = _read_table(buf)

    assert result.equals(table)

    with pytest.raises(ValueError):
        _write_table(table, buf, chunk_size=0)


@pytest.mark.pandas
def test_pass_separate_metadata():
    """ParquetFile accepts externally supplied FileMetaData."""
    # ARROW-471
    df = alltypes_sample(size=10000)

    a_table = pa.Table.from_pandas(df)

    buf = io.BytesIO()
    _write_table(a_table, buf, compression='snappy', version='2.0')

    buf.seek(0)
    metadata = pq.read_metadata(buf)

    buf.seek(0)

    fileh = pq.ParquetFile(buf, metadata=metadata)

    tm.assert_frame_equal(df, fileh.read().to_pandas())


@pytest.mark.pandas
def test_read_single_row_group():
    """Reading the K row groups one by one reassembles the full table."""
    # ARROW-471
    N, K = 10000, 4
    df = alltypes_sample(size=N)

    a_table = pa.Table.from_pandas(df)

    buf = io.BytesIO()
    # NOTE(review): N / K is a float under Python 3; pyarrow presumably
    # accepts/coerces it for row_group_size -- confirm.
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')

    buf.seek(0)

    pf = pq.ParquetFile(buf)

    assert pf.num_row_groups == K

    row_groups = [pf.read_row_group(i) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df, result.to_pandas())


@pytest.mark.pandas
def test_read_single_row_group_with_column_subset():
    """read_row_group honors a column subset; duplicates read uniquely."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)

    buf = io.BytesIO()
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')

    buf.seek(0)
    pf = pq.ParquetFile(buf)

    cols = list(df.columns[:2])
    row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())

    # ARROW-4267: Selection of duplicate columns still leads to these columns
    # being read uniquely.
    row_groups = [pf.read_row_group(i, columns=cols + cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())


@pytest.mark.pandas
def test_read_multiple_row_groups():
    """read_row_groups over all K groups returns the original data."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)

    a_table = pa.Table.from_pandas(df)

    buf = io.BytesIO()
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')

    buf.seek(0)

    pf = pq.ParquetFile(buf)

    assert pf.num_row_groups == K

    result = pf.read_row_groups(range(K))
    tm.assert_frame_equal(df, result.to_pandas())


@pytest.mark.pandas
def test_read_multiple_row_groups_with_column_subset():
    """read_row_groups honors a column subset; duplicates read uniquely."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)

    buf = io.BytesIO()
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')

    buf.seek(0)
    pf = pq.ParquetFile(buf)

    cols = list(df.columns[:2])
    result = pf.read_row_groups(range(K), columns=cols)
    tm.assert_frame_equal(df[cols], result.to_pandas())

    # ARROW-4267: Selection of duplicate columns still leads to these columns
    # being read uniquely.
    result = pf.read_row_groups(range(K), columns=cols + cols)
    tm.assert_frame_equal(df[cols], result.to_pandas())


@pytest.mark.pandas
def test_scan_contents():
    """ParquetFile.scan_contents counts rows, whole-file or per-columns."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)

    buf = io.BytesIO()
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')

    buf.seek(0)
    pf = pq.ParquetFile(buf)

    assert pf.scan_contents() == 10000
    assert 
pf.scan_contents(df.columns[:4]) == 10000


@pytest.mark.pandas
def test_parquet_piece_read(tempdir):
    """A ParquetDatasetPiece reads back the table written at its path."""
    df = _test_dataframe(1000)
    table = pa.Table.from_pandas(df)

    path = tempdir / 'parquet_piece_read.parquet'
    _write_table(table, path, version='2.0')

    piece1 = pq.ParquetDatasetPiece(path)

    result = piece1.read()
    assert result.equals(table)


@pytest.mark.pandas
def test_parquet_piece_open_and_get_metadata(tempdir):
    """ParquetDatasetPiece exposes its file metadata via get_metadata()."""
    df = _test_dataframe(100)
    table = pa.Table.from_pandas(df)

    path = tempdir / 'parquet_piece_read.parquet'
    _write_table(table, path, version='2.0')

    piece = pq.ParquetDatasetPiece(path)
    table1 = piece.read()
    assert isinstance(table1, pa.Table)
    meta1 = piece.get_metadata()
    assert isinstance(meta1, pq.FileMetaData)

    assert table == table1


def test_parquet_piece_basics():
    """str() rendering and equality of ParquetDatasetPiece variants."""
    path = '/baz.parq'

    piece1 = pq.ParquetDatasetPiece(path)
    piece2 = pq.ParquetDatasetPiece(path, row_group=1)
    piece3 = pq.ParquetDatasetPiece(
        path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])

    assert str(piece1) == path
    assert str(piece2) == '/baz.parq | row_group=1'
    assert str(piece3) == 'partition[foo=0, bar=1] /baz.parq | row_group=1'

    assert piece1 == piece1
    assert piece2 == piece2
    assert piece3 == piece3
    assert piece1 != piece3


def test_partition_set_dictionary_type():
    """PartitionSet builds string/integer dictionaries; other key types
    raise TypeError when the dictionary is materialized."""
    set1 = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')])
    set2 = pq.PartitionSet('key2', [2007, 2008, 2009])

    assert isinstance(set1.dictionary, pa.StringArray)
    assert isinstance(set2.dictionary, pa.IntegerArray)

    set3 = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])
    with pytest.raises(TypeError):
        set3.dictionary


@pytest.mark.pandas
def test_read_partitioned_directory(tempdir):
    """Run the shared partitioned-directory scenario on the local FS."""
    fs = LocalFileSystem.get_instance()
    _partition_test_for_filesystem(fs, tempdir)


@pytest.mark.pandas
def test_create_parquet_dataset_multi_threaded(tempdir):
    """Multi-threaded metadata discovery matches single-threaded results."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir

    _partition_test_for_filesystem(fs, base_path)

    # Compare a single-threaded manifest against a 16-thread dataset scan.
    manifest = pq.ParquetManifest(base_path, filesystem=fs,
                                  metadata_nthreads=1)
    dataset = pq.ParquetDataset(base_path, filesystem=fs, metadata_nthreads=16)
    assert len(dataset.pieces) > 0
    partitions = dataset.partitions
    assert len(partitions.partition_names) > 0
    assert partitions.partition_names == manifest.partitions.partition_names
    assert len(partitions.levels) == len(manifest.partitions.levels)


@pytest.mark.pandas
def test_equivalency(tempdir):
    """Partition filters: legacy conjunction syntax and disjunctive normal
    form select the expected rows; NUL bytes in predicates are rejected."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir

    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]

    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])

    _generate_partition_directories(fs, base_path, partition_spec, df)

    # Old filters syntax:
    #  integer == 1 AND string != b AND boolean == True
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', '=', 1), ('string', '!=', 'b'),
                 ('boolean', '==', True)]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))

    assert 0 not in result_df['integer'].values
    assert 'b' not in result_df['string'].values
    assert False not in result_df['boolean'].values

    # filters in disjunctive normal form:
    #  (integer == 1 AND string != b AND boolean == True) OR
    #  (integer == 0 AND boolean == False)
    # TODO(ARROW-3388): boolean columns are reconstructed as string
    filters = [
        [
            ('integer', '=', 1),
            ('string', '!=', 'b'),
            ('boolean', '==', 'True')
        ],
        [('integer', '=', 0), ('boolean', '==', 'False')]
    ]
    dataset = pq.ParquetDataset(base_path, filesystem=fs, 
                                filters=filters)
    table = dataset.read()
    result_df = table.to_pandas().reset_index(drop=True)

    # Check that all rows in the DF fulfill the filter
    # Pandas 0.23.x has problems with indexing constant memoryviews in
    # categoricals. Thus we need to make an explicitly copy here with
    # np.array.
    df_filter_1 = (np.array(result_df['integer']) == 1) \
        & (np.array(result_df['string']) != 'b') \
        & (np.array(result_df['boolean']) == 'True')
    df_filter_2 = (np.array(result_df['integer']) == 0) \
        & (np.array(result_df['boolean']) == 'False')
    assert df_filter_1.sum() > 0
    assert df_filter_2.sum() > 0
    assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum())

    # Check for \0 in predicate values. Until they are correctly implemented
    # in ARROW-3391, they would otherwise lead to weird results with the
    # current code.
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', b'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', u'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)


@pytest.mark.pandas
def test_cutoff_exclusive_integer(tempdir):
    """Strict </> bounds on an integer partition key exclude endpoints."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir

    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5

    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])

    _generate_partition_directories(fs, base_path, partition_spec, df)

    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('integers', '<', 4),
            ('integers', '>', 1),
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))

    # Exclusive bounds: 1 < x < 4 leaves exactly {2, 3}.
    result_list = [x for x in map(int, result_df['integers'].values)]
    assert result_list == [2, 3]


@pytest.mark.pandas
@pytest.mark.xfail(
    raises=TypeError,
    reason='Loss 
of type information in creation of categoricals.'
)
def test_cutoff_exclusive_datetime(tempdir):
    """Strict bounds on a date partition key (xfail: categorical dtype)."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir

    date_keys = [
        datetime.date(2018, 4, 9),
        datetime.date(2018, 4, 10),
        datetime.date(2018, 4, 11),
        datetime.date(2018, 4, 12),
        datetime.date(2018, 4, 13)
    ]
    partition_spec = [
        ['dates', date_keys]
    ]
    N = 5

    df = pd.DataFrame({
        'index': np.arange(N),
        'dates': np.array(date_keys, dtype='datetime64'),
    }, columns=['index', 'dates'])

    _generate_partition_directories(fs, base_path, partition_spec, df)

    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('dates', '<', "2018-04-12"),
            ('dates', '>', "2018-04-10")
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))

    # Only 2018-04-11 lies strictly between the two bounds.
    expected = pd.Categorical(
        np.array([datetime.date(2018, 4, 11)], dtype='datetime64'),
        categories=np.array(date_keys, dtype='datetime64'))

    assert result_df['dates'].values == expected


@pytest.mark.pandas
def test_inclusive_integer(tempdir):
    """<= and >= bounds on an integer partition key include endpoints."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir

    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5

    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])

    _generate_partition_directories(fs, base_path, partition_spec, df)

    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('integers', '<=', 3),
            ('integers', '>=', 2),
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))

    result_list = [int(x) for x in map(int, result_df['integers'].values)]
    assert result_list == [2, 3]


@pytest.mark.pandas
def test_inclusive_set(tempdir):
    """'in' filters with set values select only the listed partitions."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir

    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]

    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])

    _generate_partition_directories(fs, base_path, partition_spec, df)

    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', 'in', {1}), ('string', 'in', {'a', 'b'}),
                 ('boolean', 'in', {True})]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))

    # Values outside the 'in' sets must not appear.
    assert 0 not in result_df['integer'].values
    assert 'c' not in result_df['string'].values
    assert False not in result_df['boolean'].values


@pytest.mark.pandas
def test_invalid_pred_op(tempdir):
    """Malformed predicates (bad operator, empty set, set with !=) raise
    ValueError when constructing the dataset."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir

    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5

    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])

    _generate_partition_directories(fs, base_path, partition_spec, df)

    # '=<' is not a valid comparison operator.
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', '=<', 3),
                          ])

    # 'in' with an empty set is rejected.
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', 'in', set()),
                          ])

    # A set value is only valid with the 'in' operator.
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', '!=', {3}),
                          ])


@pytest.mark.pandas
def test_filters_read_table(tempdir):
    """The filters keyword is forwarded by read_table / read_pandas."""
    # test that filters keyword is passed through in read_table
    fs = LocalFileSystem.get_instance()
    base_path = tempdir

    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5

    df = pd.DataFrame({
        'index': 
np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])

    _generate_partition_directories(fs, base_path, partition_spec, df)

    # Flat (conjunction) filter syntax.
    table = pq.read_table(
        base_path, filesystem=fs, filters=[('integers', '<', 3)])
    assert table.num_rows == 3

    # Nested (DNF) filter syntax.
    table = pq.read_table(
        base_path, filesystem=fs, filters=[[('integers', '<', 3)]])
    assert table.num_rows == 3

    table = pq.read_pandas(
        base_path, filters=[('integers', '<', 3)])
    assert table.num_rows == 3


@pytest.yield_fixture
def s3_example():
    """Yield an (s3fs filesystem, unique bucket URI) pair for S3 tests.

    Requires PYARROW_TEST_S3_* environment variables; the temporary
    directory is removed recursively on teardown.
    """
    access_key = os.environ['PYARROW_TEST_S3_ACCESS_KEY']
    secret_key = os.environ['PYARROW_TEST_S3_SECRET_KEY']
    bucket_name = os.environ['PYARROW_TEST_S3_BUCKET']

    import s3fs
    fs = s3fs.S3FileSystem(key=access_key, secret=secret_key)

    test_dir = guid()

    bucket_uri = 's3://{0}/{1}'.format(bucket_name, test_dir)
    fs.mkdir(bucket_uri)
    yield fs, bucket_uri
    fs.rm(bucket_uri, recursive=True)


@pytest.mark.pandas
@pytest.mark.s3
def test_read_partitioned_directory_s3fs(s3_example):
    """Partitioned-directory scenario against S3, wrapped and auto-wrapped."""
    from pyarrow.filesystem import S3FSWrapper

    fs, bucket_uri = s3_example
    wrapper = S3FSWrapper(fs)
    _partition_test_for_filesystem(wrapper, bucket_uri)

    # Check that we can auto-wrap
    dataset = pq.ParquetDataset(bucket_uri, filesystem=fs)
    dataset.read()


def _partition_test_for_filesystem(fs, base_path):
    """Shared scenario: write a two-level partitioned dataset on `fs` and
    verify reading it back reconstructs the partition columns."""
    foo_keys = [0, 1]
    bar_keys = ['a', 'b', 'c']
    partition_spec = [
        ['foo', foo_keys],
        ['bar', bar_keys]
    ]
    N = 30

    df = pd.DataFrame({
        'index': np.arange(N),
        'foo': np.array(foo_keys, dtype='i4').repeat(15),
        'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),
        'values': np.random.randn(N)
    }, columns=['index', 'foo', 'bar', 'values'])

    _generate_partition_directories(fs, base_path, partition_spec, df)

    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))

    expected_df = (df.sort_values(by='index')
                   .reset_index(drop=True)
                   .reindex(columns=result_df.columns))
    # Partition keys come back as pandas Categoricals.
    expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)
    expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)

    # Partition columns are appended after the data columns.
    assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()

    tm.assert_frame_equal(result_df, expected_df)


def _generate_partition_directories(fs, base_dir, partition_spec, df):
    """Materialize `df` as a Hive-style partitioned directory tree on `fs`,
    writing one Parquet file plus a _SUCCESS marker per leaf directory."""
    # partition_spec : list of lists, e.g. [['foo', [0, 1, 2],
    #                                       ['bar', ['a', 'b', 'c']]
    # part_table : a pyarrow.Table to write to each partition
    DEPTH = len(partition_spec)

    def _visit_level(base_dir, level, part_keys):
        # Recursively create '<name>=<value>' directories for this level.
        name, values = partition_spec[level]
        for value in values:
            this_part_keys = part_keys + [(name, value)]

            level_dir = base_dir / '{0}={1}'.format(name, value)
            fs.mkdir(level_dir)

            if level == DEPTH - 1:
                # Generate example data
                file_path = level_dir / guid()

                filtered_df = _filter_partition(df, this_part_keys)
                part_table = pa.Table.from_pandas(filtered_df)
                with fs.open(file_path, 'wb') as f:
                    _write_table(part_table, f)
                assert fs.exists(file_path)

                (level_dir / '_SUCCESS').touch()
            else:
                _visit_level(level_dir, level + 1, this_part_keys)
                (level_dir / '_SUCCESS').touch()

    _visit_level(base_dir, 0, [])


def _test_read_common_metadata_files(fs, base_path):
    """Shared scenario: a _common_metadata sidecar file is discovered and
    its schema matches the dataset's."""
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])

    base_path = str(base_path)
    data_path = os.path.join(base_path, 'data.parquet')

    table = pa.Table.from_pandas(df)

    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)

    metadata_path = os.path.join(base_path, '_common_metadata')
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)

    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    assert dataset.common_metadata_path == str(metadata_path)

    with fs.open(data_path) as f:
        common_schema 
= pq.read_metadata(f).schema
    assert dataset.schema.equals(common_schema)

    # handle list of one directory
    dataset2 = pq.ParquetDataset([base_path], filesystem=fs)
    assert dataset2.schema.equals(dataset.schema)


@pytest.mark.pandas
def test_read_common_metadata_files(tempdir):
    """Run the _common_metadata discovery scenario on the local FS."""
    fs = LocalFileSystem.get_instance()
    _test_read_common_metadata_files(fs, tempdir)


@pytest.mark.pandas
def test_read_metadata_files(tempdir):
    """A _metadata sidecar file is discovered and its schema matches."""
    fs = LocalFileSystem.get_instance()

    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])

    data_path = tempdir / 'data.parquet'

    table = pa.Table.from_pandas(df)

    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)

    metadata_path = tempdir / '_metadata'
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)

    dataset = pq.ParquetDataset(tempdir, filesystem=fs)
    assert dataset.metadata_path == str(metadata_path)

    with fs.open(data_path) as f:
        metadata_schema = pq.read_metadata(f).schema
    assert dataset.schema.equals(metadata_schema)


@pytest.mark.pandas
def test_read_schema(tempdir):
    """pq.read_schema returns the written schema (with pandas metadata),
    with or without memory mapping."""
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])

    data_path = tempdir / 'test.parquet'

    table = pa.Table.from_pandas(df)
    _write_table(table, data_path)

    read1 = pq.read_schema(data_path)
    read2 = pq.read_schema(data_path, memory_map=True)
    assert table.schema.equals(read1, check_metadata=False)
    assert table.schema.equals(read2, check_metadata=False)

    assert table.schema.metadata[b'pandas'] == read1.metadata[b'pandas']


def _filter_partition(df, part_keys):
    """Return the rows of `df` matching all (name, value) partition keys,
    with the key columns dropped (they live in the directory names)."""
    predicate = np.ones(len(df), dtype=bool)

    to_drop = []
    for name, value in part_keys:
        to_drop.append(name)

        # to avoid pandas warning
        if isinstance(value, (datetime.date, datetime.datetime)):
            value = pd.Timestamp(value)

        predicate &= df[name] == value

    return 
df[predicate].drop(to_drop, axis=1)


@pytest.mark.pandas
def test_read_multiple_files(tempdir):
    """ParquetDataset over many files: full read, external metadata,
    column subsets, threading, and failure on non-uniform schemas."""
    nfiles = 10
    size = 5

    dirpath = tempdir / guid()
    dirpath.mkdir()

    test_data = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)

        # Hack so that we don't have a dtype cast in v1 files
        df['uint32'] = df['uint32'].astype(np.int64)

        path = dirpath / '{}.parquet'.format(i)

        table = pa.Table.from_pandas(df)
        _write_table(table, path)

        test_data.append(table)
        paths.append(path)

    # Write a _SUCCESS.crc file
    (dirpath / '_SUCCESS.crc').touch()

    def read_multiple_files(paths, columns=None, use_threads=True, **kwargs):
        # Helper: build a dataset from explicit paths and read it.
        dataset = pq.ParquetDataset(paths, **kwargs)
        return dataset.read(columns=columns, use_threads=use_threads)

    result = read_multiple_files(paths)
    expected = pa.concat_tables(test_data)

    assert result.equals(expected)

    # Read with provided metadata
    metadata = pq.read_metadata(paths[0])

    result2 = read_multiple_files(paths, metadata=metadata)
    assert result2.equals(expected)

    result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema)
    assert result3.equals(expected)

    # Read column subset
    to_read = [0, 2, 6, result.num_columns - 1]

    col_names = [result.field(i).name for i in to_read]
    out = pa.localfs.read_parquet(dirpath, columns=col_names)
    expected = pa.Table.from_arrays([result.column(i) for i in to_read],
                                    names=col_names,
                                    metadata=result.schema.metadata)
    assert out.equals(expected)

    # Read with multiple threads
    pa.localfs.read_parquet(dirpath, use_threads=True)

    # Test failure modes with non-uniform metadata
    bad_apple = _test_dataframe(size, seed=i).iloc[:, :4]
    bad_apple_path = tempdir / '{}.parquet'.format(guid())

    t = pa.Table.from_pandas(bad_apple)
    _write_table(t, bad_apple_path)

    bad_meta = pq.read_metadata(bad_apple_path)

    with pytest.raises(ValueError):
        read_multiple_files(paths + [bad_apple_path])

    with 
pytest.raises(ValueError):
        read_multiple_files(paths, metadata=bad_meta)

    mixed_paths = [bad_apple_path, paths[0]]

    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths, schema=bad_meta.schema)

    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths)


@pytest.mark.pandas
def test_dataset_read_pandas(tempdir):
    """Dataset.read_pandas reassembles per-file frames, preserving the
    named integer index across files."""
    nfiles = 5
    size = 5

    dirpath = tempdir / guid()
    dirpath.mkdir()

    test_data = []
    frames = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        # Give each file a disjoint, named index range.
        df.index = np.arange(i * size, (i + 1) * size)
        df.index.name = 'index'

        path = dirpath / '{}.parquet'.format(i)

        table = pa.Table.from_pandas(df)
        _write_table(table, path)
        test_data.append(table)
        frames.append(df)
        paths.append(path)

    dataset = pq.ParquetDataset(dirpath)
    columns = ['uint8', 'strings']
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([x[columns] for x in frames])

    tm.assert_frame_equal(result, expected)


@pytest.mark.pandas
def test_dataset_no_memory_map(tempdir):
    """ParquetDataset works with memory_map=False."""
    # ARROW-2627: Check that we can use ParquetDataset without memory-mapping
    dirpath = tempdir / guid()
    dirpath.mkdir()

    df = _test_dataframe(10, seed=0)
    path = dirpath / '{}.parquet'.format(0)
    table = pa.Table.from_pandas(df)
    _write_table(table, path, version='2.0')

    # TODO(wesm): Not sure how to easily check that memory mapping is _not_
    # used. 
Mocking is not especially easy for pa.memory_map\n dataset = pq.ParquetDataset(dirpath, memory_map=False)\n assert dataset.pieces[0].read().equals(table)\n\n\[email protected]\[email protected]('preserve_index', [True, False, None])\ndef test_dataset_read_pandas_common_metadata(tempdir, preserve_index):\n # ARROW-1103\n nfiles = 5\n size = 5\n\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n test_data = []\n frames = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(size, seed=i)\n df.index = pd.Index(np.arange(i * size, (i + 1) * size), name='index')\n\n path = dirpath / '{}.parquet'.format(i)\n\n table = pa.Table.from_pandas(df, preserve_index=preserve_index)\n\n # Obliterate metadata\n table = table.replace_schema_metadata(None)\n assert table.schema.metadata is None\n\n _write_table(table, path)\n test_data.append(table)\n frames.append(df)\n paths.append(path)\n\n # Write _metadata common file\n table_for_metadata = pa.Table.from_pandas(\n df, preserve_index=preserve_index\n )\n pq.write_metadata(table_for_metadata.schema, dirpath / '_metadata')\n\n dataset = pq.ParquetDataset(dirpath)\n columns = ['uint8', 'strings']\n result = dataset.read_pandas(columns=columns).to_pandas()\n expected = pd.concat([x[columns] for x in frames])\n expected.index.name = (\n df.index.name if preserve_index is not False else None)\n tm.assert_frame_equal(result, expected)\n\n\ndef _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5):\n test_data = []\n paths = []\n for i in range(nfiles):\n df = _test_dataframe(file_nrows, seed=i)\n path = base_path / '{}.parquet'.format(i)\n\n test_data.append(_write_table(df, path))\n paths.append(path)\n return paths\n\n\[email protected]\ndef test_ignore_private_directories(tempdir):\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n paths = _make_example_multifile_dataset(dirpath, nfiles=10,\n file_nrows=5)\n\n # private directory\n (dirpath / '_impala_staging').mkdir()\n\n dataset = 
pq.ParquetDataset(dirpath)\n assert set(map(str, paths)) == set(x.path for x in dataset.pieces)\n\n\[email protected]\ndef test_ignore_hidden_files_dot(tempdir):\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n paths = _make_example_multifile_dataset(dirpath, nfiles=10,\n file_nrows=5)\n\n with (dirpath / '.DS_Store').open('wb') as f:\n f.write(b'gibberish')\n\n with (dirpath / '.private').open('wb') as f:\n f.write(b'gibberish')\n\n dataset = pq.ParquetDataset(dirpath)\n assert set(map(str, paths)) == set(x.path for x in dataset.pieces)\n\n\[email protected]\ndef test_ignore_hidden_files_underscore(tempdir):\n dirpath = tempdir / guid()\n dirpath.mkdir()\n\n paths = _make_example_multifile_dataset(dirpath, nfiles=10,\n file_nrows=5)\n\n with (dirpath / '_committed_123').open('wb') as f:\n f.write(b'abcd')\n\n with (dirpath / '_started_321').open('wb') as f:\n f.write(b'abcd')\n\n dataset = pq.ParquetDataset(dirpath)\n assert set(map(str, paths)) == set(x.path for x in dataset.pieces)\n\n\[email protected]\ndef test_multiindex_duplicate_values(tempdir):\n num_rows = 3\n numbers = list(range(num_rows))\n index = pd.MultiIndex.from_arrays(\n [['foo', 'foo', 'bar'], numbers],\n names=['foobar', 'some_numbers'],\n )\n\n df = pd.DataFrame({'numbers': numbers}, index=index)\n table = pa.Table.from_pandas(df)\n\n filename = tempdir / 'dup_multi_index_levels.parquet'\n\n _write_table(table, filename)\n result_table = _read_table(filename)\n assert table.equals(result_table)\n\n result_df = result_table.to_pandas()\n tm.assert_frame_equal(result_df, df)\n\n\[email protected]\ndef test_write_error_deletes_incomplete_file(tempdir):\n # ARROW-1285\n df = pd.DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.Categorical(list('abc')),\n 'g': pd.date_range('20130101', periods=3),\n 'h': pd.date_range('20130101', periods=3,\n tz='US/Eastern'),\n 'i': 
pd.date_range('20130101', periods=3, freq='ns')})\n\n pdf = pa.Table.from_pandas(df)\n\n filename = tempdir / 'tmp_file'\n try:\n _write_table(pdf, filename)\n except pa.ArrowException:\n pass\n\n assert not filename.exists()\n\n\[email protected]\ndef test_noncoerced_nanoseconds_written_without_exception(tempdir):\n # ARROW-1957: the Parquet version 2.0 writer preserves Arrow\n # nanosecond timestamps by default\n n = 9\n df = pd.DataFrame({'x': range(n)},\n index=pd.DatetimeIndex(start='2017-01-01',\n freq='1n',\n periods=n))\n tb = pa.Table.from_pandas(df)\n\n filename = tempdir / 'written.parquet'\n try:\n pq.write_table(tb, filename, version='2.0')\n except Exception:\n pass\n assert filename.exists()\n\n recovered_table = pq.read_table(filename)\n assert tb.equals(recovered_table)\n\n # Loss of data thru coercion (without explicit override) still an error\n filename = tempdir / 'not_written.parquet'\n with pytest.raises(ValueError):\n pq.write_table(tb, filename, coerce_timestamps='ms', version='2.0')\n\n\ndef test_read_non_existent_file(tempdir):\n path = 'non-existent-file.parquet'\n try:\n pq.read_table(path)\n except Exception as e:\n assert path in e.args[0]\n\n\ndef test_read_table_doesnt_warn(datadir):\n with pytest.warns(None) as record:\n pq.read_table(datadir / 'v0.7.1.parquet')\n\n assert len(record) == 0\n\n\ndef _test_write_to_dataset_with_partitions(base_path,\n filesystem=None,\n schema=None,\n index_name=None):\n # ARROW-1400\n output_df = pd.DataFrame({'group1': list('aaabbbbccc'),\n 'group2': list('eefeffgeee'),\n 'num': list(range(10)),\n 'nan': [pd.np.nan] * 10,\n 'date': np.arange('2017-01-01', '2017-01-11',\n dtype='datetime64[D]')})\n cols = output_df.columns.tolist()\n partition_by = ['group1', 'group2']\n output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False,\n preserve_index=False)\n pq.write_to_dataset(output_table, base_path, partition_by,\n filesystem=filesystem)\n\n metadata_path = os.path.join(base_path, 
'_common_metadata')\n\n if filesystem is not None:\n with filesystem.open(metadata_path, 'wb') as f:\n pq.write_metadata(output_table.schema, f)\n else:\n pq.write_metadata(output_table.schema, metadata_path)\n\n # ARROW-2891: Ensure the output_schema is preserved when writing a\n # partitioned dataset\n dataset = pq.ParquetDataset(base_path,\n filesystem=filesystem,\n validate_schema=True)\n # ARROW-2209: Ensure the dataset schema also includes the partition columns\n dataset_cols = set(dataset.schema.to_arrow_schema().names)\n assert dataset_cols == set(output_table.schema.names)\n\n input_table = dataset.read()\n input_df = input_table.to_pandas()\n\n # Read data back in and compare with original DataFrame\n # Partitioned columns added to the end of the DataFrame when read\n input_df_cols = input_df.columns.tolist()\n assert partition_by == input_df_cols[-1 * len(partition_by):]\n\n # Partitioned columns become 'categorical' dtypes\n input_df = input_df[cols]\n for col in partition_by:\n output_df[col] = output_df[col].astype('category')\n assert output_df.equals(input_df)\n\n\ndef _test_write_to_dataset_no_partitions(base_path, filesystem=None):\n # ARROW-1400\n output_df = pd.DataFrame({'group1': list('aaabbbbccc'),\n 'group2': list('eefeffgeee'),\n 'num': list(range(10)),\n 'date': np.arange('2017-01-01', '2017-01-11',\n dtype='datetime64[D]')})\n cols = output_df.columns.tolist()\n output_table = pa.Table.from_pandas(output_df)\n\n if filesystem is None:\n filesystem = LocalFileSystem.get_instance()\n\n # Without partitions, append files to root_path\n n = 5\n for i in range(n):\n pq.write_to_dataset(output_table, base_path,\n filesystem=filesystem)\n output_files = [file for file in filesystem.ls(base_path)\n if file.endswith(\".parquet\")]\n assert len(output_files) == n\n\n # Deduplicated incoming DataFrame should match\n # original outgoing Dataframe\n input_table = pq.ParquetDataset(base_path,\n filesystem=filesystem).read()\n input_df = 
input_table.to_pandas()\n input_df = input_df.drop_duplicates()\n input_df = input_df[cols]\n assert output_df.equals(input_df)\n\n\[email protected]\ndef test_write_to_dataset_with_partitions(tempdir):\n _test_write_to_dataset_with_partitions(str(tempdir))\n\n\[email protected]\ndef test_write_to_dataset_with_partitions_and_schema(tempdir):\n schema = pa.schema([pa.field('group1', type=pa.string()),\n pa.field('group2', type=pa.string()),\n pa.field('num', type=pa.int64()),\n pa.field('nan', type=pa.int32()),\n pa.field('date', type=pa.timestamp(unit='us'))])\n _test_write_to_dataset_with_partitions(str(tempdir), schema=schema)\n\n\[email protected]\ndef test_write_to_dataset_with_partitions_and_index_name(tempdir):\n _test_write_to_dataset_with_partitions(str(tempdir),\n index_name='index_name')\n\n\[email protected]\ndef test_write_to_dataset_no_partitions(tempdir):\n _test_write_to_dataset_no_partitions(str(tempdir))\n\n\[email protected]\ndef test_write_to_dataset_with_partitions_and_custom_filenames(tempdir):\n output_df = pd.DataFrame({'group1': list('aaabbbbccc'),\n 'group2': list('eefeffgeee'),\n 'num': list(range(10)),\n 'nan': [pd.np.nan] * 10,\n 'date': np.arange('2017-01-01', '2017-01-11',\n dtype='datetime64[D]')})\n partition_by = ['group1', 'group2']\n output_table = pa.Table.from_pandas(output_df)\n path = str(tempdir)\n\n def partition_filename_callback(keys):\n return \"{0}-{1}.parquet\".format(*keys)\n\n pq.write_to_dataset(output_table, path,\n partition_by, partition_filename_callback)\n\n dataset = pq.ParquetDataset(path)\n\n # ARROW-3538: Ensure partition filenames match the given pattern\n # defined in the local function partition_filename_callback\n expected_basenames = [\n 'a-e.parquet', 'a-f.parquet',\n 'b-e.parquet', 'b-f.parquet',\n 'b-g.parquet', 'c-e.parquet'\n ]\n output_basenames = [os.path.basename(p.path) for p in dataset.pieces]\n\n assert sorted(expected_basenames) == sorted(output_basenames)\n\n\[email protected]_memory\ndef 
test_large_table_int32_overflow():\n size = np.iinfo('int32').max + 1\n\n arr = np.ones(size, dtype='uint8')\n\n parr = pa.array(arr, type=pa.uint8())\n\n table = pa.Table.from_arrays([parr], names=['one'])\n f = io.BytesIO()\n _write_table(table, f)\n\n\ndef _simple_table_roundtrip(table):\n stream = pa.BufferOutputStream()\n _write_table(table, stream)\n buf = stream.getvalue()\n return _read_table(buf)\n\n\[email protected]\[email protected]_memory\ndef test_binary_array_overflow_to_chunked():\n # ARROW-3762\n\n # 2^31 + 1 bytes\n values = [b'x'] + [\n b'x' * (1 << 20)\n ] * 2 * (1 << 10)\n df = pd.DataFrame({'byte_col': values})\n\n tbl = pa.Table.from_pandas(df, preserve_index=False)\n read_tbl = _simple_table_roundtrip(tbl)\n\n col0_data = read_tbl[0]\n assert isinstance(col0_data, pa.ChunkedArray)\n\n # Split up into 2GB chunks\n assert col0_data.num_chunks == 2\n\n assert tbl.equals(read_tbl)\n\n\[email protected]\[email protected]_memory\ndef test_list_of_binary_large_cell():\n # ARROW-4688\n data = []\n\n # TODO(wesm): handle chunked children\n # 2^31 - 1 bytes in a single cell\n # data.append([b'x' * (1 << 20)] * 2047 + [b'x' * ((1 << 20) - 1)])\n\n # A little under 2GB in cell each containing approximately 10MB each\n data.extend([[b'x' * 1000000] * 10] * 214)\n\n arr = pa.array(data)\n table = pa.Table.from_arrays([arr], ['chunky_cells'])\n read_table = _simple_table_roundtrip(table)\n assert table.equals(read_table)\n\n\[email protected]\ndef test_index_column_name_duplicate(tempdir):\n data = {\n 'close': {\n pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998,\n pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998,\n },\n 'time': {\n pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp(\n '2017-06-30 01:31:00'\n ),\n pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp(\n '2017-06-30 01:32:00'\n ),\n }\n }\n path = str(tempdir / 'data.parquet')\n dfx = pd.DataFrame(data).set_index('time', drop=False)\n tdfx = pa.Table.from_pandas(dfx)\n 
_write_table(tdfx, path)\n arrow_table = _read_table(path)\n result_df = arrow_table.to_pandas()\n tm.assert_frame_equal(result_df, dfx)\n\n\[email protected]\ndef test_parquet_nested_convenience(tempdir):\n # ARROW-1684\n df = pd.DataFrame({\n 'a': [[1, 2, 3], None, [4, 5], []],\n 'b': [[1.], None, None, [6., 7.]],\n })\n\n path = str(tempdir / 'nested_convenience.parquet')\n\n table = pa.Table.from_pandas(df, preserve_index=False)\n _write_table(table, path)\n\n read = pq.read_table(path, columns=['a'])\n tm.assert_frame_equal(read.to_pandas(), df[['a']])\n\n read = pq.read_table(path, columns=['a', 'b'])\n tm.assert_frame_equal(read.to_pandas(), df)\n\n\[email protected]\ndef test_backwards_compatible_index_naming(datadir):\n expected_string = b\"\"\"\\\ncarat cut color clarity depth table price x y z\n 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43\n 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31\n 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31\n 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63\n 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75\n 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48\n 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47\n 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53\n 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49\n 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39\"\"\"\n expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\\s{2,}',\n index_col=None, header=0, engine='python')\n table = _read_table(datadir / 'v0.7.1.parquet')\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_backwards_compatible_index_multi_level_named(datadir):\n expected_string = b\"\"\"\\\ncarat cut color clarity depth table price x y z\n 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43\n 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31\n 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31\n 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63\n 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75\n 0.24 Very Good J VVS2 62.8 57.0 
336 3.94 3.96 2.48\n 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47\n 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53\n 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49\n 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39\"\"\"\n expected = pd.read_csv(\n io.BytesIO(expected_string), sep=r'\\s{2,}',\n index_col=['cut', 'color', 'clarity'],\n header=0, engine='python'\n ).sort_index()\n\n table = _read_table(datadir / 'v0.7.1.all-named-index.parquet')\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_backwards_compatible_index_multi_level_some_named(datadir):\n expected_string = b\"\"\"\\\ncarat cut color clarity depth table price x y z\n 0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43\n 0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31\n 0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31\n 0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63\n 0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75\n 0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48\n 0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47\n 0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53\n 0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49\n 0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39\"\"\"\n expected = pd.read_csv(\n io.BytesIO(expected_string),\n sep=r'\\s{2,}', index_col=['cut', 'color', 'clarity'],\n header=0, engine='python'\n ).sort_index()\n expected.index = expected.index.set_names(['cut', None, 'clarity'])\n\n table = _read_table(datadir / 'v0.7.1.some-named-index.parquet')\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_backwards_compatible_column_metadata_handling(datadir):\n expected = pd.DataFrame(\n {'a': [1, 2, 3], 'b': [.1, .2, .3],\n 'c': pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')})\n expected.index = pd.MultiIndex.from_arrays(\n [['a', 'b', 'c'],\n pd.date_range(\"2017-01-01\", periods=3, tz='Europe/Brussels')],\n names=['index', None])\n\n path = datadir / 
'v0.7.1.column-metadata-handling.parquet'\n table = _read_table(path)\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n table = _read_table(path, columns=['a'])\n result = table.to_pandas()\n tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True))\n\n\ndef _make_dataset_for_pickling(tempdir, N=100):\n path = tempdir / 'data.parquet'\n fs = LocalFileSystem.get_instance()\n\n df = pd.DataFrame({\n 'index': np.arange(N),\n 'values': np.random.randn(N)\n }, columns=['index', 'values'])\n table = pa.Table.from_pandas(df)\n\n num_groups = 3\n with pq.ParquetWriter(path, table.schema) as writer:\n for i in range(num_groups):\n writer.write_table(table)\n\n reader = pq.ParquetFile(path)\n assert reader.metadata.num_row_groups == num_groups\n\n metadata_path = tempdir / '_metadata'\n with fs.open(metadata_path, 'wb') as f:\n pq.write_metadata(table.schema, f)\n\n dataset = pq.ParquetDataset(tempdir, filesystem=fs)\n assert dataset.metadata_path == str(metadata_path)\n\n return dataset\n\n\[email protected]\[email protected]('pickler', [\n pytest.param(pickle, id='builtin'),\n pytest.param(pytest.importorskip('cloudpickle'), id='cloudpickle')\n])\ndef test_pickle_dataset(tempdir, datadir, pickler):\n def is_pickleable(obj):\n return obj == pickler.loads(pickler.dumps(obj))\n\n dataset = _make_dataset_for_pickling(tempdir)\n\n assert is_pickleable(dataset)\n assert is_pickleable(dataset.metadata)\n assert is_pickleable(dataset.metadata.schema)\n assert len(dataset.metadata.schema)\n for column in dataset.metadata.schema:\n assert is_pickleable(column)\n\n for piece in dataset.pieces:\n assert is_pickleable(piece)\n metadata = piece.get_metadata()\n assert metadata.num_row_groups\n for i in range(metadata.num_row_groups):\n assert is_pickleable(metadata.row_group(i))\n\n\[email protected]\ndef test_decimal_roundtrip(tempdir):\n num_values = 10\n\n columns = {}\n for precision in range(1, 39):\n for scale in range(0, precision + 1):\n with 
util.random_seed(0):\n random_decimal_values = [\n util.randdecimal(precision, scale)\n for _ in range(num_values)\n ]\n column_name = ('dec_precision_{:d}_scale_{:d}'\n .format(precision, scale))\n columns[column_name] = random_decimal_values\n\n expected = pd.DataFrame(columns)\n filename = tempdir / 'decimals.parquet'\n string_filename = str(filename)\n table = pa.Table.from_pandas(expected)\n _write_table(table, string_filename)\n result_table = _read_table(string_filename)\n result = result_table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\[email protected](\n raises=pa.ArrowException, reason='Parquet does not support negative scale'\n)\ndef test_decimal_roundtrip_negative_scale(tempdir):\n expected = pd.DataFrame({'decimal_num': [decimal.Decimal('1.23E4')]})\n filename = tempdir / 'decimals.parquet'\n string_filename = str(filename)\n t = pa.Table.from_pandas(expected)\n _write_table(t, string_filename)\n result_table = _read_table(string_filename)\n result = result_table.to_pandas()\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef test_parquet_writer_context_obj(tempdir):\n df = _test_dataframe(100)\n df['unique_id'] = 0\n\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n out = pa.BufferOutputStream()\n\n with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer:\n\n frames = []\n for i in range(10):\n df['unique_id'] = i\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n writer.write_table(arrow_table)\n\n frames.append(df.copy())\n\n buf = out.getvalue()\n result = _read_table(pa.BufferReader(buf))\n\n expected = pd.concat(frames, ignore_index=True)\n tm.assert_frame_equal(result.to_pandas(), expected)\n\n\[email protected]\ndef test_parquet_writer_context_obj_with_exception(tempdir):\n df = _test_dataframe(100)\n df['unique_id'] = 0\n\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n out = pa.BufferOutputStream()\n error_text = 'Artificial 
Error'\n\n try:\n with pq.ParquetWriter(out,\n arrow_table.schema,\n version='2.0') as writer:\n\n frames = []\n for i in range(10):\n df['unique_id'] = i\n arrow_table = pa.Table.from_pandas(df, preserve_index=False)\n writer.write_table(arrow_table)\n frames.append(df.copy())\n if i == 5:\n raise ValueError(error_text)\n except Exception as e:\n assert str(e) == error_text\n\n buf = out.getvalue()\n result = _read_table(pa.BufferReader(buf))\n\n expected = pd.concat(frames, ignore_index=True)\n tm.assert_frame_equal(result.to_pandas(), expected)\n\n\[email protected]\ndef test_zlib_compression_bug():\n # ARROW-3514: \"zlib deflate failed, output buffer too small\"\n table = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col'])\n f = io.BytesIO()\n pq.write_table(table, f, compression='gzip')\n\n f.seek(0)\n roundtrip = pq.read_table(f)\n tm.assert_frame_equal(roundtrip.to_pandas(), table.to_pandas())\n\n\[email protected]\ndef test_merging_parquet_tables_with_different_pandas_metadata(tempdir):\n # ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch\n schema = pa.schema([\n pa.field('int', pa.int16()),\n pa.field('float', pa.float32()),\n pa.field('string', pa.string())\n ])\n df1 = pd.DataFrame({\n 'int': np.arange(3, dtype=np.uint8),\n 'float': np.arange(3, dtype=np.float32),\n 'string': ['ABBA', 'EDDA', 'ACDC']\n })\n df2 = pd.DataFrame({\n 'int': [4, 5],\n 'float': [1.1, None],\n 'string': [None, None]\n })\n table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)\n table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)\n\n assert not table1.schema.equals(table2.schema)\n assert table1.schema.equals(table2.schema, check_metadata=False)\n\n writer = pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema)\n writer.write_table(table1)\n writer.write_table(table2)\n\n\ndef test_empty_row_groups(tempdir):\n # ARROW-3020\n table = pa.Table.from_arrays([pa.array([], type='int32')], ['f0'])\n\n path = 
tempdir / 'empty_row_groups.parquet'\n\n num_groups = 3\n with pq.ParquetWriter(path, table.schema) as writer:\n for i in range(num_groups):\n writer.write_table(table)\n\n reader = pq.ParquetFile(path)\n assert reader.metadata.num_row_groups == num_groups\n\n for i in range(num_groups):\n assert reader.read_row_group(i).equals(table)\n\n\[email protected]\ndef test_parquet_writer_with_caller_provided_filesystem():\n out = pa.BufferOutputStream()\n\n class CustomFS(FileSystem):\n def __init__(self):\n self.path = None\n self.mode = None\n\n def open(self, path, mode='rb'):\n self.path = path\n self.mode = mode\n return out\n\n fs = CustomFS()\n fname = 'expected_fname.parquet'\n df = _test_dataframe(100)\n table = pa.Table.from_pandas(df, preserve_index=False)\n\n with pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.0') \\\n as writer:\n writer.write_table(table)\n\n assert fs.path == fname\n assert fs.mode == 'wb'\n assert out.closed\n\n buf = out.getvalue()\n table_read = _read_table(pa.BufferReader(buf))\n df_read = table_read.to_pandas()\n tm.assert_frame_equal(df_read, df)\n\n # Should raise ValueError when filesystem is passed with file-like object\n with pytest.raises(ValueError) as err_info:\n pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs)\n expected_msg = (\"filesystem passed but where is file-like, so\"\n \" there is nothing to open with filesystem.\")\n assert str(err_info) == expected_msg\n\n\ndef test_writing_empty_lists():\n # ARROW-2591: [Python] Segmentation fault issue in pq.write_table\n arr1 = pa.array([[], []], pa.list_(pa.int32()))\n table = pa.Table.from_arrays([arr1], ['list(int32)'])\n _check_roundtrip(table)\n\n\ndef test_write_nested_zero_length_array_chunk_failure():\n # Bug report in ARROW-3792\n cols = OrderedDict(\n int32=pa.int32(),\n list_string=pa.list_(pa.string())\n )\n data = [[], [OrderedDict(int32=1, list_string=('G',)), ]]\n\n # This produces a table with a column like\n # <Column 
name='list_string' type=ListType(list<item: string>)>\n # [\n # [],\n # [\n # [\n # \"G\"\n # ]\n # ]\n # ]\n #\n # Each column is a ChunkedArray with 2 elements\n my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten()\n for batch in data]\n my_batches = [pa.RecordBatch.from_arrays(batch, pa.schema(cols))\n for batch in my_arrays]\n tbl = pa.Table.from_batches(my_batches, pa.schema(cols))\n _check_roundtrip(tbl)\n\n\[email protected]\ndef test_partitioned_dataset(tempdir):\n # ARROW-3208: Segmentation fault when reading a Parquet partitioned dataset\n # to a Parquet file\n path = tempdir / \"ARROW-3208\"\n df = pd.DataFrame({\n 'one': [-1, 10, 2.5, 100, 1000, 1, 29.2],\n 'two': [-1, 10, 2, 100, 1000, 1, 11],\n 'three': [0, 0, 0, 0, 0, 0, 0]\n })\n table = pa.Table.from_pandas(df)\n pq.write_to_dataset(table, root_path=str(path),\n partition_cols=['one', 'two'])\n table = pq.ParquetDataset(path).read()\n pq.write_table(table, path / \"output.parquet\")\n\n\ndef test_read_column_invalid_index():\n table = pa.table([pa.array([4, 5]), pa.array([\"foo\", \"bar\"])],\n names=['ints', 'strs'])\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n f = pq.ParquetFile(bio.getvalue())\n assert f.reader.read_column(0).to_pylist() == [4, 5]\n assert f.reader.read_column(1).to_pylist() == [\"foo\", \"bar\"]\n for index in (-1, 2):\n with pytest.raises((ValueError, IndexError)):\n f.reader.read_column(index)\n\n\ndef test_direct_read_dictionary():\n # ARROW-3325\n repeats = 10\n nunique = 5\n\n data = [\n [tm.rands(10) for i in range(nunique)] * repeats,\n\n ]\n table = pa.table(data, names=['f0'])\n\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n contents = bio.getvalue()\n\n result = pq.read_table(pa.BufferReader(contents),\n read_dictionary=['f0'])\n\n # Compute dictionary-encoded subfield\n expected = pa.table([table[0].dictionary_encode()], names=['f0'])\n assert result.equals(expected)\n\n\ndef test_dataset_read_dictionary(tempdir):\n path = 
tempdir / \"ARROW-3325-dataset\"\n t1 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])\n t2 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])\n pq.write_to_dataset(t1, root_path=str(path))\n pq.write_to_dataset(t2, root_path=str(path))\n\n result = pq.ParquetDataset(path, read_dictionary=['f0']).read()\n\n # The order of the chunks is non-deterministic\n ex_chunks = [t1[0].chunk(0).dictionary_encode(),\n t2[0].chunk(0).dictionary_encode()]\n\n assert result[0].num_chunks == 2\n c0, c1 = result[0].chunk(0), result[0].chunk(1)\n if c0.equals(ex_chunks[0]):\n assert c1.equals(ex_chunks[1])\n else:\n assert c0.equals(ex_chunks[1])\n assert c1.equals(ex_chunks[0])\n\n\ndef test_direct_read_dictionary_subfield():\n repeats = 10\n nunique = 5\n\n data = [\n [[tm.rands(10)] for i in range(nunique)] * repeats,\n ]\n table = pa.table(data, names=['f0'])\n\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n contents = bio.getvalue()\n result = pq.read_table(pa.BufferReader(contents),\n read_dictionary=['f0.list.item'])\n\n arr = pa.array(data[0])\n values_as_dict = arr.values.dictionary_encode()\n\n inner_indices = values_as_dict.indices.cast('int32')\n new_values = pa.DictionaryArray.from_arrays(inner_indices,\n values_as_dict.dictionary)\n\n offsets = pa.array(range(51), type='int32')\n expected_arr = pa.ListArray.from_arrays(offsets, new_values)\n expected = pa.table([expected_arr], names=['f0'])\n\n assert result.equals(expected)\n assert result[0].num_chunks == 1\n\n\[email protected]\ndef test_dataset_metadata(tempdir):\n path = tempdir / \"ARROW-1983-dataset\"\n\n # create and write a test dataset\n df = pd.DataFrame({\n 'one': [1, 2, 3],\n 'two': [-1, -2, -3],\n 'three': [[1, 2], [2, 3], [3, 4]],\n })\n table = pa.Table.from_pandas(df)\n\n metadata_list = []\n pq.write_to_dataset(table, root_path=str(path),\n partition_cols=['one', 'two'],\n metadata_collector=metadata_list)\n\n # open the dataset and collect metadata from 
pieces:\n dataset = pq.ParquetDataset(path)\n metadata_list2 = [p.get_metadata() for p in dataset.pieces]\n\n # compare metadata list content:\n assert len(metadata_list) == len(metadata_list2)\n for md, md2 in zip(metadata_list, metadata_list2):\n d = md.to_dict()\n d2 = md2.to_dict()\n # serialized_size is initialized in the reader:\n assert d.pop('serialized_size') == 0\n assert d2.pop('serialized_size') > 0\n assert d == d2\n\n\ndef test_parquet_file_too_small(tempdir):\n path = str(tempdir / \"test.parquet\")\n with pytest.raises(pa.ArrowIOError,\n match='size is 0 bytes'):\n with open(path, 'wb') as f:\n pass\n pq.read_table(path)\n\n with pytest.raises(pa.ArrowIOError,\n match='size is 4 bytes'):\n with open(path, 'wb') as f:\n f.write(b'ffff')\n pq.read_table(path)\n\n\[email protected]\ndef test_categorical_index_survives_roundtrip():\n # ARROW-3652, addressed by ARROW-3246\n df = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2'])\n df['c1'] = df['c1'].astype('category')\n df = df.set_index(['c1'])\n\n table = pa.Table.from_pandas(df)\n bos = pa.BufferOutputStream()\n pq.write_table(table, bos)\n ref_df = pq.read_pandas(bos.getvalue()).to_pandas()\n assert isinstance(ref_df.index, pd.CategoricalIndex)\n assert ref_df.index.equals(df.index)\n\n\[email protected]\ndef test_categorical_order_survives_roundtrip():\n # ARROW-6302\n df = pd.DataFrame({\"a\": pd.Categorical(\n [\"a\", \"b\", \"c\", \"a\"], categories=[\"b\", \"c\", \"d\"], ordered=True)})\n\n table = pa.Table.from_pandas(df)\n bos = pa.BufferOutputStream()\n pq.write_table(table, bos)\n\n contents = bos.getvalue()\n result = pq.read_pandas(contents).to_pandas()\n\n tm.assert_frame_equal(result, df)\n\n\ndef test_dictionary_array_automatically_read():\n # ARROW-3246\n\n # Make a large dictionary, a little over 4MB of data\n dict_length = 4000\n dict_values = pa.array([('x' * 1000 + '_{}'.format(i))\n for i in range(dict_length)])\n\n num_chunks = 10\n chunk_size = 100\n chunks = []\n for 
i in range(num_chunks):\n indices = np.random.randint(0, dict_length,\n size=chunk_size).astype(np.int32)\n chunks.append(pa.DictionaryArray.from_arrays(pa.array(indices),\n dict_values))\n\n table = pa.table([pa.chunked_array(chunks)], names=['f0'])\n\n bio = pa.BufferOutputStream()\n pq.write_table(table, bio)\n contents = bio.getvalue()\n result = pq.read_table(pa.BufferReader(contents))\n\n assert result.equals(table)\n\n # The only key in the metadata was the Arrow schema key\n assert result.schema.metadata is None\n\n\[email protected]\ndef test_pandas_categorical_na_type_row_groups():\n # ARROW-5085\n df = pd.DataFrame({\"col\": [None] * 100, \"int\": [1.0] * 100})\n df_category = df.astype({\"col\": \"category\", \"int\": \"category\"})\n table = pa.Table.from_pandas(df)\n table_cat = pa.Table.from_pandas(df_category)\n buf = pa.BufferOutputStream()\n\n # it works\n pq.write_table(table_cat, buf, version=\"2.0\", chunk_size=10)\n result = pq.read_table(buf.getvalue())\n\n # Result is non-categorical\n assert result[0].equals(table[0])\n assert result[1].equals(table[1])\n\n\[email protected]\ndef test_pandas_categorical_roundtrip():\n # ARROW-5480, this was enabled by ARROW-3246\n\n # Have one of the categories unobserved and include a null (-1)\n codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32')\n categories = ['foo', 'bar', 'baz']\n df = pd.DataFrame({'x': pd.Categorical.from_codes(\n codes, categories=categories)})\n\n buf = pa.BufferOutputStream()\n pq.write_table(pa.table(df), buf)\n\n result = pq.read_table(buf.getvalue()).to_pandas()\n assert result.x.dtype == 'category'\n assert (result.x.cat.categories == categories).all()\n tm.assert_frame_equal(result, df)\n\n\[email protected]\ndef test_multi_dataset_metadata(tempdir):\n filenames = [\"ARROW-1983-dataset.0\", \"ARROW-1983-dataset.1\"]\n metapath = str(tempdir / \"_metadata\")\n\n # create a test dataset\n df = pd.DataFrame({\n 'one': [1, 2, 3],\n 'two': [-1, -2, -3],\n 'three': [[1, 2], 
[2, 3], [3, 4]],\n })\n table = pa.Table.from_pandas(df)\n\n # write dataset twice and collect/merge metadata\n _meta = None\n for filename in filenames:\n meta = []\n pq.write_table(table, str(tempdir / filename),\n metadata_collector=meta)\n meta[0].set_file_path(filename)\n if _meta is None:\n _meta = meta[0]\n else:\n _meta.append_row_groups(meta[0])\n\n # Write merged metadata-only file\n with open(metapath, \"wb\") as f:\n _meta.write_metadata_file(f)\n\n # Read back the metadata\n meta = pq.read_metadata(metapath)\n md = meta.to_dict()\n _md = _meta.to_dict()\n for key in _md:\n if key != 'serialized_size':\n assert _md[key] == md[key]\n assert _md['num_columns'] == 3\n assert _md['num_rows'] == 6\n assert _md['num_row_groups'] == 2\n assert _md['serialized_size'] == 0\n assert md['serialized_size'] > 0\n\n\[email protected]\ndef test_filter_before_validate_schema(tempdir):\n # ARROW-4076 apply filter before schema validation\n # to avoid checking unneeded schemas\n\n # create partitioned dataset with mismatching schemas which would\n # otherwise raise if first validation all schemas\n dir1 = tempdir / 'A=0'\n dir1.mkdir()\n table1 = pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]}))\n pq.write_table(table1, dir1 / 'data.parquet')\n\n dir2 = tempdir / 'A=1'\n dir2.mkdir()\n table2 = pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']}))\n pq.write_table(table2, dir2 / 'data.parquet')\n\n # read single file using filter\n table = pq.read_table(tempdir, filters=[[('A', '==', 0)]])\n assert table.column('B').equals(pa.chunked_array([[1, 2, 3]]))\n" ]
[ [ "pandas.Categorical.from_codes", "pandas.concat", "pandas.to_datetime", "numpy.random.seed", "pandas.Timestamp", "numpy.arange", "pandas.Categorical", "pandas.DataFrame", "pandas.MultiIndex.from_arrays", "pandas.util.testing.assert_frame_equal", "numpy.ones", "pandas.DatetimeIndex", "numpy.random.randn", "numpy.iinfo", "pandas.util.testing.rands", "pandas.date_range", "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
isabella232/tensorboard
[ "77cf61f74dd57e4f3a6256e3972335bbd82feb51", "77cf61f74dd57e4f3a6256e3972335bbd82feb51" ]
[ "tensorboard/data/provider_test.py", "tensorboard/scripts/generate_testdata.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Unit tests for `tensorboard.data.provider`.\"\"\"\n\n\nimport numpy as np\nimport six\n\nfrom tensorboard import test as tb_test\nfrom tensorboard.data import provider\n\n\nclass DataProviderTest(tb_test.TestCase):\n def test_abstract(self):\n with six.assertRaisesRegex(self, TypeError, \"abstract class\"):\n provider.DataProvider()\n\n\nclass ExperimentMetadataTest(tb_test.TestCase):\n def test_attributes(self):\n e1 = provider.ExperimentMetadata(\n experiment_name=\"FooExperiment\",\n experiment_description=\"Experiment on Foo\",\n creation_time=1.25,\n )\n self.assertEqual(e1.experiment_name, \"FooExperiment\")\n self.assertEqual(e1.experiment_description, \"Experiment on Foo\")\n self.assertEqual(e1.creation_time, 1.25)\n\n\nclass RunTest(tb_test.TestCase):\n def test_eq(self):\n a1 = provider.Run(run_id=\"a\", run_name=\"aa\", start_time=1.25)\n a2 = provider.Run(run_id=\"a\", run_name=\"aa\", start_time=1.25)\n b = provider.Run(run_id=\"b\", run_name=\"bb\", start_time=-1.75)\n self.assertEqual(a1, a2)\n self.assertNotEqual(a1, b)\n self.assertNotEqual(b, object())\n\n def test_repr(self):\n x = provider.Run(run_id=\"alpha\", run_name=\"bravo\", start_time=1.25)\n repr_ = repr(x)\n self.assertIn(repr(x.run_id), repr_)\n self.assertIn(repr(x.run_name), repr_)\n 
self.assertIn(repr(x.start_time), repr_)\n\n\nclass ScalarTimeSeriesTest(tb_test.TestCase):\n def _scalar_time_series(\n self, max_step, max_wall_time, plugin_content, description, display_name\n ):\n # Helper to use explicit kwargs.\n return provider.ScalarTimeSeries(\n max_step=max_step,\n max_wall_time=max_wall_time,\n plugin_content=plugin_content,\n description=description,\n display_name=display_name,\n )\n\n def test_repr(self):\n x = provider.ScalarTimeSeries(\n max_step=77,\n max_wall_time=1234.5,\n plugin_content=b\"AB\\xCD\\xEF!\\x00\",\n description=\"test test\",\n display_name=\"one two\",\n )\n repr_ = repr(x)\n self.assertIn(repr(x.max_step), repr_)\n self.assertIn(repr(x.max_wall_time), repr_)\n self.assertIn(repr(x.plugin_content), repr_)\n self.assertIn(repr(x.description), repr_)\n self.assertIn(repr(x.display_name), repr_)\n\n def test_eq(self):\n x1 = self._scalar_time_series(77, 1234.5, b\"\\x12\", \"one\", \"two\")\n x2 = self._scalar_time_series(77, 1234.5, b\"\\x12\", \"one\", \"two\")\n x3 = self._scalar_time_series(66, 4321.0, b\"\\x7F\", \"hmm\", \"hum\")\n self.assertEqual(x1, x2)\n self.assertNotEqual(x1, x3)\n self.assertNotEqual(x1, object())\n\n def test_hash(self):\n x1 = self._scalar_time_series(77, 1234.5, b\"\\x12\", \"one\", \"two\")\n x2 = self._scalar_time_series(77, 1234.5, b\"\\x12\", \"one\", \"two\")\n x3 = self._scalar_time_series(66, 4321.0, b\"\\x7F\", \"hmm\", \"hum\")\n self.assertEqual(hash(x1), hash(x2))\n # The next check is technically not required by the `__hash__`\n # contract, but _should_ pass; failure on this assertion would at\n # least warrant some scrutiny.\n self.assertNotEqual(hash(x1), hash(x3))\n\n\nclass ScalarDatumTest(tb_test.TestCase):\n def test_repr(self):\n x = provider.ScalarDatum(step=123, wall_time=234.5, value=-0.125)\n repr_ = repr(x)\n self.assertIn(repr(x.step), repr_)\n self.assertIn(repr(x.wall_time), repr_)\n self.assertIn(repr(x.value), repr_)\n\n def test_eq(self):\n x1 = 
provider.ScalarDatum(step=12, wall_time=0.25, value=1.25)\n x2 = provider.ScalarDatum(step=12, wall_time=0.25, value=1.25)\n x3 = provider.ScalarDatum(step=23, wall_time=3.25, value=-0.5)\n self.assertEqual(x1, x2)\n self.assertNotEqual(x1, x3)\n self.assertNotEqual(x1, object())\n\n def test_hash(self):\n x1 = provider.ScalarDatum(step=12, wall_time=0.25, value=1.25)\n x2 = provider.ScalarDatum(step=12, wall_time=0.25, value=1.25)\n x3 = provider.ScalarDatum(step=23, wall_time=3.25, value=-0.5)\n self.assertEqual(hash(x1), hash(x2))\n # The next check is technically not required by the `__hash__`\n # contract, but _should_ pass; failure on this assertion would at\n # least warrant some scrutiny.\n self.assertNotEqual(hash(x1), hash(x3))\n\n\nclass TensorTimeSeriesTest(tb_test.TestCase):\n def _tensor_time_series(\n self, max_step, max_wall_time, plugin_content, description, display_name\n ):\n # Helper to use explicit kwargs.\n return provider.TensorTimeSeries(\n max_step=max_step,\n max_wall_time=max_wall_time,\n plugin_content=plugin_content,\n description=description,\n display_name=display_name,\n )\n\n def test_repr(self):\n x = provider.TensorTimeSeries(\n max_step=77,\n max_wall_time=1234.5,\n plugin_content=b\"AB\\xCD\\xEF!\\x00\",\n description=\"test test\",\n display_name=\"one two\",\n )\n repr_ = repr(x)\n self.assertIn(repr(x.max_step), repr_)\n self.assertIn(repr(x.max_wall_time), repr_)\n self.assertIn(repr(x.plugin_content), repr_)\n self.assertIn(repr(x.description), repr_)\n self.assertIn(repr(x.display_name), repr_)\n\n def test_eq(self):\n x1 = self._tensor_time_series(77, 1234.5, b\"\\x12\", \"one\", \"two\")\n x2 = self._tensor_time_series(77, 1234.5, b\"\\x12\", \"one\", \"two\")\n x3 = self._tensor_time_series(66, 4321.0, b\"\\x7F\", \"hmm\", \"hum\")\n self.assertEqual(x1, x2)\n self.assertNotEqual(x1, x3)\n self.assertNotEqual(x1, object())\n\n def test_hash(self):\n x1 = self._tensor_time_series(77, 1234.5, b\"\\x12\", \"one\", 
\"two\")\n x2 = self._tensor_time_series(77, 1234.5, b\"\\x12\", \"one\", \"two\")\n x3 = self._tensor_time_series(66, 4321.0, b\"\\x7F\", \"hmm\", \"hum\")\n self.assertEqual(hash(x1), hash(x2))\n # The next check is technically not required by the `__hash__`\n # contract, but _should_ pass; failure on this assertion would at\n # least warrant some scrutiny.\n self.assertNotEqual(hash(x1), hash(x3))\n\n\nclass TensorDatumTest(tb_test.TestCase):\n def test_repr(self):\n x = provider.TensorDatum(\n step=123, wall_time=234.5, numpy=np.array(-0.25)\n )\n repr_ = repr(x)\n self.assertIn(repr(x.step), repr_)\n self.assertIn(repr(x.wall_time), repr_)\n self.assertIn(repr(x.numpy), repr_)\n\n def test_eq(self):\n nd = np.array\n x1 = provider.TensorDatum(step=12, wall_time=0.25, numpy=nd([1.0, 2.0]))\n x2 = provider.TensorDatum(step=12, wall_time=0.25, numpy=nd([1.0, 2.0]))\n x3 = provider.TensorDatum(\n step=23, wall_time=3.25, numpy=nd([-0.5, -2.5])\n )\n self.assertEqual(x1, x2)\n self.assertNotEqual(x1, x3)\n self.assertNotEqual(x1, object())\n\n def test_eq_with_rank0_tensor(self):\n x1 = provider.TensorDatum(\n step=12, wall_time=0.25, numpy=np.array([1.25])\n )\n x2 = provider.TensorDatum(\n step=12, wall_time=0.25, numpy=np.array([1.25])\n )\n x3 = provider.TensorDatum(\n step=23, wall_time=3.25, numpy=np.array([1.25])\n )\n self.assertEqual(x1, x2)\n self.assertNotEqual(x1, x3)\n self.assertNotEqual(x1, object())\n\n def test_hash(self):\n x = provider.TensorDatum(\n step=12, wall_time=0.25, numpy=np.array([1.25])\n )\n with six.assertRaisesRegex(self, TypeError, \"unhashable type\"):\n hash(x)\n\n\nclass BlobSequenceTimeSeriesTest(tb_test.TestCase):\n def _blob_sequence_time_series(\n self,\n max_step,\n max_wall_time,\n max_length,\n plugin_content,\n description,\n display_name,\n ):\n # Helper to use explicit kwargs.\n return provider.BlobSequenceTimeSeries(\n max_step=max_step,\n max_wall_time=max_wall_time,\n max_length=max_length,\n 
plugin_content=plugin_content,\n description=description,\n display_name=display_name,\n )\n\n def test_repr(self):\n x = provider.BlobSequenceTimeSeries(\n max_step=77,\n max_wall_time=1234.5,\n max_length=6,\n plugin_content=b\"AB\\xCD\\xEF!\\x00\",\n description=\"test test\",\n display_name=\"one two\",\n )\n repr_ = repr(x)\n self.assertIn(repr(x.max_step), repr_)\n self.assertIn(repr(x.max_wall_time), repr_)\n self.assertIn(repr(x.max_length), repr_)\n self.assertIn(repr(x.plugin_content), repr_)\n self.assertIn(repr(x.description), repr_)\n self.assertIn(repr(x.display_name), repr_)\n\n def test_eq(self):\n x1 = self._blob_sequence_time_series(\n 77, 1234.5, 6, b\"\\x12\", \"one\", \"two\"\n )\n x2 = self._blob_sequence_time_series(\n 77, 1234.5, 6, b\"\\x12\", \"one\", \"two\"\n )\n x3 = self._blob_sequence_time_series(\n 66, 4321.0, 7, b\"\\x7F\", \"hmm\", \"hum\"\n )\n self.assertEqual(x1, x2)\n self.assertNotEqual(x1, x3)\n self.assertNotEqual(x1, object())\n\n def test_hash(self):\n x1 = self._blob_sequence_time_series(\n 77, 1234.5, 6, b\"\\x12\", \"one\", \"two\"\n )\n x2 = self._blob_sequence_time_series(\n 77, 1234.5, 6, b\"\\x12\", \"one\", \"two\"\n )\n x3 = self._blob_sequence_time_series(\n 66, 4321.0, 7, b\"\\x7F\", \"hmm\", \"hum\"\n )\n self.assertEqual(hash(x1), hash(x2))\n # The next check is technically not required by the `__hash__`\n # contract, but _should_ pass; failure on this assertion would at\n # least warrant some scrutiny.\n self.assertNotEqual(hash(x1), hash(x3))\n\n\nclass BlobReferenceTest(tb_test.TestCase):\n def test_repr(self):\n x = provider.BlobReference(url=\"foo\", blob_key=\"baz\")\n repr_ = repr(x)\n self.assertIn(repr(x.url), repr_)\n self.assertIn(repr(x.blob_key), repr_)\n\n def test_eq(self):\n x1 = provider.BlobReference(url=\"foo\", blob_key=\"baz\")\n x2 = provider.BlobReference(url=\"foo\", blob_key=\"baz\")\n x3 = provider.BlobReference(url=\"foo\", blob_key=\"qux\")\n self.assertEqual(x1, x2)\n 
self.assertNotEqual(x1, x3)\n self.assertNotEqual(x1, object())\n\n def test_hash(self):\n x1 = provider.BlobReference(url=\"foo\", blob_key=\"baz\")\n x2 = provider.BlobReference(url=\"foo\", blob_key=\"baz\")\n x3 = provider.BlobReference(url=\"foo\", blob_key=\"qux\")\n self.assertEqual(hash(x1), hash(x2))\n # The next check is technically not required by the `__hash__`\n # contract, but _should_ pass; failure on this assertion would at\n # least warrant some scrutiny.\n self.assertNotEqual(hash(x1), hash(x3))\n\n\nclass BlobSequenceDatumTest(tb_test.TestCase):\n def test_repr(self):\n x = provider.BlobSequenceDatum(\n step=123, wall_time=234.5, values=(\"foo\", \"bar\", \"baz\")\n )\n repr_ = repr(x)\n self.assertIn(repr(x.step), repr_)\n self.assertIn(repr(x.wall_time), repr_)\n self.assertIn(repr(x.values), repr_)\n\n def test_eq(self):\n x1 = provider.BlobSequenceDatum(\n step=12, wall_time=0.25, values=(\"foo\", \"bar\", \"baz\")\n )\n x2 = provider.BlobSequenceDatum(\n step=12, wall_time=0.25, values=(\"foo\", \"bar\", \"baz\")\n )\n x3 = provider.BlobSequenceDatum(\n step=23, wall_time=3.25, values=(\"qux\",)\n )\n self.assertEqual(x1, x2)\n self.assertNotEqual(x1, x3)\n self.assertNotEqual(x1, object())\n\n def test_hash(self):\n x1 = provider.BlobSequenceDatum(\n step=12, wall_time=0.25, values=(\"foo\", \"bar\", \"baz\")\n )\n x2 = provider.BlobSequenceDatum(\n step=12, wall_time=0.25, values=(\"foo\", \"bar\", \"baz\")\n )\n x3 = provider.BlobSequenceDatum(\n step=23, wall_time=3.25, values=(\"qux\",)\n )\n self.assertEqual(hash(x1), hash(x2))\n # The next check is technically not required by the `__hash__`\n # contract, but _should_ pass; failure on this assertion would at\n # least warrant some scrutiny.\n self.assertNotEqual(hash(x1), hash(x3))\n\n\nclass RunTagFilterTest(tb_test.TestCase):\n def test_defensive_copy(self):\n runs = [\"r1\"]\n tags = [\"t1\"]\n f = provider.RunTagFilter(runs, tags)\n runs.append(\"r2\")\n tags.pop()\n 
self.assertEqual(frozenset(f.runs), frozenset([\"r1\"]))\n self.assertEqual(frozenset(f.tags), frozenset([\"t1\"]))\n\n def test_validates_runs_tags(self):\n # Accidentally passed scalar strings\n with six.assertRaisesRegex(self, TypeError, \"runs:.*got.*str.*myrun\"):\n provider.RunTagFilter(runs=\"myrun\")\n with six.assertRaisesRegex(self, TypeError, \"tags:.*got.*str.*mytag\"):\n provider.RunTagFilter(tags=\"mytag\")\n\n # Passed collections with non-string elements\n with six.assertRaisesRegex(\n self, TypeError, \"runs:.*got item of type.*NoneType.*None\"\n ):\n provider.RunTagFilter(runs=[None])\n with six.assertRaisesRegex(\n self, TypeError, \"tags:.*got item of type.*int.*3\"\n ):\n provider.RunTagFilter(tags=[\"one\", \"two\", 3])\n\n def test_repr(self):\n x = provider.RunTagFilter(runs=[\"one\", \"two\"], tags=[\"three\", \"four\"])\n repr_ = repr(x)\n self.assertIn(repr(x.runs), repr_)\n self.assertIn(repr(x.tags), repr_)\n\n\nif __name__ == \"__main__\":\n tb_test.main()\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Generate some standard test data for debugging TensorBoard.\"\"\"\n\n\nimport bisect\nimport math\nimport os\nimport os.path\nimport random\nimport shutil\n\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\n\nflags.DEFINE_string(\n \"target\",\n None,\n \"\"\"The directory where serialized data will be written\"\"\",\n)\n\nflags.DEFINE_boolean(\n \"overwrite\",\n False,\n \"\"\"Whether to remove and overwrite TARGET if it already exists.\"\"\",\n)\n\nFLAGS = flags.FLAGS\n\n# Hardcode a start time and reseed so script always generates the same data.\n_start_time = 0\nrandom.seed(0)\n\n\ndef _MakeHistogramBuckets():\n v = 1e-12\n buckets = []\n neg_buckets = []\n while v < 1e20:\n buckets.append(v)\n neg_buckets.append(-v)\n v *= 1.1\n # Should include DBL_MAX, but won't bother for test data.\n return neg_buckets[::-1] + [0] + buckets\n\n\ndef _MakeHistogram(values):\n \"\"\"Convert values into a histogram proto using logic from histogram.cc.\"\"\"\n limits = _MakeHistogramBuckets()\n counts = [0] * len(limits)\n for v in values:\n idx = bisect.bisect_left(limits, v)\n counts[idx] += 1\n\n limit_counts = [\n (limits[i], counts[i]) for i in xrange(len(limits)) if counts[i]\n ]\n bucket_limit = [lc[0] for 
lc in limit_counts]\n bucket = [lc[1] for lc in limit_counts]\n sum_sq = sum(v * v for v in values)\n return tf.compat.v1.HistogramProto(\n min=min(values),\n max=max(values),\n num=len(values),\n sum=sum(values),\n sum_squares=sum_sq,\n bucket_limit=bucket_limit,\n bucket=bucket,\n )\n\n\ndef WriteScalarSeries(writer, tag, f, n=5):\n \"\"\"Write a series of scalar events to writer, using f to create values.\"\"\"\n step = 0\n wall_time = _start_time\n for i in xrange(n):\n v = f(i)\n value = tf.Summary.Value(tag=tag, simple_value=v)\n summary = tf.Summary(value=[value])\n event = tf.Event(wall_time=wall_time, step=step, summary=summary)\n writer.add_event(event)\n step += 1\n wall_time += 10\n\n\ndef WriteHistogramSeries(writer, tag, mu_sigma_tuples, n=20):\n \"\"\"Write a sequence of normally distributed histograms to writer.\"\"\"\n step = 0\n wall_time = _start_time\n for [mean, stddev] in mu_sigma_tuples:\n data = [random.normalvariate(mean, stddev) for _ in xrange(n)]\n histo = _MakeHistogram(data)\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=histo)])\n event = tf.Event(wall_time=wall_time, step=step, summary=summary)\n writer.add_event(event)\n step += 10\n wall_time += 100\n\n\ndef WriteImageSeries(writer, tag, n_images=1):\n \"\"\"Write a few dummy images to writer.\"\"\"\n step = 0\n session = tf.compat.v1.Session()\n p = tf.compat.v1.placeholder(\"uint8\", (1, 4, 4, 3))\n s = tf.compat.v1.summary.image(tag, p)\n for _ in xrange(n_images):\n im = np.random.random_integers(0, 255, (1, 4, 4, 3))\n summ = session.run(s, feed_dict={p: im})\n writer.add_summary(summ, step)\n step += 20\n session.close()\n\n\ndef WriteAudioSeries(writer, tag, n_audio=1):\n \"\"\"Write a few dummy audio clips to writer.\"\"\"\n step = 0\n session = tf.compat.v1.Session()\n\n min_frequency_hz = 440\n max_frequency_hz = 880\n sample_rate = 4000\n duration_frames = sample_rate // 2 # 0.5 seconds.\n frequencies_per_run = 1\n num_channels = 2\n\n p = 
tf.compat.v1.placeholder(\n \"float32\", (frequencies_per_run, duration_frames, num_channels)\n )\n s = tf.compat.v1.summary.audio(tag, p, sample_rate)\n\n for _ in xrange(n_audio):\n # Generate a different frequency for each channel to show stereo works.\n frequencies = np.random.random_integers(\n min_frequency_hz,\n max_frequency_hz,\n size=(frequencies_per_run, num_channels),\n )\n tiled_frequencies = np.tile(frequencies, (1, duration_frames))\n tiled_increments = np.tile(\n np.arange(0, duration_frames), (num_channels, 1)\n ).T.reshape(1, duration_frames * num_channels)\n tones = np.sin(\n 2.0 * np.pi * tiled_frequencies * tiled_increments / sample_rate\n )\n tones = tones.reshape(\n frequencies_per_run, duration_frames, num_channels\n )\n\n summ = session.run(s, feed_dict={p: tones})\n writer.add_summary(summ, step)\n step += 20\n session.close()\n\n\ndef GenerateTestData(path):\n \"\"\"Generates the test data directory.\"\"\"\n run1_path = os.path.join(path, \"run1\")\n os.makedirs(run1_path)\n writer1 = tf.summary.FileWriter(run1_path)\n WriteScalarSeries(writer1, \"foo/square\", lambda x: x * x)\n WriteScalarSeries(writer1, \"bar/square\", lambda x: x * x)\n WriteScalarSeries(writer1, \"foo/sin\", math.sin)\n WriteScalarSeries(writer1, \"foo/cos\", math.cos)\n WriteHistogramSeries(\n writer1, \"histo1\", [[0, 1], [0.3, 1], [0.5, 1], [0.7, 1], [1, 1]]\n )\n WriteImageSeries(writer1, \"im1\")\n WriteImageSeries(writer1, \"im2\")\n WriteAudioSeries(writer1, \"au1\")\n\n run2_path = os.path.join(path, \"run2\")\n os.makedirs(run2_path)\n writer2 = tf.summary.FileWriter(run2_path)\n WriteScalarSeries(writer2, \"foo/square\", lambda x: x * x * 2)\n WriteScalarSeries(writer2, \"bar/square\", lambda x: x * x * 3)\n WriteScalarSeries(writer2, \"foo/cos\", lambda x: math.cos(x) * 2)\n WriteHistogramSeries(\n writer2, \"histo1\", [[0, 2], [0.3, 2], [0.5, 2], [0.7, 2], [1, 2]]\n )\n WriteHistogramSeries(\n writer2, \"histo2\", [[0, 1], [0.3, 1], [0.5, 1], [0.7, 1], 
[1, 1]]\n )\n WriteImageSeries(writer2, \"im1\")\n WriteAudioSeries(writer2, \"au2\")\n\n graph_def = tf.compat.v1.GraphDef()\n node1 = graph_def.node.add()\n node1.name = \"a\"\n node1.op = \"matmul\"\n node2 = graph_def.node.add()\n node2.name = \"b\"\n node2.op = \"matmul\"\n node2.input.extend([\"a:0\"])\n\n writer1.add_graph(graph_def)\n node3 = graph_def.node.add()\n node3.name = \"c\"\n node3.op = \"matmul\"\n node3.input.extend([\"a:0\", \"b:0\"])\n writer2.add_graph(graph_def)\n writer1.close()\n writer2.close()\n\n\ndef main(unused_argv=None):\n target = FLAGS.target\n if not target:\n print(\"The --target flag is required.\")\n return -1\n if os.path.exists(target):\n if FLAGS.overwrite:\n if os.path.isdir(target):\n shutil.rmtree(target)\n else:\n os.remove(target)\n else:\n print(\n \"Refusing to overwrite target %s without --overwrite\" % target\n )\n return -2\n GenerateTestData(target)\n return 0\n\n\nif __name__ == \"__main__\":\n app.run(main)\n" ]
[ [ "numpy.array" ], [ "tensorflow.summary.FileWriter", "numpy.arange", "tensorflow.compat.v1.summary.image", "numpy.tile", "tensorflow.compat.v1.summary.audio", "tensorflow.compat.v1.Session", "numpy.sin", "tensorflow.compat.v1.placeholder", "tensorflow.Summary.Value", "numpy.random.random_integers", "tensorflow.Event", "tensorflow.compat.v1.GraphDef", "tensorflow.Summary" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
czhongyu/information-extraction
[ "6cf9905bed5ee9c33706854cd6ceae04194aa5e4", "6cf9905bed5ee9c33706854cd6ceae04194aa5e4" ]
[ "pytorch/classification/rcnn/model.py", "pytorch/classification/lstm-pooling/model.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n \nclass RCNN(nn.Module):\n def __init__(self, vocab_size, embed_dim, output_dim, hidden_dim, num_layers, dropout, weight):\n super(RCNN, self).__init__()\n self.embedding = nn.Embedding(vocab_size, embed_dim)\n self.embedding.weight.data.copy_(torch.from_numpy(weight))\n self.lstm = nn.LSTM(embed_dim, hidden_dim, num_layers=num_layers, bidirectional=True, dropout=dropout)\n self.linear = nn.Linear(2 * hidden_dim + embed_dim, hidden_dim)\n self.fc = nn.Linear(hidden_dim, output_dim)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, text):\n # input = input.permute(1, 0, 2)\n embeds = self.embedding(text)\n embeds = embeds.permute(1, 0, 2)\n # embeds = self.dropout(embeds)\n # self.lstm.flatten_parameters()\n output, (hidden, _) = self.lstm(embeds)\n\n output = torch.cat((output, embeds), 2)\n output = output.permute(1, 0, 2)\n output = self.linear(output).permute(0, 2, 1)\n\n pool = F.max_pool1d(output, output.size(2)).squeeze(2)\n # hidden = self.dropout(hidden)\n # pool = self.dropout(pool)\n\n # output = self.fc(hidden.squeeze(0))\n output = self.fc(pool)\n return output\n ", "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n \nclass LSTM_pooling(nn.Module):\n def __init__(self, vocab_size, embed_dim, output_dim, hidden_dim, num_layers, dropout, weight):\n super().__init__()\n self.embedding = nn.Embedding(vocab_size, embed_dim)\n self.embedding.weight.data.copy_(torch.from_numpy(weight))\n \n self.lstm = nn.LSTM(embed_dim, hidden_dim, num_layers=num_layers, bidirectional=True, dropout=dropout)\n self.fc = nn.Linear(hidden_dim * 2, output_dim)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, text):\n text = text.permute(1, 0)\n embeds = self.embedding(text)\n embeds = self.dropout(embeds)\n output, (hidden, _) = self.lstm(embeds)\n\n pool = F.max_pool1d(output.permute(1, 2, 
0), output.size(0)).squeeze(2)\n pool = self.dropout(pool)\n\n output = self.fc(pool)\n return output\n" ]
[ [ "torch.nn.Dropout", "torch.cat", "torch.nn.LSTM", "torch.from_numpy", "torch.nn.Embedding", "torch.nn.Linear" ], [ "torch.nn.Dropout", "torch.nn.LSTM", "torch.from_numpy", "torch.nn.Embedding", "torch.nn.Linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PiCEulHer/interpret
[ "3d69de83bbf6f1bd22e686406b0895689ea2047d" ]
[ "python/interpret-core/interpret/glassbox/ebm/test/test_internal.py" ]
[ "# Copyright (c) 2019 Microsoft Corporation\n# Distributed under the MIT software license\n\nfrom ..internal import Native, Booster\n\nimport numpy as np\nimport ctypes as ct\nfrom contextlib import closing\n\ndef test_booster_internals():\n with Booster(\n model_type=\"classification\",\n n_classes=2,\n features_categorical=np.array([0], dtype=ct.c_int64, order=\"C\"), \n features_bin_count=np.array([2], dtype=ct.c_int64, order=\"C\"),\n feature_groups=[[0]],\n X_train=np.array([[0]], dtype=ct.c_int64, order=\"C\"),\n y_train=np.array([0], dtype=ct.c_int64, order=\"C\"),\n w_train=np.array([1], dtype=np.float64, order=\"C\"),\n scores_train=None,\n X_val=np.array([[0]], dtype=ct.c_int64, order=\"C\"),\n y_val=np.array([0], dtype=ct.c_int64, order=\"C\"),\n w_val=np.array([1], dtype=np.float64, order=\"C\"),\n scores_val=None,\n n_inner_bags=0,\n random_state=42,\n optional_temp_params=None,\n ) as booster:\n gain = booster.generate_model_update(\n feature_group_index=0,\n generate_update_options=Native.GenerateUpdateOptions_Default,\n learning_rate=0.01,\n min_samples_leaf=2,\n max_leaves=np.array([2], dtype=ct.c_int64, order=\"C\"),\n )\n assert gain == 0\n\n splits = booster.get_model_update_splits()\n assert len(splits) == 1\n assert len(splits[0]) == 0\n\n model_update = booster.get_model_update_expanded()\n assert len(model_update.shape) == 1\n assert model_update.shape[0] == 2\n assert model_update[0] < 0\n\n booster.set_model_update_expanded(0, model_update)\n\n metric = booster.apply_model_update()\n assert 0 < metric\n\n model = booster.get_best_model()\n assert len(model) == 1\n assert len(model[0].shape) == 1\n assert model[0].shape[0] == 2\n assert model[0][0] < 0\n\n\ndef test_one_class():\n with Booster(\n model_type=\"classification\",\n n_classes=1,\n features_categorical=np.array([0], dtype=ct.c_int64, order=\"C\"), \n features_bin_count=np.array([2], dtype=ct.c_int64, order=\"C\"),\n feature_groups=[[0]],\n X_train=np.array([[0, 1, 0]], 
dtype=ct.c_int64, order=\"C\"),\n y_train=np.array([0, 0, 0], dtype=ct.c_int64, order=\"C\"),\n w_train=np.array([1, 1, 1], dtype=np.float64, order=\"C\"),\n scores_train=None,\n X_val=np.array([[1, 0, 1]], dtype=ct.c_int64, order=\"C\"),\n y_val=np.array([0, 0, 0], dtype=ct.c_int64, order=\"C\"),\n w_val=np.array([1, 1, 1], dtype=np.float64, order=\"C\"),\n scores_val=None,\n n_inner_bags=0,\n random_state=42,\n optional_temp_params=None,\n ) as booster:\n gain = booster.generate_model_update(\n feature_group_index=0,\n generate_update_options=Native.GenerateUpdateOptions_Default,\n learning_rate=0.01,\n min_samples_leaf=2,\n max_leaves=np.array([2], dtype=ct.c_int64, order=\"C\"),\n )\n assert gain == 0\n\n splits = booster.get_model_update_splits()\n assert len(splits) == 1\n assert len(splits[0]) == 0\n\n model_update = booster.get_model_update_expanded()\n assert model_update is None\n\n booster.set_model_update_expanded(0, model_update)\n\n metric = booster.apply_model_update()\n assert metric == 0\n\n model = booster.get_best_model()\n assert len(model) == 1\n assert model[0] is None\n\ndef test_hist():\n np.random.seed(0)\n X_col = np.random.random_sample((1000,))\n counts, values = np.histogram(X_col, bins=\"doane\")\n\n X_col = np.concatenate(([np.nan], X_col))\n \n native = Native.get_native_singleton()\n n_cuts = native.get_histogram_cut_count(X_col)\n\n cuts = native.cut_uniform(X_col, n_cuts)\n discretized = native.discretize(X_col, cuts)\n bin_counts = np.bincount(discretized, minlength=len(cuts) + 2)\n edges = np.concatenate(([np.nanmin(X_col)], cuts, [np.nanmax(X_col)]))\n\n assert bin_counts[0] == 1\n assert(np.sum(bin_counts) == 1000 + 1)\n bin_counts = bin_counts[1:]\n\n assert np.array_equal(counts, bin_counts)\n assert np.allclose(values, edges)\n\ndef test_cut_winsorized():\n np.random.seed(0)\n X_col = np.arange(-10, 90)\n X_col = np.concatenate(([np.nan], [-np.inf], [-np.inf], X_col, [np.inf], [np.inf], [np.inf]))\n \n native = 
Native.get_native_singleton()\n\n cuts = native.cut_winsorized(X_col, 10)\n discretized = native.discretize(X_col, cuts)\n bin_counts = np.bincount(discretized, minlength=len(cuts) + 2)\n\n assert len(cuts) == 10\n assert(np.sum(bin_counts) == 106)\n assert bin_counts[0] == 1\n\ndef test_suggest_graph_bound():\n native = Native.get_native_singleton()\n cuts=[25, 50, 75]\n (low_graph_bound, high_graph_bound) = native.suggest_graph_bounds(cuts, 24, 76)\n assert low_graph_bound < 25\n assert 75 < high_graph_bound\n\ndef test_suggest_graph_bound_no_min_max():\n native = Native.get_native_singleton()\n cuts=[25, 50, 75]\n (low_graph_bound, high_graph_bound) = native.suggest_graph_bounds(cuts)\n assert low_graph_bound < 25\n assert 75 < high_graph_bound\n\ndef test_suggest_graph_bound_no_cuts():\n native = Native.get_native_singleton()\n cuts=[]\n (low_graph_bound, high_graph_bound) = native.suggest_graph_bounds(cuts, 24, 76)\n assert low_graph_bound <= 24\n assert 76 <= high_graph_bound\n\n\n\n" ]
[ [ "numpy.nanmax", "numpy.allclose", "numpy.array_equal", "numpy.random.seed", "numpy.arange", "numpy.nanmin", "numpy.random.random_sample", "numpy.concatenate", "numpy.array", "numpy.histogram", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GregorySchwing/wolfCalibration
[ "9ff7ca7f0d144da407c14f0f4e9a202c4691de2d" ]
[ "validation/Free_Energy/signac/project.py" ]
[ "\"\"\"GOMC's setup for signac, signac-flow, signac-dashboard for this study.\"\"\"\n# project.py\n\n\nimport flow\n# from flow.environment import StandardEnvironment\nimport mbuild as mb\nimport mbuild.formats.charmm_writer as mf_charmm\nimport mbuild.formats.gomc_conf_writer as gomc_control\nimport numpy as np\n\nfrom alchemlyb.parsing.gomc import extract_dHdl, extract_u_nk\nfrom alchemlyb.estimators import MBAR, BAR, TI\nimport alchemlyb.preprocessing.subsampling as ss\nimport pandas as pd\nimport numpy as np\nimport os\n\nimport unyt as u\nfrom flow import FlowProject, aggregator\nfrom flow.environment import DefaultSlurmEnvironment\n\nfrom src.utils.forcefields import get_ff_path\nfrom src.utils.forcefields import get_molecule_path\nfrom templates.NAMD_conf_template import generate_namd_equilb_control_file\n\n\nclass Project(FlowProject):\n \"\"\"Subclass of FlowProject to provide custom methods and attributes.\"\"\"\n\n def __init__(self):\n super().__init__()\n\n\nclass Grid(DefaultSlurmEnvironment): # Grid(StandardEnvironment):\n \"\"\"Subclass of DefaultSlurmEnvironment for WSU's Grid cluster.\"\"\"\n\n hostname_pattern = r\".*\\.grid\\.wayne\\.edu\"\n template = \"grid.sh\"\n\n\n\n# ******************************************************\n# users typical variables, but not all (start)\n# ******************************************************\n# set binary path to gomc binary files (the bin folder).\n# If the gomc binary files are callable directly from the terminal without a path,\n# please just enter and empty string (i.e., \"\" or '')\n\n# WSU grid binary paths\ngomc_binary_path = \"/wsu/home/go/go24/go2432/wolf/GOMC/bin\"\nnamd_binary_path = \"/wsu/home/go/go24/go2432/NAMD_2.14_Linux-x86_64-multicore-CUDA\"\n\n# brads workstation binary paths\n#gomc_binary_path = \"/home/brad/Programs/GOMC/GOMC_dev_1_21_22/bin\"\n#namd_binary_path = \"/home/brad/Programs/NAMD/NAMD_2.14_RTX_3080_build_Source_CUDA\"\n\n# number of simulation 
steps\ngomc_steps_equilb_design_ensemble = 10 * 10**6 # set value for paper = 10 * 10**6\ngomc_steps_lamda_production = 50 * 10**6 # set value for paper = 50 * 10**6\n\ngomc_output_data_every_X_steps = 100 * 10**3 # set value for paper = 100 * 10**3\ngomc_free_energy_output_data_every_X_steps = 10 * 10**3 # set value for paper = 10 * 10**3\n\n\n# Free energy calcs: set free energy data in doc\n# this number will generate the lamdas\n# set the number of lambda spacings, which includes 0 to 1\nnumber_of_lambda_spacing_including_zero_int = 11\n\n\n# force field (FF) file for all simulations in that job\n# Note: do not add extensions\nnamd_ff_filename_str = \"in_namd_FF\"\ngomc_ff_filename_str = \"in_gomc_FF\"\n\n# initial mosdef structure and coordinates\n# Note: do not add extensions\nmosdef_structure_box_0_name_str = \"mosdef_box_0\"\n\n# melt equilb simulation runs GOMC control file input and simulation outputs\n# Note: do not add extensions\nnamd_equilb_NPT_control_file_name_str = \"namd_equilb_NPT\"\n\n# The equilb using the ensemble used for the simulation design, which\n# includes the simulation runs GOMC control file input and simulation outputs\n# Note: do not add extensions\ngomc_equilb_design_ensemble_control_file_name_str = \"gomc_equilb_design_ensemble\"\n\n# The production run using the ensemble used for the simulation design, which\n# includes the simulation runs GOMC control file input and simulation outputs\n# Note: do not add extensions\ngomc_production_control_file_name_str = \"gomc_production_run\"\n\n# Analysis (each replicates averages):\n# Output text (txt) file names for each replicates averages\n# directly put in each replicate folder (.txt, .dat, etc)\noutput_replicate_txt_file_name_box_0 = \"analysis_avg_data_box_0.txt\"\n\n# Analysis (averages and std. devs. of # all the replcates):\n# Output text (txt) file names for the averages and std. devs. 
of all the replcates,\n# including the extention (.txt, .dat, etc)\noutput_avg_std_of_replicates_txt_file_name_box_0 = \"analysis_avg_std_of_replicates_box_0.txt\"\n\n\n\nwalltime_mosdef_hr = 24\nwalltime_namd_hr = 24\nwalltime_gomc_equilbrium_hr = 72\nwalltime_gomc_production_hr = 368\nwalltime_gomc_analysis_hr = 4\nmemory_needed = 16\n\n\n\n# forcefield names dict\nforcefield_residue_to_ff_filename_dict = {\n \"TIP4\": \"tip4p_2005.xml\",\n \"Ne\": \"nobel_gas_vrabec_LB_mixing.xml\",\n \"Rn\": \"nobel_gas_vrabec_LB_mixing.xml\",\n \"ETOH\": \"trappe-ua.xml\",\n}\n\n\n# smiles of mol2 file input a .mol2 file or smiles as a string\nsmiles_or_mol2_name_to_value_dict = {\n \"TIP4\": 'tip4p.mol2',\n \"Ne\": \"Ne\",\n \"Rn\": \"Rn\",\n \"ETOH\": \"ethanol.mol2\"\n}\n\n\n# get the paths to the smiles or mol2 files\nsmiles_or_mol2 = {}\nfor smiles_or_mol2_iter_i in list(smiles_or_mol2_name_to_value_dict.keys()):\n smiles_or_mol2.update(\n {str(smiles_or_mol2_iter_i):\n {\"use_smiles\": get_molecule_path(\n str(smiles_or_mol2_name_to_value_dict[str(smiles_or_mol2_iter_i)]))[0],\n \"smiles_or_mol2\": get_molecule_path(\n str(smiles_or_mol2_name_to_value_dict[str(smiles_or_mol2_iter_i)]))[1],\n }\n }\n )\n\n# get the paths to the FF xmls\nforcefield_dict = {}\nfor forcefield_dict_iter_i in list(forcefield_residue_to_ff_filename_dict.keys()):\n forcefield_dict.update(\n {str(forcefield_dict_iter_i): get_ff_path(\n forcefield_residue_to_ff_filename_dict[str(forcefield_dict_iter_i)])\n }\n )\nprint(\"*********************\")\nprint(\"*********************\")\nprint(\"smiles_or_mol2 = \" +str(smiles_or_mol2))\nprint(\"forcefield_dict = \" +str(forcefield_dict))\nprint(\"*********************\")\nprint(\"*********************\")\n\n# ******************************************************\n# users typical variables, but not all (end)\n# ******************************************************\n\n\n# ******************************************************\n# signac and GOMC-MOSDEF 
code (start)\n# ******************************************************\n\n# ******************************************************\n# ******************************************************\n# create some initial variable to be store in each jobs\n# directory in an additional json file, and test\n# to see if they are written (start).\n# ******************************************************\n# ******************************************************\n\n# set the default directory\nproject_directory_path = str(os.getcwd())\nprint(\"project_directory_path = \" +str(project_directory_path))\n\n\n# ******************************************************\n# ******************************************************\n# functions for selecting/grouping/aggregating in different ways (start)\n# ******************************************************\n# ******************************************************\n\ndef statepoint_without_replica(job):\n keys = sorted(tuple(i for i in job.sp.keys() if i not in {\"replica_number_int\"}))\n return [(key, job.sp[key]) for key in keys]\n\ndef statepoint_without_temperature(job):\n keys = sorted(tuple(i for i in job.sp.keys() if i not in {\"production_temperature_K\"}))\n return [(key, job.sp[key]) for key in keys]\n\n# ******************************************************\n# ******************************************************\n# functions for selecting/grouping/aggregating in different ways (end)\n# ******************************************************\n# ******************************************************\n\n\n# ******************************************************\n# ******************************************************\n# functions for free energy calcs MBAR, TI, and BAR for getting delta free energy and delta error (start)\n# ******************************************************\n# ******************************************************\n\ndef get_delta_TI_or_MBAR(TI_or_MBAR_estimate, k_b_T):\n \"\"\" Return the change in free 
energy and standard deviation for the MBAR and TI estimates.\n\n \"\"\"\n delta = TI_or_MBAR_estimate.delta_f_.iloc[0, -1] * k_b_T\n std_delta = TI_or_MBAR_estimate.d_delta_f_.iloc[0, -1] * k_b_T\n return delta, std_delta\n\n\ndef get_delta_BAR(BAR_estimate, k_b_T):\n \"\"\" Return the change in free energy and standard deviation for the BAR estimates.\n\n \"\"\"\n error_estimate = 0.0\n\n for i in range(len(BAR_estimate.d_delta_f_) - 1):\n error_estimate += BAR_estimate.d_delta_f_.values[i][i + 1] ** 2\n\n delta = BAR_estimate.delta_f_.iloc[0, -1] * k_b_T\n std_delta = k_b_T * error_estimate ** 0.5\n return delta, std_delta\n\n# ******************************************************\n# ******************************************************\n# functions for free energy calcs MBAR, TI, and BAR for getting delta free energy and delta error (end)\n# ******************************************************\n# ******************************************************\n\[email protected]\ndef part_1a_initial_data_input_to_json(job):\n \"\"\"Check that the initial job data is written to the json files.\"\"\"\n data_written_bool = False\n if job.isfile(f\"{'signac_job_document.json'}\"):\n data_written_bool = True\n\n return data_written_bool\n\n\[email protected](part_1a_initial_data_input_to_json)\[email protected]_directives(\n {\n \"np\": 1,\n \"ngpu\": 0,\n \"memory\": memory_needed,\n \"walltime\": walltime_mosdef_hr,\n }\n)\[email protected]_job\ndef initial_parameters(job):\n \"\"\"Set the initial job parameters into the jobs doc json file.\"\"\"\n # select\n\n # set free energy data in doc\n # Free energy calcs\n # lamda generator\n\n LambdaVDW_list = []\n InitialState_list = []\n for lamda_i in range(0, int(number_of_lambda_spacing_including_zero_int)):\n lambda_space_increments = 1 / int(number_of_lambda_spacing_including_zero_int - 1)\n LambdaVDW_list.append(np.round(lamda_i * lambda_space_increments, decimals=8))\n InitialState_list.append(lamda_i)\n 
print(\"*********************\")\n print(\"*********************\")\n print(\"LambdaVDW_list = \" + str(LambdaVDW_list))\n print(\"InitialState_list = \" + str(InitialState_list))\n print(\"*********************\")\n print(\"*********************\")\n if LambdaVDW_list[0] != 0 and LambdaVDW_list[-1] != 1 :\n raise ValueError(\"ERROR: The selected lambda list values do not start with a 0 and end 1.\")\n\n job.doc.LambdaVDW_list = LambdaVDW_list\n job.doc.InitialState_list = InitialState_list\n\n # set the GOMC production ensemble temp, pressure, molecule, box dimenstion and residue names\n job.doc.production_ensemble = \"NVT\"\n job.doc.production_pressure_bar = (1 * u.atm).to('bar')\n job.doc.production_temperature_K = job.sp.production_temperature_K\n\n job.doc.N_liquid_solvent = 1000\n job.doc.N_liquid_solute = 1\n\n job.doc.liq_box_lengths_ang = 31.07 * u.angstrom\n\n job.doc.Rcut_ang = 15 * u.angstrom # this is the Rcut for GOMC it is the Rswitch for NAMD\n job.doc.Rcut_for_switch_namd_ang = 17 * u.angstrom # Switch Rcut for NAMD's Switch function\n job.doc.neighbor_list_dist_namd_ang = 22 * u.angstrom # NAMD's neighbor list\n\n # list replica seed numbers\n replica_no_to_seed_dict = {\n 0: 0,\n 1: 1,\n 2: 2,\n 3: 3,\n 4: 4,\n 5: 5,\n 6: 6,\n 7: 7,\n 8: 8,\n 9: 9,\n 10: 10,\n 11: 11,\n 12: 12,\n 13: 13,\n 14: 14,\n 15: 15,\n 16: 16,\n 17: 17,\n 18: 18,\n 19: 19,\n 20: 20,\n }\n\n job.doc.replica_number_int = replica_no_to_seed_dict.get(\n int(job.sp.replica_number_int)\n )\n\n # set solvent and solute in doc\n job.doc.solvent = \"TIP4\"\n job.doc.solute = job.sp.solute\n\n # set rcut, ewalds\n if job.doc.solvent in [\"TIP4\", \"TIP3\"] and job.doc.solute in [\"He\", \"Ne\", \"Kr\", \"Ar\", \"Xe\", \"Rn\", \"ETOH\"]:\n job.doc.namd_node_ncpu = 1\n job.doc.namd_node_ngpu = 1\n\n job.doc.gomc_ncpu = 1 # 1 is optimal but I want data quick. 
run time is set for 1 cpu\n job.doc.gomc_ngpu = 1\n\n else:\n raise ValueError(\n \"ERROR: The solvent and solute do are not set up to selected the mixing rules or electrostatics \"\n )\n\n # get the namd binary paths\n if job.doc.namd_node_ngpu == 0:\n job.doc.namd_cpu_or_gpu = \"CPU\"\n\n elif job.doc.namd_node_ngpu == 1:\n job.doc.namd_cpu_or_gpu = \"GPU\"\n\n else:\n raise ValueError(\n \"Tee NAMD CPU and GPU can not be determined as force field (FF) is not available in the selection, \"\n \"or GPU selection is is not 0 or 1.\"\n )\n\n # get the gomc binary paths\n if job.doc.gomc_ngpu == 0:\n job.doc.gomc_cpu_or_gpu = \"CPU\"\n\n elif job.doc.gomc_ngpu == 1:\n job.doc.gomc_cpu_or_gpu = \"GPU\"\n\n else:\n raise ValueError(\n \"The GOMC CPU and GPU can not be determined as force field (FF) is not available in the selection, \"\n \"or GPU selection is is not 0 or 1.\"\n )\n\n # set the initial iteration number of the simulation\n job.doc.gomc_equilb_design_ensemble_dict = {}\n job.doc.gomc_production_run_ensemble_dict = {}\n\n\n if job.doc.production_ensemble == \"NPT\":\n job.doc.namd_equilb_NPT_gomc_binary_file = f\"namd2\"\n job.doc.gomc_equilb_design_ensemble_gomc_binary_file = f\"GOMC_{job.doc.gomc_cpu_or_gpu}_NPT\"\n job.doc.gomc_production_ensemble_gomc_binary_file = f\"GOMC_{job.doc.gomc_cpu_or_gpu}_NPT\"\n\n elif job.doc.production_ensemble == \"NVT\":\n job.doc.namd_equilb_NPT_gomc_binary_file = f\"namd2\"\n job.doc.gomc_equilb_design_ensemble_gomc_binary_file = f\"GOMC_{job.doc.gomc_cpu_or_gpu}_NPT\"\n job.doc.gomc_production_ensemble_gomc_binary_file = f\"GOMC_{job.doc.gomc_cpu_or_gpu}_NVT\"\n\n else:\n raise ValueError(\n \"ERROR: The 'GCMC', 'GEMC_NVT', 'GEMC_NPT' ensembles is not currently available for this project.py \"\n )\n\n\n# ******************************************************\n# ******************************************************\n# create some initial variable to be store in each jobs\n# directory in an additional json file, and 
test\n# to see if they are written (end).\n# ******************************************************\n# ******************************************************\n\n# ******************************************************\n# ******************************************************\n# check if GOMC psf, pdb, and force field (FF) files were written (start)\n# ******************************************************\n# ******************************************************\n\n# check if GOMC-MOSDEF wrote the gomc files\n# @Project.pre(select_production_ensemble)\[email protected]\[email protected]_job\ndef mosdef_input_written(job):\n \"\"\"Check that the mosdef files (psf, pdb, and force field (FF) files) are written .\"\"\"\n file_written_bool = False\n\n if (\n job.isfile(f\"{namd_ff_filename_str}.inp\")\n and job.isfile(f\"{gomc_ff_filename_str}.inp\")\n and job.isfile(\n f\"{mosdef_structure_box_0_name_str}.psf\"\n )\n and job.isfile(\n f\"{mosdef_structure_box_0_name_str}.pdb\"\n )\n ):\n file_written_bool = True\n\n return file_written_bool\n\n\n# ******************************************************\n# ******************************************************\n# check if GOMC psf, pdb, and FF files were written (end)\n# ******************************************************\n# ******************************************************\n\n# ******************************************************\n# ******************************************************\n# check if GOMC control file was written (start)\n# ******************************************************\n# ******************************************************\n# function for checking if the GOMC control file is written\ndef gomc_control_file_written(job, control_filename_str):\n \"\"\"General check that the gomc control files are written.\"\"\"\n file_written_bool = False\n control_file = f\"{control_filename_str}.conf\"\n\n if job.isfile(control_file):\n with open(job.fn(f\"{control_file}\"), \"r\") as fp:\n out_gomc = 
fp.readlines()\n for i, line in enumerate(out_gomc):\n if \"OutputName\" in line:\n split_move_line = line.split()\n if split_move_line[0] == \"OutputName\":\n file_written_bool = True\n\n return file_written_bool\n\n# function for checking if the NAMD control file is written\ndef namd_control_file_written(job, control_filename_str):\n \"\"\"General check that the NAMD control files are written.\"\"\"\n file_written_bool = False\n control_file = f\"{control_filename_str}.conf\"\n if job.isfile(control_file):\n with open(job.fn(f\"{control_file}\"), \"r\") as fp:\n out_namd = fp.readlines()\n for i, line in enumerate(out_namd):\n if \"cellBasisVector1\" in line:\n split_move_line = line.split()\n if split_move_line[0] == \"cellBasisVector1\":\n file_written_bool = True\n\n return file_written_bool\n\n\n# checking if the NAMD control file is written for the melt equilb NVT run\[email protected]\[email protected]_job\ndef part_2a_namd_equilb_NPT_control_file_written(job):\n \"\"\"General check that the namd_equilb_NPT_control_file\n (high temperature to set temp NAMD control file) is written.\"\"\"\n return namd_control_file_written(job, namd_equilb_NPT_control_file_name_str)\n\n# checking if the GOMC control file is written for the equilb run with the selected ensemble\[email protected]\[email protected]_job\ndef part_2b_gomc_equilb_design_ensemble_control_file_written(job):\n \"\"\"General check that the gomc_equilb_design_ensemble (run temperature) gomc control file is written.\"\"\"\n try:\n for initial_state_i in list(job.doc.InitialState_list):\n try:\n gomc_control_file_written(\n job,\n job.doc.gomc_equilb_design_ensemble_dict[\n str(initial_state_i)\n ][\"output_name_control_file_name\"],\n )\n except:\n return False\n return True\n except:\n return False\n\n# checking if the GOMC control file is written for the production run\[email protected]\[email protected]_job\ndef part_2c_gomc_production_control_file_written(job):\n \"\"\"General check that the 
gomc_production_control_file (run temperature) is written.\"\"\"\n try:\n for initial_state_i in list(job.doc.InitialState_list):\n try:\n return gomc_control_file_written(\n job,\n job.doc.gomc_production_run_ensemble_dict[\n str(initial_state_i)\n ][\"output_name_control_file_name\"],\n )\n except:\n return False\n return True\n except:\n return False\n\n# ******************************************************\n# ******************************************************\n# check if GOMC control file was written (end)\n# ******************************************************\n# ******************************************************\n\n# ******************************************************\n# ******************************************************\n# check if GOMC simulations started (start)\n# ******************************************************\n# ******************************************************\n# function for checking if GOMC simulations are started\ndef gomc_simulation_started(job, control_filename_str):\n \"\"\"General check to see if the gomc simulation is started.\"\"\"\n output_started_bool = False\n if job.isfile(\"out_{}.dat\".format(control_filename_str)) and job.isfile(\n \"{}_merged.psf\".format(control_filename_str)\n ):\n output_started_bool = True\n\n return output_started_bool\n\n# function for checking if NAMD simulations are started\ndef namd_simulation_started(job, control_filename_str):\n \"\"\"General check to see if the namd simulation is started.\"\"\"\n output_started_bool = False\n if job.isfile(\"out_{}.dat\".format(control_filename_str)) and job.isfile(\n \"{}.restart.xsc\".format(control_filename_str)\n ):\n output_started_bool = True\n\n return output_started_bool\n\n\n# check if melt equilb_NVT namd run is started\[email protected]\[email protected]_job\ndef part_3a_output_namd_equilb_NPT_started(job):\n \"\"\"Check to see if the namd_equilb_NPT_control_file is started\n (high temperature to set temperature in NAMD control 
file).\"\"\"\n return namd_simulation_started(job, namd_equilb_NPT_control_file_name_str)\n\n\n# check if equilb_with design ensemble GOMC run is started\[email protected]\[email protected]_job\ndef part_3b_output_gomc_equilb_design_ensemble_started(job):\n \"\"\"Check to see if the gomc_equilb_design_ensemble simulation is started (set temperature).\"\"\"\n try:\n for initial_state_i in list(job.doc.InitialState_list):\n try:\n if job.isfile(\n \"out_{}.dat\".format(\n job.doc.gomc_equilb_design_ensemble_dict[\n str(initial_state_i)\n ][\"output_name_control_file_name\"]\n )\n ):\n gomc_simulation_started(\n job,\n job.doc.gomc_equilb_design_ensemble_dict[\n str(initial_state_i)\n ][\"output_name_control_file_name\"],\n )\n\n else:\n return False\n except:\n return False\n\n return True\n except:\n return False\n\n# check if production GOMC run is started by seeing if the GOMC consol file and the merged psf exist\[email protected]\[email protected]_job\ndef part_part_3c_output_gomc_production_run_started(job):\n \"\"\"Check to see if the gomc production run simulation is started (set temperature).\"\"\"\n try:\n for initial_state_i in list(job.doc.InitialState_list):\n try:\n if job.isfile(\n \"out_{}.dat\".format(\n job.doc.gomc_production_run_ensemble_dict[\n str(initial_state_i)\n ][\"output_name_control_file_name\"]\n )\n ):\n gomc_simulation_started(\n job,\n job.doc.gomc_production_run_ensemble_dict[\n str(initial_state_i)\n ][\"output_name_control_file_name\"],\n )\n else:\n return False\n except:\n return False\n return True\n except:\n return False\n# ******************************************************\n# ******************************************************\n# check if GOMC simulations started (end)\n# ******************************************************\n# ******************************************************\n\n# ******************************************************\n# ******************************************************\n# check if GOMC and 
NAMD simulation are completed properly (start)\n# ******************************************************\n# ******************************************************\n# function for checking if GOMC simulations are completed properly\ndef gomc_sim_completed_properly(job, control_filename_str):\n \"\"\"General check to see if the gomc simulation was completed properly.\"\"\"\n job_run_properly_bool = False\n output_log_file = \"out_{}.dat\".format(control_filename_str)\n if job.isfile(output_log_file):\n with open(job.fn(f\"{output_log_file}\"), \"r\") as fp:\n out_gomc = fp.readlines()\n for i, line in enumerate(out_gomc):\n if \"Move\" in line:\n split_move_line = line.split()\n if (\n split_move_line[0] == \"Move\"\n and split_move_line[1] == \"Type\"\n and split_move_line[2] == \"Mol.\"\n and split_move_line[3] == \"Kind\"\n ):\n job_run_properly_bool = True\n else:\n job_run_properly_bool = False\n\n return job_run_properly_bool\n\n# function for checking if NAMD simulations are completed properly\ndef namd_sim_completed_properly(job, control_filename_str):\n \"\"\"General check to see if the namd simulation was completed properly.\"\"\"\n job_run_properly_bool = False\n output_log_file = \"out_{}.dat\".format(control_filename_str)\n if job.isfile(output_log_file):\n with open(job.fn(f\"{output_log_file}\"), \"r\") as fp:\n out_namd = fp.readlines()\n for i, line in enumerate(out_namd):\n if \"WallClock:\" in line:\n split_move_line = line.split()\n if (split_move_line[0] == \"WallClock:\"\n and split_move_line[2] == \"CPUTime:\"\n and split_move_line[4] == \"Memory:\"\n ):\n job_run_properly_bool = True\n else:\n job_run_properly_bool = False\n\n return job_run_properly_bool\n\n# check if melt equilb NVT GOMC run completed by checking the end of the GOMC consol file\[email protected]\[email protected]_job\ndef part_4a_job_namd_equilb_NPT_completed_properly(job):\n \"\"\"Check to see if the namd_equilb_NPT_control_file was completed properly\n (high temperature to 
set temperature NAMD control file).\"\"\"\n x = namd_sim_completed_properly(\n job, namd_equilb_NPT_control_file_name_str\n )\n #print(f'namd check = {x}')\n return namd_sim_completed_properly(\n job, namd_equilb_NPT_control_file_name_str\n )\n\n\n# check if equilb selected ensemble GOMC run completed by checking the end of the GOMC consol file\[email protected]\[email protected]_job\ndef part_4b_job_gomc_equilb_design_ensemble_completed_properly(job):\n \"\"\"Check to see if the gomc_equilb_design_ensemble simulation was completed properly (set temperature).\"\"\"\n try:\n for initial_state_i in list(job.doc.InitialState_list):\n try:\n filename_4b_iter = job.doc.gomc_equilb_design_ensemble_dict[\n str(initial_state_i)\n ][\"output_name_control_file_name\"]\n\n if gomc_sim_completed_properly(\n job,\n filename_4b_iter,\n ) is False:\n print(\"gomc_equilb_design_ensemble incomplete state \" + str(initial_state_i))\n return False\n except:\n return False\n return True\n except:\n return False\n\n# check if production GOMC run completed by checking the end of the GOMC consol file\[email protected]\[email protected]_job\ndef part_4c_job_production_run_completed_properly(job):\n \"\"\"Check to see if the gomc production run simulation was completed properly (set temperature).\"\"\"\n try:\n for initial_state_i in list(job.doc.InitialState_list):\n try:\n filename_4c_iter = job.doc.gomc_production_run_ensemble_dict[\n str(initial_state_i)\n ][\"output_name_control_file_name\"]\n if gomc_sim_completed_properly(\n job,\n filename_4c_iter,\n ) is False:\n print(\"Isn't finished \",filename_4c_iter)\n return False\n\n # check specifically for the FE files\n if job.isfile(f'Free_Energy_BOX_0_{filename_4c_iter}.dat') is False:\n print(\"Isn't finished \",f'Free_Energy_BOX_0_{filename_4c_iter}.dat')\n return False\n\n except:\n return False\n return True\n except:\n return False\n\n# ******************************************************\n# 
******************************************************\n# check if GOMC and NAMD simulation are completed properly (end)\n# ******************************************************\n# ******************************************************\n\n# ******************************************************\n# ******************************************************\n# check if GOMC anaylsis is completed properly (start)\n# ******************************************************\n# ******************************************************\n\n# check if analysis is done for the individual replicates wrote the gomc files\[email protected](part_4c_job_production_run_completed_properly)\[email protected]\[email protected]_job\ndef part_5a_analysis_individual_simulation_averages_completed(job):\n \"\"\"Check that the individual simulation averages files are written .\"\"\"\n file_written_bool = False\n if (\n job.isfile(\n f\"{output_replicate_txt_file_name_box_0}\"\n )\n ):\n file_written_bool = True\n\n return file_written_bool\n\n\n# check if analysis for averages of all the replicates is completed\[email protected](part_5a_analysis_individual_simulation_averages_completed)\[email protected]\ndef part_5b_analysis_replica_averages_completed(*jobs):\n \"\"\"Check that the simulation replicate average and std. dev. 
files are written.\"\"\"\n file_written_bool_list = []\n all_file_written_bool_pass = False\n for job in jobs:\n file_written_bool = False\n\n if (\n job.isfile(\n f\"../../analysis/{output_avg_std_of_replicates_txt_file_name_box_0}\"\n )\n ):\n file_written_bool = True\n\n file_written_bool_list.append(file_written_bool)\n\n if False not in file_written_bool_list:\n all_file_written_bool_pass = True\n\n return all_file_written_bool_pass\n\n\n# ******************************************************\n# ******************************************************\n# check if GOMC anaylsis is completed properly (end)\n# ******************************************************\n# ******************************************************\n\n# ******************************************************\n# ******************************************************\n# build system, with option to write the force field (force field (FF)), pdb, psf files.\n# Note: this is needed to write GOMC control file, even if a restart (start)\n# ******************************************************\n# build system\ndef build_charmm(job, write_files=True):\n \"\"\"Build the Charmm object and potentially write the pdb, psd, and force field (FF) files.\"\"\"\n print(\"#**********************\")\n print(\"Started: GOMC Charmm Object\")\n print(\"#**********************\")\n mbuild_box_seed_no = job.doc.replica_number_int\n\n solvent = mb.load(smiles_or_mol2[job.doc.solvent]['smiles_or_mol2'],\n smiles=smiles_or_mol2[job.doc.solvent]['use_smiles']\n )\n solvent.name = job.doc.solvent\n\n if job.doc.solvent not in [\"TIP4\"]:\n solvent.energy_minimize(forcefield=forcefield_dict[job.doc.solvent], steps=10 ** 5)\n\n if job.sp.solute in [\"He\", \"Ne\", \"Kr\", \"Ar\", \"Xe\", \"Rn\"]:\n solute = mb.Compound(name=job.doc.solute)\n else:\n solute = mb.load(smiles_or_mol2[job.sp.solute]['smiles_or_mol2'],\n smiles=smiles_or_mol2[job.sp.solute]['use_smiles']\n )\n solute.name = job.sp.solute\n\n # only put the FF 
molecules in the simulation in the dictionaly input into the Chamm object.\n minimal_forcefield_dict = {solute.name: forcefield_dict[solute.name],\n solvent.name: forcefield_dict[solvent.name]\n }\n\n solute.energy_minimize(forcefield=forcefield_dict[job.sp.solute], steps=10 ** 5)\n\n bead_to_atom_name_dict = {\n \"_LP\": \"LP\",\n }\n residues_list = [solute.name, solvent.name]\n print(\"residues_list = \" +str(residues_list ))\n\n if job.doc.solvent in [\"TIP4\", \"TIP3\"]:\n gomc_fix_bonds_angles_residues_list = [solvent.name]\n else:\n gomc_fix_bonds_angles_residues_list = None\n\n print('Running: filling liquid box')\n box_0 = mb.fill_box(compound=[solute, solvent],\n n_compounds=[job.doc.N_liquid_solute, job.doc.N_liquid_solvent],\n box=[u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value(\"nm\"),\n u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value(\"nm\"),\n u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value(\"nm\"),\n ],\n seed=mbuild_box_seed_no\n )\n print('Completed: filling liquid box')\n\n print('Running: GOMC FF file, and the psf and pdb files')\n if job.doc.production_ensemble in [\"NVT\", \"NPT\"]:\n print('Running: namd_charmm')\n namd_charmm = mf_charmm.Charmm(\n box_0,\n mosdef_structure_box_0_name_str,\n structure_box_1=None,\n filename_box_1=None,\n ff_filename= namd_ff_filename_str,\n forcefield_selection=minimal_forcefield_dict,\n residues=residues_list,\n bead_to_atom_name_dict=bead_to_atom_name_dict,\n gomc_fix_bonds_angles=None,\n )\n\n print('Running: gomc_charmm')\n gomc_charmm = mf_charmm.Charmm(\n box_0,\n mosdef_structure_box_0_name_str,\n structure_box_1=None,\n filename_box_1=None,\n ff_filename= gomc_ff_filename_str,\n forcefield_selection=minimal_forcefield_dict,\n residues=residues_list,\n bead_to_atom_name_dict=bead_to_atom_name_dict,\n gomc_fix_bonds_angles=gomc_fix_bonds_angles_residues_list,\n )\n\n else:\n raise ValueError(\"ERROR: The GCMC and GEMC ensembles are not supported in 
this script.\")\n\n if write_files == True:\n gomc_charmm.write_inp()\n\n namd_charmm.write_inp()\n\n namd_charmm.write_psf()\n\n namd_charmm.write_pdb()\n\n print(\"#**********************\")\n print(\"Completed: GOMC Charmm Object\")\n print(\"#**********************\")\n\n return [namd_charmm, gomc_charmm]\n\n\n# ******************************************************\n# ******************************************************\n# build system, with option to write the force field (FF), pdb, psf files.\n# Note: this is needed to write GOMC control file, even if a restart (end)\n# ******************************************************\n\n\n# ******************************************************\n# ******************************************************\n# Creating GOMC files (pdb, psf, force field (FF), and gomc control files (start)\n# ******************************************************\n# ******************************************************\[email protected](part_1a_initial_data_input_to_json)\[email protected](part_2a_namd_equilb_NPT_control_file_written)\[email protected](part_2b_gomc_equilb_design_ensemble_control_file_written)\[email protected](part_2c_gomc_production_control_file_written)\[email protected](mosdef_input_written)\[email protected]_directives(\n {\n \"np\": 1,\n \"ngpu\": 0,\n \"memory\": memory_needed,\n \"walltime\": walltime_mosdef_hr,\n }\n)\[email protected]_job\ndef build_psf_pdb_ff_gomc_conf(job):\n \"\"\"Build the Charmm object and write the pdb, psd, and force field (FF)\n files for all the simulations in the workspace.\"\"\"\n [namd_charmm_object_with_files, gomc_charmm_object_with_files] = build_charmm(job, write_files=True)\n\n FreeEnergyCalc = [True, int(gomc_free_energy_output_data_every_X_steps)]\n MoleculeType = [job.sp.solute, 1]\n\n use_ElectroStatics = True\n VDWGeometricSigma = False\n Exclude = \"1-4\"\n\n # common variables\n cutoff_style = \"VDW\"\n if cutoff_style != \"VDW\":\n raise ValueError(\"ERROR: this project 
is only set up for the SWITCH cutoff style for NAMD\"\n \"and VDW for GOMC. Therefore, the cutoff style selected must be VDW. \"\n \"Rswitch for namd only so the r_switch_dist_start and \"\n \"r_switch_dist_end must be supplied for NAMD. GOMC will then move to VDW \"\n \"with the switch dist (r_switch_dist_start) as the cutoff with LRC.\")\n\n production_temperature_K = (job.sp.production_temperature_K * u.K).to_value(\"K\")\n\n production_pressure_bar = (job.doc.production_pressure_bar * u.bar).to_value(\"bar\")\n\n box_lengths_ang = [u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value(\"angstrom\"),\n u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value(\"angstrom\"),\n u.unyt_quantity(job.doc.liq_box_lengths_ang, 'angstrom').to_value(\"angstrom\"),\n ]\n\n seed_no = job.doc.replica_number_int\n\n namd_template_path_str = os.path.join(project_directory_path, \"templates/NAMD_conf_template.conf\")\n\n if job.doc.solvent in [\"TIP3\"] or job.sp.solute in [\"TIP3\"]:\n namd_uses_water = True\n namd_water_model = 'tip3'\n elif job.doc.solvent in [\"TIP4\"] or job.sp.solute in [\"TIP4\"]:\n namd_uses_water = True\n namd_water_model = 'tip4'\n else:\n namd_uses_water = False\n namd_water_model= None\n\n # generate the namd file\n # NOTE: the production and melt temps are converted to intergers so they can be ramped down\n # from hot to cool to equilibrate the system.\n generate_namd_equilb_control_file(template_path_filename=namd_template_path_str,\n namd_path_conf_filename=namd_equilb_NPT_control_file_name_str,\n namd_path_file_output_names=namd_equilb_NPT_control_file_name_str,\n namd_uses_water=namd_uses_water,\n namd_water_model=namd_water_model,\n namd_electrostatics_bool=use_ElectroStatics,\n namd_vdw_geometric_sigma_bool=VDWGeometricSigma,\n namd_psf_path_filename=f\"{mosdef_structure_box_0_name_str}.psf\",\n namd_pdb_path_filename=f\"{mosdef_structure_box_0_name_str}.pdb\",\n namd_ff_path_filename=f\"{namd_ff_filename_str}.inp\",\n 
namd_production_temp_K= int(production_temperature_K),\n namd_production_pressure_bar=production_pressure_bar,\n electrostatic_1_4=namd_charmm_object_with_files.coul_1_4,\n non_bonded_cutoff=job.doc.Rcut_for_switch_namd_ang,\n non_bonded_switch_distance=job.doc.Rcut_ang,\n pairlist_distance=job.doc.neighbor_list_dist_namd_ang,\n box_lengths=box_lengths_ang,\n )\n\n print(\"#**********************\")\n print(\"Completed: namd_equilb_NPT GOMC control file writing\")\n print(\"#**********************\")\n # ******************************************************\n # namd_equilb_NPT - psf, pdb, force field (FF) file writing and GOMC control file writing (end)\n # ******************************************************\n\n\n # ******************************************************\n # equilb selected_ensemble, if NVT -> NPT - GOMC control file writing (start)\n # Note: the control files are written for the max number of gomc_equilb_design_ensemble runs\n # so the Charmm object only needs created 1 time.\n # ******************************************************\n print(\"#**********************\")\n print(\"Started: equilb NPT or GEMC-NVT GOMC control file writing\")\n print(\"#**********************\")\n\n for initial_state_sims_i in list(job.doc.InitialState_list):\n namd_restart_pdb_psf_file_name_str = mosdef_structure_box_0_name_str\n\n restart_control_file_name_str = namd_equilb_NPT_control_file_name_str\n output_name_control_file_name = \"{}_initial_state_{}\".format(\n gomc_equilb_design_ensemble_control_file_name_str, initial_state_sims_i\n )\n\n job.doc.gomc_equilb_design_ensemble_dict.update(\n {\n initial_state_sims_i: {\n \"restart_control_file_name\": restart_control_file_name_str,\n \"output_name_control_file_name\": output_name_control_file_name,\n }\n }\n )\n\n # calc MC steps\n MC_steps = int(gomc_steps_equilb_design_ensemble)\n EqSteps = 1000\n\n # output all data and calc frequecy\n output_true_list_input = [\n True,\n 
int(gomc_output_data_every_X_steps),\n ]\n output_false_list_input = [\n False,\n int(gomc_output_data_every_X_steps),\n ]\n\n if job.doc.solvent in [\"TIP4\", \"TIP3\"] \\\n and job.doc.solute in [\"He\", \"Ne\", \"Kr\", \"Ar\", \"Xe\", \"Rn\", \"ETOH\"]:\n used_ensemble = \"NPT\"\n if job.doc.production_ensemble in [\"NVT\", \"NPT\"]:\n VolFreq = (0.01,)\n MultiParticleFreq = (None,)\n IntraSwapFreq = (0.0,)\n CrankShaftFreq = (None,)\n SwapFreq = (None,)\n DisFreq = (0.39,)\n RotFreq = (0.3,)\n RegrowthFreq = (0.3,)\n\n else:\n raise ValueError(\n \"Moleules MC move ratios not listed for this solvent and solute or ensemble \"\n \"in the GOMC control file writer.\"\n )\n\n Coordinates_box_0 = \"{}.pdb\".format(\n namd_restart_pdb_psf_file_name_str\n )\n Structure_box_0 = \"{}.psf\".format(\n namd_restart_pdb_psf_file_name_str\n )\n binCoordinates_box_0 = \"{}.restart.coor\".format(\n restart_control_file_name_str\n )\n extendedSystem_box_0 = \"{}.restart.xsc\".format(\n restart_control_file_name_str\n )\n\n gomc_control.write_gomc_control_file(\n gomc_charmm_object_with_files,\n output_name_control_file_name,\n used_ensemble,\n MC_steps,\n production_temperature_K,\n ff_psf_pdb_file_directory=None,\n check_input_files_exist=False,\n Parameters=\"{}.inp\".format(gomc_ff_filename_str),\n Restart=True,\n RestartCheckpoint=True,\n ExpertMode=False,\n Coordinates_box_0=Coordinates_box_0,\n Structure_box_0=Structure_box_0,\n binCoordinates_box_0=binCoordinates_box_0,\n extendedSystem_box_0=extendedSystem_box_0,\n binVelocities_box_0=None,\n Coordinates_box_1=None,\n Structure_box_1=None,\n binCoordinates_box_1=None,\n extendedSystem_box_1=None,\n binVelocities_box_1=None,\n input_variables_dict={\n \"PRNG\": seed_no,\n \"Pressure\": production_pressure_bar,\n \"Ewald\": use_ElectroStatics,\n \"ElectroStatic\": use_ElectroStatics,\n \"VDWGeometricSigma\": VDWGeometricSigma,\n \"Rcut\": job.doc.Rcut_ang,\n \"Exclude\": Exclude,\n \"VolFreq\": VolFreq[-1],\n 
\"MultiParticleFreq\": MultiParticleFreq[-1],\n \"IntraSwapFreq\": IntraSwapFreq[-1],\n \"CrankShaftFreq\": CrankShaftFreq[-1],\n \"SwapFreq\": SwapFreq[-1],\n \"DisFreq\": DisFreq[-1],\n \"RotFreq\": RotFreq[-1],\n \"RegrowthFreq\": RegrowthFreq[-1],\n \"OutputName\": output_name_control_file_name,\n \"EqSteps\": EqSteps,\n \"PressureCalc\": output_false_list_input,\n \"RestartFreq\": output_true_list_input,\n \"CheckpointFreq\": output_true_list_input,\n \"ConsoleFreq\": output_true_list_input,\n \"BlockAverageFreq\": output_true_list_input,\n \"HistogramFreq\": output_false_list_input,\n \"CoordinatesFreq\": output_false_list_input,\n \"DCDFreq\": output_true_list_input,\n \"Potential\": cutoff_style,\n \"LRC\": True,\n \"RcutLow\": 0,\n \"CBMC_First\": 12,\n \"CBMC_Nth\": 10,\n \"CBMC_Ang\": 50,\n \"CBMC_Dih\": 50,\n \"FreeEnergyCalc\": FreeEnergyCalc,\n \"MoleculeType\": MoleculeType,\n \"InitialState\": initial_state_sims_i,\n \"LambdaVDW\": list(job.doc.LambdaVDW_list),\n # \"LambdaCoulomb\": None,\n },\n )\n print(\"#**********************\")\n print(\"Completed: equilb NPT or GEMC-NVT GOMC control file writing\")\n print(\"#**********************\")\n\n # ******************************************************\n # equilb selected_ensemble, if NVT -> NPT - GOMC control file writing (end)\n # Note: the control files are written for the max number of gomc_equilb_design_ensemble runs\n # so the Charmm object only needs created 1 time.\n # ******************************************************\n\n # ******************************************************\n # production NPT or GEMC-NVT - GOMC control file writing (start)\n # ******************************************************\n\n print(\"#**********************\")\n print(\"Started: production NPT or GEMC-NVT GOMC control file writing\")\n print(\"#**********************\")\n\n output_name_control_file_name = \"{}_initial_state_{}\".format(\n gomc_production_control_file_name_str, initial_state_sims_i\n )\n 
restart_control_file_name_str = \"{}_initial_state_{}\".format(\n gomc_equilb_design_ensemble_control_file_name_str, int(initial_state_sims_i)\n )\n job.doc.gomc_production_run_ensemble_dict.update(\n {\n initial_state_sims_i: {\n \"restart_control_file_name\": restart_control_file_name_str,\n \"output_name_control_file_name\": output_name_control_file_name,\n }\n }\n )\n\n # calc MC steps\n MC_steps = int(gomc_steps_lamda_production)\n EqSteps = 1000\n\n\n # output all data and calc frequecy\n output_true_list_input = [\n True,\n int(gomc_output_data_every_X_steps),\n ]\n output_false_list_input = [\n False,\n int(gomc_output_data_every_X_steps),\n ]\n \n \n if job.doc.solvent in [\"TIP4\", \"TIP3\"] \\\n and job.doc.solute in [\"He\", \"Ne\", \"Kr\", \"Ar\", \"Xe\", \"Rn\", \"ETOH\"]:\n used_ensemble = job.doc.production_ensemble\n if job.doc.production_ensemble in [\"NVT\", \"NPT\"]:\n if job.doc.production_ensemble in [\"NVT\"]:\n VolFreq = (0.00,)\n MultiParticleFreq = (None,)\n IntraSwapFreq = (0.0,)\n CrankShaftFreq = (None,)\n SwapFreq = (None,)\n DisFreq = (0.4,)\n RotFreq = (0.3,)\n RegrowthFreq = (0.3,)\n\n elif job.doc.production_ensemble in [\"NPT\"]:\n VolFreq = (0.01,)\n MultiParticleFreq = (None,)\n IntraSwapFreq = (0.0,)\n CrankShaftFreq = (None,)\n SwapFreq = (None,)\n DisFreq = (0.39,)\n RotFreq = (0.3,)\n RegrowthFreq = (0.3,)\n\n else:\n raise ValueError(\n \"Moleules MC move ratios not listed for this solvent and solute or ensemble \"\n \"in the GOMC control file writer.\"\n )\n\n Coordinates_box_0 = \"{}_BOX_0_restart.pdb\".format(\n restart_control_file_name_str\n )\n Structure_box_0 = \"{}_BOX_0_restart.psf\".format(\n restart_control_file_name_str\n )\n binCoordinates_box_0 = \"{}_BOX_0_restart.coor\".format(\n restart_control_file_name_str\n )\n extendedSystem_box_0 = \"{}_BOX_0_restart.xsc\".format(\n restart_control_file_name_str\n )\n\n\n gomc_control.write_gomc_control_file(\n gomc_charmm_object_with_files,\n 
output_name_control_file_name,\n used_ensemble,\n MC_steps,\n production_temperature_K,\n ff_psf_pdb_file_directory=None,\n check_input_files_exist=False,\n Parameters=\"{}.inp\".format(gomc_ff_filename_str),\n Restart=True,\n RestartCheckpoint=True,\n ExpertMode=False,\n Coordinates_box_0=Coordinates_box_0,\n Structure_box_0=Structure_box_0,\n binCoordinates_box_0=binCoordinates_box_0,\n extendedSystem_box_0=extendedSystem_box_0,\n binVelocities_box_0=None,\n Coordinates_box_1=None,\n Structure_box_1=None,\n binCoordinates_box_1=None,\n extendedSystem_box_1=None,\n binVelocities_box_1=None,\n input_variables_dict={\n \"PRNG\": seed_no,\n \"Pressure\": production_pressure_bar,\n \"Ewald\": use_ElectroStatics,\n \"ElectroStatic\": use_ElectroStatics,\n \"VDWGeometricSigma\": VDWGeometricSigma,\n \"Rcut\": job.doc.Rcut_ang,\n \"Exclude\": Exclude,\n \"VolFreq\": VolFreq[-1],\n \"MultiParticleFreq\": MultiParticleFreq[-1],\n \"IntraSwapFreq\": IntraSwapFreq[-1],\n \"CrankShaftFreq\": CrankShaftFreq[-1],\n \"SwapFreq\": SwapFreq[-1],\n \"DisFreq\": DisFreq[-1],\n \"RotFreq\": RotFreq[-1],\n \"RegrowthFreq\": RegrowthFreq[-1],\n \"OutputName\": output_name_control_file_name,\n \"EqSteps\": EqSteps,\n \"PressureCalc\": output_false_list_input,\n \"RestartFreq\": output_true_list_input,\n \"CheckpointFreq\": output_true_list_input,\n \"ConsoleFreq\": output_true_list_input,\n \"BlockAverageFreq\": output_true_list_input,\n \"HistogramFreq\": output_false_list_input,\n \"CoordinatesFreq\": output_false_list_input,\n \"DCDFreq\": output_true_list_input,\n \"Potential\": cutoff_style,\n \"LRC\": True,\n \"RcutLow\": 0,\n \"CBMC_First\": 12,\n \"CBMC_Nth\": 10,\n \"CBMC_Ang\": 50,\n \"CBMC_Dih\": 50,\n \"FreeEnergyCalc\": FreeEnergyCalc,\n \"MoleculeType\": MoleculeType,\n \"InitialState\": initial_state_sims_i,\n \"LambdaVDW\": list(job.doc.LambdaVDW_list),\n #\"LambdaCoulomb\": None,\n },\n )\n\n print(\"#**********************\")\n print(\"Completed: production NPT or 
GEMC-NVT GOMC control file writing\")\n print(\"#**********************\")\n # ******************************************************\n # production NPT or GEMC-NVT - GOMC control file writing (end)\n # ******************************************************\n\n\n# ******************************************************\n# ******************************************************\n# Creating GOMC files (pdb, psf, force field (FF), and gomc control files (end)\n# ******************************************************\n# ******************************************************\n\n# ******************************************************\n# ******************************************************\n# namd_equilb_NPT -starting the NAMD simulations (start)\n# ******************************************************\n# ******************************************************\[email protected](mosdef_input_written)\[email protected](part_2a_namd_equilb_NPT_control_file_written)\[email protected](part_3a_output_namd_equilb_NPT_started)\[email protected](part_4a_job_namd_equilb_NPT_completed_properly)\[email protected]_directives(\n {\n \"np\": lambda job: job.doc.namd_node_ncpu,\n \"ngpu\": lambda job: job.doc.namd_node_ngpu,\n \"memory\": memory_needed,\n \"walltime\": walltime_namd_hr,\n }\n)\[email protected]_job\[email protected]\ndef run_namd_equilb_NPT_gomc_command(job):\n \"\"\"Run the namd_equilb_NPT simulation.\"\"\"\n print(\"#**********************\")\n print(\"# Started the run_namd_equilb_NPT_gomc_command.\")\n print(\"#**********************\")\n\n control_file_name_str = namd_equilb_NPT_control_file_name_str\n\n print(f\"Running simulation job id {job}\")\n run_command = \"{}/{} +p{} {}.conf > out_{}.dat\".format(\n str(namd_binary_path),\n str(job.doc.namd_equilb_NPT_gomc_binary_file),\n str(job.doc.namd_node_ncpu),\n str(control_file_name_str),\n str(control_file_name_str),\n )\n\n print('namd run_command = ' + str(run_command))\n\n return run_command\n\n\n# 
******************************************************\n# ******************************************************\n# namd_equilb_NPT -starting the NAMD simulations (end)\n# ******************************************************\n# ******************************************************\n\n\n# ******************************************************\n# ******************************************************\n# equilb NPT - starting the GOMC simulation (start)\n# ******************************************************\n# ******************************************************\n\nfor initial_state_j in range(0, number_of_lambda_spacing_including_zero_int):\n @Project.pre(part_2a_namd_equilb_NPT_control_file_written)\n @Project.pre(part_4a_job_namd_equilb_NPT_completed_properly)\n @Project.post(part_3b_output_gomc_equilb_design_ensemble_started)\n @Project.post(part_4b_job_gomc_equilb_design_ensemble_completed_properly)\n @Project.operation.with_directives(\n {\n \"np\": lambda job: job.doc.gomc_ncpu,\n \"ngpu\": lambda job: job.doc.gomc_ngpu,\n \"memory\": memory_needed,\n \"walltime\": walltime_gomc_equilbrium_hr,\n },\n name = f\"gomc_equilb_design_ensemble_initial_state_{initial_state_j}\"\n )\n @flow.with_job\n @flow.cmd\n def run_equilb_run_gomc_command(job, *, initial_state_j=initial_state_j):\n \"\"\"Run the gomc_equilb_run_ensemble simulation.\"\"\"\n control_file_name_str = job.doc.gomc_equilb_design_ensemble_dict[\n str(initial_state_j)\n ][\"output_name_control_file_name\"]\n\n print(f\"Running simulation job id {job}\")\n run_command = \"{}/{} +p{} {}.conf > out_{}.dat\".format(\n str(gomc_binary_path),\n str(job.doc.gomc_equilb_design_ensemble_gomc_binary_file),\n str(job.doc.gomc_ncpu),\n str(control_file_name_str),\n str(control_file_name_str),\n )\n\n print('gomc equilbrium_run run_command = ' + str(run_command))\n\n return run_command\n# *****************************************\n# ******************************************************\n# equilb NPT - 
starting the GOMC simulation (end)\n# ******************************************************\n# ******************************************************\n\n\n# ******************************************************\n# ******************************************************\n# production run - starting the GOMC simulation (start)\n# ******************************************************\n# ******************************************************\nfor initial_state_i in range(0, number_of_lambda_spacing_including_zero_int):\n @Project.pre(part_2c_gomc_production_control_file_written)\n @Project.pre(part_4b_job_gomc_equilb_design_ensemble_completed_properly)\n @Project.post(part_part_3c_output_gomc_production_run_started)\n @Project.post(part_4c_job_production_run_completed_properly)\n @Project.operation.with_directives(\n {\n \"np\": lambda job: job.doc.gomc_ncpu,\n \"ngpu\": lambda job: job.doc.gomc_ngpu,\n \"memory\": memory_needed,\n \"walltime\": walltime_gomc_production_hr,\n },\n name = f\"gomc_production_ensemble_initial_state_{initial_state_i}\"\n )\n @flow.with_job\n @flow.cmd\n def run_production_run_gomc_command(job, *, initial_state_i=initial_state_i):\n \"\"\"Run the gomc_production_ensemble simulation.\"\"\"\n\n control_file_name_str = job.doc.gomc_production_run_ensemble_dict[\n str(initial_state_i)\n ][\"output_name_control_file_name\"]\n\n print(f\"Running simulation job id {job}\")\n run_command = \"{}/{} +p{} {}.conf > out_{}.dat\".format(\n str(gomc_binary_path),\n str(job.doc.gomc_production_ensemble_gomc_binary_file),\n str(job.doc.gomc_ncpu),\n str(control_file_name_str),\n str(control_file_name_str),\n )\n\n print('gomc production run_command = ' + str(run_command))\n\n return run_command\n\n# ******************************************************\n# ******************************************************\n# production run - starting the GOMC simulation (end)\n# ******************************************************\n# 
******************************************************\n\n# ******************************************************\n# ******************************************************\n# data analysis - get the average data from each individual simulation (start)\n# ******************************************************\n# ******************************************************\n\[email protected]_directives(\n {\n \"np\": 1,\n \"ngpu\": 0,\n \"memory\": memory_needed,\n \"walltime\": walltime_gomc_analysis_hr,\n }\n)\[email protected](\n lambda *jobs: all(\n part_4c_job_production_run_completed_properly(job)\n for job in jobs\n )\n)\[email protected](part_4c_job_production_run_completed_properly)\[email protected](part_5a_analysis_individual_simulation_averages_completed)\[email protected]_job\ndef part_5a_analysis_individual_simulation_averages(*jobs):\n # remove the total averaged replicate data and all analysis data after this,\n # as it is no longer valid when adding more simulations\n if os.path.isfile(f'../../analysis/{output_avg_std_of_replicates_txt_file_name_box_0}'):\n os.remove(f'../../analysis/{output_avg_std_of_replicates_txt_file_name_box_0}')\n\n output_column_temp_title = 'temp_K' # column title title for temp\n output_column_solute_title = 'solute' # column title title for temp\n output_column_dFE_MBAR_title = 'dFE_MBAR_kcal_per_mol' # column title title for delta_MBAR\n output_column_dFE_MBAR_std_title = 'dFE_MBAR_std_kcal_per_mol' # column title title for ds_MBAR\n output_column_dFE_TI_title = 'dFE_TI_kcal_per_mol' # column title title for delta_MBAR\n output_column_dFE_TI_std_title = 'dFE_TI_std_kcal_per_mol' # column title title for ds_MBAR\n output_column_dFE_BAR_title = 'dFE_BAR_kcal_per_mol' # column title title for delta_MBAR\n output_column_dFE_BAR_std_title = 'dFE_BAR_std_kcal_per_mol' # column title title for ds_MBAR\n\n\n # get the averages from each individual simulation and write the csv's.\n for job in jobs:\n files = []\n k_b = 1.9872036E-3 # 
kcal/mol/K\n temperature = job.sp.production_temperature_K\n k_b_T = temperature * k_b\n\n for initial_state_iter in range(0, number_of_lambda_spacing_including_zero_int):\n reading_filename_box_0_iter = f'Free_Energy_BOX_0_{gomc_production_control_file_name_str}_' \\\n f'initial_state_{initial_state_iter}.dat'\n files.append(reading_filename_box_0_iter)\n\n # for TI estimator\n dHdl = pd.concat([extract_dHdl(job.fn(f), T=temperature) for f in files])\n ti = TI().fit(dHdl)\n delta_ti, delta_std_ti = get_delta_TI_or_MBAR(ti, k_b_T)\n\n # for MBAR estimator\n u_nk = pd.concat([extract_u_nk(job.fn(f), T=temperature) for f in files])\n mbar = MBAR().fit(u_nk)\n delta_mbar, delta_std_mbar = get_delta_TI_or_MBAR(mbar, k_b_T)\n\n # for BAR estimator\n bar = BAR().fit(u_nk)\n delta_bar, delta_std_bar = get_delta_BAR(bar, k_b_T)\n\n # write the data out in each job\n box_0_replicate_data_txt_file = open(job.fn(output_replicate_txt_file_name_box_0), \"w\")\n box_0_replicate_data_txt_file.write(\n f\"{output_column_temp_title: <30} \"\n f\"{output_column_solute_title: <30} \"\n f\"{output_column_dFE_MBAR_title: <30} \"\n f\"{output_column_dFE_MBAR_std_title: <30} \"\n f\"{output_column_dFE_TI_title: <30} \"\n f\"{output_column_dFE_TI_std_title: <30} \"\n f\"{output_column_dFE_BAR_title: <30} \"\n f\"{output_column_dFE_BAR_std_title: <30} \"\n f\" \\n\"\n )\n box_0_replicate_data_txt_file.write(\n f\"{job.sp.production_temperature_K: <30} \"\n f\"{job.sp.solute: <30} \"\n f\"{delta_mbar: <30} \"\n f\"{delta_std_mbar: <30} \"\n f\"{delta_ti: <30} \"\n f\"{delta_std_ti: <30} \"\n f\"{delta_bar: <30} \"\n f\"{delta_std_bar: <30} \"\n f\" \\n\"\n )\n\n\n# ******************************************************\n# ******************************************************\n# data analysis - get the average data from each individual simulation (end)\n# ******************************************************\n# ******************************************************\n\n\n# 
******************************************************\n# ******************************************************\n# data analysis - get the average and std. dev. from/across all the replicates (start)\n# ******************************************************\n# ******************************************************\n\[email protected](key=statepoint_without_replica,\n sort_by=\"production_temperature_K\",\n sort_ascending=True\n)\[email protected]_directives(\n {\n \"np\": 1,\n \"ngpu\": 0,\n \"memory\": memory_needed,\n \"walltime\": walltime_gomc_analysis_hr,\n }\n)\n\[email protected](lambda *jobs: all(part_5a_analysis_individual_simulation_averages_completed(j)\n for j in jobs[0]._project))\[email protected](part_4c_job_production_run_completed_properly)\[email protected](part_5a_analysis_individual_simulation_averages_completed)\[email protected](part_5b_analysis_replica_averages_completed)\ndef part_5b_analysis_replica_averages(*jobs):\n # ***************************************************\n # create the required lists and file labels for the replicates (start)\n # ***************************************************\n # output and labels\n output_column_temp_title = 'temp_K' # column title title for temp\n output_column_temp_std_title = 'temp_std_K' # column title title for temp\n output_column_solute_title = 'solute' # column title title for temp\n output_column_dFE_MBAR_title = 'dFE_MBAR_kcal_per_mol' # column title title for delta_MBAR\n output_column_dFE_MBAR_std_title = 'dFE_MBAR_std_kcal_per_mol' # column title title for ds_MBAR\n output_column_dFE_TI_title = 'dFE_TI_kcal_per_mol' # column title title for delta_MBAR\n output_column_dFE_TI_std_title = 'dFE_TI_std_kcal_per_mol' # column title title for ds_MBAR\n output_column_dFE_BAR_title = 'dFE_BAR_kcal_per_mol' # column title title for delta_MBAR\n output_column_dFE_BAR_std_title = 'dFE_BAR_std_kcal_per_mol' # column title title for ds_MBAR\n\n # get the list used in this function\n 
temp_repilcate_list = []\n solute_repilcate_list = []\n\n delta_MBAR_repilcate_box_0_list = []\n delta_TI_repilcate_box_0_list = []\n delta_BAR_repilcate_box_0_list = []\n\n\n output_txt_file_header = f\"{output_column_temp_title: <30} \" \\\n f\"{output_column_temp_std_title: <30} \" \\\n f\"{output_column_solute_title: <30} \"\\\n f\"{output_column_dFE_MBAR_title: <30} \"\\\n f\"{output_column_dFE_MBAR_std_title: <30} \"\\\n f\"{output_column_dFE_TI_title: <3 0} \"\\\n f\"{output_column_dFE_TI_std_title: <30} \"\\\n f\"{output_column_dFE_BAR_title: <30} \"\\\n f\"{output_column_dFE_BAR_std_title: <30} \"\\\n f\"\\n\"\n\n\n write_file_path_and_name_box_0 = f'analysis/{output_avg_std_of_replicates_txt_file_name_box_0}'\n if os.path.isfile(write_file_path_and_name_box_0):\n box_box_0_data_txt_file = open(write_file_path_and_name_box_0, \"a\")\n else:\n box_box_0_data_txt_file = open(write_file_path_and_name_box_0, \"w\")\n box_box_0_data_txt_file.write(output_txt_file_header)\n\n\n # ***************************************************\n # create the required lists and file labels for the replicates (end)\n # ***************************************************\n\n for job in jobs:\n\n # *************************\n # drawing in data from single file and extracting specific rows from box 0 (start)\n # *************************\n reading_file_box_box_0 = job.fn(output_replicate_txt_file_name_box_0)\n\n data_box_box_0 = pd.read_csv(reading_file_box_box_0, sep='\\s+', header=0, na_values='NaN', index_col=False)\n data_box_box_0 = pd.DataFrame(data_box_box_0)\n\n temp_repilcate_list.append(data_box_box_0.loc[:, output_column_temp_title][0])\n solute_repilcate_list.append(data_box_box_0.loc[:, output_column_solute_title][0])\n\n delta_MBAR_repilcate_box_0_list.append(data_box_box_0.loc[:, output_column_dFE_MBAR_title][0])\n delta_TI_repilcate_box_0_list.append(data_box_box_0.loc[:, output_column_dFE_TI_title][0])\n delta_BAR_repilcate_box_0_list.append(data_box_box_0.loc[:, 
output_column_dFE_BAR_title][0])\n\n # *************************\n # drawing in data from single file and extracting specific rows from box 0 (end)\n # *************************\n\n\n # *************************\n # get the replica means and std.devs (start)\n # *************************\n temp_mean = np.mean(temp_repilcate_list)\n temp_std = np.std(temp_repilcate_list, ddof=1)\n\n solute_iter = solute_repilcate_list[0]\n\n delta_MBAR_mean_box_box_0 = np.mean(delta_MBAR_repilcate_box_0_list)\n delta_TI_mean_box_box_0 = np.mean(delta_TI_repilcate_box_0_list)\n delta_BAR_mean_box_box_0 = np.mean(delta_BAR_repilcate_box_0_list)\n\n delta_std_MBAR_mean_box_box_0 = np.std(delta_MBAR_repilcate_box_0_list, ddof=1)\n delta_std_TI_mean_box_box_0 = np.std(delta_TI_repilcate_box_0_list, ddof=1)\n delta_std_BAR_mean_box_box_0 = np.std(delta_BAR_repilcate_box_0_list, ddof=1)\n\n # *************************\n # get the replica means and std.devs (end)\n # *************************\n\n # ************************************\n # write the analysis data files for the liquid and vapor boxes (start)\n # ************************************\n\n box_box_0_data_txt_file.write(\n f\"{temp_mean: <30} \"\n f\"{temp_std: <30} \"\n f\"{solute_iter: <30} \"\n f\"{delta_MBAR_mean_box_box_0: <30} \"\n f\"{delta_std_MBAR_mean_box_box_0: <30} \"\n f\"{delta_TI_mean_box_box_0: <30} \"\n f\"{delta_std_TI_mean_box_box_0: <30} \"\n f\"{delta_BAR_mean_box_box_0: <30} \"\n f\"{delta_std_BAR_mean_box_box_0: <30} \"\n f\" \\n\"\n )\n\n # ************************************\n # write the analysis data files for the liquid and vapor boxes (end)\n # ************************************\n\n\n# ******************************************************\n# ******************************************************\n# data analysis - get the average and std. dev. 
from/across all the replicates (end)\n# ******************************************************\n# ******************************************************\n\n\n# ******************************************************\n# ******************************************************\n# signac end code (start)\n# ******************************************************\n# ******************************************************\nif __name__ == \"__main__\":\n pr = Project()\n pr.main()\n# ******************************************************\n# ******************************************************\n# signac end code (end)\n# ******************************************************\n# ******************************************************\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "numpy.round", "numpy.std", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
csc-training/geocomputing
[ "1e8043c864fb663526d1c15cfd3bb390a1379181" ]
[ "machineLearning/04_cnn_solaris/08_1_train.py" ]
[ "import solaris as sol\nimport torch\nimport rasterio\nimport rasterio.merge\nimport pandas as pd\nimport time\nimport os\nfrom PredictSpruceForestsModel import PredictSpruceForestsModel\nimport sys\n\n### The first (and only) input argument for this script is the folder where data exists\nif len(sys.argv) != 2:\n print('Please give the data directory')\n sys.exit()\n\nbase_folder=sys.argv[1]\n\n### This is the folder of this file. We use it to fetch the .yml files\nscript_folder = os.path.dirname(os.path.realpath(__file__))\n\n### Folder where our training, label, prediction and result tiles will be\ntile_output_folder = os.path.join(base_folder)\n\n### This script's training and label tile folders\ntraining_image_tile_subfolder = os.path.join(tile_output_folder,\"image_training_tiles_650\")\ntraining_label_tile_subfolder = os.path.join(tile_output_folder, \"label_tiles_650\")\n\ndef checkGPUavailability():\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n if device.type == 'cuda':\n print(\"We have a GPU available! The model is: \",torch.cuda.get_device_name(0))\n else:\n print(\"Sadly no GPU available. :( you have settle with a CPU. 
Good luck!\")\n\ndef createTileListFiles():\n #### Create lists of the tile filenames\n list_of_training_tiles = os.listdir(training_image_tile_subfolder)\n list_of_label_tiles = os.listdir(training_label_tile_subfolder)\n\n ### Add the whole path to the filenames\n list_of_training_tiles = [os.path.join(training_image_tile_subfolder, i) for i in list_of_training_tiles]\n list_of_label_tiles = [os.path.join(training_label_tile_subfolder, i) for i in list_of_label_tiles]\n\n ### Sort the two lists used in training so they match\n list_of_training_tiles.sort()\n list_of_label_tiles.sort()\n\n ### Create a pandas dataframe that has the training image filepath and label image filepath as columns and write it to csv\n training_filename_df = pd.DataFrame({'image': list_of_training_tiles, 'label': list_of_label_tiles})\n training_filename_df.to_csv(os.path.join(script_folder, 'tile_filepaths_for_training.csv'), encoding='utf-8')\n\ndef trainModel(config,custom_model_dict):\n ### This function trains the convolutional neural network model\n\n start_time = time.time()\n print('Training the model...')\n trainer = sol.nets.train.Trainer(config, custom_model_dict=custom_model_dict)\n trainer.train()\n end_time = time.time()\n\n print('training took {} seconds'.format(end_time - start_time))\n\n\ndef main():\n ### Let's check if we have a valid GPU at use\n checkGPUavailability()\n\n ### Let's split the input images to tiles that the CNN can use\n createTileListFiles()\n\n ### Let's load the configuration .yml file for the prediction phase\n training_config = sol.utils.config.parse(os.path.join(script_folder,'config_training.yml'))\n custom_model_dict = {'model_name': 'PredictSpruceForestsModel', 'weight_path': None, 'weight_url': None,\n 'arch': PredictSpruceForestsModel}\n\n\n trainModel(training_config,custom_model_dict)\n\n\nif __name__ == '__main__':\n ### This part just runs the main method and times it\n start = time.time()\n main()\n end = time.time()\n print(\"Script 
completed in \" + str(round(((end - start) / 60), 3)) + \" minutes\")\n" ]
[ [ "torch.cuda.is_available", "pandas.DataFrame", "torch.cuda.get_device_name" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
malexmad/League-Of-Legend-App
[ "94b6957d462ee29f5536112af39c04093faa92b7", "94b6957d462ee29f5536112af39c04093faa92b7" ]
[ "pages/predictions.py", "pages/predictions1.py" ]
[ "import dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom joblib import load\npipe2 = load('assets/pipe2.joblib')\n\nfrom app import app\nCHANGE_COLOR = {'color': 'black',}\n\n\ncolumn1 = dbc.Col(\n [\n dcc.Markdown('### Gold Difference'),\n dcc.Markdown('### @ 15 Minutes'),\n dcc.Markdown('### Blue Advantage+'),\n dcc.Markdown('### Red Advantage-'),\n dcc.Slider(\n id='min_15', \n min=-5000, \n max=5000, \n step=250, \n value=None, \n marks={n: str(n) for n in range(-5000,6000,1000)}, \n className='mb-5', \n ),\n dcc.Markdown('#### First Dragon'), \n dcc.Dropdown(\n id='First_Dragon', \n options = [\n {'label': 'Blue', 'value': 0}, \n {'label': 'Red', 'value': 1}, \n ], \n value = None, \n className='mb-5', \n style=CHANGE_COLOR,\n ),\n dcc.Markdown('#### First Tower'), \n dcc.Dropdown(\n id='First_Tower', \n options = [\n {'label': 'Blue', 'value': 0}, \n {'label': 'Red', 'value': 1}, \n ], \n value = None, \n className='mb-5',\n style=CHANGE_COLOR,\n ),\n dcc.Markdown('#### First Herald'), \n dcc.Dropdown(\n id='First_Herald', \n options = [\n {'label': 'No Team', 'value': 0}, \n {'label': 'Blue', 'value': -1}, \n {'label': 'Red', 'value': 1}, \n ], \n value = None, \n className='mb-5', \n style=CHANGE_COLOR,\n ),\n dcc.Markdown('#### First Blood'), \n dcc.Dropdown(\n id='First_Blood', \n options = [\n {'label': 'Blue', 'value': 0}, \n {'label': 'Red', 'value': 1}, \n ], \n value = None, \n className='mb-5',\n style=CHANGE_COLOR,\n ),\n ],\n md=4,\n)\n\ncolumn2 = dbc.Col(\n [\n html.H2('Winning Team ', className='text-center'), \n html.H3(id='prediction-content', className='text-center'),\n html.Img(src='assets/lolvictory.jpg', className='rounded mx-auto d-block')\n ]\n)\n\nlayout = dbc.Row([column1, column2])\n\nimport pandas as pd\n\[email protected](\n Output('prediction-content', 'children'),\n [Input('min_15', 'value'), 
Input('First_Dragon', 'value'), Input('First_Tower', 'value'), \n Input('First_Herald', 'value'), Input('First_Blood', 'value')\n ])\ndef predict(min_15, First_Dragon, First_Tower, First_Herald, First_Blood):\n df = pd.DataFrame(\n columns=['min_15', 'First_dragon', 'First_Tower', 'First_Herald', 'First_Blood'], \n data=[[min_15, First_Dragon, First_Tower,First_Herald, First_Blood]]\n )\n y_pred = pipe2.predict(df)[0]\n if y_pred == 0:\n y_pred = 'Blue Team'\n else:\n y_pred = 'Red Team'\n return y_pred", "import dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom joblib import load\npipe3 = load('assets/pipe3.joblib')\n\nfrom app import app\nCHANGE_COLOR = {'color': 'black',}\n\n\ncolumn1 = dbc.Col(\n [\n dcc.Markdown('#### First Blood'), \n dcc.Dropdown(\n id='First_Blood', \n options = [\n {'label': 'Blue', 'value': 0}, \n {'label': 'Red', 'value': 1}, \n ], \n value = None, \n className='mb-5',\n style=CHANGE_COLOR,\n ),\n dcc.Markdown('#### First Dragon'), \n dcc.Dropdown(\n id='First_Dragon', \n options = [\n {'label': 'Blue', 'value': 0}, \n {'label': 'Red', 'value': 1}, \n ], \n value = None, \n className='mb-5',\n style=CHANGE_COLOR,\n ),\n dcc.Markdown('#### First Tower'), \n dcc.Dropdown(\n id='First_Tower', \n options = [\n {'label': 'Blue', 'value': 0}, \n {'label': 'Red', 'value': 1}, \n ], \n value = None, \n className='mb-5',\n style=CHANGE_COLOR,\n ),\n dcc.Markdown('#### First Herald'), \n dcc.Dropdown(\n id='First_Herald', \n options = [\n {'label': 'No Team', 'value': 0}, \n {'label': 'Blue', 'value': -1}, \n {'label': 'Red', 'value': 1}, \n ], \n value = None, \n className='mb-5', \n style=CHANGE_COLOR,\n ),\n dcc.Markdown('#### First Baron'), \n dcc.Dropdown(\n id='First_Baron', \n options = [\n {'label': 'No Team', 'value': 0},\n {'label': 'Blue', 'value': -1},\n {'label': 'Red', 'value': 1}, \n ], \n value = None, \n 
className='mb-5', \n style=CHANGE_COLOR,\n ),\n ],\n md=4,\n)\n\ncolumn2 = dbc.Col(\n [\n html.H2('Winning Team ', className='text-center'), \n html.H3(id='prediction-content1', className='text-center'),\n html.Img(src='assets/lolvictory.jpg', className='rounded mx-auto d-block')\n ]\n)\n\nlayout = dbc.Row([column1, column2])\n\nimport pandas as pd\n\[email protected](\n Output('prediction-content1', 'children'),\n [Input('First_Dragon', 'value'), Input('First_Tower', 'value'), \n Input('First_Herald', 'value'), Input('First_Blood', 'value'), Input('First_Baron', 'value')\n ])\ndef predict(First_Dragon, First_Tower, First_Herald, First_Blood, First_Baron):\n df1 = pd.DataFrame(\n columns=['First_dragon', 'First_Tower', 'First_Herald', 'First_Blood', 'First_Baron'], \n data=[[First_Dragon, First_Tower,First_Herald, First_Blood, First_Baron]]\n )\n y_pred1 = pipe3.predict(df1)[0]\n if y_pred1 == 0:\n y_pred1 = 'Blue Team'\n else:\n y_pred1 = 'Red Team'\n return y_pred1" ]
[ [ "pandas.DataFrame" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Dongfang1021/Python_data_analysis_notebook
[ "210c8bbe1b17736e639bbdbcae19df795fb702d5", "210c8bbe1b17736e639bbdbcae19df795fb702d5", "210c8bbe1b17736e639bbdbcae19df795fb702d5", "210c8bbe1b17736e639bbdbcae19df795fb702d5", "210c8bbe1b17736e639bbdbcae19df795fb702d5" ]
[ "Python-Data-Cleaning-Cookbook-master/3_TakingMeasureOfData/5. stats_continuous.py", "Python-Data-Cleaning-Cookbook-master/7_Aggregating/4. groupby_more.py", "Python-Data-Cleaning-Cookbook-master/5._Visualization/5. scatter_plots.py", "Python-Data-Cleaning-Cookbook-master/4_OutliersMultivariate/5. regression_influence.py", "Python-Data-Cleaning-Cookbook-master/OutliersMultivariate/outliers_univariate.py" ]
[ "# import pandas, numpy, and matplotlib\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\npd.set_option('display.width', 75)\npd.set_option('display.max_columns', 7)\npd.set_option('display.max_rows', 20)\npd.options.display.float_format = '{:,.2f}'.format\ncovidtotals = pd.read_pickle(\"data/covidtotals.pkl\")\n\n# look at a few rows of the covid cases data\ncovidtotals.shape\n\ncovidtotals.sample(2).T\ncovidtotals.dtypes\n\n# get descriptive statistics on the cumulative values\ncovidtotals.describe()\ntotvars = ['location','total_cases','total_deaths',\n 'total_cases_pm','total_deaths_pm']\ncovidtotals[totvars].quantile(np.arange(0.0, 1.1, 0.1))\n\n# view the distribution of total cases\nplt.hist(covidtotals['total_cases']/1000, bins=12)\nplt.title(\"Total Covid Cases (in thousands)\")\nplt.xlabel('Cases')\nplt.ylabel(\"Number of Countries\")\nplt.show()\n", "# import pandas, load the nls97 feather file\nimport pandas as pd\npd.set_option('display.width', 90)\npd.set_option('display.max_columns', 10)\npd.set_option('display.max_rows', 30)\npd.options.display.float_format = '{:,.0f}'.format\nnls97 = pd.read_pickle(\"data/nls97.pkl\")\n\n# review the structure of the nls97 data\nnls97.iloc[:,0:7].info()\n\n# look again at some of the data\ncatvars = ['gender','maritalstatus','highestdegree']\n\nfor col in catvars:\n print(col, nls97[col].value_counts(sort=False), sep=\"\\n\\n\", end=\"\\n\\n\\n\")\n\n\n# review some descriptive statistics\ncontvars = ['satmath','satverbal','weeksworked06','gpaoverall',\n 'childathome']\n\nnls97[contvars].describe()\n\n# look at sat math scores by gender\nnls97.groupby('gender')['satmath'].mean()\n\n# look at sat math scores by gender and highest degree earned\nnls97.groupby(['gender','highestdegree'])['satmath'].mean()\n\n# look at sat math and verbal scores by gender and highest degree earned\nnls97.groupby(['gender','highestdegree'])[['satmath','satverbal']].mean()\n\n# add max and standard 
deviations\nnls97.groupby(['gender','highestdegree'])['gpaoverall'].agg(['count','mean','max','std'])\n\n# use a dictionary for more complicated aggregations\npd.options.display.float_format = '{:,.1f}'.format\naggdict = {'weeksworked06':['count', 'mean', 'max','std'], 'childathome':['count', 'mean', 'max', 'std']}\nnls97.groupby(['highestdegree']).agg(aggdict)\nnls97.groupby(['maritalstatus']).agg(aggdict)\n", "# import pandas, matplotlib, and seaborn\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport seaborn as sns\npd.set_option('display.width', 80)\npd.set_option('display.max_columns', 12)\npd.set_option('display.max_rows', 200)\npd.options.display.float_format = '{:,.0f}'.format\nlandtemps = pd.read_pickle(\"data/landtemps2019avgs.pkl\")\n\n# run a scatter plot latitude by avgtemp\nplt.scatter(x=\"latabs\", y=\"avgtemp\", data=landtemps)\nplt.xlabel(\"Latitude (N or S)\")\nplt.ylabel(\"Average Temperature (Celsius)\")\nplt.yticks(np.arange(-60, 40, step=20))\nplt.title(\"Latitude and Average Temperature in 2019\")\nplt.show()\n\n# show the high elevation points in a different color\nlow, high = landtemps.loc[landtemps.elevation<=1000], landtemps.loc[landtemps.elevation>1000]\nplt.scatter(x=\"latabs\", y=\"avgtemp\", c=\"blue\", data=low)\nplt.scatter(x=\"latabs\", y=\"avgtemp\", c=\"red\", data=high)\nplt.legend(('low elevation', 'high elevation'))\nplt.xlabel(\"Latitude (N or S)\")\nplt.ylabel(\"Average Temperature (Celsius)\")\nplt.title(\"Latitude and Average Temperature in 2019\")\nplt.show()\n\n# show this as a 3D plot\nfig = plt.figure()\nplt.suptitle(\"Latitude, Temperature, and Elevation in 2019\")\nax.set_title('Three D')\nax = plt.axes(projection='3d')\nax.set_xlabel(\"Elevation\")\nax.set_ylabel(\"Latitude\")\nax.set_zlabel(\"Avg Temp\")\nax.scatter3D(low.elevation, low.latabs, low.avgtemp, label=\"low elevation\", c=\"blue\")\nax.scatter3D(high.elevation, high.latabs, 
high.avgtemp, label=\"high elevation\", c=\"red\")\nax.legend()\nplt.show()\n\n# show scatter plot with a regression line\nsns.regplot(x=\"latabs\", y=\"avgtemp\", color=\"blue\", data=landtemps)\nplt.title(\"Latitude and Average Temperature in 2019\")\nplt.xlabel(\"Latitude (N or S)\")\nplt.ylabel(\"Average Temperature\")\nplt.show()\n\n# show scatter plot with different regression lines by elevation group\nlandtemps['elevation_group'] = np.where(landtemps.elevation<=1000,'low','high')\nsns.lmplot(x=\"latabs\", y=\"avgtemp\", hue=\"elevation_group\", palette=dict(low=\"blue\", high=\"red\"), legend_out=False, data=landtemps)\nplt.xlabel(\"Latitude (N or S)\")\nplt.ylabel(\"Average Temperature\")\nplt.legend(('low elevation', 'high elevation'), loc='lower left')\nplt.yticks(np.arange(-60, 40, step=20))\nplt.title(\"Latitude and Average Temperature in 2019\")\nplt.tight_layout()\nplt.show()\n\n# check some average temperatures above the regression lines\nhigh.loc[(high.latabs>38) & (high.avgtemp>=18),\\\n ['station','country','latabs','elevation','avgtemp']]\nlow.loc[(low.latabs>47) & (low.avgtemp>=14),\\\n ['station','country','latabs','elevation','avgtemp']]\n\n# check some average temperatures below the regression lines\nhigh.loc[(high.latabs<5) & (high.avgtemp<18),\\\n ['station','country','latabs','elevation','avgtemp']]\nlow.loc[(low.latabs<50) & (low.avgtemp<-9),\\\n ['station','country','latabs','elevation','avgtemp']]\n\n", "# import pandas, numpy, matplotlib, statsmodels, and load the covid totals data\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\npd.set_option('display.width', 85)\npd.options.display.float_format = '{:,.2f}'.format\n\ncovidtotals = pd.read_pickle(\"data/covidtotals.pkl\")\n\n# create an analysis file\nxvars = ['pop_density','median_age','gdp_per_capita']\n\ncovidanalysis = covidtotals.loc[:,['total_cases_pm'] + xvars].dropna()\ncovidanalysis.describe()\n\n# fit a linear regression 
model\ndef getlm(df):\n Y = df.total_cases_pm\n X = df[['pop_density','median_age','gdp_per_capita']]\n X = sm.add_constant(X)\n return sm.OLS(Y, X).fit()\n\nlm = getlm(covidanalysis)\nlm.summary()\n\n# identify countries with an outsized influence on the model\ninfluence = lm.get_influence().summary_frame()\ninfluence.loc[influence.cooks_d>0.5, ['cooks_d']]\ncovidanalysis.loc[influence.cooks_d>0.5]\n\n# do an influence plot\nfig, ax = plt.subplots()\nsm.graphics.influence_plot(lm, ax = ax, criterion=\"cooks\")\nplt.show()\n\n# show a model without the outliers\ncovidanalysisminusoutliers = covidanalysis.loc[influence.cooks_d<0.5]\n\nlm = getlm(covidanalysisminusoutliers)\nlm.summary()\n", "# import pandas, numpy, and matplotlib\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nfrom statsmodels.graphics.gofplots import qqline\nimport scipy.stats as scistat\npd.set_option('display.width', 85)\npd.set_option('display.max_columns', 6)\npd.set_option('display.max_rows', 20)\npd.options.display.float_format = '{:,.0f}'.format\ncovidtotals = pd.read_pickle(\"data/covidtotals.pkl\")\n\n# set up the cumulative and demographic columns\ntotvars = ['location','total_cases','total_deaths','total_cases_pm',\n 'total_deaths_pm']\ndemovars = ['population','pop_density','median_age','gdp_per_capita',\n 'hosp_beds']\n\n# get descriptive statistics on the cumulative values\ncovidtotalsonly = covidtotals.loc[:, totvars]\ncovidtotalsonly.describe()\npd.options.display.float_format = '{:,.2f}'.format\ncovidtotalsonly.quantile(np.arange(0.0, 1.1, 0.1))\ncovidtotalsonly.skew()\ncovidtotalsonly.kurtosis()\n\n# test for normality\ndef testnorm(var, df):\n stat, p = scistat.shapiro(df[var])\n return p\n\ntestnorm(\"total_cases\", covidtotalsonly)\ntestnorm(\"total_deaths\", covidtotalsonly)\ntestnorm(\"total_cases_pm\", covidtotalsonly)\ntestnorm(\"total_deaths_pm\", covidtotalsonly)\n\n# show a qqplot for total cases and total cases 
per million\nsm.qqplot(covidtotalsonly[['total_cases']].sort_values(['total_cases']), line='s')\nplt.title(\"QQ Plot of Total Cases\")\n\nsm.qqplot(covidtotals[['total_cases_pm']].sort_values(['total_cases_pm']), line='s')\nplt.title(\"QQ Plot of Total Cases Per Million\")\nplt.show()\n\n# show outliers for total cases\nthirdq, firstq = covidtotalsonly.total_cases.quantile(0.75), covidtotalsonly.total_cases.quantile(0.25)\ninterquartilerange = 1.5*(thirdq-firstq)\noutlierhigh, outlierlow = interquartilerange+thirdq, firstq-interquartilerange\nprint(outlierlow, outlierhigh, sep=\" <--> \")\n\n# generate a table of outliers and save it to Excel\ndef getoutliers():\n dfout = pd.DataFrame(columns=covidtotals.columns, data=None)\n for col in covidtotalsonly.columns[1:]:\n thirdq, firstq = covidtotalsonly[col].quantile(0.75),\\\n covidtotalsonly[col].quantile(0.25)\n interquartilerange = 1.5*(thirdq-firstq)\n outlierhigh, outlierlow = interquartilerange+thirdq,\\\n firstq-interquartilerange\n df = covidtotals.loc[(covidtotals[col]>outlierhigh) | \\\n (covidtotals[col]<outlierlow)]\n df = df.assign(varname = col, threshlow = outlierlow,\\\n threshhigh = outlierhigh)\n dfout = pd.concat([dfout, df])\n return dfout\n\noutliers = getoutliers()\noutliers.varname.value_counts(sort=False)\noutliers.to_excel(\"views/outlierscases.xlsx\")\n\n# look a little more closely at outliers for cases per million\noutliers.loc[outliers.varname==\"total_cases_pm\",\\\n totvars + demovars].\\\n sort_values(['total_cases_pm'], ascending=False)\n\n# show the total cases histogram again\nplt.hist(covidtotalsonly['total_cases']/1000, bins=7)\nplt.title(\"Total Covid Cases (thousands)\")\nplt.xlabel('Cases')\nplt.ylabel(\"Number of Countries\")\nplt.show()\n\n# do a log transformation of the covid data\ncovidlogs = covidtotalsonly.copy()\nfor col in covidtotalsonly.columns[1:]:\n covidlogs[col] = np.log(covidlogs[col]+1)\n\nplt.hist(covidlogs['total_cases'], bins=7)\nplt.title(\"Total Covid Cases 
(log)\")\nplt.xlabel('Cases')\nplt.ylabel(\"Number of Countries\")\nplt.show()\n\n\n" ]
[ [ "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.xlabel", "pandas.set_option", "pandas.read_pickle", "matplotlib.pyplot.hist", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "pandas.set_option", "pandas.read_pickle" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.axes", "matplotlib.pyplot.xlabel", "pandas.set_option", "pandas.read_pickle", "numpy.where", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "pandas.set_option", "pandas.read_pickle", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show" ], [ "numpy.log", "pandas.concat", "matplotlib.pyplot.title", "numpy.arange", "pandas.DataFrame", "scipy.stats.shapiro", "matplotlib.pyplot.xlabel", "pandas.set_option", "pandas.read_pickle", "matplotlib.pyplot.hist", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
zarahz/MARL-and-Markets
[ "3591a160e098e7251b9e7c7b59c6d0ab08ba0779", "3591a160e098e7251b9e7c7b59c6d0ab08ba0779" ]
[ "Coloring/learning/utils/agent.py", "Coloring/environment/colors.py" ]
[ "from learning.dqn.model import DQNModel\nfrom learning.ppo.model import ACModel\n\nimport torch\n\nfrom learning.utils.storage import get_model_state\nfrom learning.utils.format import get_obss_preprocessor\n\n\nclass Agent:\n \"\"\"An agent - It is able to choose an action given an observation for visualization\"\"\"\n\n def __init__(self, algo, agent_index, obs_space, action_space, model_dir,\n device=None):\n obs_space, self.preprocess_obss = get_obss_preprocessor(\n obs_space)\n \n self.algo = algo\n self.device = device\n\n if algo == \"ppo\":\n self.model = ACModel(obs_space, action_space)\n all_states = get_model_state(model_dir, \"model_state\")\n else:\n self.model = DQNModel(obs_space, action_space)\n all_states = get_model_state(model_dir, \"target_state\")\n\n try:\n state = all_states[agent_index]\n except IndexError:\n if algo == \"ppo\":\n all_states = get_model_state(model_dir, \"model_state\")\n else:\n all_states = get_model_state(model_dir, \"target_state\")\n\n state_len = len(all_states)\n state = all_states[agent_index % state_len]\n self.model.load_state_dict(state)\n self.model.to(self.device)\n self.model.eval()\n\n def get_ppo_actions(self, obss, agent):\n agent_obs = [None]*len(obss)\n for index in range(len(obss)):\n agent_obs[index] = obss[index][agent]\n preprocessed_obss = self.preprocess_obss(agent_obs, device=self.device)\n\n with torch.no_grad():\n if self.model.recurrent:\n dist, _ = self.model(\n preprocessed_obss)\n else:\n dist, _ = self.model(preprocessed_obss)\n\n actions = dist.sample()\n\n return actions.cpu().numpy()\n\n def get_dqn_actions(self, obss, agent):\n agent_obs = [None]*len(obss)\n for index in range(len(obss)):\n agent_obs[index] = obss[index][agent]\n preprocessed_obss = self.preprocess_obss(agent_obs, device=self.device)\n\n with torch.no_grad():\n result = self.model(preprocessed_obss.image) # .unsqueeze(0)\n action = [env_res.max(0)[1] for env_res in result][0]\n\n return action.cpu().numpy()\n\n def 
get_action(self, obs, agent):\n if self.algo == \"ppo\":\n return self.get_ppo_actions([obs], agent)\n else:\n return self.get_dqn_actions([obs], agent)\n\n", "import numpy as np\n\n# Map of color names to RGB values\nCOLORS = {\n 'black': np.array([0, 0, 0]),\n 'white': np.array([255, 255, 255])\n}\n# Used to map colors to integers\nCOLOR_TO_IDX = {\n 'black': 0,\n 'white': 1\n}\n\nCOLOR_NAMES = []\nCOLOR_VALUES = []\nIDX_TO_COLOR = {}\n\n\ndef update_global_color_variables():\n \"\"\"Overwrite all global color variables when new colors are added \n to COLORS and COLOR_TO_IDX\"\"\"\n COLOR_NAMES.extend(sorted(list(COLORS.keys())))\n COLOR_VALUES.extend(list(COLORS.values()))\n IDX_TO_COLOR.update(dict(zip(COLOR_TO_IDX.values(), COLOR_TO_IDX.keys())))\n\n\ndef random_rgb_values():\n \"\"\"pick random rgb values\"\"\"\n red = np.random.randint(0, 255)\n green = np.random.randint(0, 255)\n blue = np.random.randint(0, 255)\n return red, green, blue\n\n\ndef generate_colors(amount):\n \"\"\"Generate Random unique colors based on set amount of agents\"\"\"\n for _ in range(amount):\n # check if the color already exists\n while True:\n red, green, blue = random_rgb_values()\n for color in COLORS:\n same_values = COLORS[color] == np.array([red, green, blue])\n if same_values.all():\n continue\n break\n color_name = \"{}.{}.{}\".format(red, green, blue)\n # save values\n COLORS[color_name] = np.array([red, green, blue])\n # i+2 since black and white is already defined\n COLOR_TO_IDX[color_name] = len(COLOR_TO_IDX)\n update_global_color_variables()\n" ]
[ [ "torch.no_grad" ], [ "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dervischooch/face-id-with-medical-masks
[ "4f5f0cd696d7ea3e6d627592c5378d451bf82b26" ]
[ "masked_face_sdk/crop_utils.py" ]
[ "import numpy as np\n\n\ndef create_square_crop_by_detection(frame: np.ndarray, box: list) -> np.ndarray:\n \"\"\"\n Rebuild detection box to square shape\n Args:\n frame: rgb image in np.uint8 format\n box: list with follow structure: [x1, y1, x2, y2]\n Returns:\n Image crop by box with square shape\n \"\"\"\n w = box[2] - box[0]\n h = box[3] - box[1]\n cx = box[0] + w // 2\n cy = box[1] + h // 2\n radius = max(w, h) // 2\n exist_box = []\n pads = []\n\n # y top\n if cy - radius >= 0:\n exist_box.append(cy - radius)\n pads.append(0)\n else:\n exist_box.append(0)\n pads.append(-(cy - radius))\n\n # y bottom\n if cy + radius >= frame.shape[0]:\n exist_box.append(frame.shape[0] - 1)\n pads.append(cy + radius - frame.shape[0] + 1)\n else:\n exist_box.append(cy + radius)\n pads.append(0)\n\n # x left\n if cx - radius >= 0:\n exist_box.append(cx - radius)\n pads.append(0)\n else:\n exist_box.append(0)\n pads.append(-(cx - radius))\n\n # x right\n if cx + radius >= frame.shape[1]:\n exist_box.append(frame.shape[1] - 1)\n pads.append(cx + radius - frame.shape[1] + 1)\n else:\n exist_box.append(cx + radius)\n pads.append(0)\n\n exist_crop = frame[\n exist_box[0]:exist_box[1],\n exist_box[2]:exist_box[3]\n ]\n croped = np.pad(\n exist_crop,\n (\n (pads[0], pads[1]),\n (pads[2], pads[3]),\n (0, 0)\n ),\n 'constant'\n )\n return croped\n" ]
[ [ "numpy.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xing-lab-pitt/dynamo-release
[ "76c1f2a270dd6722b88f4700aac1a1a725a0c261", "76c1f2a270dd6722b88f4700aac1a1a725a0c261", "76c1f2a270dd6722b88f4700aac1a1a725a0c261" ]
[ "dynamo/plot/preprocess.py", "dynamo/estimation/tsc/twostep.py", "dynamo/plot/space.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom scipy.sparse import issparse, csr_matrix\nfrom anndata import AnnData\nfrom typing import Optional, Union, Sequence\nimport matplotlib\nfrom matplotlib.axes import Axes\n\nfrom ..preprocessing import preprocess as pp\nfrom ..preprocessing.preprocess_monocle_utils import top_table\nfrom .utils import save_fig\nfrom ..tools.utils import update_dict, get_mapper\nfrom ..preprocessing.utils import detect_experiment_datatype\nfrom ..dynamo_logger import main_warning\nfrom ..configuration import DynamoAdataKeyManager\n\n\ndef basic_stats(\n adata: AnnData,\n group: Optional[str] = None,\n figsize: tuple = (4, 3),\n save_show_or_return: str = \"show\",\n save_kwargs: dict = {},\n):\n \"\"\"Plot the basic statics (nGenes, nCounts and pMito) of each category of adata.\n\n Parameters\n ----------\n adata: :class:`~anndata.AnnData`\n an Annodata object\n group: `string` (default: None)\n Which group to facets the data into subplots. Default is None, or no faceting will be used.\n figsize:\n Figure size of each facet.\n save_show_or_return: {'show', 'save', 'return'} (default: `show`)\n Whether to save, show or return the figure.\n save_kwargs: `dict` (default: `{}`)\n A dictionary that will passed to the save_fig function. By default it is an empty dictionary and the save_fig\n function will use the {\"path\": None, \"prefix\": 'basic_stats', \"dpi\": None, \"ext\": 'pdf', \"transparent\": True,\n \"close\": True, \"verbose\": True} as its parameters. 
Otherwise you can provide a dictionary that properly modify\n those keys according to your needs.\n\n Returns\n -------\n A violin plot that shows the fraction of each category, produced by seaborn.\n \"\"\"\n\n import matplotlib.pyplot as plt\n import seaborn as sns\n\n if len(adata.obs.columns.intersection([\"nGenes\", \"nCounts\", \"pMito\"])) != 3:\n from ..preprocessing.utils import basic_stats\n\n basic_stats(adata)\n\n df = pd.DataFrame(\n {\n \"nGenes\": adata.obs[\"nGenes\"],\n \"nCounts\": adata.obs[\"nCounts\"],\n \"pMito\": adata.obs[\"pMito\"],\n },\n index=adata.obs.index,\n )\n\n if group is not None and group in adata.obs.columns:\n df[\"group\"] = adata.obs.loc[:, group]\n res = df.melt(value_vars=[\"nGenes\", \"nCounts\", \"pMito\"], id_vars=[\"group\"])\n else:\n res = df.melt(value_vars=[\"nGenes\", \"nCounts\", \"pMito\"])\n\n # https://wckdouglas.github.io/2016/12/seaborn_annoying_title\n g = sns.FacetGrid(\n res,\n col=\"variable\",\n sharex=False,\n sharey=False,\n margin_titles=True,\n hue=\"variable\",\n height=figsize[1],\n aspect=figsize[0] / figsize[1],\n )\n\n if group is None:\n g.map_dataframe(sns.violinplot, x=\"variable\", y=\"value\")\n g.set_xticklabels([])\n g.set(xticks=[])\n else:\n if res[\"group\"].dtype.name == \"category\":\n xticks = res[\"group\"].cat.categories\n else:\n xticks = np.sort(res[\"group\"].unique())\n kws = dict(order=xticks)\n\n g.map_dataframe(sns.violinplot, x=\"group\", y=\"value\", **kws)\n g.set_xticklabels(rotation=-30)\n\n [plt.setp(ax.texts, text=\"\") for ax in g.axes.flat] # remove the original texts\n # important to add this before setting titles\n g.set_titles(row_template=\"{row_name}\", col_template=\"{col_name}\")\n\n g.set_xlabels(\"\")\n g.set_ylabels(\"\")\n g.set(ylim=(0, None))\n\n if save_show_or_return == \"save\":\n s_kwargs = {\n \"path\": None,\n \"prefix\": \"basic_stats\",\n \"dpi\": None,\n \"ext\": \"pdf\",\n \"transparent\": True,\n \"close\": True,\n \"verbose\": True,\n }\n 
s_kwargs = update_dict(s_kwargs, save_kwargs)\n save_fig(**s_kwargs)\n elif save_show_or_return == \"show\":\n import matplotlib.pyplot as plt\n\n plt.tight_layout()\n plt.show()\n elif save_show_or_return == \"return\":\n return g\n\n\ndef show_fraction(\n adata: AnnData,\n genes: Optional[list] = None,\n group: Optional[str] = None,\n figsize: tuple = (4, 3),\n save_show_or_return: str = \"show\",\n save_kwargs: dict = {},\n):\n \"\"\"Plot the fraction of each category of data used in the velocity estimation.\n\n Parameters\n ----------\n adata: :class:`~anndata.AnnData`\n an Annodata object\n genes: `list` like:\n The list of gene names from which the fraction will be calculated.\n group: `string` (default: None)\n Which group to facets the data into subplots. Default is None, or no faceting will be used.\n figsize: `string` (default: (4, 3))\n Figure size of each facet.\n save_show_or_return: {'show', 'save', 'return'} (default: `show`)\n Whether to save, show or return the figure.\n save_kwargs: `dict` (default: `{}`)\n A dictionary that will passed to the save_fig function. By default it is an empty dictionary and the save_fig\n function will use the {\"path\": None, \"prefix\": 'show_fraction', \"dpi\": None, \"ext\": 'pdf', \"transparent\": True,\n \"close\": True, \"verbose\": True} as its parameters. 
Otherwise you can provide a dictionary that properly modify\n those keys according to your needs.\n\n Returns\n -------\n A violin plot that shows the fraction of each category, produced by seaborn.\n \"\"\"\n\n import matplotlib.pyplot as plt\n import seaborn as sns\n\n if genes is not None:\n genes = list(adata.var_names.intersection(genes))\n\n if len(genes) == 0:\n raise Exception(\"The gene list you provided doesn't much any genes from the adata object.\")\n\n mode = None\n if pd.Series([\"spliced\", \"unspliced\"]).isin(adata.layers.keys()).all():\n mode = \"splicing\"\n elif pd.Series([\"new\", \"total\"]).isin(adata.layers.keys()).all():\n mode = \"labelling\"\n elif pd.Series([\"uu\", \"ul\", \"su\", \"sl\"]).isin(adata.layers.keys()).all():\n mode = \"full\"\n\n if not (mode in [\"labelling\", \"splicing\", \"full\"]):\n raise Exception(\"your data doesn't seem to have either splicing or labeling or both information\")\n\n if mode == \"labelling\":\n new_mat, total_mat = (\n (adata.layers[\"new\"], adata.layers[\"total\"])\n if genes is None\n else (\n adata[:, genes].layers[\"new\"],\n adata[:, genes].layers[\"total\"],\n )\n )\n\n new_cell_sum, tot_cell_sum = (\n (np.sum(new_mat, 1), np.sum(total_mat, 1))\n if not issparse(new_mat)\n else (new_mat.sum(1).A1, total_mat.sum(1).A1)\n )\n\n new_frac_cell = new_cell_sum / tot_cell_sum\n old_frac_cell = 1 - new_frac_cell\n df = pd.DataFrame(\n {\"new_frac_cell\": new_frac_cell, \"old_frac_cell\": old_frac_cell},\n index=adata.obs.index,\n )\n\n if group is not None and group in adata.obs.keys():\n df[\"group\"] = adata.obs[group]\n res = df.melt(value_vars=[\"new_frac_cell\", \"old_frac_cell\"], id_vars=[\"group\"])\n else:\n res = df.melt(value_vars=[\"new_frac_cell\", \"old_frac_cell\"])\n\n elif mode == \"splicing\":\n if \"ambiguous\" in adata.layers.keys():\n ambiguous = adata.layers[\"ambiguous\"] if genes is None else adata[:, genes].layers[\"ambiguous\"]\n else:\n ambiguous = 
csr_matrix(np.array([[0]])) if issparse(adata.layers[\"unspliced\"]) else np.array([[0]])\n\n unspliced_mat, spliced_mat, ambiguous_mat = (\n adata.layers[\"unspliced\"] if genes is None else adata[:, genes].layers[\"unspliced\"],\n adata.layers[\"spliced\"] if genes is None else adata[:, genes].layers[\"spliced\"],\n ambiguous,\n )\n un_cell_sum, sp_cell_sum = (\n (np.sum(unspliced_mat, 1), np.sum(spliced_mat, 1))\n if not issparse(unspliced_mat)\n else (unspliced_mat.sum(1).A1, spliced_mat.sum(1).A1)\n )\n\n if \"ambiguous\" in adata.layers.keys():\n am_cell_sum = ambiguous_mat.sum(1).A1 if issparse(unspliced_mat) else np.sum(ambiguous_mat, 1)\n tot_cell_sum = un_cell_sum + sp_cell_sum + am_cell_sum\n un_frac_cell, sp_frac_cell, am_frac_cell = (\n un_cell_sum / tot_cell_sum,\n sp_cell_sum / tot_cell_sum,\n am_cell_sum / tot_cell_sum,\n )\n df = pd.DataFrame(\n {\n \"unspliced\": un_frac_cell,\n \"spliced\": sp_frac_cell,\n \"ambiguous\": am_frac_cell,\n },\n index=adata.obs.index,\n )\n else:\n tot_cell_sum = un_cell_sum + sp_cell_sum\n un_frac_cell, sp_frac_cell = (\n un_cell_sum / tot_cell_sum,\n sp_cell_sum / tot_cell_sum,\n )\n df = pd.DataFrame(\n {\"unspliced\": un_frac_cell, \"spliced\": sp_frac_cell},\n index=adata.obs.index,\n )\n\n if group is not None and group in adata.obs.columns:\n df[\"group\"] = adata.obs.loc[:, group]\n res = (\n df.melt(\n value_vars=[\"unspliced\", \"spliced\", \"ambiguous\"],\n id_vars=[\"group\"],\n )\n if \"ambiguous\" in adata.layers.keys()\n else df.melt(value_vars=[\"unspliced\", \"spliced\"], id_vars=[\"group\"])\n )\n else:\n res = (\n df.melt(value_vars=[\"unspliced\", \"spliced\", \"ambiguous\"])\n if \"ambiguous\" in adata.layers.keys()\n else df.melt(value_vars=[\"unspliced\", \"spliced\"])\n )\n\n elif mode == \"full\":\n uu, ul, su, sl = (\n adata.layers[\"uu\"] if genes is None else adata[:, genes].layers[\"uu\"],\n adata.layers[\"ul\"] if genes is None else adata[:, genes].layers[\"ul\"],\n adata.layers[\"su\"] 
if genes is None else adata[:, genes].layers[\"su\"],\n adata.layers[\"sl\"] if genes is None else adata[:, genes].layers[\"sl\"],\n )\n uu_sum, ul_sum, su_sum, sl_sum = (\n (np.sum(uu, 1), np.sum(ul, 1), np.sum(su, 1), np.sum(sl, 1))\n if not issparse(uu)\n else (\n uu.sum(1).A1,\n ul.sum(1).A1,\n su.sum(1).A1,\n sl.sum(1).A1,\n )\n )\n\n tot_cell_sum = uu_sum + ul_sum + su_sum + sl_sum\n uu_frac, ul_frac, su_frac, sl_frac = (\n uu_sum / tot_cell_sum,\n ul_sum / tot_cell_sum,\n su_sum / tot_cell_sum,\n sl_sum / tot_cell_sum,\n )\n df = pd.DataFrame(\n {\n \"uu_frac\": uu_frac,\n \"ul_frac\": ul_frac,\n \"su_frac\": su_frac,\n \"sl_frac\": sl_frac,\n },\n index=adata.obs.index,\n )\n\n if group is not None and group in adata.obs.keys():\n df[\"group\"] = adata.obs[group]\n res = df.melt(\n value_vars=[\"uu_frac\", \"ul_frac\", \"su_frac\", \"sl_frac\"],\n id_vars=[\"group\"],\n )\n else:\n res = df.melt(value_vars=[\"uu_frac\", \"ul_frac\", \"su_frac\", \"sl_frac\"])\n\n g = sns.FacetGrid(\n res,\n col=\"variable\",\n sharex=False,\n sharey=False,\n margin_titles=True,\n hue=\"variable\",\n height=figsize[1],\n aspect=figsize[0] / figsize[1],\n )\n if group is None:\n g.map_dataframe(sns.violinplot, x=\"variable\", y=\"value\")\n g.set_xticklabels([])\n g.set(xticks=[])\n else:\n if res[\"group\"].dtype.name == \"category\":\n xticks = res[\"group\"].cat.categories\n else:\n xticks = np.sort(res[\"group\"].unique())\n kws = dict(order=xticks)\n\n g.map_dataframe(sns.violinplot, x=\"group\", y=\"value\", **kws)\n g.set_xticklabels(rotation=-30)\n\n [plt.setp(ax.texts, text=\"\") for ax in g.axes.flat] # remove the original texts\n # important to add this before setting titles\n g.set_titles(row_template=\"{row_name}\", col_template=\"{col_name}\")\n\n g.set_xlabels(\"\")\n g.set_ylabels(\"Fraction\")\n g.set(ylim=(0, None))\n\n if save_show_or_return == \"save\":\n s_kwargs = {\n \"path\": None,\n \"prefix\": \"show_fraction\",\n \"dpi\": None,\n \"ext\": \"pdf\",\n 
\"transparent\": True,\n \"close\": True,\n \"verbose\": True,\n }\n s_kwargs = update_dict(s_kwargs, save_kwargs)\n\n save_fig(**s_kwargs)\n elif save_show_or_return == \"show\":\n plt.tight_layout()\n plt.show()\n elif save_show_or_return == \"return\":\n return g\n\n\ndef variance_explained(\n adata: AnnData,\n threshold: float = 0.002,\n n_pcs: Optional[int] = None,\n figsize: tuple = (4, 3),\n save_show_or_return: str = \"show\",\n save_kwargs: dict = {},\n):\n \"\"\"Plot the accumulative variance explained by the principal components.\n\n Parameters\n ----------\n adata: :class:`~anndata.AnnData`\n threshold: `float` (default: `0.002`)\n The threshold for the second derivative of the cumulative sum of the variance for each principal component.\n This threshold is used to determine the number of principal component used for downstream non-linear\n dimension reduction.\n n_pcs: `int` (default: `None`)\n Number of principal components.\n figsize: `string` (default: (4, 3))\n Figure size of each facet.\n save_show_or_return: {'show', 'save', 'return'} (default: `show`)\n Whether to save, show or return the figure.\n save_kwargs: `dict` (default: `{}`)\n A dictionary that will passed to the save_fig function. By default it is an empty dictionary and the\n save_fig function will use the {\"path\": None, \"prefix\": 'variance_explained', \"dpi\": None, \"ext\": 'pdf',\n \"transparent\": True, \"close\": True, \"verbose\": True} as its parameters. 
Otherwise you can provide a\n dictionary that properly modify those keys according to your needs.\n\n Returns\n -------\n Nothing but make a matplotlib based plot for showing the cumulative variance explained by each PC.\n \"\"\"\n\n import matplotlib.pyplot as plt\n\n var_ = adata.uns[\"explained_variance_ratio_\"]\n _, ax = plt.subplots(figsize=figsize)\n ax.plot(var_, c=\"r\")\n tmp = np.diff(np.diff(np.cumsum(var_)) > threshold)\n n_comps = n_pcs if n_pcs is not None else np.where(tmp)[0][0] if np.any(tmp) else 20\n ax.axvline(n_comps, c=\"r\")\n ax.set_xlabel(\"PCs\")\n ax.set_ylabel(\"Variance explained\")\n ax.set_xticks(list(ax.get_xticks()) + [n_comps])\n ax.set_xlim(0, len(var_))\n\n if save_show_or_return == \"save\":\n s_kwargs = {\n \"path\": None,\n \"prefix\": \"variance_explained\",\n \"dpi\": None,\n \"ext\": \"pdf\",\n \"transparent\": True,\n \"close\": True,\n \"verbose\": True,\n }\n s_kwargs = update_dict(s_kwargs, save_kwargs)\n\n save_fig(**s_kwargs)\n elif save_show_or_return == \"show\":\n plt.tight_layout()\n plt.show()\n elif save_show_or_return == \"return\":\n return ax\n\n\ndef biplot(\n adata: AnnData,\n pca_components: Sequence[int] = [0, 1],\n pca_key: str = \"X_pca\",\n loading_key: str = \"PCs\",\n figsize: tuple = (6, 4),\n scale_pca_embedding: bool = False,\n draw_pca_embedding: bool = False,\n save_show_or_return: str = \"show\",\n save_kwargs: dict = {},\n ax: Union[matplotlib.axes._subplots.SubplotBase, None] = None,\n):\n \"\"\"A biplot overlays a score plot and a loadings plot in a single graph. In such a plot, points are the projected\n observations; vectors are the projected variables. If the data are well-approximated by the first two principal\n components, a biplot enables you to visualize high-dimensional data by using a two-dimensional graph. See more at:\n https://blogs.sas.com/content/iml/2019/11/06/what-are-biplots.html\n\n In general, the score plot and the loadings plot will have different scales. 
Consequently, you need to rescale the\n vectors or observations (or both) when you overlay the score and loadings plots. There are four common choices of\n scaling. Each scaling emphasizes certain geometric relationships between pairs of observations (such as distances),\n between pairs of variables (such as angles), or between observations and variables. This article discusses the\n geometry behind two-dimensional biplots and shows how biplots enable you to understand relationships in multivariate\n data.\n\n Parameters\n ----------\n adata:\n An Annodata object that has pca and loading information prepared.\n pca_components:\n The pca components that will be used to draw the biplot.\n pca_key:\n A key to the pca embedding matrix, in `.obsm`.\n loading_key:\n A key to the pca loading matrix, in either `.uns` or `.obsm`.\n figsize:\n The figure size.\n scale_pca_embedding:\n Whether to scale the pca embedding.\n draw_pca_embedding:\n Whether to draw the pca embedding.\n save_show_or_return: {'show', 'save', 'return'} (default: `show`)\n Whether to save, show or return the figure.\n save_kwargs: `dict` (default: `{}`)\n A dictionary that will passed to the save_fig function. By default it is an empty dictionary and the\n save_fig function will use the {\"path\": None, \"prefix\": 'biplot', \"dpi\": None, \"ext\": 'pdf',\n \"transparent\": True, \"close\": True, \"verbose\": True} as its parameters. 
Otherwise you can provide a\n dictionary that properly modify those keys according to your needs.\n ax\n An ax where the biplot will be appended to.\n\n Returns\n -------\n If save_show_or_return is not `return`, return nothing but plot or save the biplot; otherwise return an axes\n with the biplot in it.\n \"\"\"\n\n import matplotlib.pyplot as plt\n\n if loading_key in adata.uns.keys():\n PCs = adata.uns[loading_key]\n elif loading_key in adata.varm.keys():\n PCs = adata.varm[loading_key]\n else:\n raise Exception(f\"No PC matrix {loading_key} found in neither .uns nor .varm.\")\n\n # rotation matrix\n xvector = PCs[:, pca_components[0]]\n yvector = PCs[:, pca_components[1]]\n\n # pca components\n xs = adata.obsm[pca_key][:, pca_components[0]]\n ys = adata.obsm[pca_key][:, pca_components[1]]\n\n # scale pca component\n if scale_pca_embedding:\n scalex = 1.0 / (xs.max() - xs.min())\n scaley = 1.0 / (ys.max() - ys.min())\n else:\n scalex, scaley = 1, 1\n\n genes = adata.var_names[adata.var.use_for_pca]\n\n if ax is None:\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n for i in range(len(xvector)):\n # arrows project features, e.g. 
genes, as vectors onto PC axes\n ax.arrow(0, 0, xvector[i] * max(xs), yvector[i] * max(ys), color=\"r\", width=0.0005, head_width=0.0025)\n ax.text(xvector[i] * max(xs) * 1.01, yvector[i] * max(ys) * 1.01, genes[i], color=\"r\")\n\n ax.set_xlabel(\"PC\" + str(pca_components[0]))\n ax.set_ylabel(\"PC\" + str(pca_components[1]))\n if draw_pca_embedding:\n for i in range(len(xs)):\n # circles project cells\n ax.plot(xs[i] * scalex, ys[i] * scaley, \"b\", alpha=0.1)\n ax.text(xs[i] * scalex * 1.01, ys[i] * scaley * 1.01, list(adata.obs.cluster)[i], color=\"b\", alpha=0.1)\n\n if save_show_or_return == \"save\":\n s_kwargs = {\n \"path\": None,\n \"prefix\": \"biplot\",\n \"dpi\": None,\n \"ext\": \"pdf\",\n \"transparent\": True,\n \"close\": True,\n \"verbose\": True,\n }\n s_kwargs = update_dict(s_kwargs, save_kwargs)\n\n save_fig(**s_kwargs)\n elif save_show_or_return == \"show\":\n plt.tight_layout()\n plt.show()\n else:\n return ax\n\n\ndef loading(\n adata: AnnData,\n n_pcs: int = 10,\n loading_key: str = \"PCs\",\n n_top_genes: int = 10,\n ncol: int = 5,\n figsize: tuple = (6, 4),\n save_show_or_return: str = \"show\",\n save_kwargs: dict = {},\n):\n \"\"\"Plot the top absolute pca loading genes.\n\n Red text are positive loading genes while black negative loading genes.\n\n Parameters\n ----------\n adata:\n An Annodata object that has pca and loading information prepared.\n n_pcs:\n Number of pca.\n loading_key:\n A key to the pca loading matrix, in either `.uns` or `.obsm`.\n n_top_genes:\n Number of top genes with highest absolute loading score.\n ncol:\n Number of panels on the resultant figure.\n figsize:\n Figure size.\n save_show_or_return: {'show', 'save', 'return'} (default: `show`)\n Whether to save, show or return the figure.\n save_kwargs: `dict` (default: `{}`)\n A dictionary that will passed to the save_fig function. 
By default it is an empty dictionary and the\n save_fig function will use the {\"path\": None, \"prefix\": 'biplot', \"dpi\": None, \"ext\": 'pdf',\n \"transparent\": True, \"close\": True, \"verbose\": True} as its parameters. Otherwise you can provide a\n dictionary that properly modify those keys according to your needs.\n\n Returns\n -------\n If save_show_or_return is not `return`, return nothing but plot or save the biplot; otherwise return an axes\n with the loading plot in it.\n \"\"\"\n\n import matplotlib.pyplot as plt\n\n if loading_key in adata.uns.keys():\n PCs = adata.uns[loading_key]\n elif loading_key in adata.varm.keys():\n PCs = adata.varm[loading_key]\n else:\n raise Exception(f\"No PC matrix {loading_key} found in neither .uns nor .varm.\")\n\n if n_pcs is None:\n n_pcs = PCs.shape[1]\n\n x = np.arange(n_top_genes)\n genes = adata.var_names[adata.var.use_for_pca]\n\n nrow, ncol = int(n_pcs / ncol), min([ncol, n_pcs])\n fig, axes = plt.subplots(nrow, ncol, figsize=(figsize[0] * ncol, figsize[1] * nrow))\n\n for i in np.arange(n_pcs):\n cur_row, cur_col = int(i / ncol), i % ncol\n\n cur_pc = PCs[:, i]\n cur_sign = np.sign(cur_pc)\n cur_pc = np.abs(cur_pc)\n sort_ind, sort_val = np.argsort(cur_pc)[::-1], np.sort(cur_pc)[::-1]\n axes[cur_row, cur_col].scatter(x, sort_val[: len(x)])\n for j in x:\n axes[cur_row, cur_col].text(\n x[j], sort_val[j] * 1.01, genes[sort_ind[j]], color=\"r\" if cur_sign[sort_ind[j]] > 0 else \"k\"\n )\n\n axes[cur_row, cur_col].set_title(\"PC \" + str(i))\n\n if save_show_or_return == \"save\":\n s_kwargs = {\n \"path\": None,\n \"prefix\": \"loading\",\n \"dpi\": None,\n \"ext\": \"pdf\",\n \"transparent\": True,\n \"close\": True,\n \"verbose\": True,\n }\n s_kwargs = update_dict(s_kwargs, save_kwargs)\n\n save_fig(**s_kwargs)\n elif save_show_or_return == \"show\":\n plt.tight_layout()\n plt.show()\n else:\n return axes\n\n\ndef feature_genes(\n adata: AnnData,\n layer: str = \"X\",\n mode: Union[None, str] = None,\n 
figsize: tuple = (4, 3),\n save_show_or_return: str = \"show\",\n save_kwargs: dict = {},\n):\n \"\"\"Plot selected feature genes on top of the mean vs. dispersion scatterplot.\n\n Parameters\n ----------\n adata: :class:`~anndata.AnnData`\n AnnData object\n layer: `str` (default: `X`)\n The data from a particular layer (include X) used for making the feature gene plot.\n mode: None or `str` (default: `None`)\n The method to select the feature genes (can be either `dispersion`, `gini` or `SVR`).\n figsize: `string` (default: (4, 3))\n Figure size of each facet.\n save_show_or_return: {'show', 'save', 'return'} (default: `show`)\n Whether to save, show or return the figure.\n save_kwargs: `dict` (default: `{}`)\n A dictionary that will passed to the save_fig function. By default it is an empty dictionary and the\n save_fig function will use the {\"path\": None, \"prefix\": 'feature_genes', \"dpi\": None, \"ext\": 'pdf',\n \"transparent\": True, \"close\": True, \"verbose\": True} as its parameters. 
Otherwise you can provide a\n dictionary that properly modify those keys according to your needs.\n\n Returns\n -------\n Nothing but plots the selected feature genes via the mean, CV plot.\n \"\"\"\n\n import matplotlib.pyplot as plt\n\n mode = adata.uns[\"feature_selection\"] if mode is None else mode\n\n layer = DynamoAdataKeyManager.get_available_layer_keys(adata, layer, include_protein=False)[0]\n\n uns_store_key = None\n if mode == \"dispersion\":\n uns_store_key = \"dispFitInfo\" if layer in [\"raw\", \"X\"] else layer + \"_dispFitInfo\"\n\n table = top_table(adata, layer)\n x_min, x_max = (\n np.nanmin(table[\"mean_expression\"]),\n np.nanmax(table[\"mean_expression\"]),\n )\n elif mode == \"SVR\":\n prefix = \"\" if layer == \"X\" else layer + \"_\"\n uns_store_key = \"velocyto_SVR\" if layer == \"raw\" or layer == \"X\" else layer + \"_velocyto_SVR\"\n\n if not np.all(pd.Series([prefix + \"log_m\", prefix + \"score\"]).isin(adata.var.columns)):\n raise Exception(\"Looks like you have not run support vector machine regression yet, try run SVRs first.\")\n else:\n table = adata.var.loc[:, [prefix + \"log_m\", prefix + \"log_cv\", prefix + \"score\"]]\n table = table.loc[\n np.isfinite(table[prefix + \"log_m\"]) & np.isfinite(table[prefix + \"log_cv\"]),\n :,\n ]\n x_min, x_max = (\n np.nanmin(table[prefix + \"log_m\"]),\n np.nanmax(table[prefix + \"log_m\"]),\n )\n\n ordering_genes = adata.var[\"use_for_pca\"] if \"use_for_pca\" in adata.var.columns else None\n\n mu_linspace = np.linspace(x_min, x_max, num=1000)\n fit = (\n adata.uns[uns_store_key][\"disp_func\"](mu_linspace)\n if mode == \"dispersion\"\n else adata.uns[uns_store_key][\"SVR\"](mu_linspace.reshape(-1, 1))\n )\n\n plt.figure(figsize=figsize)\n plt.plot(mu_linspace, fit, alpha=0.4, color=\"r\")\n valid_ind = (\n table.index.isin(ordering_genes.index[ordering_genes])\n if ordering_genes is not None\n else np.ones(table.shape[0], dtype=bool)\n )\n\n valid_disp_table = table.iloc[valid_ind, :]\n 
if mode == \"dispersion\":\n ax = plt.scatter(\n valid_disp_table[\"mean_expression\"],\n valid_disp_table[\"dispersion_empirical\"],\n s=3,\n alpha=1,\n color=\"xkcd:red\",\n )\n elif mode == \"SVR\":\n ax = plt.scatter(\n valid_disp_table[prefix + \"log_m\"],\n valid_disp_table[prefix + \"log_cv\"],\n s=3,\n alpha=1,\n color=\"xkcd:red\",\n )\n\n neg_disp_table = table.iloc[~valid_ind, :]\n\n if mode == \"dispersion\":\n ax = plt.scatter(\n neg_disp_table[\"mean_expression\"],\n neg_disp_table[\"dispersion_empirical\"],\n s=3,\n alpha=0.5,\n color=\"xkcd:grey\",\n )\n elif mode == \"SVR\":\n ax = plt.scatter(\n neg_disp_table[prefix + \"log_m\"],\n neg_disp_table[prefix + \"log_cv\"],\n s=3,\n alpha=0.5,\n color=\"xkcd:grey\",\n )\n\n # plt.xlim((0, 100))\n if mode == \"dispersion\":\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n plt.xlabel(\"Mean (log)\")\n plt.ylabel(\"Dispersion (log)\") if mode == \"dispersion\" else plt.ylabel(\"CV (log)\")\n\n if save_show_or_return == \"save\":\n s_kwargs = {\n \"path\": None,\n \"prefix\": \"feature_genes\",\n \"dpi\": None,\n \"ext\": \"pdf\",\n \"transparent\": True,\n \"close\": True,\n \"verbose\": True,\n }\n s_kwargs = update_dict(s_kwargs, save_kwargs)\n\n save_fig(**s_kwargs)\n elif save_show_or_return == \"show\":\n plt.tight_layout()\n plt.show()\n elif save_show_or_return == \"return\":\n return ax\n\n\ndef exp_by_groups(\n adata: AnnData,\n genes: list,\n layer: Optional[str] = None,\n group: Optional[str] = None,\n use_ratio: bool = False,\n use_smoothed: bool = True,\n log: bool = True,\n angle: int = 0,\n re_order: bool = True,\n figsize: tuple = (4, 3),\n save_show_or_return: str = \"show\",\n save_kwargs: dict = {},\n):\n \"\"\"Plot the (labeled) expression values of genes across different groups (time points).\n\n This function can be used as a sanity check about the labeled species to see whether they increase or decrease\n across time for a kinetic or degradation experiment, etc.\n\n Parameters\n 
----------\n adata: :class:`~anndata.AnnData`\n an Annodata object\n genes: `list`\n The list of genes that you want to plot the gene expression.\n group: `string` (default: None)\n Which group information to plot aganist (as elements on x-axis). Default is None, or no groups will be used.\n Normally you should supply the column that indicates the time related to the labeling experiment. For\n example, it can be either the labeling time for a kinetic experiment or the chase time for a degradation\n experiment.\n use_ratio: `bool` (default: False)\n Whether to plot the fraction of expression (for example NTR, new to total ratio) over groups.\n use_smoothed: `bool` (default: 'True')\n Whether to use the smoothed data as gene expression.\n log: `bool` (default: `True`)\n Whether to log1p transform the expression data.\n angle: `float` (default: `0`)\n The angle to rotate the xtick labels for the purpose of avoiding overlapping between text.\n re_order: `bool` (default: `True`)\n Whether to reorder categories before drawing groups on the x-axis.\n figsize: `string` (default: (4, 3))\n Figure size of each facet.\n save_show_or_return: {'show', 'save', 'return'} (default: `show`)\n Whether to save, show or return the figure.\n save_kwargs: `dict` (default: `{}`)\n A dictionary that will passed to the save_fig function. By default it is an empty dictionary and the\n save_fig function will use the {\"path\": None, \"prefix\": 'exp_by_groups', \"dpi\": None, \"ext\": 'pdf',\n \"transparent\": True, \"close\": True, \"verbose\": True} as its parameters. 
Otherwise you can provide a\n dictionary that properly modify those keys according to your needs.\n\n Returns\n -------\n A violin plot that shows each gene's expression (row) across different groups (time), produced by seaborn.\n \"\"\"\n\n import matplotlib.pyplot as plt\n import seaborn as sns\n\n valid_genes = adata.var_names.intersection(genes)\n if len(valid_genes) == 0:\n raise ValueError(\"The adata object doesn't include any gene from the list you provided!\")\n if group is not None and group not in adata.obs.keys():\n raise ValueError(f\"The group {group} is not existed in your adata object!\")\n\n (\n has_splicing,\n has_labeling,\n splicing_labeling,\n has_protein,\n ) = detect_experiment_datatype(adata)\n if (has_splicing + has_labeling) == 0:\n layer = \"X\" if layer is None else layer\n elif has_splicing and not has_labeling:\n layer = \"X_spliced\" if layer is None else layer\n elif not has_splicing and has_labeling:\n layer = \"X_new\" if layer is None else layer\n elif has_splicing and has_labeling:\n layer = \"X_new\" if layer is None else layer\n\n if use_smoothed:\n mapper = get_mapper()\n layer = mapper[layer]\n\n if layer != \"X\" and layer not in adata.layers.keys():\n raise ValueError(f\"The layer {layer} is not existed in your adata object!\")\n\n exprs = adata[:, valid_genes].X if layer == \"X\" else adata[:, valid_genes].layers[layer]\n exprs = exprs.A if issparse(exprs) else exprs\n if use_ratio:\n (\n has_splicing,\n has_labeling,\n splicing_labeling,\n has_protein,\n ) = detect_experiment_datatype(adata)\n if has_labeling:\n if layer.startswith(\"X_\") or layer.startswith(\"M_\"):\n tot = (\n adata[:, valid_genes].layers[mapper[\"X_total\"]]\n if use_smoothed\n else adata[:, valid_genes].layers[\"X_total\"]\n )\n tot = tot.A if issparse(tot) else tot\n exprs = exprs / tot\n else:\n exprs = exprs\n else:\n if layer.startswith(\"X_\") or layer.startswith(\"M_\"):\n tot = (\n adata[:, valid_genes].layers[mapper[\"X_unspliced\"]]\n + 
adata[:, valid_genes].layers[mapper[\"X_spliced\"]]\n if use_smoothed\n else adata[:, valid_genes].layers[\"X_unspliced\"] + adata[:, valid_genes].layers[\"X_spliced\"]\n )\n tot = tot.A if issparse(tot) else tot\n exprs = exprs / tot\n else:\n exprs = exprs\n\n df = (\n pd.DataFrame(np.log1p(exprs), index=adata.obs_names, columns=valid_genes)\n if log\n else pd.DataFrame(np.log1p(exprs), index=adata.obs_names, columns=valid_genes)\n )\n\n if group is not None and group in adata.obs.columns:\n df[\"group\"] = adata.obs[group]\n res = df.melt(id_vars=[\"group\"])\n else:\n df[\"group\"] = 1\n res = df.melt(id_vars=[\"group\"])\n\n if res[\"group\"].dtype.name == \"category\":\n xticks = res[\"group\"].cat.categories.sort_values() if re_order else res[\"group\"].cat.categories\n else:\n xticks = np.sort(res[\"group\"].unique())\n\n kws = dict(order=xticks)\n\n # https://wckdouglas.github.io/2016/12/seaborn_annoying_title\n g = sns.FacetGrid(\n res,\n row=\"variable\",\n sharex=False,\n sharey=False,\n margin_titles=True,\n hue=\"variable\",\n height=figsize[1],\n aspect=figsize[0] / figsize[1],\n )\n g.map_dataframe(sns.violinplot, x=\"group\", y=\"value\", **kws)\n g.map_dataframe(sns.pointplot, x=\"group\", y=\"value\", color=\"k\", **kws)\n if group is None:\n g.set_xticklabels([])\n g.set(xticks=[])\n else:\n g.set_xticklabels(rotation=angle)\n\n [plt.setp(ax.texts, text=\"\") for ax in g.axes.flat] # remove the original texts\n # important to add this before setting titles\n g.set_titles(row_template=\"{row_name}\", col_template=\"{col_name}\")\n\n if log:\n g.set_ylabels(\"log(Expression + 1)\")\n else:\n g.set_ylabels(\"Expression\")\n\n g.set_xlabels(\"\")\n g.set(ylim=(0, None))\n\n if save_show_or_return == \"save\":\n s_kwargs = {\n \"path\": None,\n \"prefix\": \"exp_by_groups\",\n \"dpi\": None,\n \"ext\": \"pdf\",\n \"transparent\": True,\n \"close\": True,\n \"verbose\": True,\n }\n s_kwargs = update_dict(s_kwargs, save_kwargs)\n\n 
save_fig(**s_kwargs)\n elif save_show_or_return == \"show\":\n plt.tight_layout()\n plt.show()\n elif save_show_or_return == \"return\":\n return g\n\n\ndef highest_frac_genes(\n adata: AnnData,\n n_top: int = 30,\n gene_prefix_list: list = None,\n show_individual_prefix_gene: bool = False,\n show: Optional[bool] = True,\n save_path: str = None,\n ax: Optional[Axes] = None,\n gene_annotations: Optional[list] = None,\n gene_annotation_key: str = \"use_for_pca\",\n log: bool = False,\n store_key: str = \"highest_frac_genes\",\n orient: str = \"v\",\n figsize: Union[list, None] = None,\n layer: Union[str, None] = None,\n title: Union[str, None] = None,\n v_rotation: float = 35,\n **kwargs,\n):\n \"\"\"[summary]\n\n Parameters\n ----------\n adata : AnnData\n [description]\n n_top : int, optional\n [description], by default 30\n gene_prefix_list : list, optional\n A list of gene name prefix, by default None\n show_individual_prefix_gene: bool, optional\n [description], by default False\n show : Optional[bool], optional\n [description], by default True\n save_path : str, optional\n [description], by default None\n ax : Optional[Axes], optional\n [description], by default None\n gene_annotations : Optional[list], optional\n Annotations for genes, or annotations for gene prefix subsets, by default None\n gene_annotation_key : str, optional\n gene annotations key in adata.var, by default \"use_for_pca\".\n This option is not available for gene_prefix_list and thus users should\n pass gene_annotations argument for the prefix list.\n log : bool, optional\n [description], by default False\n store_key : str, optional\n [description], by default \"expr_percent\"\n\n Returns\n -------\n [type]\n [description]\n \"\"\"\n import seaborn as sns\n import matplotlib.pyplot as plt\n\n if ax is None:\n length = n_top * 0.4\n if figsize is None:\n if orient == \"v\":\n fig, ax = plt.subplots(figsize=(length, 5))\n else:\n fig, ax = plt.subplots(figsize=(7, length))\n else:\n fig, ax = 
plt.subplots(figsize=figsize)\n if log:\n ax.set_xscale(\"log\")\n\n adata = pp.highest_frac_genes(\n adata,\n store_key=store_key,\n n_top=n_top,\n layer=layer,\n gene_prefix_list=gene_prefix_list,\n show_individual_prefix_gene=show_individual_prefix_gene,\n )\n if adata is None:\n # something wrong with user input or compute_top_genes_df\n return\n top_genes_df, selected_indices = (\n adata.uns[store_key][\"top_genes_df\"],\n adata.uns[store_key][\"selected_indices\"],\n )\n\n # TODO use top genes_df dataframe; however this logic currently\n # does not fit subset logics and may fail tests.\n\n # main_info(\"Using prexisting top_genes_df in .uns.\")\n # top_genes_df = adata.uns[\"top_genes_df\"]\n\n # draw plots\n sns.boxplot(\n data=top_genes_df,\n orient=orient,\n ax=ax,\n fliersize=1,\n showmeans=True,\n **kwargs,\n )\n\n if gene_annotations is None:\n if gene_annotation_key in adata.var:\n gene_annotations = adata.var[gene_annotation_key][selected_indices]\n\n else:\n main_warning(\n \"%s not in adata.var, ignoring the gene annotation key when plotting\",\n indent_level=2,\n )\n\n if orient == \"v\":\n ax.set_xticklabels(ax.get_xticklabels(), rotation=v_rotation, ha=\"right\")\n ax.set_xlabel(\"genes\")\n ax.set_ylabel(\"fractions of total counts\")\n\n if gene_annotations is not None:\n ax2 = ax.twiny()\n ax2.set_xlim(ax.get_ylim())\n ax2.set_xticks(ax.get_yticks())\n ax2.set_xticks(list(range(len(gene_annotations))))\n ax2.set_xticklabels(gene_annotations, rotation=v_rotation, ha=\"left\")\n ax2.set_xlabel(gene_annotation_key)\n elif orient == \"h\":\n ax.set_xlabel(\"fractions of total counts\")\n ax.set_ylabel(\"genes\")\n if gene_annotations is not None:\n ax2 = ax.twinx()\n ax2.set_ylim(ax.get_ylim())\n ax2.set_yticks(ax.get_yticks())\n ax2.set_yticks(list(range(len(gene_annotations))))\n ax2.set_yticklabels(gene_annotations)\n ax2.set_ylabel(gene_annotation_key)\n else:\n raise NotImplementedError()\n\n if title is None:\n if layer is None:\n 
ax.set_title(\"Rank by gene expression fraction\")\n else:\n ax.set_title(\"Rank by %s fraction\" % layer)\n if show:\n plt.show()\n\n if save_path:\n s_kwargs = {\n \"path\": save_path,\n \"prefix\": \"plot_highest_gene\",\n \"dpi\": None,\n \"ext\": \"pdf\",\n \"transparent\": True,\n \"close\": True,\n \"verbose\": True,\n }\n save_fig(**s_kwargs)\n\n return ax\n # if save_show_or_return == \"save\":\n # s_kwargs = {\n # \"path\": save_path,\n # \"prefix\": \"plot_highest_gene\",\n # \"dpi\": None,\n # \"ext\": \"pdf\",\n # \"transparent\": True,\n # \"close\": True,\n # \"verbose\": True,\n # }\n # s_kwargs.update(kwargs)\n # save_fig(save_path, **s_kargs)\n # elif save_show_or_return == \"show\":\n # plt.show()\n # else:\n # return ax\n", "from tqdm import tqdm\nimport numpy as np\nfrom scipy.sparse import issparse\nfrom ...tools.utils import (\n find_extreme,\n elem_prod,\n calc_R2,\n calc_norm_loglikelihood,\n)\nfrom ..csc.utils_velocity import fit_linreg, fit_stochastic_linreg\n\n\ndef fit_slope_stochastic(S, U, US, S2, perc_left=None, perc_right=5):\n n_var = S.shape[0]\n k, all_r2, all_logLL = np.zeros(n_var), np.zeros(n_var), np.zeros(n_var)\n\n for i, s, u, us, s2 in tqdm(\n zip(np.arange(n_var), S, U, US, S2),\n \"Estimate slope k via linear regression.\",\n ):\n u = u.A.flatten() if issparse(u) else u.flatten()\n s = s.A.flatten() if issparse(s) else s.flatten()\n us = us.A.flatten() if issparse(us) else us.flatten()\n s2 = s2.A.flatten() if issparse(s2) else s2.flatten()\n\n mask = find_extreme(u, s, perc_left=perc_left, perc_right=perc_right)\n k[i] = fit_stochastic_linreg(u[mask], s[mask], us[mask], s2[mask])\n\n all_r2[i] = calc_R2(s, u, k[i])\n all_logLL[i] = calc_norm_loglikelihood(s, u, k[i])\n\n return k, 0, all_r2, all_logLL\n\n\ndef fit_labeling_synthesis(new, total, t, intercept=False, perc_left=None, perc_right=None):\n T = np.unique(t)\n K = np.zeros(len(T))\n R2 = np.zeros(len(T))\n for i in range(len(T)):\n n = new[t == T[i]]\n r = 
total[t == T[i]]\n eind = find_extreme(n, r, perc_left=perc_left, perc_right=perc_right)\n K[i], _, R2[i], _ = fit_linreg(r[eind], n[eind], intercept=intercept)\n return K, R2\n\n\ndef compute_gamma_synthesis(K, T):\n gamma, _, r2, _ = fit_linreg(T, -np.log(1 - K))\n return gamma, r2\n\n\ndef compute_velocity_synthesis(N, R, gamma, t):\n k = 1 - np.exp(-np.einsum(\"i,j->ij\", t, gamma))\n V = elem_prod(gamma, N) / k - elem_prod(gamma, R)\n return V\n\n\ndef lin_reg_gamma_synthesis(R, N, time, perc_right=100):\n n_var = R.shape[0]\n mean_R2, gamma, r2 = np.zeros(n_var), np.zeros(n_var), np.zeros(n_var)\n K_list, K_fit_list = [None] * n_var, [None] * n_var\n for i, r, n in tqdm(\n zip(np.arange(n_var), R, N),\n \"Estimate gamma via linear regression of t vs. -ln(1-K)\",\n ):\n r = r.A.flatten() if issparse(r) else r.flatten()\n n = n.A.flatten() if issparse(n) else n.flatten()\n\n K_list[i], R2 = fit_labeling_synthesis(n, r, time, perc_right=perc_right)\n gamma[i], r2[i] = compute_gamma_synthesis(K_list[i], np.unique(time))\n K_fit_list[i] = np.unique(time) * gamma[i]\n mean_R2[i] = np.mean(R2)\n\n return gamma, r2, K_list, mean_R2, K_fit_list\n", "import numpy as np\nfrom typing import Union\nimport anndata\nfrom .scatters import (\n scatters,\n docstrings,\n)\n\nfrom ..tl import compute_smallest_distance\nfrom ..dynamo_logger import main_critical, main_info, main_finish_progress, main_log_time, main_warning\n\ndocstrings.delete_params(\"scatters.parameters\", \"adata\", \"basis\", \"figsize\")\n\n\[email protected]_indent(4)\ndef space(\n adata: anndata.AnnData,\n color: Union[list, str, None] = None,\n genes: Union[list, None] = [],\n gene_cmaps=None,\n space: str = \"spatial\",\n width: float = 6,\n marker: str = \".\",\n pointsize: Union[float, None] = None,\n dpi: int = 100,\n ps_sample_num: int = 1000,\n alpha: float = 0.8,\n stack_genes: bool = False,\n stack_genes_threshold: float = 0.01,\n stack_colors_legend_size: int = 10,\n figsize=None,\n *args,\n 
**kwargs\n):\n \"\"\"\\\n Scatter plot for physical coordinates of each cell.\n\n Parameters\n ----------\n adata:\n an Annodata object that contain the physical coordinates for each bin/cell, etc.\n genes:\n The gene list that will be used to plot the gene expression on the same scatter plot. Each gene will have a\n different color. Can be a single gene name string and we will convert it to a list.\n color: `string` (default: `ntr`)\n Any or any list of column names or gene names, etc. that will be used for coloring cells. If `color` is not None, stack_genes will be disabled automatically because `color` can contain non numerical values.\n space: `str`\n The key to space coordinates.\n stack_genes:\n whether to show all gene plots on the same plot\n stack_genes_threshold:\n lower bound of gene values that will be drawn on the plot.\n stack_colors_legend_size:\n control the size of legend when stacking genes\n alpha: `float`\n The alpha value of the scatter points.\n width: `int`\n marker:\n a string representing some marker from matplotlib\n https://matplotlib.org/stable/api/markers_api.html#module-matplotlib.markers\n pointsize: `float`\n The size of the points on the scatter plot.\n dpi: `float`, (default: 100.0)\n The resolution of the figure in dots-per-inch. Dots per inches (dpi) determines how many pixels the figure\n comprises. dpi is different from ppi or points per inches. Note that most elements like lines, markers,\n texts have a size given in points so you can convert the points to inches. Matplotlib figures use Points per\n inch (ppi) of 72. A line with thickness 1 point will be 1./72. inch wide. A text with fontsize 12 points\n will be 12./72. inch heigh. Of course if you change the figure size in inches, points will not change, so a\n larger figure in inches still has the same size of the elements.Changing the figure size is thus like taking\n a piece of paper of a different size. 
Doing so, would of course not change the width of the line drawn with\n the same pen. On the other hand, changing the dpi scales those elements. At 72 dpi, a line of 1 point size\n is one pixel strong. At 144 dpi, this line is 2 pixels strong. A larger dpi will therefore act like a\n magnifying glass. All elements are scaled by the magnifying power of the lens. see more details at answer 2\n by @ImportanceOfBeingErnest:\n https://stackoverflow.com/questions/47633546/relationship-between-dpi-and-figure-size\n ps_sample_num: `int`\n The number of bins / cells that will be sampled to estimate the distance between different bin / cells.\n\n %(scatters.parameters.no_adata|basis|figsize)s\n\n Returns\n -------\n plots gene or cell feature of the adata object on the physical spatial coordinates.\n \"\"\"\n main_info(\"Plotting spatial info on adata\")\n main_log_time()\n if color is not None and stack_genes:\n main_warning(\n \"Set `stack_genes` to False because `color` argument cannot be used with stack_genes. If you would like to stack genes (or other numeical values), please pass gene expression like column names into `gene` argument.\"\n )\n stack_genes = False\n\n genes = [genes] if type(genes) is str else list(genes)\n # concatenate genes and colors for scatters plot\n if color is not None and genes is not None:\n color = [color] if type(color) is str else list(color)\n genes.extend(color)\n\n show_colorbar = True\n if stack_genes:\n main_warning(\"disable side colorbar due to colorbar scale (numeric tick) related issue.\")\n show_colorbar = False\n\n if genes is None or (len(genes) == 0):\n if color is not None:\n genes = color\n else:\n main_critical(\"No genes provided. 
Please check your argument passed in.\")\n return\n if \"X_\" + space in adata.obsm_keys():\n space_key = space\n elif space in adata.obsm_keys():\n if space.startswith(\"X_\"):\n space_key = space.split(\"X_\")[1]\n else:\n # scatters currently will append \"X_\" to the basis, so we need to create the `X_{space}` key.\n # In future, extend scatters to directly plot coordinates in space key without append \"X_\"\n if \"X_\" + space not in adata.obsm_keys():\n adata.obsm[\"X_\" + space] = adata.obsm[space]\n space_key = space\n\n ptp_vec = adata.obsm[\"X_\" + space_key].ptp(0)\n # calculate the figure size based on the width and the ratio between width and height\n # from the physical coordinate.\n if figsize is None:\n figsize = (width, ptp_vec[1] / ptp_vec[0] * width + 0.3)\n\n # calculate point size based on minimum radius\n if pointsize is None:\n pointsize = compute_smallest_distance(adata.obsm[\"X_\" + space_key], sample_num=ps_sample_num)\n # here we will scale the point size by the dpi and the figure size in inch.\n pointsize *= figsize[0] / ptp_vec[0] * dpi\n # meaning of s in scatters:\n # https://stackoverflow.com/questions/14827650/pyplot-scatter-plot-marker-size/47403507#47403507\n # Note that np.sqrt(adata.shape[0]) / 16000.0 is used in pl.scatters\n pointsize = pointsize ** 2 * np.sqrt(adata.shape[0]) / 16000.0\n\n main_info(\"estimated point size for plotting each cell in space: %f\" % (pointsize))\n\n # here we should pass different point size, type (square or hexogon, etc), etc.\n res = scatters(\n adata,\n marker=marker,\n basis=space_key,\n color=genes,\n figsize=figsize,\n pointsize=pointsize,\n dpi=dpi,\n alpha=alpha,\n stack_colors=stack_genes,\n stack_colors_threshold=stack_genes_threshold,\n stack_colors_title=\"stacked spatial genes\",\n show_colorbar=show_colorbar,\n stack_colors_legend_size=stack_colors_legend_size,\n stack_colors_cmaps=gene_cmaps,\n *args,\n **kwargs,\n )\n\n main_finish_progress(\"space plot\")\n return res\n" ]
[ [ "numpy.nanmax", "pandas.Series", "numpy.linspace", "numpy.nanmin", "numpy.cumsum", "pandas.DataFrame", "matplotlib.pyplot.plot", "numpy.any", "numpy.where", "matplotlib.pyplot.tight_layout", "scipy.sparse.issparse", "numpy.arange", "numpy.log1p", "matplotlib.pyplot.figure", "matplotlib.pyplot.xlabel", "numpy.argsort", "matplotlib.pyplot.show", "numpy.array", "numpy.sum", "matplotlib.pyplot.ylabel", "numpy.abs", "matplotlib.pyplot.scatter", "numpy.isfinite", "matplotlib.pyplot.yscale", "matplotlib.pyplot.subplots", "numpy.sort", "numpy.ones", "numpy.sign", "matplotlib.pyplot.setp", "matplotlib.pyplot.xscale" ], [ "numpy.log", "scipy.sparse.issparse", "numpy.einsum", "numpy.unique", "numpy.arange", "numpy.mean", "numpy.zeros" ], [ "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shushu-qin/deeponet
[ "5bbe066279bba055ad80e04c364140363c87634a" ]
[ "seq2seq/learner/integrator/hamiltonian/stormer_verlet.py" ]
[ "\"\"\"\r\n@author: jpzxshi\r\n\"\"\"\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom ...utils import grad\r\n\r\nclass SV:\r\n '''Stormer-Verlet scheme.\r\n '''\r\n def __init__(self, H, dH, iterations=10, order=4, N=1):\r\n '''\r\n H: H(x) or None\r\n dH: dp,dq=dH(p,q) or None\r\n ``iterations`` is encouraged to be 1 if H is separable.\r\n '''\r\n self.H = H\r\n self.dH = dH\r\n self.iterations = iterations\r\n self.order = order\r\n self.N = N\r\n \r\n def __sv2(self, x, h):\r\n '''Order 2.\r\n x: np.ndarray or torch.Tensor of shape [dim] or [num, dim].\r\n h: int\r\n '''\r\n dim = x.shape[-1] if isinstance(x, np.ndarray) else x.size(-1)\r\n d = int(dim / 2)\r\n p0, q0 = (x[..., :d], x[..., d:])\r\n p1, q1 = p0, q0\r\n if callable(self.dH):\r\n for _ in range(self.iterations):\r\n p1 = p0 - h / 2 * self.dH(p1, q0)[1]\r\n q1 = q0 + h / 2 * self.dH(p1, q0)[0]\r\n p2, q2 = p1, q1 \r\n for _ in range(self.iterations):\r\n q2 = q1 + h / 2 * self.dH(p1, q2)[0]\r\n p2 = p1 - h / 2 * self.dH(p1, q2)[1]\r\n return np.hstack([p2, q2]) if isinstance(x, np.ndarray) else torch.cat([p2, q2], dim=-1)\r\n elif isinstance(x, torch.Tensor):\r\n for _ in range(self.iterations):\r\n x = torch.cat([p1, q0], dim=-1).requires_grad_(True)\r\n dH = grad(self.H(x), x, create_graph=False)\r\n p1 = p0 - h / 2 * dH[..., d:]\r\n q1 = q0 + h / 2 * dH[..., :d]\r\n p2, q2 = p1, q1\r\n for _ in range(self.iterations):\r\n x = torch.cat([p1, q2], dim=-1).requires_grad_(True)\r\n dH = grad(self.H(x), x, create_graph=False)\r\n q2 = q1 + h / 2 * dH[..., :d]\r\n p2 = p1 - h / 2 * dH[..., d:]\r\n return torch.cat([p2, q2], dim=-1)\r\n else:\r\n raise ValueError\r\n \r\n def __sv4(self, x, h):\r\n '''Order 4.\r\n '''\r\n r1 = 1 / (2 - 2 ** (1 / 3))\r\n r2 = - 2 ** (1 / 3) / (2 - 2 ** (1 / 3))\r\n return self.__sv2(self.__sv2(self.__sv2(x, r1 * h), r2 * h), r1 * h)\r\n \r\n def __sv6(self, x, h):\r\n '''Order 6\r\n '''\r\n r1 = 1 / (2 - 2 ** (1 / 5))\r\n r2 = - 2 ** (1 / 5) / (2 - 2 ** (1 / 5))\r\n 
return self.__sv4(self.__sv4(self.__sv4(x, r1 * h), r2 * h), r1 * h)\r\n \r\n def solve(self, x, h):\r\n if self.order == 2:\r\n solver = self.__sv2\r\n elif self.order == 4:\r\n solver = self.__sv4\r\n elif self.order == 6:\r\n solver = self.__sv6\r\n else:\r\n raise NotImplementedError\r\n for _ in range(self.N):\r\n x = solver(x, h / self.N)\r\n return x\r\n \r\n def flow(self, x, h, steps):\r\n dim = x.shape[-1] if isinstance(x, np.ndarray) else x.size(-1)\r\n size = len(x.shape) if isinstance(x, np.ndarray) else len(x.size())\r\n X = [x]\r\n for i in range(steps):\r\n X.append(self.solve(X[-1], h))\r\n shape = [steps + 1, dim] if size == 1 else [-1, steps + 1, dim]\r\n return np.hstack(X).reshape(shape) if isinstance(x, np.ndarray) else torch.cat(X, dim=-1).view(shape)" ]
[ [ "numpy.hstack", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
akhilvasvani/machinelearningbasics
[ "5d1a05add8b6b316011cb3e1db4144940161e2b3" ]
[ "supervised_learning/examples/adaboost.py" ]
[ "from __future__ import division, print_function\nimport numpy as np\nfrom sklearn import datasets\n\n# Import helper functions\nfrom mlfromscratch.supervised_learning import Adaboost\nfrom mlfromscratch.utils.data_manipulation import train_test_split\nfrom mlfromscratch.utils.data_operation import accuracy_score\nfrom mlfromscratch.utils import Plot\n\n\ndef main():\n data = datasets.load_digits()\n X = data.data\n y = data.target\n\n digit1 = 1\n digit2 = 8\n idx = np.append(np.where(y == digit1)[0], np.where(y == digit2)[0])\n y = data.target[idx]\n # Change labels to {-1, 1}\n y[y == digit1] = -1\n y[y == digit2] = 1\n X = data.data[idx]\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)\n\n # Adaboost classification with 5 weak classifiers\n clf = Adaboost(n_clf=5)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n\n accuracy = accuracy_score(y_test, y_pred)\n print (\"Accuracy:\", accuracy)\n\n # Reduce dimensions to 2d using pca and plot the results\n Plot().plot_in_2d(X_test, y_pred, title=\"Adaboost\", accuracy=accuracy)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.where", "sklearn.datasets.load_digits" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shraddhazpy/keras
[ "21a78464c191c40a90ed4e3ddfed747ae994703e", "21a78464c191c40a90ed4e3ddfed747ae994703e", "21a78464c191c40a90ed4e3ddfed747ae994703e", "21a78464c191c40a90ed4e3ddfed747ae994703e", "b96518a22bfd92a29811e507dec0b34248a8a3f5" ]
[ "keras/tests/integration_test.py", "keras/preprocessing/image_test.py", "keras/mixed_precision/model_test.py", "keras/engine/keras_tensor_test.py", "keras/layers/serialization.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Integration tests for Keras.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport os\nimport random\n\nimport numpy as np\n\nimport keras\nfrom keras import keras_parameterized\nfrom keras import testing_utils\nfrom keras.layers.legacy_rnn import rnn_cell_impl as rnn_cell\nfrom keras.legacy_tf_layers import base as base_layer\nfrom keras.utils import np_utils\n\n\nclass KerasIntegrationTest(keras_parameterized.TestCase):\n\n def _save_and_reload_model(self, model):\n self.temp_dir = self.get_temp_dir()\n fpath = os.path.join(self.temp_dir,\n 'test_model_%s' % (random.randint(0, 1e7),))\n if tf.executing_eagerly():\n save_format = 'tf'\n else:\n if (not isinstance(model, keras.Sequential) and\n not model._is_graph_network):\n return model # Not supported\n save_format = 'h5'\n model.save(fpath, save_format=save_format)\n model = keras.models.load_model(fpath)\n return model\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass VectorClassificationIntegrationTest(keras_parameterized.TestCase):\n\n def test_vector_classification(self):\n np.random.seed(1337)\n (x_train, y_train), _ = testing_utils.get_test_data(\n train_samples=100,\n test_samples=0,\n input_shape=(10,),\n num_classes=2)\n y_train = 
np_utils.to_categorical(y_train)\n\n model = testing_utils.get_model_from_layers(\n [keras.layers.Dense(16, activation='relu'),\n keras.layers.Dropout(0.1),\n keras.layers.Dense(y_train.shape[-1], activation='softmax')],\n input_shape=x_train.shape[1:])\n model.compile(\n loss='categorical_crossentropy',\n optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),\n metrics=['acc'],\n run_eagerly=testing_utils.should_run_eagerly())\n history = model.fit(x_train, y_train, epochs=10, batch_size=10,\n validation_data=(x_train, y_train),\n verbose=2)\n self.assertGreater(history.history['val_acc'][-1], 0.7)\n _, val_acc = model.evaluate(x_train, y_train)\n self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)\n predictions = model.predict(x_train)\n self.assertEqual(predictions.shape, (x_train.shape[0], 2))\n\n def test_vector_classification_shared_model(self):\n # Test that Sequential models that feature internal updates\n # and internal losses can be shared.\n np.random.seed(1337)\n (x_train, y_train), _ = testing_utils.get_test_data(\n train_samples=100,\n test_samples=0,\n input_shape=(10,),\n num_classes=2)\n y_train = np_utils.to_categorical(y_train)\n\n base_model = testing_utils.get_model_from_layers(\n [keras.layers.Dense(16,\n activation='relu',\n kernel_regularizer=keras.regularizers.l2(1e-5),\n bias_regularizer=keras.regularizers.l2(1e-5)),\n keras.layers.BatchNormalization()],\n input_shape=x_train.shape[1:])\n x = keras.layers.Input(x_train.shape[1:])\n y = base_model(x)\n y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y)\n model = keras.models.Model(x, y)\n model.compile(\n loss='categorical_crossentropy',\n optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),\n metrics=['acc'],\n run_eagerly=testing_utils.should_run_eagerly())\n self.assertLen(model.losses, 2)\n if not tf.executing_eagerly():\n self.assertLen(model.get_updates_for(x), 2)\n history = model.fit(x_train, y_train, epochs=10, batch_size=10,\n 
validation_data=(x_train, y_train),\n verbose=2)\n self.assertGreater(history.history['val_acc'][-1], 0.7)\n _, val_acc = model.evaluate(x_train, y_train)\n self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)\n predictions = model.predict(x_train)\n self.assertEqual(predictions.shape, (x_train.shape[0], 2))\n\n\n@keras_parameterized.run_all_keras_modes\nclass SequentialIntegrationTest(KerasIntegrationTest):\n\n def test_sequential_save_and_pop(self):\n # Test the following sequence of actions:\n # - construct a Sequential model and train it\n # - save it\n # - load it\n # - pop its last layer and add a new layer instead\n # - continue training\n np.random.seed(1337)\n (x_train, y_train), _ = testing_utils.get_test_data(\n train_samples=100,\n test_samples=0,\n input_shape=(10,),\n num_classes=2)\n y_train = np_utils.to_categorical(y_train)\n model = keras.Sequential([\n keras.layers.Dense(16, activation='relu'),\n keras.layers.Dropout(0.1),\n keras.layers.Dense(y_train.shape[-1], activation='softmax')\n ])\n model.compile(\n loss='categorical_crossentropy',\n optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),\n metrics=['acc'],\n run_eagerly=testing_utils.should_run_eagerly())\n model.fit(x_train, y_train, epochs=1, batch_size=10,\n validation_data=(x_train, y_train),\n verbose=2)\n model = self._save_and_reload_model(model)\n\n model.pop()\n model.add(keras.layers.Dense(y_train.shape[-1], activation='softmax'))\n\n model.compile(\n loss='categorical_crossentropy',\n optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),\n metrics=['acc'],\n run_eagerly=testing_utils.should_run_eagerly())\n history = model.fit(x_train, y_train, epochs=10, batch_size=10,\n validation_data=(x_train, y_train),\n verbose=2)\n self.assertGreater(history.history['val_acc'][-1], 0.7)\n model = self._save_and_reload_model(model)\n _, val_acc = model.evaluate(x_train, y_train)\n self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)\n predictions = 
model.predict(x_train)\n self.assertEqual(predictions.shape, (x_train.shape[0], 2))\n\n\n# See b/122473407\n@keras_parameterized.run_all_keras_modes(always_skip_v1=True)\nclass TimeseriesClassificationIntegrationTest(keras_parameterized.TestCase):\n\n @keras_parameterized.run_with_all_model_types\n def test_timeseries_classification(self):\n np.random.seed(1337)\n (x_train, y_train), _ = testing_utils.get_test_data(\n train_samples=100,\n test_samples=0,\n input_shape=(4, 10),\n num_classes=2)\n y_train = np_utils.to_categorical(y_train)\n\n layers = [\n keras.layers.LSTM(5, return_sequences=True),\n keras.layers.GRU(y_train.shape[-1], activation='softmax')\n ]\n model = testing_utils.get_model_from_layers(\n layers, input_shape=x_train.shape[1:])\n model.compile(\n loss='categorical_crossentropy',\n optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),\n metrics=['acc'],\n run_eagerly=testing_utils.should_run_eagerly())\n history = model.fit(x_train, y_train, epochs=15, batch_size=10,\n validation_data=(x_train, y_train),\n verbose=2)\n self.assertGreater(history.history['val_acc'][-1], 0.7)\n _, val_acc = model.evaluate(x_train, y_train)\n self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)\n predictions = model.predict(x_train)\n self.assertEqual(predictions.shape, (x_train.shape[0], 2))\n\n def test_timeseries_classification_sequential_tf_rnn(self):\n np.random.seed(1337)\n (x_train, y_train), _ = testing_utils.get_test_data(\n train_samples=100,\n test_samples=0,\n input_shape=(4, 10),\n num_classes=2)\n y_train = np_utils.to_categorical(y_train)\n\n with base_layer.keras_style_scope():\n model = keras.models.Sequential()\n model.add(keras.layers.RNN(rnn_cell.LSTMCell(5), return_sequences=True,\n input_shape=x_train.shape[1:]))\n model.add(keras.layers.RNN(rnn_cell.GRUCell(y_train.shape[-1],\n activation='softmax',\n dtype=tf.float32)))\n model.compile(\n loss='categorical_crossentropy',\n 
optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),\n metrics=['acc'],\n run_eagerly=testing_utils.should_run_eagerly())\n\n history = model.fit(x_train, y_train, epochs=15, batch_size=10,\n validation_data=(x_train, y_train),\n verbose=2)\n self.assertGreater(history.history['val_acc'][-1], 0.7)\n _, val_acc = model.evaluate(x_train, y_train)\n self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)\n predictions = model.predict(x_train)\n self.assertEqual(predictions.shape, (x_train.shape[0], 2))\n\n\n@keras_parameterized.run_with_all_model_types\n@keras_parameterized.run_all_keras_modes\nclass ImageClassificationIntegrationTest(keras_parameterized.TestCase):\n\n def test_image_classification(self):\n np.random.seed(1337)\n (x_train, y_train), _ = testing_utils.get_test_data(\n train_samples=100,\n test_samples=0,\n input_shape=(10, 10, 3),\n num_classes=2)\n y_train = np_utils.to_categorical(y_train)\n\n layers = [\n keras.layers.Conv2D(4, 3, padding='same', activation='relu'),\n keras.layers.Conv2D(8, 3, padding='same'),\n keras.layers.BatchNormalization(),\n keras.layers.Conv2D(8, 3, padding='same'),\n keras.layers.Flatten(),\n keras.layers.Dense(y_train.shape[-1], activation='softmax')\n ]\n model = testing_utils.get_model_from_layers(\n layers, input_shape=x_train.shape[1:])\n model.compile(\n loss='categorical_crossentropy',\n optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),\n metrics=['acc'],\n run_eagerly=testing_utils.should_run_eagerly())\n history = model.fit(x_train, y_train, epochs=10, batch_size=10,\n validation_data=(x_train, y_train),\n verbose=2)\n self.assertGreater(history.history['val_acc'][-1], 0.7)\n _, val_acc = model.evaluate(x_train, y_train)\n self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)\n predictions = model.predict(x_train)\n self.assertEqual(predictions.shape, (x_train.shape[0], 2))\n\n\n@keras_parameterized.run_all_keras_modes\nclass 
ActivationV2IntegrationTest(keras_parameterized.TestCase):\n \"\"\"Tests activation function V2 in model exporting and loading.\n\n This test is to verify in TF 2.x, when 'tf.nn.softmax' is used as an\n activation function, its model exporting and loading work as expected.\n Check b/123041942 for details.\n \"\"\"\n\n def test_serialization_v2_model(self):\n np.random.seed(1337)\n (x_train, y_train), _ = testing_utils.get_test_data(\n train_samples=100,\n test_samples=0,\n input_shape=(10,),\n num_classes=2)\n y_train = np_utils.to_categorical(y_train)\n\n model = keras.Sequential([\n keras.layers.Flatten(input_shape=x_train.shape[1:]),\n keras.layers.Dense(10, activation=tf.nn.relu),\n # To mimic 'tf.nn.softmax' used in TF 2.x.\n keras.layers.Dense(y_train.shape[-1], activation=tf.math.softmax),\n ])\n\n # Check if 'softmax' is in model.get_config().\n last_layer_activation = model.get_layer(index=2).get_config()['activation']\n self.assertEqual(last_layer_activation, 'softmax')\n\n model.compile(\n loss='categorical_crossentropy',\n optimizer=keras.optimizers.optimizer_v2.adam.Adam(0.005),\n metrics=['accuracy'],\n run_eagerly=testing_utils.should_run_eagerly())\n model.fit(x_train, y_train, epochs=2, batch_size=10,\n validation_data=(x_train, y_train),\n verbose=2)\n\n output_path = os.path.join(self.get_temp_dir(), 'tf_keras_saved_model')\n model.save(output_path, save_format='tf')\n loaded_model = keras.models.load_model(output_path)\n self.assertEqual(model.summary(), loaded_model.summary())\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for image preprocessing utils.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport os\nimport shutil\nimport tempfile\n\nfrom absl.testing import parameterized\nimport numpy as np\nfrom keras import keras_parameterized\nfrom keras import layers\nfrom keras import testing_utils\nfrom keras.engine import sequential\nfrom keras.preprocessing import image as preprocessing_image\n\ntry:\n import PIL # pylint:disable=g-import-not-at-top\nexcept ImportError:\n PIL = None\n\n\ndef _generate_test_images():\n img_w = img_h = 20\n rgb_images = []\n gray_images = []\n for _ in range(8):\n bias = np.random.rand(img_w, img_h, 1) * 64\n variance = np.random.rand(img_w, img_h, 1) * (255 - 64)\n imarray = np.random.rand(img_w, img_h, 3) * variance + bias\n im = preprocessing_image.array_to_img(imarray, scale=False)\n rgb_images.append(im)\n\n imarray = np.random.rand(img_w, img_h, 1) * variance + bias\n im = preprocessing_image.array_to_img(imarray, scale=False)\n gray_images.append(im)\n\n return [rgb_images, gray_images]\n\n\n@testing_utils.run_v2_only\nclass TestImage(keras_parameterized.TestCase):\n\n def test_smart_resize(self):\n test_input = np.random.random((20, 40, 3))\n output = preprocessing_image.smart_resize(test_input, size=(50, 50))\n self.assertIsInstance(output, np.ndarray)\n 
self.assertListEqual(list(output.shape), [50, 50, 3])\n output = preprocessing_image.smart_resize(test_input, size=(10, 10))\n self.assertListEqual(list(output.shape), [10, 10, 3])\n output = preprocessing_image.smart_resize(test_input, size=(100, 50))\n self.assertListEqual(list(output.shape), [100, 50, 3])\n output = preprocessing_image.smart_resize(test_input, size=(5, 15))\n self.assertListEqual(list(output.shape), [5, 15, 3])\n\n @parameterized.named_parameters(\n ('size1', (50, 50)),\n ('size2', (10, 10)),\n ('size3', (100, 50)),\n ('size4', (5, 15)))\n def test_smart_resize_tf_dataset(self, size):\n test_input_np = np.random.random((2, 20, 40, 3))\n test_ds = tf.data.Dataset.from_tensor_slices(test_input_np)\n\n resize = lambda img: preprocessing_image.smart_resize(img, size=size)\n test_ds = test_ds.map(resize)\n for sample in test_ds.as_numpy_iterator():\n self.assertIsInstance(sample, np.ndarray)\n self.assertListEqual(list(sample.shape), [size[0], size[1], 3])\n\n def test_smart_resize_batch(self):\n img = np.random.random((2, 20, 40, 3))\n out = preprocessing_image.smart_resize(img, size=(20, 20))\n self.assertListEqual(list(out.shape), [2, 20, 20, 3])\n self.assertAllClose(out, img[:, :, 10:-10, :])\n\n def test_smart_resize_errors(self):\n with self.assertRaisesRegex(ValueError, 'a tuple of 2 integers'):\n preprocessing_image.smart_resize(\n np.random.random((20, 20, 2)), size=(10, 5, 3))\n with self.assertRaisesRegex(ValueError, 'incorrect rank'):\n preprocessing_image.smart_resize(np.random.random((2, 4)), size=(10, 5))\n with self.assertRaisesRegex(ValueError, 'incorrect rank'):\n preprocessing_image.smart_resize(\n np.random.random((2, 4, 4, 5, 3)), size=(10, 5))\n\n def test_image_data_generator(self):\n if PIL is None:\n return # Skip test if PIL is not available.\n\n for test_images in _generate_test_images():\n img_list = []\n for im in test_images:\n img_list.append(preprocessing_image.img_to_array(im)[None, ...])\n\n images = 
np.vstack(img_list)\n generator = preprocessing_image.ImageDataGenerator(\n featurewise_center=True,\n samplewise_center=True,\n featurewise_std_normalization=True,\n samplewise_std_normalization=True,\n zca_whitening=True,\n rotation_range=90.,\n width_shift_range=0.1,\n height_shift_range=0.1,\n shear_range=0.5,\n zoom_range=0.2,\n channel_shift_range=0.,\n brightness_range=(1, 5),\n fill_mode='nearest',\n cval=0.5,\n horizontal_flip=True,\n vertical_flip=True)\n # Basic test before fit\n x = np.random.random((32, 10, 10, 3))\n generator.flow(x)\n\n # Fit\n generator.fit(images, augment=True)\n\n for x, _ in generator.flow(\n images,\n np.arange(images.shape[0]),\n shuffle=True):\n self.assertEqual(x.shape[1:], images.shape[1:])\n break\n\n def test_image_data_generator_with_split_value_error(self):\n with self.assertRaises(ValueError):\n preprocessing_image.ImageDataGenerator(validation_split=5)\n\n def test_image_data_generator_invalid_data(self):\n generator = preprocessing_image.ImageDataGenerator(\n featurewise_center=True,\n samplewise_center=True,\n featurewise_std_normalization=True,\n samplewise_std_normalization=True,\n zca_whitening=True,\n data_format='channels_last')\n\n # Test fit with invalid data\n with self.assertRaises(ValueError):\n x = np.random.random((3, 10, 10))\n generator.fit(x)\n # Test flow with invalid data\n with self.assertRaises(ValueError):\n generator.flow(np.arange(5))\n # Invalid number of channels: will work but raise a warning\n x = np.random.random((32, 10, 10, 5))\n generator.flow(x)\n\n with self.assertRaises(ValueError):\n generator = preprocessing_image.ImageDataGenerator(\n data_format='unknown')\n\n generator = preprocessing_image.ImageDataGenerator(zoom_range=(2., 2.))\n\n def test_image_data_generator_fit(self):\n generator = preprocessing_image.ImageDataGenerator(\n featurewise_center=True,\n samplewise_center=True,\n featurewise_std_normalization=True,\n samplewise_std_normalization=True,\n zca_whitening=True,\n 
data_format='channels_last')\n # Test grayscale\n x = np.random.random((32, 10, 10, 1))\n generator.fit(x)\n # Test RBG\n x = np.random.random((32, 10, 10, 3))\n generator.fit(x)\n generator = preprocessing_image.ImageDataGenerator(\n featurewise_center=True,\n samplewise_center=True,\n featurewise_std_normalization=True,\n samplewise_std_normalization=True,\n zca_whitening=True,\n data_format='channels_first')\n # Test grayscale\n x = np.random.random((32, 1, 10, 10))\n generator.fit(x)\n # Test RBG\n x = np.random.random((32, 3, 10, 10))\n generator.fit(x)\n\n def test_directory_iterator(self):\n if PIL is None:\n return # Skip test if PIL is not available.\n\n num_classes = 2\n\n temp_dir = self.get_temp_dir()\n self.addCleanup(shutil.rmtree, temp_dir)\n\n # create folders and subfolders\n paths = []\n for cl in range(num_classes):\n class_directory = 'class-{}'.format(cl)\n classpaths = [\n class_directory, os.path.join(class_directory, 'subfolder-1'),\n os.path.join(class_directory, 'subfolder-2'), os.path.join(\n class_directory, 'subfolder-1', 'sub-subfolder')\n ]\n for path in classpaths:\n os.mkdir(os.path.join(temp_dir, path))\n paths.append(classpaths)\n\n # save the images in the paths\n count = 0\n filenames = []\n for test_images in _generate_test_images():\n for im in test_images:\n # rotate image class\n im_class = count % num_classes\n # rotate subfolders\n classpaths = paths[im_class]\n filename = os.path.join(classpaths[count % len(classpaths)],\n 'image-{}.jpg'.format(count))\n filenames.append(filename)\n im.save(os.path.join(temp_dir, filename))\n count += 1\n\n # Test image loading util\n fname = os.path.join(temp_dir, filenames[0])\n _ = preprocessing_image.load_img(fname)\n _ = preprocessing_image.load_img(fname, grayscale=True)\n _ = preprocessing_image.load_img(fname, target_size=(10, 10))\n _ = preprocessing_image.load_img(fname, target_size=(10, 10),\n interpolation='bilinear')\n\n # create iterator\n generator = 
preprocessing_image.ImageDataGenerator()\n dir_iterator = generator.flow_from_directory(temp_dir)\n\n # check number of classes and images\n self.assertEqual(len(dir_iterator.class_indices), num_classes)\n self.assertEqual(len(dir_iterator.classes), count)\n self.assertEqual(set(dir_iterator.filenames), set(filenames))\n\n def preprocessing_function(x):\n \"\"\"This will fail if not provided by a Numpy array.\n\n Note: This is made to enforce backward compatibility.\n\n Args:\n x: A numpy array.\n\n Returns:\n An array of zeros with the same shape as the given array.\n \"\"\"\n self.assertEqual(x.shape, (26, 26, 3))\n self.assertIs(type(x), np.ndarray)\n return np.zeros_like(x)\n\n # Test usage as Sequence\n generator = preprocessing_image.ImageDataGenerator(\n preprocessing_function=preprocessing_function)\n dir_seq = generator.flow_from_directory(\n str(temp_dir),\n target_size=(26, 26),\n color_mode='rgb',\n batch_size=3,\n class_mode='categorical')\n self.assertEqual(len(dir_seq), count // 3 + 1)\n x1, y1 = dir_seq[1]\n self.assertEqual(x1.shape, (3, 26, 26, 3))\n self.assertEqual(y1.shape, (3, num_classes))\n x1, y1 = dir_seq[5]\n self.assertTrue((x1 == 0).all())\n\n def directory_iterator_with_validation_split_test_helper(\n self, validation_split):\n if PIL is None:\n return # Skip test if PIL is not available.\n\n num_classes = 2\n tmp_folder = tempfile.mkdtemp(prefix='test_images')\n\n # create folders and subfolders\n paths = []\n for cl in range(num_classes):\n class_directory = 'class-{}'.format(cl)\n classpaths = [\n class_directory,\n os.path.join(class_directory, 'subfolder-1'),\n os.path.join(class_directory, 'subfolder-2'),\n os.path.join(class_directory, 'subfolder-1', 'sub-subfolder')\n ]\n for path in classpaths:\n os.mkdir(os.path.join(tmp_folder, path))\n paths.append(classpaths)\n\n # save the images in the paths\n count = 0\n filenames = []\n for test_images in _generate_test_images():\n for im in test_images:\n # rotate image class\n 
im_class = count % num_classes\n # rotate subfolders\n classpaths = paths[im_class]\n filename = os.path.join(classpaths[count % len(classpaths)],\n 'image-{}.jpg'.format(count))\n filenames.append(filename)\n im.save(os.path.join(tmp_folder, filename))\n count += 1\n\n # create iterator\n generator = preprocessing_image.ImageDataGenerator(\n validation_split=validation_split)\n\n with self.assertRaises(ValueError):\n generator.flow_from_directory(tmp_folder, subset='foo')\n\n num_validation = int(count * validation_split)\n num_training = count - num_validation\n train_iterator = generator.flow_from_directory(\n tmp_folder, subset='training')\n self.assertEqual(train_iterator.samples, num_training)\n\n valid_iterator = generator.flow_from_directory(\n tmp_folder, subset='validation')\n self.assertEqual(valid_iterator.samples, num_validation)\n\n # check number of classes and images\n self.assertEqual(len(train_iterator.class_indices), num_classes)\n self.assertEqual(len(train_iterator.classes), num_training)\n self.assertEqual(\n len(set(train_iterator.filenames) & set(filenames)), num_training)\n\n model = sequential.Sequential([layers.Flatten(), layers.Dense(2)])\n model.compile(optimizer='sgd', loss='mse')\n model.fit(train_iterator, epochs=1)\n\n shutil.rmtree(tmp_folder)\n\n @keras_parameterized.run_all_keras_modes\n def test_directory_iterator_with_validation_split_25_percent(self):\n self.directory_iterator_with_validation_split_test_helper(0.25)\n\n @keras_parameterized.run_all_keras_modes\n def test_directory_iterator_with_validation_split_40_percent(self):\n self.directory_iterator_with_validation_split_test_helper(0.40)\n\n @keras_parameterized.run_all_keras_modes\n def test_directory_iterator_with_validation_split_50_percent(self):\n self.directory_iterator_with_validation_split_test_helper(0.50)\n\n def test_img_utils(self):\n if PIL is None:\n return # Skip test if PIL is not available.\n\n height, width = 10, 8\n\n # Test channels_first data 
format\n x = np.random.random((3, height, width))\n img = preprocessing_image.array_to_img(\n x, data_format='channels_first')\n self.assertEqual(img.size, (width, height))\n x = preprocessing_image.img_to_array(\n img, data_format='channels_first')\n self.assertEqual(x.shape, (3, height, width))\n # Test 2D\n x = np.random.random((1, height, width))\n img = preprocessing_image.array_to_img(\n x, data_format='channels_first')\n self.assertEqual(img.size, (width, height))\n x = preprocessing_image.img_to_array(\n img, data_format='channels_first')\n self.assertEqual(x.shape, (1, height, width))\n\n # Test channels_last data format\n x = np.random.random((height, width, 3))\n img = preprocessing_image.array_to_img(x, data_format='channels_last')\n self.assertEqual(img.size, (width, height))\n x = preprocessing_image.img_to_array(img, data_format='channels_last')\n self.assertEqual(x.shape, (height, width, 3))\n # Test 2D\n x = np.random.random((height, width, 1))\n img = preprocessing_image.array_to_img(x, data_format='channels_last')\n self.assertEqual(img.size, (width, height))\n x = preprocessing_image.img_to_array(img, data_format='channels_last')\n self.assertEqual(x.shape, (height, width, 1))\n\n def test_batch_standardize(self):\n if PIL is None:\n return # Skip test if PIL is not available.\n\n # ImageDataGenerator.standardize should work on batches\n for test_images in _generate_test_images():\n img_list = []\n for im in test_images:\n img_list.append(preprocessing_image.img_to_array(im)[None, ...])\n\n images = np.vstack(img_list)\n generator = preprocessing_image.ImageDataGenerator(\n featurewise_center=True,\n samplewise_center=True,\n featurewise_std_normalization=True,\n samplewise_std_normalization=True,\n zca_whitening=True,\n rotation_range=90.,\n width_shift_range=0.1,\n height_shift_range=0.1,\n shear_range=0.5,\n zoom_range=0.2,\n channel_shift_range=0.,\n brightness_range=(1, 5),\n fill_mode='nearest',\n cval=0.5,\n horizontal_flip=True,\n 
vertical_flip=True)\n generator.fit(images, augment=True)\n\n transformed = np.copy(images)\n for i, im in enumerate(transformed):\n transformed[i] = generator.random_transform(im)\n transformed = generator.standardize(transformed)\n\n def test_img_transforms(self):\n x = np.random.random((3, 200, 200))\n _ = preprocessing_image.random_rotation(x, 20)\n _ = preprocessing_image.random_shift(x, 0.2, 0.2)\n _ = preprocessing_image.random_shear(x, 2.)\n _ = preprocessing_image.random_zoom(x, (0.5, 0.5))\n _ = preprocessing_image.apply_channel_shift(x, 2, 2)\n _ = preprocessing_image.apply_affine_transform(x, 2)\n with self.assertRaises(ValueError):\n preprocessing_image.random_zoom(x, (0, 0, 0))\n _ = preprocessing_image.random_channel_shift(x, 2.)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests keras.Model works properly with mixed precision.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n\nimport os\n\nfrom absl import flags\nfrom absl.testing import parameterized\nimport numpy as np\nfrom keras import backend\nfrom keras import combinations\nfrom keras import keras_parameterized\nfrom keras import layers\nfrom keras import models\nfrom keras.optimizers import optimizer_v1\nfrom keras import testing_utils\nfrom keras.applications import densenet\nfrom 
keras.applications import efficientnet\nfrom keras.applications import inception_resnet_v2\nfrom keras.applications import inception_v3\nfrom keras.applications import mobilenet\nfrom keras.applications import nasnet\nfrom keras.applications import resnet\nfrom keras.applications import vgg16\nfrom keras.applications import xception\nfrom keras.engine import base_layer_utils\nfrom keras.engine import input_spec\nfrom keras.engine import sequential\nfrom keras.layers import core\nfrom keras.mixed_precision import get_layer_policy\nfrom keras.mixed_precision import loss_scale_optimizer\nfrom keras.mixed_precision import policy\nfrom keras.mixed_precision import test_util as mp_test_util\nfrom keras.optimizers.optimizer_v2 import gradient_descent\nfrom keras.saving import save\nfrom keras.utils import generic_utils\n\n\n# If called outside any strategy.scope() calls, this will return the default\n# strategy.\ndefault_strategy_fn = tf.distribute.get_strategy\n\n\ndef create_mirrored_strategy():\n \"\"\"Create a MirroredStrategy, using a GPU if it is available.\"\"\"\n if tf.config.list_logical_devices('GPU'):\n return tf.distribute.MirroredStrategy(['cpu:0', 'gpu:0'])\n else:\n return tf.distribute.MirroredStrategy(['cpu:0'])\n\n\nTESTCASES = ({\n 'testcase_name': 'base',\n 'strategy_fn': default_strategy_fn\n}, {\n 'testcase_name': 'distribute',\n 'strategy_fn': create_mirrored_strategy\n})\n\n\nclass KerasModelTest(keras_parameterized.TestCase):\n \"\"\"Test mixed precision with Keras models.\"\"\"\n\n def _skip_if_strategy_unsupported(self, strategy_fn):\n if (strategy_fn != default_strategy_fn and\n testing_utils.get_model_type() == 'subclass'):\n self.skipTest('Non-default strategies are unsupported with subclassed '\n 'models')\n\n def _skip_if_save_format_unsupported(self, save_format):\n model_type = testing_utils.get_model_type()\n if save_format == 'h5' and model_type == 'subclass':\n self.skipTest('Saving subclassed models with the HDF5 format is '\n 
'unsupported')\n if (save_format == 'tf' and model_type == 'subclass' and\n not tf.executing_eagerly()):\n self.skipTest('b/148820505: This combination of features is currently '\n 'broken.')\n\n @keras_parameterized.run_with_all_model_types\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters(\n {\n 'testcase_name': 'base',\n 'strategy_fn': default_strategy_fn\n }, {\n 'testcase_name': 'distribute',\n 'strategy_fn': create_mirrored_strategy,\n }, {\n 'testcase_name': 'operator',\n 'strategy_fn': create_mirrored_strategy,\n 'use_operator': True\n }, {\n 'testcase_name': 'regularizer',\n 'strategy_fn': create_mirrored_strategy,\n 'use_regularizer': True\n }, {\n 'testcase_name': 'get_config',\n 'strategy_fn': create_mirrored_strategy,\n 'get_config': True,\n 'use_regularizer': True,\n }, {\n 'testcase_name': 'saved_model',\n 'strategy_fn': default_strategy_fn,\n 'save_format': 'tf',\n 'use_regularizer': True,\n }, {\n 'testcase_name': 'saved_model_input_spec',\n 'strategy_fn': default_strategy_fn,\n 'save_format': 'tf',\n 'use_regularizer': True,\n 'use_input_spec': True,\n }, {\n 'testcase_name': 'h5',\n 'strategy_fn': default_strategy_fn,\n 'save_format': 'h5',\n 'use_regularizer': True,\n }, {\n 'testcase_name': 'saved_model_distribute',\n 'strategy_fn': create_mirrored_strategy,\n 'save_format': 'tf',\n 'use_regularizer': True,\n }, {\n 'testcase_name': 'saved_model_input_spec_distribute',\n 'strategy_fn': create_mirrored_strategy,\n 'save_format': 'tf',\n 'use_regularizer': True,\n 'use_input_spec': True,\n }, {\n 'testcase_name': 'h5_distribute',\n 'strategy_fn': create_mirrored_strategy,\n 'save_format': 'h5',\n 'use_regularizer': True,\n }, {\n 'testcase_name': 'saved_model_v1_policy',\n 'strategy_fn': create_mirrored_strategy,\n 'use_v1_policy': True,\n 'save_format': 'tf',\n })\n def test_model(self,\n strategy_fn,\n use_operator=False,\n use_regularizer=False,\n policy_name='mixed_float16',\n get_config=False,\n save_format=None,\n 
use_input_spec=False,\n use_v1_policy=False):\n self._skip_if_strategy_unsupported(strategy_fn)\n self._skip_if_save_format_unsupported(save_format)\n if use_regularizer:\n weight_regularizer = mp_test_util.IdentityRegularizer()\n activity_regularizer = mp_test_util.ReduceSumRegularizer()\n else:\n weight_regularizer = activity_regularizer = None\n with strategy_fn().scope():\n cls = policy.PolicyV1 if use_v1_policy else policy.Policy\n with policy.policy_scope(cls(policy_name)):\n layer = mp_test_util.MultiplyLayer(\n assert_type=tf.float16,\n use_operator=use_operator,\n regularizer=weight_regularizer,\n activity_regularizer=activity_regularizer,\n input_shape=(1,))\n if use_input_spec:\n layer.input_spec = input_spec.InputSpec(shape=(None, 1))\n model = testing_utils.get_model_from_layers([layer], input_shape=(1,),\n input_dtype=tf.float16)\n if get_config:\n config = model.get_config()\n model = model.__class__.from_config(\n config,\n custom_objects={'MultiplyLayer': mp_test_util.MultiplyLayer})\n (layer,) = (layer for layer in model.layers\n if isinstance(layer, mp_test_util.MultiplyLayer))\n\n def loss_fn(y_true, y_pred):\n del y_true\n return tf.reduce_mean(y_pred)\n\n # Learning rate is small enough that if applied to a float16 variable,\n # the variable will not change. 
So this tests the learning rate not\n # applied to a float16 value, but instead the float32 variable.\n opt = gradient_descent.SGD(2**-14)\n # Use a fixed loss scale, as this test will fail if gradients are\n # skipped for a step due to dynamic loss scaling.\n opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,\n initial_scale=8)\n model.compile(\n opt,\n loss=loss_fn,\n run_eagerly=testing_utils.should_run_eagerly())\n\n x = np.ones((2, 1))\n y = np.ones((2, 1))\n dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)\n model.fit(dataset)\n # Variable starts at 1, and should have gradient of 2 ** -14 subtracted\n # from it.\n expected = 1 - 2**-14\n if use_regularizer:\n # Weight and activity regularizer each add another 2 ** -14 to the\n # gradient.\n expected -= 2 * 2**-14\n self.assertEqual(backend.eval(layer.v), expected)\n\n if save_format:\n with generic_utils.CustomObjectScope(\n {'MultiplyLayer': mp_test_util.MultiplyLayer, 'loss_fn': loss_fn}):\n self._test_saving(model, dataset, save_format, use_regularizer)\n\n def _test_saving(self, model, dataset, save_format, use_regularizer):\n # Save and load model, asserting variable does not change\n save_path = os.path.join(self.get_temp_dir(), 'model')\n model.save(save_path, save_format=save_format)\n model = save.load_model(save_path)\n (layer,) = (layer for layer in model.layers\n if 'MultiplyLayer' in layer.__class__.__name__)\n expected = 1 - 2**-14\n if use_regularizer:\n expected -= 2 * 2**-14\n self.assertEqual(backend.eval(layer.v), expected)\n\n # Continue training, and assert variable is correct value\n model.fit(dataset)\n new_expected = expected - 2 ** -14\n if use_regularizer:\n new_expected -= 2 * 2 ** -14\n self.assertEqual(backend.eval(layer.v), new_expected)\n\n # Load saved model again, and assert variable is previous value\n model = save.load_model(save_path)\n (layer,) = (layer for layer in model.layers\n if 'MultiplyLayer' in layer.__class__.__name__)\n 
self.assertEqual(backend.eval(layer.v), expected)\n\n # Ensure various dtype-related aspects of the layer are correct\n self.assertEqual(layer.dtype, 'float32')\n self.assertEqual(get_layer_policy.get_layer_policy(layer).name,\n 'mixed_float16')\n self.assertEqual(layer.v.dtype, 'float32')\n self.assertEqual(layer(np.ones((2, 1))).dtype, 'float16')\n\n # Loading a model always loads with a v2 Policy, even if saved with a\n # PolicyV1.\n self.assertEqual(type(model.dtype_policy), policy.Policy)\n self.assertEqual(layer.get_config()['dtype'],\n {'class_name': 'Policy', 'config': {\n 'name': 'mixed_float16'}})\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters(\n {\n 'testcase_name': 'base',\n 'strategy_fn': default_strategy_fn\n }, {\n 'testcase_name': 'distribute',\n 'strategy_fn': create_mirrored_strategy,\n })\n def test_fixed_loss_scaling(self,\n strategy_fn):\n # Note: We do not test mixed precision in this method, only loss scaling.\n loss_scale = 8.\n batch_size = 4\n with strategy_fn().scope():\n x = layers.Input(shape=(1,), batch_size=batch_size)\n layer = mp_test_util.MultiplyLayer()\n y = layer(x)\n\n # The gradient of 'y' at this point is 1. With loss scaling, the gradient\n # is 'loss_scale'. 
We divide by the batch size since the loss is averaged\n # across batch elements.\n expected_gradient = loss_scale / batch_size\n identity_with_grad_check_fn = (\n mp_test_util.create_identity_with_grad_check_fn([expected_gradient]))\n y = core.Lambda(identity_with_grad_check_fn)(y)\n model = models.Model(inputs=x, outputs=y)\n\n def loss_fn(y_true, y_pred):\n del y_true\n return tf.reduce_mean(y_pred)\n\n opt = gradient_descent.SGD(1.)\n opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,\n initial_scale=loss_scale)\n model.compile(\n opt,\n loss=loss_fn,\n run_eagerly=testing_utils.should_run_eagerly())\n\n self.assertEqual(backend.eval(layer.v), 1)\n x = np.ones((batch_size, 1))\n y = np.ones((batch_size, 1))\n dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)\n model.fit(dataset)\n # Variable starts at 1, and should have gradient of 1 subtracted from it.\n expected = 0\n self.assertEqual(backend.eval(layer.v), expected)\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters(\n {\n 'testcase_name': 'base',\n 'strategy_fn': default_strategy_fn\n }, {\n 'testcase_name': 'distribute',\n 'strategy_fn': create_mirrored_strategy,\n }, {\n 'testcase_name': 'loss_scaling',\n 'strategy_fn': create_mirrored_strategy,\n 'use_loss_scaling': True\n })\n def test_advanced_model(self, strategy_fn, use_loss_scaling=False):\n # The advanced model tests mixed-precision-related features that would occur\n # in a resnet50 model. 
It tests a model that has:\n # * Multiple layers, some which use auto-cast variables and some which do\n # not\n # * Regularization on some variables and not others.\n # * A fixed loss scale (if use_loss_scaling is True)\n\n strategy = strategy_fn()\n if use_loss_scaling:\n loss_scale = 8.\n learning_rate = 2**-14\n\n with strategy.scope():\n with policy.policy_scope(policy.Policy('mixed_float16')):\n x = layers.Input(shape=(1,), batch_size=2)\n layer1 = mp_test_util.MultiplyLayer(\n assert_type=tf.float16,\n regularizer=mp_test_util.IdentityRegularizer(),\n use_operator=True)\n layer2 = mp_test_util.MultiplyLayerWithoutAutoCast(\n assert_type=tf.float16, use_operator=True)\n layer3 = mp_test_util.MultiplyLayer(assert_type=tf.float16,\n use_operator=False)\n layer4 = mp_test_util.MultiplyLayerWithoutAutoCast(\n assert_type=tf.float16,\n regularizer=mp_test_util.IdentityRegularizer(),\n use_operator=False)\n y = layer1(x)\n y = layer2(y)\n y = layer3(y)\n y = layer4(y)\n if use_loss_scaling:\n # The gradient of 'y' at this point is 1. With loss scaling, the\n # gradient is 'loss_scale'. 
We divide by the batch size of 2 since the\n # loss is averaged across batch elements.\n expected_gradient = loss_scale / 2\n identity_with_grad_check_fn = (\n mp_test_util.create_identity_with_grad_check_fn(\n expected_dtype=tf.float16,\n expected_gradient=[expected_gradient]))\n y = core.Lambda(identity_with_grad_check_fn)(y)\n model = models.Model(inputs=x, outputs=y)\n\n def loss_fn(y_true, y_pred):\n del y_true\n return tf.reduce_mean(y_pred)\n\n opt = gradient_descent.SGD(learning_rate)\n if use_loss_scaling:\n opt = loss_scale_optimizer.LossScaleOptimizer(\n opt, dynamic=False, initial_scale=loss_scale)\n model.compile(\n opt,\n loss=loss_fn,\n run_eagerly=testing_utils.should_run_eagerly())\n\n x = np.ones((2, 1))\n y = np.ones((2, 1))\n dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(2)\n model.fit(dataset)\n for layer in (layer1, layer2, layer3, layer4):\n if layer.losses:\n # Layer has weight regularizer\n self.assertEqual(backend.eval(layer.v), 1 - 2 * learning_rate)\n else:\n # Layer does not have weight regularizer\n self.assertEqual(backend.eval(layer.v), 1 - learning_rate)\n\n @keras_parameterized.run_all_keras_modes(always_skip_v1=True)\n @parameterized.named_parameters(\n {\n 'testcase_name': 'base',\n 'strategy_fn': default_strategy_fn\n }, {\n 'testcase_name': 'distribute',\n 'strategy_fn': create_mirrored_strategy,\n }, {\n 'testcase_name': 'pass_loss_scale_to_policy',\n 'strategy_fn': create_mirrored_strategy,\n 'pass_loss_scale_to_policy': True,\n }, {\n 'testcase_name': 'get_config',\n 'strategy_fn': create_mirrored_strategy,\n 'get_config': True,\n }, {\n 'testcase_name': 'get_config_v1_lso',\n 'strategy_fn': create_mirrored_strategy,\n 'get_config': True,\n 'use_v1_loss_scale_optimizer': True,\n }, {\n 'testcase_name': 'get_config_and_pass_loss_scale_to_policy',\n 'strategy_fn': create_mirrored_strategy,\n 'get_config': True,\n 'pass_loss_scale_to_policy': True,\n })\n def test_dynamic_loss_scaling(self,\n strategy_fn,\n 
pass_loss_scale_to_policy=False,\n get_config=False,\n use_v1_loss_scale_optimizer=False):\n strategy = strategy_fn()\n initial_loss_scale = 2.\n batch_size = 4\n expected_gradient = backend.variable([initial_loss_scale / batch_size],\n dtype=tf.float16)\n # If this variable is set to True, the model below will have NaN gradients\n have_nan_gradients = backend.variable(False, dtype=tf.bool)\n with strategy.scope():\n opt = gradient_descent.SGD(1.)\n if pass_loss_scale_to_policy:\n loss_scale = tf.mixed_precision.experimental.DynamicLossScale(\n initial_loss_scale=initial_loss_scale, increment_period=2)\n p = policy.PolicyV1('mixed_float16', loss_scale=loss_scale)\n elif use_v1_loss_scale_optimizer:\n loss_scale = tf.mixed_precision.experimental.DynamicLossScale(\n initial_loss_scale=initial_loss_scale, increment_period=2)\n p = policy.Policy('mixed_float16')\n opt = loss_scale_optimizer.LossScaleOptimizerV1(\n opt, loss_scale)\n else:\n p = policy.Policy('mixed_float16')\n opt = loss_scale_optimizer.LossScaleOptimizer(\n opt, initial_scale=initial_loss_scale, dynamic_growth_steps=2)\n with policy.policy_scope(p):\n x = layers.Input(\n shape=(1,), batch_size=batch_size, dtype=tf.float16)\n layer = mp_test_util.MultiplyLayer(assert_type=tf.float16)\n y = layer(x)\n identity_with_nan_grads = (\n mp_test_util.create_identity_with_nan_gradients_fn(\n have_nan_gradients))\n y = core.Lambda(identity_with_nan_grads)(y)\n identity_with_grad_check_fn = (\n mp_test_util.create_identity_with_grad_check_fn(\n expected_dtype=tf.float16,\n expected_gradient=expected_gradient))\n y = core.Lambda(identity_with_grad_check_fn)(y)\n model = models.Model(inputs=x, outputs=y)\n if get_config:\n config = model.get_config()\n model = model.__class__.from_config(\n config,\n custom_objects={'MultiplyLayer': mp_test_util.MultiplyLayer})\n (layer,) = (layer for layer in model.layers\n if isinstance(layer, mp_test_util.MultiplyLayer))\n\n def loss_fn(y_true, y_pred):\n del y_true\n return 
tf.reduce_mean(y_pred)\n\n model.compile(\n opt,\n loss=loss_fn,\n run_eagerly=testing_utils.should_run_eagerly())\n\n self.assertEqual(backend.eval(layer.v), 1)\n x = np.ones((batch_size, 1))\n y = np.ones((batch_size, 1))\n dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)\n model.fit(dataset)\n # The variables starts with 1 and has a gradient of 1, so will go down by 1\n # each step.\n self.assertEqual(backend.eval(layer.v), 0)\n\n model.fit(dataset)\n self.assertEqual(backend.eval(layer.v), -1)\n\n # There have been two steps without NaNs, so the loss scale will double\n backend.set_value(expected_gradient,\n backend.get_value(expected_gradient * 2))\n model.fit(dataset)\n self.assertEqual(backend.eval(layer.v), -2)\n\n # Next test with NaN gradients.\n backend.set_value(have_nan_gradients, True)\n model.fit(dataset)\n # Variable should not be updated\n self.assertEqual(backend.eval(layer.v), -2)\n\n # Test with finite gradients again\n backend.set_value(have_nan_gradients, False)\n # The loss scale will be halved due to the NaNs, so the gradient will also\n # be halved\n backend.set_value(expected_gradient,\n backend.get_value(expected_gradient / 2))\n model.fit(dataset)\n self.assertEqual(backend.eval(layer.v), -3)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_loss_scale_optimizer_overrides_policy_v1_loss_scale(self):\n with policy.policy_scope(policy.PolicyV1('float32', loss_scale=10.)):\n opt = gradient_descent.SGD(1.)\n opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,\n initial_scale=5.)\n x = layers.Input(shape=(1,))\n y = mp_test_util.MultiplyLayer()(x)\n model = models.Model(x, y)\n model.compile(opt, loss='mse')\n self.assertEqual(self.evaluate(model.optimizer.loss_scale), 5.)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_policy_v1_without_loss_scale(self):\n with policy.policy_scope(policy.PolicyV1('mixed_float16',\n loss_scale=None)):\n 
opt = gradient_descent.SGD(1.)\n x = layers.Input(shape=(1,))\n y = mp_test_util.MultiplyLayer()(x)\n model = models.Model(x, y)\n model.compile(opt, loss='mse')\n self.assertNotIsInstance(model.optimizer,\n loss_scale_optimizer.LossScaleOptimizer)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_pass_invalid_optimizer_with_loss_scaling(self):\n with policy.policy_scope(policy.PolicyV1('float32', loss_scale=10.)):\n x = layers.Input(shape=(1,))\n y = mp_test_util.MultiplyLayer()(x)\n model = models.Model(x, y)\n if tf.executing_eagerly():\n error_msg = 'Use a `tf.keras` Optimizer instead'\n else:\n error_msg = 'optimizer\" must be an instance of '\n with self.assertRaisesRegex(ValueError, error_msg):\n model.compile(optimizer_v1.SGD(1.), 'mse')\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_functional_model_loss_dtype(self):\n with policy.policy_scope('float16'):\n x = layers.Input(shape=(1,))\n y = mp_test_util.MultiplyLayer()(x)\n model = models.Model(x, y)\n model.add_loss(tf.cast(y, 'float32'))\n # The loss should not be casted to the policy's dtype.\n self.assertEqual(model.losses[0].dtype, 'float32')\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters(\n {\n 'testcase_name': 'base',\n 'strategy_fn': default_strategy_fn,\n }, {\n 'testcase_name': 'distribute',\n 'strategy_fn': create_mirrored_strategy,\n }, {\n 'testcase_name': 'base_h5',\n 'strategy_fn': default_strategy_fn,\n 'h5': True,\n }, {\n 'testcase_name': 'distribute_h5',\n 'strategy_fn': create_mirrored_strategy,\n 'h5': True,\n })\n def test_save_weights_with_autocast_vars(self, strategy_fn, h5=False):\n with strategy_fn().scope():\n with policy.policy_scope('mixed_float16'):\n x = layers.Input(shape=(1,), batch_size=2)\n layer = mp_test_util.MultiplyLayer(assert_type=tf.float16)\n y = layer(x)\n model = models.Model(inputs=x, outputs=y)\n\n model.set_weights([np.array(100.)])\n x = np.ones((2, 
1))\n self.assertAllClose(backend.get_value(model(x)), x * 100.)\n suffix = '.h5' if h5 else ''\n weights_file = os.path.join(self.get_temp_dir(), 'weights' + suffix)\n model.save_weights(weights_file)\n\n model.set_weights([np.array(200.)])\n self.assertAllClose(backend.get_value(model(x)), x * 200.)\n model.load_weights(weights_file)\n self.assertAllClose(backend.get_value(model(x)), x * 100.)\n self.assertEqual(model.get_weights(), [np.array(100.)])\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters(\n {\n 'testcase_name': 'base',\n 'strategy_fn': default_strategy_fn,\n }, {\n 'testcase_name': 'distribute',\n 'strategy_fn': create_mirrored_strategy,\n }, {\n 'testcase_name': 'different_var_name',\n 'strategy_fn': default_strategy_fn,\n 'var_name': 'w'\n }, {\n 'testcase_name': 'different_var_name_distribute',\n 'strategy_fn': create_mirrored_strategy,\n 'var_name': 'w'\n })\n def test_save_slot_variables_with_autocast_vars(self,\n strategy_fn,\n var_name='v'):\n p = policy.Policy('mixed_float16')\n with strategy_fn().scope(), policy.policy_scope(p):\n x = layers.Input(shape=(2,), batch_size=2)\n # Having a var_name other than 'v' tests that a fixed bug (b/134713714)\n # does not reoccur. 
The bug was that a crash would occur when saving a\n # checkpoint where an AutoCastVariable with a slot variable would have a\n # different name than the layer attribute's name (layer.v in this case).\n layer = mp_test_util.MultiplyLayer(assert_type=tf.float16,\n var_name=var_name)\n y = layer(x)\n model = models.Model(inputs=x, outputs=y)\n opt = gradient_descent.SGD(1., 1.)\n opt = loss_scale_optimizer.LossScaleOptimizer(opt, dynamic=False,\n initial_scale=1)\n model.compile(\n optimizer=opt,\n loss='mse',\n run_eagerly=testing_utils.should_run_eagerly())\n\n model.fit(np.ones((2, 2)), np.zeros((2, 2)), batch_size=2)\n weights_file = os.path.join(self.get_temp_dir(), 'weights')\n model.save_weights(weights_file)\n saved_slot = backend.get_value(opt.get_slot(layer.v, 'momentum'))\n\n model.fit(np.ones((2, 2)), np.zeros((2, 2)), batch_size=2)\n new_slot = backend.get_value(opt.get_slot(layer.v, 'momentum'))\n self.assertNotEqual(new_slot, saved_slot)\n\n model.load_weights(weights_file)\n restored_slot = backend.get_value(opt.get_slot(layer.v, 'momentum'))\n self.assertEqual(restored_slot, saved_slot)\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters(*TESTCASES)\n def test_save_weights_with_dynamic_loss_scaling(self, strategy_fn):\n strategy = strategy_fn()\n if (isinstance(strategy, tf.distribute.MirroredStrategy) and\n not tf.executing_eagerly()):\n # TODO(b/121381184): Enable running the test in this case.\n return\n\n # Create and run model.\n with strategy.scope():\n x = layers.Input(shape=(2,), batch_size=2, dtype=tf.float32)\n y = mp_test_util.MultiplyLayer(assert_type=tf.float32)(x)\n model = models.Model(inputs=x, outputs=y)\n\n opt = gradient_descent.SGD(1.)\n opt = loss_scale_optimizer.LossScaleOptimizer(\n opt, initial_scale=1., dynamic_growth_steps=2.)\n model.compile(\n optimizer=opt,\n loss='mse',\n run_eagerly=testing_utils.should_run_eagerly())\n # Run for 3 steps (6 examples with a batch size of 2)\n 
model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2)\n self.assertEqual(backend.get_value(opt.loss_scale), 2)\n self.assertEqual(backend.get_value(opt.dynamic_counter), 1)\n\n # Save model weights.\n save_prefix = os.path.join(self.get_temp_dir(), 'ckpt')\n model.save_weights(save_prefix)\n\n # Run model again for 1 step (2 examples with a batch size of 2)\n model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)\n self.assertEqual(backend.get_value(opt.loss_scale), 4)\n self.assertEqual(backend.get_value(opt.dynamic_counter), 0)\n\n # Load model weights and ensure loss scale weights are restored.\n model.load_weights(save_prefix)\n self.assertEqual(backend.get_value(opt.loss_scale), 2)\n self.assertEqual(backend.get_value(opt.dynamic_counter), 1)\n\n @keras_parameterized.run_all_keras_modes\n def test_restore_old_loss_scale_checkpoint(self):\n # Ensure a checkpoint from TF 2.2 can be loaded. The checkpoint format\n # of LossScaleOptimizer changed, but old checkpoints can still be loaded\n opt = gradient_descent.SGD(0.1, momentum=0.1)\n opt = loss_scale_optimizer.LossScaleOptimizer(opt)\n model = sequential.Sequential([core.Dense(2,)])\n\n # The checkpoint and expected values were obtained from the program in\n # testdata/BUILD.\n ckpt_dir = os.path.join(\n flags.FLAGS['test_srcdir'].value,\n 'org_keras/keras',\n 'mixed_precision/testdata/lso_ckpt_tf2.2')\n # ckpt_dir = test.test_src_dir_path(\n # 'python/keras/mixed_precision/testdata/lso_ckpt_tf2.2')\n model.load_weights(os.path.join(ckpt_dir, 'ckpt'))\n model.compile(opt, 'mse', run_eagerly=testing_utils.should_run_eagerly())\n model(np.zeros((2, 2))) # Create model weights\n opt._create_all_weights(model.weights)\n expected_kernel = np.array([[9.229685, 10.901115], [10.370763, 9.757362]])\n expected_slot = np.array([[10.049943, 9.917691], [10.049943, 9.917691]])\n self.assertAllClose(self.evaluate(model.weights[0]), expected_kernel)\n self.assertAllClose(\n 
self.evaluate(opt.get_slot(model.weights[0], 'momentum')),\n expected_slot)\n self.assertEqual(self.evaluate(opt.loss_scale), 32768)\n self.assertEqual(self.evaluate(opt.dynamic_counter), 1)\n\n # Check restoring works even after the model is compiled and the weights\n # have been created.\n model.fit(np.random.normal(size=(2, 2)), np.random.normal(size=(2, 2)))\n self.assertNotAllClose(self.evaluate(model.weights[0]), expected_kernel)\n self.assertNotAllClose(\n self.evaluate(opt.get_slot(model.weights[0], 'momentum')),\n expected_slot)\n model.load_weights(os.path.join(ckpt_dir, 'ckpt'))\n self.assertAllClose(self.evaluate(model.weights[0]), expected_kernel)\n self.assertAllClose(\n self.evaluate(opt.get_slot(model.weights[0], 'momentum')),\n expected_slot)\n self.assertEqual(self.evaluate(opt.loss_scale), 32768)\n self.assertEqual(self.evaluate(opt.dynamic_counter), 1)\n\n def test_restore_old_saved_model(self):\n saved_model_dir = os.path.join(\n flags.FLAGS['test_srcdir'].value,\n 'org_keras/keras',\n 'mixed_precision/testdata/lso_savedmodel_tf2.2')\n # saved_model_dir = test.test_src_dir_path(\n # 'python/keras/mixed_precision/testdata/'\n # 'lso_savedmodel_tf2.2')\n model = save.load_model(saved_model_dir)\n expected_kernel = np.array([[9.229685, 10.901115], [10.370763, 9.757362]])\n self.assertAllClose(backend.eval(model.weights[0]), expected_kernel)\n self.assertEqual(type(model.optimizer),\n loss_scale_optimizer.LossScaleOptimizer)\n\n @keras_parameterized.run_all_keras_modes\n @parameterized.named_parameters(\n {\n 'testcase_name': 'base',\n 'strategy_fn': default_strategy_fn,\n }, {\n 'testcase_name': 'distribute',\n 'strategy_fn': create_mirrored_strategy,\n }, {\n 'testcase_name': 'use_v1_lso',\n 'strategy_fn': create_mirrored_strategy,\n 'use_v1_loss_scale_optimizer': True\n }, {\n 'testcase_name': 'base_h5',\n 'strategy_fn': default_strategy_fn,\n 'h5': True,\n }, {\n 'testcase_name': 'distribute_h5',\n 'strategy_fn': create_mirrored_strategy,\n 
'h5': True,\n })\n def test_save_model_with_dynamic_loss_scaling(\n self, strategy_fn, h5=False, use_v1_loss_scale_optimizer=False):\n # TODO(reedwm): Support and test saving model with a mixed_[b]float16 policy\n # as well.\n strategy = strategy_fn()\n if (isinstance(strategy, tf.distribute.MirroredStrategy) and\n not tf.executing_eagerly()):\n # TODO(b/121381184): Enable running the test in this case.\n return\n\n # Create and run model.\n with strategy.scope():\n x = layers.Input(shape=(2,), batch_size=2, dtype=tf.float32)\n y = mp_test_util.MultiplyLayer()(x)\n model = models.Model(inputs=x, outputs=y)\n\n opt = gradient_descent.SGD(1.)\n if use_v1_loss_scale_optimizer:\n loss_scale = tf.mixed_precision.experimental.DynamicLossScale(\n initial_loss_scale=1., increment_period=2.)\n opt = loss_scale_optimizer.LossScaleOptimizerV1(opt, loss_scale)\n else:\n opt = loss_scale_optimizer.LossScaleOptimizer(opt, initial_scale=1.,\n dynamic_growth_steps=2.)\n model.compile(\n optimizer=opt,\n loss='mse',\n run_eagerly=testing_utils.should_run_eagerly())\n # Run for 3 steps (6 examples with a batch size of 2)\n model.fit(np.ones((6, 2)), np.zeros((6, 2)), batch_size=2)\n self.assertEqual(backend.get_value(opt.loss_scale), 2)\n self.assertEqual(backend.get_value(opt.dynamic_counter), 1)\n (weight,) = model.trainable_weights\n orig_weight = backend.get_value(weight)\n\n # Save model weights.\n save_path = os.path.join(self.get_temp_dir(), 'model')\n model.save(save_path, save_format='h5' if h5 else 'tf')\n\n # Run model again for 1 step (2 examples with a batch size of 2)\n model.fit(np.ones((2, 2)), np.zeros((2, 2)), batch_size=2)\n new_weight = backend.get_value(weight)\n self.assertNotEqual(new_weight, orig_weight)\n self.assertEqual(backend.get_value(opt.loss_scale), 4)\n self.assertEqual(backend.get_value(opt.dynamic_counter), 0)\n\n # Load model weights and ensure loss scale weights are restored.\n model = save.load_model(\n save_path, 
custom_objects={'MultiplyLayer': mp_test_util.MultiplyLayer})\n (weight,) = model.trainable_weights\n loaded_weight = backend.get_value(weight)\n self.assertEqual(loaded_weight, orig_weight)\n # Currently the loss scale isn't always saved when the model is saved with\n # Model.save(). So we assert the loss scale either has the value when it was\n # saved, or the value it was initialized with.\n # TODO(reedwm): Always save/restore the loss scale with Model.save().\n self.assertIn(backend.get_value(model.optimizer.loss_scale), (1, 2))\n self.assertIn(backend.get_value(model.optimizer.dynamic_counter), (0, 1))\n\n # Test optimizer attributes and type\n self.assertEqual(model.optimizer.initial_scale, 1.)\n self.assertEqual(model.optimizer.dynamic_growth_steps, 2.)\n self.assertEqual(type(model.optimizer),\n loss_scale_optimizer.LossScaleOptimizer)\n\n\nclass ApplicationModelTest(keras_parameterized.TestCase):\n \"\"\"Tests that application models can be built with mixed precision.\n\n This does not test that such models can be trained in mixed precision, as\n doing so takes too much time for a unit test.\n \"\"\"\n\n @parameterized.named_parameters(\n ('densenet', densenet.DenseNet121),\n ('efficientnet', efficientnet.EfficientNetB0),\n ('inception_resnet_v2', inception_resnet_v2.InceptionResNetV2),\n ('inception_v3', inception_v3.InceptionV3),\n ('mobilenet', mobilenet.MobileNet),\n ('nasnet', nasnet.NASNetMobile),\n ('vgg16', vgg16.VGG16),\n ('xception', xception.Xception),\n ('resnet50', resnet.ResNet50),\n )\n def test_application_model(self, app):\n # Run on CPU since model weights may exhaust GPU memory\n with policy.policy_scope('mixed_float16'), tf.device('/CPU:0'):\n app(weights=None)\n\n\nif __name__ == '__main__':\n base_layer_utils.enable_v2_dtype_behavior()\n tf.test.main()\n", "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"InputSpec tests.\"\"\"\n# pylint: disable=g-bad-import-order\n\nimport tensorflow.compat.v2 as tf\n\nfrom absl.testing import parameterized\nfrom keras import keras_parameterized\nfrom keras import layers\nfrom keras import testing_utils\nfrom keras.engine import keras_tensor\nfrom keras.engine import training\n\n\nclass CustomTypeSpec(tf.TypeSpec):\n \"\"\"Stubbed-out custom type spec, for testing.\"\"\"\n\n def __init__(self, shape, dtype):\n self.shape = tf.TensorShape(shape)\n self.dtype = tf.dtypes.as_dtype(dtype)\n\n # Stub implementations for all the TypeSpec methods:\n value_type = None\n _to_components = lambda self, value: None\n _from_components = lambda self, components: None\n _component_specs = property(lambda self: None)\n _serialize = lambda self: (self.shape, self.dtype)\n\n\nclass CustomTypeSpec2(CustomTypeSpec):\n \"\"\"Adds a with_shape method to CustomTypeSpec.\"\"\"\n\n def with_shape(self, new_shape):\n return CustomTypeSpec2(new_shape, self.dtype)\n\n\n@testing_utils.run_v2_only\nclass KerasTensorTest(keras_parameterized.TestCase):\n\n def test_repr_and_string(self):\n kt = keras_tensor.KerasTensor(\n type_spec=tf.TensorSpec(shape=(1, 2, 3), dtype=tf.float32))\n expected_str = (\"KerasTensor(type_spec=TensorSpec(shape=(1, 2, 3), \"\n \"dtype=tf.float32, name=None))\")\n expected_repr = 
\"<KerasTensor: shape=(1, 2, 3) dtype=float32>\"\n self.assertEqual(expected_str, str(kt))\n self.assertEqual(expected_repr, repr(kt))\n\n kt = keras_tensor.KerasTensor(\n type_spec=tf.TensorSpec(shape=(2,), dtype=tf.int32),\n inferred_value=[2, 3])\n expected_str = (\"KerasTensor(type_spec=TensorSpec(shape=(2,), \"\n \"dtype=tf.int32, name=None), inferred_value=[2, 3])\")\n expected_repr = (\n \"<KerasTensor: shape=(2,) dtype=int32 inferred_value=[2, 3]>\")\n self.assertEqual(expected_str, str(kt))\n self.assertEqual(expected_repr, repr(kt))\n\n kt = keras_tensor.KerasTensor(\n type_spec=tf.SparseTensorSpec(\n shape=(1, 2, 3), dtype=tf.float32))\n expected_str = (\"KerasTensor(type_spec=SparseTensorSpec(\"\n \"TensorShape([1, 2, 3]), tf.float32))\")\n expected_repr = (\n \"<KerasTensor: type_spec=SparseTensorSpec(\"\n \"TensorShape([1, 2, 3]), tf.float32)>\")\n self.assertEqual(expected_str, str(kt))\n self.assertEqual(expected_repr, repr(kt))\n\n inp = layers.Input(shape=(3, 5))\n kt = layers.Dense(10)(inp)\n expected_str = (\n \"KerasTensor(type_spec=TensorSpec(shape=(None, 3, 10), \"\n \"dtype=tf.float32, name=None), name='dense/BiasAdd:0', \"\n \"description=\\\"created by layer 'dense'\\\")\")\n expected_repr = (\n \"<KerasTensor: shape=(None, 3, 10) dtype=float32 (created \"\n \"by layer 'dense')>\")\n self.assertEqual(expected_str, str(kt))\n self.assertEqual(expected_repr, repr(kt))\n\n kt = tf.reshape(kt, shape=(3, 5, 2))\n expected_str = (\n \"KerasTensor(type_spec=TensorSpec(shape=(3, 5, 2), dtype=tf.float32, \"\n \"name=None), name='tf.reshape/Reshape:0', description=\\\"created \"\n \"by layer 'tf.reshape'\\\")\")\n expected_repr = (\"<KerasTensor: shape=(3, 5, 2) dtype=float32 (created \"\n \"by layer 'tf.reshape')>\")\n self.assertEqual(expected_str, str(kt))\n self.assertEqual(expected_repr, repr(kt))\n\n kts = tf.unstack(kt)\n for i in range(3):\n expected_str = (\n \"KerasTensor(type_spec=TensorSpec(shape=(5, 2), dtype=tf.float32, \"\n 
\"name=None), name='tf.unstack/unstack:%s', description=\\\"created \"\n \"by layer 'tf.unstack'\\\")\" % (i,))\n expected_repr = (\"<KerasTensor: shape=(5, 2) dtype=float32 \"\n \"(created by layer 'tf.unstack')>\")\n self.assertEqual(expected_str, str(kts[i]))\n self.assertEqual(expected_repr, repr(kts[i]))\n\n @parameterized.parameters(\n {\"property_name\": \"values\"},\n {\"property_name\": \"indices\"},\n {\"property_name\": \"dense_shape\"},\n )\n def test_sparse_instance_property(self, property_name):\n inp = layers.Input(shape=[3], sparse=True)\n out = getattr(inp, property_name)\n model = training.Model(inp, out)\n\n x = tf.SparseTensor([[0, 0], [0, 1], [1, 1], [1, 2]], [1, 2, 3, 4], [2, 3])\n expected_property = getattr(x, property_name)\n self.assertAllEqual(model(x), expected_property)\n\n # Test that it works with serialization and deserialization as well\n model_config = model.get_config()\n model2 = training.Model.from_config(model_config)\n self.assertAllEqual(model2(x), expected_property)\n\n @parameterized.parameters([\n (tf.TensorSpec([2, 3], tf.int32), [2, 3]),\n (tf.RaggedTensorSpec([2, None]), [2, None]),\n (tf.SparseTensorSpec([8]), [8]),\n (CustomTypeSpec([3, 8], tf.int32), [3, 8]),\n ])\n def test_shape(self, spec, expected_shape):\n kt = keras_tensor.KerasTensor(spec)\n self.assertEqual(kt.shape.as_list(), expected_shape)\n\n @parameterized.parameters([\n (tf.TensorSpec([8, 3], tf.int32), [8, 3], [8, 3]),\n (tf.TensorSpec([None, 3], tf.int32), [8, 3], [8, 3]),\n (tf.TensorSpec([8, 3], tf.int32), [None, 3], [8, 3]),\n (tf.TensorSpec(None, tf.int32), [8, 3], [8, 3]),\n (tf.TensorSpec(None, tf.int32), [8, None], [8, None]),\n (tf.TensorSpec(None, tf.int32), None, None),\n (tf.RaggedTensorSpec([2, None, None]), [2, None, 5], [2, None, 5]),\n (tf.SparseTensorSpec([8]), [8], [8]),\n (CustomTypeSpec2([3, None], tf.int32), [3, 8], [3, 8]),\n ])\n def test_set_shape(self, spec, new_shape, expected_shape):\n kt = keras_tensor.KerasTensor(spec)\n 
kt.set_shape(new_shape)\n if expected_shape is None:\n self.assertIsNone(kt.type_spec.shape.rank)\n else:\n self.assertEqual(kt.type_spec.shape.as_list(), expected_shape)\n self.assertTrue(kt.type_spec.is_compatible_with(spec))\n\n def test_set_shape_error(self):\n spec = CustomTypeSpec([3, None], tf.int32)\n kt = keras_tensor.KerasTensor(spec)\n with self.assertRaisesRegex(\n ValueError, \"Keras requires TypeSpec to have a `with_shape` method\"):\n kt.set_shape([3, 3])\n\n def test_set_shape_equals_expected_shape(self):\n # Tests b/203201161: DenseSpec has both a _shape and a _shape_tuple field,\n # and we need to be sure both get updated.\n kt = keras_tensor.KerasTensor(tf.TensorSpec([8, None], tf.int32))\n kt.set_shape([8, 3])\n self.assertEqual(kt.type_spec, tf.TensorSpec([8, 3], tf.int32))\n\n def test_type_spec_with_shape_equals_expected_shape(self):\n # Tests b/203201161: DenseSpec has both a _shape and a _shape_tuple field,\n # and we need to be sure both get updated.\n spec1 = tf.TensorSpec([8, None], tf.int32)\n spec2 = keras_tensor.type_spec_with_shape(spec1, [8, 3])\n expected = tf.TensorSpec([8, 3], tf.int32)\n self.assertEqual(spec2, expected)\n\n def test_missing_shape_error(self):\n spec = CustomTypeSpec(None, tf.int32)\n del spec.shape\n with self.assertRaisesRegex(\n ValueError,\n \"KerasTensor only supports TypeSpecs that have a shape field; .*\"):\n keras_tensor.KerasTensor(spec)\n\n def test_wrong_shape_type_error(self):\n spec = CustomTypeSpec(None, tf.int32)\n spec.shape = \"foo\"\n with self.assertRaisesRegex(\n TypeError, \"KerasTensor requires that wrapped TypeSpec's shape is a \"\n \"TensorShape; .*\"):\n keras_tensor.KerasTensor(spec)\n\n def test_missing_dtype_error(self):\n spec = CustomTypeSpec(None, tf.int32)\n del spec.dtype\n kt = keras_tensor.KerasTensor(spec)\n with self.assertRaisesRegex(\n AttributeError,\n \"KerasTensor wraps TypeSpec .* which does not have a dtype.\"):\n kt.dtype # pylint: disable=pointless-statement\n\n def 
test_wrong_dtype_type_error(self):\n spec = CustomTypeSpec(None, tf.int32)\n spec.dtype = \"foo\"\n kt = keras_tensor.KerasTensor(spec)\n with self.assertRaisesRegex(\n TypeError,\n \"KerasTensor requires that wrapped TypeSpec's dtype is a DType; .*\"):\n kt.dtype # pylint: disable=pointless-statement\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Layer serialization/deserialization functions.\"\"\"\n\nimport tensorflow.compat.v2 as tf\n# pylint: disable=g-bad-import-order,g-direct-tensorflow-import,unused-import,wildcard-import\n\nimport threading\nfrom keras.engine import base_layer\nfrom keras.engine import input_layer\nfrom keras.engine import input_spec\nfrom keras.layers import activation\nfrom keras.layers import attention\nfrom keras.layers import convolutional\nfrom keras.layers import convolutional_recurrent\nfrom keras.layers import core\nfrom keras.layers import cudnn_recurrent\nfrom keras.layers import einsum_dense\nfrom keras.layers import embeddings\nfrom keras.layers import locally_connected\nfrom keras.layers import merging\nfrom keras.layers import noise\nfrom keras.layers import pooling\nfrom keras.layers import recurrent\nfrom keras.layers import recurrent_v2\nfrom keras.layers import regularization\nfrom keras.layers import reshaping\nfrom 
keras.layers import rnn_cell_wrapper_v2\nfrom keras.layers import wrappers\nfrom keras.layers.normalization import batch_normalization\nfrom keras.layers.normalization import batch_normalization_v1\nfrom keras.layers.normalization import layer_normalization\nfrom keras.layers.normalization import unit_normalization\nfrom keras.layers.preprocessing import category_encoding\nfrom keras.layers.preprocessing import discretization\nfrom keras.layers.preprocessing import hashing\nfrom keras.layers.preprocessing import hashed_crossing\nfrom keras.layers.preprocessing import image_preprocessing\nfrom keras.layers.preprocessing import integer_lookup\nfrom keras.layers.preprocessing import normalization as preprocessing_normalization\nfrom keras.layers.preprocessing import string_lookup\nfrom keras.layers.preprocessing import text_vectorization\nfrom keras.saving.saved_model import json_utils\nfrom keras.utils import generic_utils\nfrom keras.utils import tf_inspect as inspect\nfrom tensorflow.python.util.tf_export import keras_export\n\nALL_MODULES = (base_layer, input_layer, activation, attention, convolutional,\n convolutional_recurrent, core, cudnn_recurrent, embeddings,\n einsum_dense, locally_connected, merging, batch_normalization_v1,\n layer_normalization, unit_normalization, pooling,\n image_preprocessing, recurrent, regularization, reshaping,\n wrappers, hashing, hashed_crossing, category_encoding,\n discretization, integer_lookup, preprocessing_normalization,\n string_lookup, text_vectorization)\nALL_V2_MODULES = (rnn_cell_wrapper_v2, batch_normalization, layer_normalization,\n recurrent_v2)\n# ALL_OBJECTS is meant to be a global mutable. 
Hence we need to make it\n# thread-local to avoid concurrent mutations.\nLOCAL = threading.local()\n\n\ndef populate_deserializable_objects():\n \"\"\"Populates dict ALL_OBJECTS with every built-in layer.\"\"\"\n global LOCAL\n if not hasattr(LOCAL, 'ALL_OBJECTS'):\n LOCAL.ALL_OBJECTS = {}\n LOCAL.GENERATED_WITH_V2 = None\n\n if LOCAL.ALL_OBJECTS and LOCAL.GENERATED_WITH_V2 == tf.__internal__.tf2.enabled(\n ):\n # Objects dict is already generated for the proper TF version:\n # do nothing.\n return\n\n LOCAL.ALL_OBJECTS = {}\n LOCAL.GENERATED_WITH_V2 = tf.__internal__.tf2.enabled()\n\n base_cls = base_layer.Layer\n generic_utils.populate_dict_with_module_objects(\n LOCAL.ALL_OBJECTS,\n ALL_MODULES,\n obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))\n\n # Overwrite certain V1 objects with V2 versions\n if tf.__internal__.tf2.enabled():\n generic_utils.populate_dict_with_module_objects(\n LOCAL.ALL_OBJECTS,\n ALL_V2_MODULES,\n obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls))\n\n # These deserialization aliases are added for backward compatibility,\n # as in TF 1.13, \"BatchNormalizationV1\" and \"BatchNormalizationV2\"\n # were used as class name for v1 and v2 version of BatchNormalization,\n # respectively. 
Here we explicitly convert them to their canonical names.\n LOCAL.ALL_OBJECTS[\n 'BatchNormalizationV1'] = batch_normalization_v1.BatchNormalization\n LOCAL.ALL_OBJECTS[\n 'BatchNormalizationV2'] = batch_normalization.BatchNormalization\n\n # Prevent circular dependencies.\n from keras import models # pylint: disable=g-import-not-at-top\n from keras.premade_models.linear import LinearModel # pylint: disable=g-import-not-at-top\n from keras.premade_models.wide_deep import WideDeepModel # pylint: disable=g-import-not-at-top\n from keras.feature_column.sequence_feature_column import SequenceFeatures # pylint: disable=g-import-not-at-top\n\n LOCAL.ALL_OBJECTS['Input'] = input_layer.Input\n LOCAL.ALL_OBJECTS['InputSpec'] = input_spec.InputSpec\n LOCAL.ALL_OBJECTS['Functional'] = models.Functional\n LOCAL.ALL_OBJECTS['Model'] = models.Model\n LOCAL.ALL_OBJECTS['SequenceFeatures'] = SequenceFeatures\n LOCAL.ALL_OBJECTS['Sequential'] = models.Sequential\n LOCAL.ALL_OBJECTS['LinearModel'] = LinearModel\n LOCAL.ALL_OBJECTS['WideDeepModel'] = WideDeepModel\n\n if tf.__internal__.tf2.enabled():\n from keras.feature_column.dense_features_v2 import DenseFeatures # pylint: disable=g-import-not-at-top\n LOCAL.ALL_OBJECTS['DenseFeatures'] = DenseFeatures\n else:\n from keras.feature_column.dense_features import DenseFeatures # pylint: disable=g-import-not-at-top\n LOCAL.ALL_OBJECTS['DenseFeatures'] = DenseFeatures\n\n # Merging layers, function versions.\n LOCAL.ALL_OBJECTS['add'] = merging.add\n LOCAL.ALL_OBJECTS['subtract'] = merging.subtract\n LOCAL.ALL_OBJECTS['multiply'] = merging.multiply\n LOCAL.ALL_OBJECTS['average'] = merging.average\n LOCAL.ALL_OBJECTS['maximum'] = merging.maximum\n LOCAL.ALL_OBJECTS['minimum'] = merging.minimum\n LOCAL.ALL_OBJECTS['concatenate'] = merging.concatenate\n LOCAL.ALL_OBJECTS['dot'] = merging.dot\n\n\n@keras_export('keras.layers.serialize')\ndef serialize(layer):\n \"\"\"Serializes a `Layer` object into a JSON-compatible representation.\n\n 
Args:\n layer: The `Layer` object to serialize.\n\n Returns:\n A JSON-serializable dict representing the object's config.\n\n Example:\n\n ```python\n from pprint import pprint\n model = tf.keras.models.Sequential()\n model.add(tf.keras.Input(shape=(16,)))\n model.add(tf.keras.layers.Dense(32, activation='relu'))\n\n pprint(tf.keras.layers.serialize(model))\n # prints the configuration of the model, as a dict.\n \"\"\"\n return generic_utils.serialize_keras_object(layer)\n\n\n@keras_export('keras.layers.deserialize')\ndef deserialize(config, custom_objects=None):\n \"\"\"Instantiates a layer from a config dictionary.\n\n Args:\n config: dict of the form {'class_name': str, 'config': dict}\n custom_objects: dict mapping class names (or function names) of custom\n (non-Keras) objects to class/functions\n\n Returns:\n Layer instance (may be Model, Sequential, Network, Layer...)\n\n Example:\n\n ```python\n # Configuration of Dense(32, activation='relu')\n config = {\n 'class_name': 'Dense',\n 'config': {\n 'activation': 'relu',\n 'activity_regularizer': None,\n 'bias_constraint': None,\n 'bias_initializer': {'class_name': 'Zeros', 'config': {}},\n 'bias_regularizer': None,\n 'dtype': 'float32',\n 'kernel_constraint': None,\n 'kernel_initializer': {'class_name': 'GlorotUniform',\n 'config': {'seed': None}},\n 'kernel_regularizer': None,\n 'name': 'dense',\n 'trainable': True,\n 'units': 32,\n 'use_bias': True\n }\n }\n dense_layer = tf.keras.layers.deserialize(config)\n ```\n \"\"\"\n populate_deserializable_objects()\n return generic_utils.deserialize_keras_object(\n config,\n module_objects=LOCAL.ALL_OBJECTS,\n custom_objects=custom_objects,\n printable_module_name='layer')\n\n\ndef get_builtin_layer(class_name):\n \"\"\"Returns class if `class_name` is registered, else returns None.\"\"\"\n if not hasattr(LOCAL, 'ALL_OBJECTS'):\n populate_deserializable_objects()\n return LOCAL.ALL_OBJECTS.get(class_name)\n\n\ndef deserialize_from_json(json_string, 
custom_objects=None):\n \"\"\"Instantiates a layer from a JSON string.\"\"\"\n populate_deserializable_objects()\n config = json_utils.decode_and_deserialize(\n json_string,\n module_objects=LOCAL.ALL_OBJECTS,\n custom_objects=custom_objects)\n return deserialize(config, custom_objects)\n" ]
[ [ "tensorflow.compat.v2.executing_eagerly", "tensorflow.compat.v2.test.main", "numpy.random.seed" ], [ "tensorflow.compat.v2.data.Dataset.from_tensor_slices", "numpy.random.random", "tensorflow.compat.v2.test.main", "numpy.arange", "numpy.copy", "numpy.zeros_like", "numpy.random.rand", "numpy.vstack" ], [ "tensorflow.compat.v2.data.Dataset.from_tensor_slices", "tensorflow.compat.v2.executing_eagerly", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.mixed_precision.experimental.DynamicLossScale", "tensorflow.compat.v2.device", "tensorflow.compat.v2.reduce_mean", "tensorflow.compat.v2.cast", "numpy.ones", "numpy.random.normal", "tensorflow.compat.v2.config.list_logical_devices", "tensorflow.compat.v2.distribute.MirroredStrategy", "numpy.array", "numpy.zeros" ], [ "tensorflow.compat.v2.unstack", "tensorflow.compat.v2.test.main", "tensorflow.compat.v2.reshape", "tensorflow.compat.v2.RaggedTensorSpec", "tensorflow.compat.v2.SparseTensorSpec", "tensorflow.compat.v2.SparseTensor", "tensorflow.compat.v2.TensorShape", "tensorflow.compat.v2.dtypes.as_dtype", "tensorflow.compat.v2.TensorSpec" ], [ "tensorflow.python.util.tf_export.keras_export", "tensorflow.compat.v2.__internal__.tf2.enabled" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ramiro050/pytorch
[ "dc4f12d9cc15476c545e3a1bb7a74e23d5b0ddf5" ]
[ "test/test_nn.py" ]
[ "# Owner(s): [\"module: nn\"]\n\nimport contextlib\nimport math\nimport random\nimport string\nimport unittest\nimport io\nimport unittest.mock as mock\nimport itertools\nimport warnings\nimport pickle\nfrom copy import deepcopy\nfrom itertools import repeat, product\nfrom functools import reduce, partial\nfrom operator import mul\nfrom collections import OrderedDict\n\nimport torch\n\n# TODO: remove this global setting\n# NN tests use double as the default dtype\ntorch.set_default_dtype(torch.double)\n\nfrom torch._six import inf, nan\nimport torch.autograd.forward_ad as fwAD\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\nimport torch.nn.utils.rnn as rnn_utils\nfrom torch.nn.utils import clip_grad_norm_, clip_grad_value_\nimport torch.nn.utils.parametrize as parametrize\nimport torch.nn.utils.prune as prune\nfrom torch.nn.utils import parameters_to_vector, vector_to_parameters\nfrom torch.nn import Parameter\nfrom torch.nn.parameter import UninitializedParameter, UninitializedBuffer\nfrom torch.nn.parallel._functions import Broadcast\nfrom torch.testing._internal.common_dtype import integral_types, floating_types_and, get_all_math_dtypes, \\\n floating_and_complex_types_and\nfrom torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \\\n skipIfRocmVersionLessThan, skipIfNotMiopenSuggestNHWC, TEST_NUMPY, TEST_SCIPY, TEST_WITH_CROSSREF, TEST_WITH_ROCM, \\\n download_file, get_function_arglist, load_tests, \\\n suppress_warnings, TemporaryFileName, TEST_WITH_UBSAN, IS_PPC, \\\n parametrize as parametrize_test, subtest, instantiate_parametrized_tests, set_default_dtype\nfrom torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU, TEST_CUDNN, TEST_CUDNN_VERSION\nfrom torch.testing._internal.common_nn import NNTestCase, NewModuleTest, CriterionTest, \\\n module_tests, criterion_tests, loss_reference_fns, \\\n 
ctcloss_reference, new_module_tests, single_batch_reference_fn\nfrom torch.testing._internal.common_device_type import expectedFailureXLA, instantiate_device_type_tests, dtypes, \\\n dtypesIfCUDA, precisionOverride, skipCUDAIfNoCudnn, skipCUDAIfCudnnVersionLessThan, onlyCUDA, onlyCPU, \\\n skipCUDAIfRocm, skipCUDAIf, skipCUDAIfNotRocm, skipCUDAIfRocmVersionLessThan, skipCUDAIfNotMiopenSuggestNHWC, \\\n onlyNativeDeviceTypes, deviceCountAtLeast, largeTensorTest, expectedFailureMeta, skipMeta, get_all_device_types, \\\n disableMkldnn, skipCPUIfNoMkldnn, disablecuDNN, skipCUDAIfMiopen, skipCUDAIfNoMiopen\nfrom torch.nn import MultiheadAttention\n\nfrom hypothesis import given\nfrom torch.testing import make_tensor\nimport torch.testing._internal.hypothesis_utils as hu\nfrom torch.testing._internal.common_utils import _assertGradAndGradgradChecks, gradcheck, gradgradcheck, \\\n GRADCHECK_NONDET_TOL\nfrom torch.testing._internal.common_utils import dtype2prec_DONTUSE\nfrom torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32, tf32_off, tf32_on\nfrom torch.types import _TensorOrTensors\n\n\nAMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()\n\n# load_tests from common_utils is used to automatically filter tests for\n# sharding on sandcastle. 
This line silences flake warnings\nload_tests = load_tests\n\nif TEST_SCIPY:\n from scipy import stats\n import scipy.signal\n import scipy.ndimage\n\nif TEST_NUMPY:\n import numpy as np\n\n\n# WARNING: If you add a new top-level test case to this file, you MUST\n# update test/run_test.py to list it, otherwise it will NOT be run in\n# CI.\n\n\nclass PackedSequenceTest(TestCase):\n\n _type_by_name = {\n 'torch.DoubleTensor': (torch.DoubleTensor, 'double'),\n 'torch.FloatTensor': (torch.FloatTensor, 'float'),\n # We leave out `'torch.HalfTensor': (torch.HalfTensor, 'half'),`\n # because of an error in `pad_packed_sequence`\n # > AttributeError: 'torch.HalfTensor' object has no attribute 'fill_'\n 'torch.LongTensor': (torch.LongTensor, 'long'),\n 'torch.IntTensor': (torch.IntTensor, 'int'),\n 'torch.ShortTensor': (torch.ShortTensor, 'short'),\n 'torch.CharTensor': (torch.CharTensor, 'char'),\n 'torch.ByteTensor': (torch.ByteTensor, 'byte'),\n }\n\n def __init__(self, *args, **kwargs):\n super(PackedSequenceTest, self).__init__(*args, **kwargs)\n self.batch_size = 5\n self.max_length = 6\n\n def _ordered_sequence(self, tensor_type):\n \"\"\"Create ordered list of random sequences\"\"\"\n seqs = [tensor_type(random.randint(1, self.max_length))\n for _ in range(self.batch_size)]\n if tensor_type == torch.ByteTensor:\n seqs = [s.random_(0, 256) for s in seqs]\n else:\n seqs = [s.random_(-128, 128) for s in seqs]\n ordered = sorted(seqs, key=len, reverse=True)\n return ordered\n\n def _padded_sequence(self, tensor_type):\n \"\"\"Create Tensor of random padded sequences\"\"\"\n ordered = self._ordered_sequence(tensor_type)\n lengths = [len(i) for i in ordered]\n padded_tensor = rnn_utils.pad_sequence(ordered)\n return padded_tensor, lengths\n\n def test_type_casts(self):\n \"\"\"Test type casting of `PackedSequence` against type casting of tensor\"\"\"\n for _, (input_type, _) in self._type_by_name.items():\n for expected_type_str, (_, cast_str) in 
self._type_by_name.items():\n for enforce_sorted in [True, False]:\n padded, lengths = self._padded_sequence(input_type)\n packed = rnn_utils.pack_padded_sequence(\n padded, lengths, enforce_sorted=enforce_sorted)\n # Apply cast to `PackedSequence` instance and unpack\n masked = getattr(packed, cast_str)()\n unpacked, lengths_out = rnn_utils.pad_packed_sequence(masked)\n self.assertEqual(unpacked.type(), expected_type_str)\n\n def test_wrong_order(self):\n a = torch.ones(25, 300)\n b = torch.ones(22, 300)\n b_a = rnn_utils.pad_sequence([b, a])\n self.assertRaises(\n RuntimeError,\n lambda: rnn_utils.pack_padded_sequence(b_a, [22, 25], enforce_sorted=True))\n\n def test_pad_sequence_with_tensor_sequences(self):\n seq_tuple_input = torch.nn.utils.rnn.pad_sequence(\n (torch.tensor([[7, 6]]), torch.tensor([[-7, -1]]))\n )\n seq_tensor_input = torch.nn.utils.rnn.pad_sequence(\n torch.tensor([[[7, 6]], [[-7, -1]]])\n )\n self.assertEqual(seq_tuple_input, seq_tensor_input)\n self.assertEqual(seq_tuple_input.shape, torch.Size([1, 2, 2]))\n\n def test_pad_sequence_with_non_iterable_sequences(self):\n msg = r\"Expected iterable for input sequences, but got arg of type\"\n with self.assertRaisesRegex(RuntimeError, msg):\n torch.nn.utils.rnn.pad_sequence(5)\n\n def test_total_length(self):\n padded, lengths = self._padded_sequence(torch.FloatTensor)\n max_length = max(lengths)\n packed = rnn_utils.pack_padded_sequence(padded, lengths)\n # test ValueError if total_length < max_length\n for total_length in (-1, 0, max_length - 1):\n for batch_first in (True, False):\n def err_fn():\n rnn_utils.pad_packed_sequence(packed, batch_first=batch_first,\n total_length=total_length)\n self.assertRaisesRegex(ValueError,\n r'Expected total_length to be at least the '\n r'length of the longest sequence in input',\n err_fn)\n # test that pad_packed_sequence returns results of correct length\n for batch_first in (True, False):\n no_extra_pad, _ = rnn_utils.pad_packed_sequence(packed, 
batch_first=batch_first)\n for total_length_delta in (0, 1, 8):\n total_length = max_length + total_length_delta\n unpacked, lengths_out = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first,\n total_length=total_length)\n self.assertEqual(lengths, lengths_out)\n self.assertEqual(unpacked.size(1 if batch_first else 0), total_length)\n if total_length_delta == 0:\n ref_output = no_extra_pad\n elif batch_first:\n extra_pad = no_extra_pad.new_zeros(self.batch_size, total_length_delta)\n ref_output = torch.cat([no_extra_pad, extra_pad], 1)\n else:\n extra_pad = no_extra_pad.new_zeros(total_length_delta, self.batch_size)\n ref_output = torch.cat([no_extra_pad, extra_pad], 0)\n self.assertEqual(unpacked, ref_output)\n\n def test_to(self):\n for enforce_sorted in (True, False):\n padded, lengths = self._padded_sequence(torch.IntTensor)\n a = rnn_utils.pack_padded_sequence(\n padded, lengths, enforce_sorted=enforce_sorted).cpu()\n\n self.assertIs(a, a.to('cpu'))\n self.assertIs(a, a.cpu())\n self.assertIs(a, a.to('cpu', dtype=torch.int32))\n self.assertEqual(a.long(), a.to(torch.int64))\n\n if torch.cuda.is_available():\n for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:\n b = a.cuda(device=cuda)\n self.assertIs(b, b.to(cuda))\n self.assertIs(b, b.cuda())\n self.assertEqual(a, b.to('cpu'))\n self.assertEqual(b, a.to(cuda))\n self.assertEqual(a, b.to('cpu', dtype=torch.int32))\n self.assertIs(b, b.to(dtype=torch.int32))\n self.assertEqual(b.long(), b.to(dtype=torch.int64))\n\n def test_to_memory_format(self):\n m = torch.nn.Conv2d(in_channels=16, out_channels=32, kernel_size=2, bias=True)\n m = m.to(memory_format=torch.channels_last)\n for param in m.parameters():\n if param.dim() == 4:\n self.assertTrue(param.is_contiguous(memory_format=torch.channels_last))\n\nclass TestAvgPool(TestCase):\n def _sum_pool2d(self, x, kernel_size):\n windows = torch.nn.functional.unfold(x, kernel_size=kernel_size, stride=kernel_size)\n return 
torch.sum(windows, dim=1)\n\n def _sum_pool3d(self, x, kernel_size):\n # Because unfold does not support 3D sliding window we will split tensor to multiple tensors and calculate sum\n h = kernel_size[0]\n splited_x = [t.sum(0) for t in x.split(h) if t.size(0) == h]\n # sum_pool2d assumes tensor in (1, 1, n, m) view, so unsqueeze two times\n splited_x = [self._sum_pool2d(t.unsqueeze(0).unsqueeze(0), kernel_size[1:]) for t in splited_x]\n joined_x = torch.cat(splited_x)\n return joined_x.view(1, joined_x.numel())\n\n def _avg_pool2d(self, x, kernel_size):\n size = reduce((lambda x, y: x * y), kernel_size)\n return self._sum_pool2d(x, kernel_size) / size\n\n def _avg_pool3d(self, x, kernel_size):\n size = reduce((lambda x, y: x * y), kernel_size)\n return self._sum_pool3d(x, kernel_size) / size\n\n def test_doubletensor_avg_pool2d(self):\n n, m = 5, 8\n input = torch.rand(1, 1, n, m)\n for i in range(1, n + 1):\n for j in range(1, m + 1):\n actual = torch.nn.functional.avg_pool2d(input[0], (i, j))\n actual = actual.view(1, actual.numel())\n expected = self._avg_pool2d(input, (i, j))\n self.assertEqual(actual, expected, rtol=0, atol=1e-5)\n\n def test_avg_pool2d_with_zero_divisor(self):\n self.assertRaisesRegex(RuntimeError, \"divisor must be not zero\",\n lambda: F.avg_pool2d(torch.zeros(3, 3, 3), (2, 2), divisor_override=0))\n\n def test_doubletensor_avg_pool2d_with_divisor(self):\n n, m = 3, 3\n input = torch.rand(1, 1, n, m)\n for i in range(1, n + 1):\n for j in range(1, m + 1):\n for divisor in [1, 7, i * j]:\n actual = F.avg_pool2d(input[0], (i, j), divisor_override=divisor)\n actual = actual.view(1, actual.numel())\n expected = self._sum_pool2d(input, (i, j)) / divisor\n self.assertEqual(actual, expected, rtol=0, atol=1e-5)\n\n def test_doubletensor_avg_pool3d(self):\n h, w, d = 5, 6, 7\n input = torch.rand(h, w, d)\n for i in range(1, h + 1):\n for j in range(1, w + 1):\n for k in range(1, d + 1):\n actual = torch.nn.functional.avg_pool3d(input.unsqueeze(0), 
(i, j, k))\n actual = actual.view(1, actual.numel())\n expected = self._avg_pool3d(input, (i, j, k))\n self.assertEqual(actual, expected, rtol=0, atol=1e-5)\n\n def test_doubletensor_avg_pool3d_with_divisor(self):\n h, w, d = 6, 5, 7\n input = torch.rand(h, w, d)\n for i in range(1, h + 1):\n for j in range(1, w + 1):\n for k in range(1, d + 1):\n for divisor in [1, 7, i * j]:\n actual = torch.nn.functional.avg_pool3d(input.unsqueeze(0), (i, j, k), divisor_override=divisor)\n actual = actual.view(1, actual.numel())\n expected = self._sum_pool3d(input, (i, j, k)) / divisor\n self.assertEqual(actual, expected, rtol=0, atol=1e-5)\n\n def test_avg_pool3d_with_zero_divisor(self):\n self.assertRaisesRegex(RuntimeError, \"divisor must be not zero\",\n lambda: F.avg_pool3d(torch.zeros(3, 3, 3, 3), (2, 2, 2), divisor_override=0))\n\n def test_avg_pool1d_ceil_mode(self):\n # Regression test for gh-36977\n x = 10 * torch.randn((1, 16, 4))\n y = torch.nn.functional.avg_pool1d(\n x, ceil_mode=True, count_include_pad=True, kernel_size=1, stride=2)\n self.assertTrue(not torch.isnan(y).any())\n\n if TEST_CUDA:\n y = torch.nn.functional.avg_pool1d(\n x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=1, stride=2)\n self.assertTrue(not torch.isnan(y).any())\n\n\n def test_avg_pool2d_ceil_mode(self):\n # Regression test for gh-36977\n x = 10 * torch.randn((1, 16, 4, 4))\n y = torch.nn.functional.avg_pool2d(\n x, ceil_mode=True, count_include_pad=True, kernel_size=(1, 2),\n padding=(0, 1), stride=2)\n self.assertTrue(not torch.isnan(y).any())\n\n if TEST_CUDA:\n y = torch.nn.functional.avg_pool2d(\n x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=(1, 2),\n padding=(0, 1), stride=2)\n self.assertTrue(not torch.isnan(y).any())\n\n\n def test_avg_pool3d_ceil_mode(self):\n # Regression test for gh-36977\n x = 10 * torch.randn((1, 16, 4, 4, 4))\n y = torch.nn.functional.avg_pool3d(\n x, ceil_mode=True, count_include_pad=True, kernel_size=(1, 2, 3), 
stride=2)\n self.assertTrue(not torch.isnan(y).any())\n\n if TEST_CUDA:\n y = torch.nn.functional.avg_pool3d(\n x.to('cuda'), ceil_mode=True, count_include_pad=True, kernel_size=(1, 2, 3), stride=2)\n self.assertTrue(not torch.isnan(y).any())\n\n\nclass TestNN(NNTestCase):\n _do_cuda_memory_leak_check = True\n _do_cuda_non_default_stream = True\n\n def _forward(self, module, input: _TensorOrTensors):\n with freeze_rng_state():\n if isinstance(input, tuple):\n return module(*input)\n else:\n return module(input)\n\n def _backward(self, module, input: _TensorOrTensors, output, grad_output, create_graph=False):\n output.backward(grad_output, retain_graph=True, create_graph=create_graph)\n if isinstance(input, tuple):\n return tuple(i.grad.data if i.grad is not None else None for i in input)\n else:\n return input.grad.data if input.grad is not None else None\n\n def _forward_criterion(self, criterion, input, target, extra_args=None):\n if extra_args is None:\n extra_args = tuple()\n if isinstance(input, tuple):\n args = input + (target,) + extra_args\n output = criterion(*args)\n else:\n output = criterion(input, target, *extra_args)\n return output\n\n def _backward_criterion(self, criterion, input, output, target, gradOutput=None, extra_args=None):\n if extra_args is None:\n extra_args = tuple()\n input_tuple = input if isinstance(input, tuple) else (input,)\n output_tuple = output if isinstance(output, tuple) else (output,)\n for i in input_tuple:\n if i.grad is not None:\n i.grad.data.zero_()\n args = input_tuple + (target,) + extra_args\n if gradOutput is None:\n gradOutput = torch.ones(())\n criterion(*args).backward(gradOutput.to(output_tuple[0]))\n if isinstance(input, tuple):\n return tuple(i.grad.data for i in input)\n else:\n return input.grad.data\n\n def _zero_grad_parameters(self, module):\n for p in module.parameters():\n if p.grad is not None:\n with torch.no_grad():\n p.grad.zero_()\n p.grad.detach_()\n\n def _get_parameters(self, module):\n params = 
[]\n d_params = []\n for p in module.parameters():\n params.append(p)\n d_params.append(p.grad)\n return params, d_params\n\n def _create_basic_net(self):\n class Layer(nn.Module):\n def __init__(self):\n super(Layer, self).__init__()\n self.layer_dummy_param = Parameter(torch.empty(3, 5))\n self.register_buffer('layer_dummy_buf', torch.zeros(1, 3, 3, 7))\n\n class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.l1 = Layer()\n self.dummy_param = Parameter(torch.empty(3, 5))\n self.register_buffer('dummy_buf', torch.zeros(7, 3, 3, 1))\n\n l = Layer()\n n = Net()\n s = nn.Sequential(n, n)\n\n return l, n, s\n\n def test_requires_grad_(self):\n m = self._create_basic_net()[-1]\n assert len(list(m.buffers())) > 0, 'invalid test'\n assert all(not b.requires_grad for b in m.buffers()) > 0, 'invalid test'\n assert len(list(m.parameters())) > 0, 'invalid test'\n assert all(p.requires_grad for p in m.parameters()) > 0, 'invalid test'\n for requires_grad in (False, True):\n self.assertIs(m.requires_grad_(requires_grad), m)\n for p in m.parameters():\n self.assertEqual(p.requires_grad, requires_grad)\n for b in m.buffers():\n self.assertFalse(b.requires_grad)\n\n def test_module_backcompat(self):\n from torch.serialization import SourceChangeWarning\n path = download_file('https://download.pytorch.org/test_data/linear.pt')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', SourceChangeWarning)\n m = torch.load(path)\n input = torch.randn(2, 3, dtype=torch.float)\n self.assertEqual(m(input).size(), (2, 5))\n\n def test_conv_backcompat(self):\n from torch.serialization import SourceChangeWarning\n # This file was generated by running on PyTorch 1.0.1 on Python 2:\n #\n # import torch\n # from torch import nn\n # m = nn.Conv2d(1, 1, 1)\n # torch.save(m, 'legacy_conv2d.pt')\n #\n # NB: This Pickle also contains some Unicode data!\n path = download_file('https://download.pytorch.org/test_data/legacy_conv2d.pt')\n with 
warnings.catch_warnings():\n warnings.simplefilter('ignore', SourceChangeWarning)\n m = torch.load(path, encoding='utf-8')\n input = torch.randn((1, 1, 1, 1), dtype=torch.float)\n self.assertEqual(m(input).size(), (1, 1, 1, 1))\n\n def test_share_memory(self):\n class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.p = nn.Parameter(torch.eye(5))\n self.par = nn.ParameterList()\n self.par.append(nn.Parameter(torch.randn(10)))\n\n def forward(self, inp):\n # NB: dead code\n return inp.clone()\n\n net = Net()\n for p in net.parameters():\n self.assertFalse(p.storage().is_shared())\n for b in net.buffers():\n self.assertFalse(b.storage().is_shared())\n net.share_memory()\n for p in net.parameters():\n self.assertTrue(p.storage().is_shared())\n for b in net.buffers():\n self.assertTrue(b.storage().is_shared())\n\n def _test_hooks(self, backward_register_fn):\n module = nn.Sigmoid()\n input = torch.ones(5, 5, requires_grad=True)\n\n counter = {\n 'forwards': 0,\n 'backwards': 0\n }\n\n def fw_hook(inc, h_module, input, output):\n self.assertIsInstance(input, tuple)\n self.assertTrue(isinstance(output, torch.Tensor))\n self.assertTrue(h_module is module)\n self.assertEqual(input[0], torch.ones(5, 5))\n self.assertEqual(output, torch.empty(5, 5).fill_(1 / (1 + 1 / math.e)))\n counter['forwards'] += inc\n\n def bw_hook(inc, h_module, grad_input, grad_output):\n self.assertIsInstance(grad_input, tuple)\n self.assertIsInstance(grad_output, tuple)\n self.assertTrue(h_module is module)\n self.assertEqual(grad_output[0], torch.ones(5, 5) * 2)\n counter['backwards'] += inc\n\n test_fwd = module.register_forward_hook(lambda *args: fw_hook(1, *args))\n\n module(input)\n module(input)\n self.assertEqual(counter['forwards'], 2)\n self.assertEqual(counter['backwards'], 0)\n\n test_bwd = getattr(module, backward_register_fn)(\n lambda *args: bw_hook(1, *args))\n\n output = module(input)\n self.assertEqual(counter['forwards'], 3)\n 
self.assertEqual(counter['backwards'], 0)\n\n output.backward(torch.ones(5, 5) * 2, retain_graph=True)\n self.assertEqual(counter['forwards'], 3)\n self.assertEqual(counter['backwards'], 1)\n\n output.backward(torch.ones(5, 5) * 2, retain_graph=True)\n self.assertEqual(counter['forwards'], 3)\n self.assertEqual(counter['backwards'], 2)\n\n test2_fwd = module.register_forward_hook(lambda *args: fw_hook(2, *args))\n\n output = module(input)\n self.assertEqual(counter['forwards'], 6)\n self.assertEqual(counter['backwards'], 2)\n\n test2_bwd = getattr(module, backward_register_fn)(lambda *args: bw_hook(2, *args))\n\n module(input).backward(torch.ones(5, 5) * 2)\n self.assertEqual(counter['forwards'], 9)\n self.assertEqual(counter['backwards'], 5)\n\n test2_bwd.remove()\n\n module(input).backward(torch.ones(5, 5) * 2)\n self.assertEqual(counter['forwards'], 12)\n self.assertEqual(counter['backwards'], 6)\n\n test2_fwd.remove()\n\n module(input).backward(torch.ones(5, 5) * 2)\n self.assertEqual(counter['forwards'], 13)\n self.assertEqual(counter['backwards'], 7)\n\n test_fwd.remove()\n test_bwd.remove()\n\n def test_hooks(self):\n self._test_hooks(\"register_backward_hook\")\n self._test_hooks(\"register_full_backward_hook\")\n\n def test_hook_cpp(self):\n bn = nn.BatchNorm1d(5)\n\n def hook(module, grad_inputs, grad_outputs):\n self.assertEqual(len(grad_inputs), 1)\n self.assertEqual(len(grad_outputs), 1)\n self.assertEqual(module, bn)\n\n bn.register_full_backward_hook(hook)\n output = bn(torch.randn(5, 5, requires_grad=True))\n output.sum().backward()\n\n def test_hook_invalid_outputs(self):\n module = nn.Sigmoid()\n input = torch.randn(5, 5, requires_grad=True)\n\n def bw_fail1(self, grad_input, grad_output):\n return grad_input[:-1]\n\n def bw_fail2(self, grad_input, grad_output):\n return grad_input + (torch.randn(2, 2),)\n\n with module.register_backward_hook(bw_fail1):\n with self.assertRaisesRegex(RuntimeError, 'got 0, but expected 1'):\n 
module(input).sum().backward()\n\n with module.register_backward_hook(bw_fail2):\n with self.assertRaisesRegex(RuntimeError, 'got 2, but expected 1'):\n module(input).sum().backward()\n\n def test_hook_requires_grad(self):\n test_self = self\n\n class MyModule(nn.Module):\n def forward(self, arg1, arg2, arg3):\n test_self.assertTrue(arg1.requires_grad)\n test_self.assertFalse(arg2.requires_grad)\n test_self.assertTrue(arg3.requires_grad)\n return arg1.sum() + arg2.sum() + arg3.sum()\n\n inp = torch.rand(2, requires_grad=True)\n mod = MyModule()\n\n mod(inp, inp.detach(), inp)\n # Ensure that requires grad is properly propagated\n mod.register_full_backward_hook(lambda mod, gI, gO: None)\n mod(inp, inp.detach(), inp)\n\n def test_hook_no_requires_grad(self):\n mod = nn.Linear(2, 3)\n\n inp = torch.rand(1, 2)\n\n return_val = \"None\"\n hook_called = [0]\n\n def hook(mod, grad_input, grad_output):\n hook_called[0] += 1\n for gI in grad_input:\n self.assertIsNone(gI)\n for gO in grad_output:\n self.assertEqual(gO.size(), (1, 3))\n\n if return_val == \"grad_input\":\n return grad_input\n elif return_val == \"invalid\":\n # If the inputs were requiring gradients, this would be\n # a valid return\n return inp\n elif return_val == \"None\":\n return None\n else:\n raise RuntimeError(\"Invalid return_val string\")\n\n mod.register_full_backward_hook(hook)\n\n # This should run and trigger the hook properly\n mod(inp).sum().backward()\n self.assertEqual(hook_called[0], 1)\n\n return_val = \"grad_input\"\n\n mod(inp).sum().backward()\n self.assertEqual(hook_called[0], 2)\n\n return_val = \"invalid\"\n with self.assertRaisesRegex(RuntimeError, \"where no input requires gradient\"):\n mod(inp).sum().backward()\n\n def test_hook_last_arg_requires_grad(self):\n mod = nn.L1Loss()\n inp = torch.rand(1, requires_grad=True)\n mod.register_full_backward_hook(lambda m, gI, gO: None)\n\n try:\n mod(inp.detach(), inp)\n except Exception as ex:\n self.fail(\"Unexpected exception: %s\" % 
ex)\n\n def test_hook_extra_input(self):\n class MyModule(nn.Module):\n def forward(self, non_tensor, tensor):\n return tensor.clone(), non_tensor\n\n inp = torch.rand(2, requires_grad=True)\n mod = MyModule()\n\n def hook(mod, grad_input, grad_output):\n self.assertIsNone(grad_input[0])\n self.assertIsInstance(grad_input[1], torch.Tensor)\n\n self.assertIsInstance(grad_output[0], torch.Tensor)\n self.assertIsNone(grad_output[1])\n\n mod.register_full_backward_hook(hook)\n out, _ = mod(True, inp)\n out.sum().backward()\n\n def test_hook_inplace(self):\n class MyModule(nn.Module):\n def forward(self, inp, do_inplace):\n self.inp = inp\n if do_inplace:\n inp += 1\n return inp.clone()\n\n hook_called = [0]\n\n def hook(mod, grad_input, grad_output):\n hook_called[0] += 1\n\n inp = torch.rand(10, requires_grad=True)\n mod = MyModule()\n mod.register_full_backward_hook(hook)\n\n # No inplace should work\n mod(inp, False).sum().backward()\n self.assertEqual(hook_called[0], 1)\n\n # Input inplace error should throw an error\n with self.assertRaisesRegex(RuntimeError, \"Output 0 of BackwardHookFunctionBackward is \"\n \"a view and is being modified inplace.\"):\n mod(inp.clone(), True)\n\n # Input inplace error should throw an error if we try to re-use the view after they have\n # been modified\n local_inp = inp.clone()\n out = mod(local_inp, False)\n local_inp[0] *= 1\n with self.assertRaisesRegex(RuntimeError, \"Output 0 of BackwardHookFunctionBackward is \"\n \"a view and its base or another view\"):\n # Any operation involving the view will fail here\n mod.inp + 2\n\n # Output inplace error should throw an error\n out = mod(inp, False)\n with self.assertRaisesRegex(RuntimeError, \"BackwardHookFunctionBackward is a view \"\n \"and is being modified inplace.\"):\n out += 1\n\n def test_hook_non_full_warning(self):\n def noop(*args):\n pass\n\n a = torch.rand(2, requires_grad=True)\n b = torch.rand(2, requires_grad=True)\n\n # Check invalid input container\n class 
MyModule(nn.Module):\n def forward(self, l):\n return l[0].clone(), l[1].clone()\n\n m = MyModule()\n m.register_backward_hook(noop)\n\n with self.assertWarnsRegex(UserWarning, \"does not take as input a single Tensor or a tuple of Tensors\"):\n m([a, b])\n\n # Check invalid output container\n class MyModule(nn.Module):\n def forward(self, a, b):\n return [a.clone(), b.clone()]\n\n m = MyModule()\n m.register_backward_hook(noop)\n\n with self.assertWarnsRegex(UserWarning, \"does not return a single Tensor or a tuple of Tensors\"):\n m(a, b)\n\n # Check invalid output from different Nodes\n class MyModule(nn.Module):\n def forward(self, a, b):\n return a.clone(), b.clone()\n\n m = MyModule()\n m.register_backward_hook(noop)\n\n with self.assertWarnsRegex(UserWarning, \"outputs are generated by different autograd Nodes\"):\n m(a, b)\n\n # Check invalid forward with multiple Nodes\n class MyModule(nn.Module):\n def forward(self, a):\n return a.clone().clone()\n\n m = MyModule()\n m.register_backward_hook(noop)\n\n with self.assertWarnsRegex(UserWarning, \"the forward contains multiple autograd Nodes\"):\n m(a)\n\n def test_hook_backward_size(self):\n # Make module with multiple operations in forward\n # And different size for input and outputs\n class MyModule(nn.Module):\n def forward(self, arg1, arg2):\n tmp = arg1.sum() * arg2\n tmp = tmp + arg2.sum() * arg1.sum()\n tmp = tmp.sum().view(1)\n tmp = tmp.expand(8).contiguous()\n return tmp\n\n module = MyModule()\n inp1 = torch.randn(5, 5, requires_grad=True)\n inp2 = torch.randn(10, 10, requires_grad=True)\n\n def bw_hook(module, grad_input, grad_output):\n self.assertEqual(len(grad_input), 2)\n self.assertEqual(grad_input[0].size(), torch.Size([5, 5]))\n self.assertEqual(grad_input[1].size(), torch.Size([10, 10]))\n self.assertEqual(len(grad_output), 1)\n self.assertEqual(grad_output[0].size(), torch.Size([8]))\n\n with module.register_full_backward_hook(bw_hook):\n module(inp1, inp2).sum().backward()\n\n def 
test_hook_backward_writeable(self):\n module = nn.Sigmoid()\n input = torch.randn(5, 5, requires_grad=True)\n sig_x = torch.nn.functional.sigmoid(input)\n\n def bw_hook(module, grad_input, grad_output):\n for grad in grad_input:\n self.assertTrue(isinstance(grad, torch.Tensor))\n for grad in grad_output:\n self.assertTrue(isinstance(grad, torch.Tensor))\n return tuple(gi * 2 for gi in grad_input)\n\n module.register_backward_hook(bw_hook)\n module(input).backward(torch.ones(5, 5))\n expected_grad = sig_x * (1 - sig_x) * 2\n self.assertEqual(input.grad, expected_grad)\n\n def test_hook_forward_preforward_writable(self):\n module = nn.Sigmoid()\n input = torch.randn(5, 5, requires_grad=True)\n sig_x = torch.nn.functional.sigmoid(input)\n\n def forward_pre_hook(m, input):\n return torch.nn.functional.relu(input[0])\n\n def forward_hook(m, input, output):\n return -output\n\n module.register_forward_pre_hook(forward_pre_hook)\n module.register_forward_hook(forward_hook)\n output = module(input)\n expected_res = -torch.nn.functional.sigmoid(torch.nn.functional.relu(input))\n self.assertEqual(output, expected_res)\n output.backward(torch.ones(5, 5) * 2, retain_graph=True)\n mask = (input > 0).double()\n expected_grad = -sig_x * (1 - sig_x) * 2 * mask\n self.assertEqual(input.grad, expected_grad)\n\n def test_to(self):\n m = nn.Linear(3, 5)\n self.assertIs(m, m.to('cpu'))\n self.assertIs(m, m.to('cpu', dtype=torch.float32))\n self.assertEqual(m.double(), m.to(torch.float64))\n self.assertRaises(RuntimeError, lambda: m.to('cpu', copy=True))\n\n if torch.cuda.is_available():\n for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']:\n m2 = m.cuda(device=cuda)\n self.assertIs(m2, m2.to(cuda))\n self.assertEqual(m, m2.to('cpu'))\n self.assertEqual(m2, m.to(cuda))\n self.assertIs(m2, m2.to(dtype=torch.float32))\n self.assertEqual(m2.double(), m2.to(dtype=torch.float64))\n\n def test_zero_grad(self):\n i = torch.randn(2, 5, requires_grad=True)\n module = 
nn.Linear(5, 5)\n for p in module.parameters():\n p.requires_grad = False\n module.zero_grad()\n\n module.weight.requires_grad = True\n module.zero_grad()\n self.assertIsNone(module.weight.grad) # uninitialized grad\n\n module(i).sum().backward()\n self.assertIsNotNone(module.weight.grad)\n self.assertGreater(module.weight.grad.data.abs().sum(), 0)\n module.zero_grad()\n self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())\n\n module.bias.requires_grad = True\n module.zero_grad()\n self.assertIsNotNone(module.weight.grad)\n self.assertIsNone(module.bias.grad)\n module(i).sum().backward()\n self.assertIsNotNone(module.weight.grad)\n self.assertIsNotNone(module.bias.grad)\n self.assertGreater(module.weight.grad.data.abs().sum(), 0)\n self.assertGreater(module.bias.grad.data.abs().sum(), 0)\n module.zero_grad()\n self.assertEqual(module.weight.grad.data, module.weight.data.clone().zero_())\n self.assertEqual(module.bias.grad.data, module.bias.data.clone().zero_())\n\n # Force set to None.\n module.zero_grad(set_to_none=True)\n self.assertIsNone(module.weight.grad)\n\n\n def test_no_grad(self):\n for dtype in [torch.bfloat16, torch.float, torch.double]:\n module = nn.Conv2d(2, 5, kernel_size=3, padding=1).to(dtype)\n input = torch.randn(1, 2, 10, 10).to(dtype)\n x = input\n y = input.clone()\n\n output = module(x)\n self.assertTrue(output.requires_grad)\n output.backward(torch.ones(1, 5, 10, 10))\n\n with torch.no_grad():\n output2 = module(y)\n self.assertFalse(output2.requires_grad)\n self.assertRaises(RuntimeError, lambda: output2.backward(torch.ones(1, 5, 10, 10)))\n\n def test_invalid_conv1d(self):\n for dtype in [torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble]:\n module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True).to(dtype)\n input = torch.randn(1, 3, 4).to(dtype)\n with self.assertRaisesRegex(RuntimeError,\n r'Calculated padded input size per channel: \\(4\\). 
' +\n r'Kernel size: \\(10\\). Kernel size can\\'t be greater than actual input size'):\n module(input)\n\n # Negative stride check\n module = nn.Conv1d(in_channels=3, out_channels=6, kernel_size=3, stride=-1, bias=True).to(dtype)\n input = torch.randn(1, 3, 4).to(dtype)\n with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):\n module(input)\n\n def test_mismatch_shape_conv2d(self):\n for dtype in (torch.float, torch.cfloat):\n x = torch.randn(1, 10, 1, 28, 28, dtype=dtype)\n w = torch.randn(6, 1, 5, 5, dtype=dtype)\n\n with self.assertRaisesRegex(RuntimeError,\n r'Expected 3D \\(unbatched\\) or 4D \\(batched\\) input to conv2d, but got ' +\n r'input of size: \\[1, 10, 1, 28, 28\\]'):\n\n F.conv2d(x, w)\n\n def test_conv2d_discontiguous_weight(self):\n for dtype in (torch.float, torch.cfloat):\n # Test for https://github.com/pytorch/pytorch/issues/55781\n x = torch.ones(64, 16, 16, 16, dtype=dtype)\n weight = torch.arange(0, 1.0, 1 / 2.0 ** 10).reshape(32, 16, 1, 2).to(dtype)[:, :, :, ::2]\n self.assertFalse(weight.is_contiguous())\n y = torch.nn.functional.conv2d(x, weight, None)\n if torch.backends.mkldnn.is_available():\n # Disable MKLDNN explicitly, so that either NNPACK or THCNN will be used\n with torch.backends.mkldnn.flags(enabled=False):\n y_ = torch.nn.functional.conv2d(x, weight, None)\n self.assertEqual(y, y_)\n self.assertEqual(y.sum(), 4186112.)\n\n def test_invalid_conv2d(self):\n for dtype in [torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble]:\n module = torch.nn.Conv2d(1, 1, kernel_size=3, dilation=2, stride=2).to(dtype)\n input = torch.empty(1, 1, 4, 4).to(dtype)\n self.assertRaises(RuntimeError, lambda: module(input))\n\n module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True)\n input = torch.randn(1, 3, 1, 1)\n with self.assertRaisesRegex(RuntimeError,\n r'Calculated padded input size per channel: \\(1 x 1\\). ' +\n r'Kernel size: \\(10 x 10\\). 
Kernel size can\\'t be greater than actual input size'):\n module(input)\n\n # Negative stride check\n module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=-1, bias=True).to(dtype)\n input = torch.randn(1, 3, 4, 4).to(dtype)\n with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):\n module(input)\n\n # Zero stride check\n module = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=4, stride=0, bias=True).to(dtype)\n input = torch.randn(1, 3, 4, 4).to(dtype)\n with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):\n module(input)\n\n def test_invalid_conv3d(self):\n for dtype in [torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble]:\n module = torch.nn.Conv3d(1, 1, kernel_size=3, dilation=2, stride=2).to(dtype)\n input = torch.empty(1, 1, 4, 4, 4).to(dtype)\n self.assertRaises(RuntimeError, lambda: module(input))\n\n # Negative stride check\n module = torch.nn.Conv3d(1, 1, kernel_size=3, stride=-2)\n input = torch.empty(1, 1, 4, 4, 4)\n with self.assertRaisesRegex(RuntimeError, 'non-positive stride is not supported'):\n module(input)\n\n def test_Conv1d_module_same_padding(self):\n # Compare module against functional: without strides/dilation, asymmetric padding\n x = torch.rand(1, 1, 20)\n module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,\n padding='same')\n expect = F.conv1d(x, module.weight, module.bias, padding='same')\n self.assertEqual(expect, module(x))\n\n # Test dilation, symmetric padding\n module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,\n padding='same', dilation=2)\n expect = F.conv1d(x, module.weight, module.bias, padding='same', dilation=2)\n self.assertEqual(expect, module(x))\n\n # Test non-zero padding_mode, requiring explicit padding\n module = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=10,\n padding='same', padding_mode='replicate')\n x_padded = F.pad(x, [4, 5], mode='replicate')\n expect = F.conv1d(x_padded, 
module.weight, module.bias, padding='valid')\n self.assertEqual(expect, module(x))\n self.assertEqual(x.size(), expect.size())\n\n # Test connstruction with invalid padding string raises\n with self.assertRaisesRegex(ValueError, 'Invalid padding string'):\n module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')\n\n # Test connstruction with same padding and strides raises\n with self.assertRaisesRegex(ValueError, \"padding='same'\"):\n module = nn.Conv1d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)\n\n def test_Conv2d_module_same_padding(self):\n # Compare module against functional:\n # without strides/dilation, both symmetric and asymmetric padding\n x = torch.rand(1, 1, 9, 20)\n module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(5, 10),\n padding='same')\n expect = F.conv2d(x, module.weight, module.bias, padding='same')\n self.assertEqual(expect, module(x))\n\n # with dilation, symmetric padding\n module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(3, 4),\n padding='same', dilation=(1, 2))\n expect = F.conv2d(x, module.weight, module.bias, padding='same', dilation=(1, 2))\n self.assertEqual(expect, module(x))\n\n # Test non-zero padding_mode, requiring explicit padding\n module = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(3, 4),\n padding='same', padding_mode='reflect')\n x_padded = F.pad(x, [1, 2, 1, 1], mode='reflect')\n expect = F.conv2d(x_padded, module.weight, module.bias, padding='valid')\n self.assertEqual(expect, module(x))\n self.assertEqual(x.size(), expect.size())\n\n # Test connstruction with invalid padding string raises\n with self.assertRaisesRegex(ValueError, 'Invalid padding string'):\n module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')\n\n # Test connstruction with same padding and strides raises\n with self.assertRaisesRegex(ValueError, \"padding='same'\"):\n module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, 
padding='same', stride=2)\n with self.assertRaisesRegex(ValueError, \"padding='same'\"):\n module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 3))\n with self.assertRaisesRegex(ValueError, \"padding='same'\"):\n module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(4, 1))\n\n def test_Conv3d_module_same_padding(self):\n # Compare module against functional:\n x = torch.rand(1, 1, 4, 4, 4)\n # without dilation, both symmetric and asymmetric padding\n module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),\n padding='same')\n expect = F.conv3d(x, module.weight, module.bias, padding='same')\n self.assertEqual(expect, module(x))\n\n # with dilation, both symmetric and asymmetric padding\n module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),\n padding='same', dilation=(3, 2, 1))\n expect = F.conv3d(x, module.weight, module.bias, padding='same', dilation=(3, 2, 1))\n self.assertEqual(expect, module(x))\n\n # Test non-zero padding_mode, requiring explicit padding\n module = nn.Conv3d(in_channels=1, out_channels=1, kernel_size=(2, 3, 4),\n padding='same', padding_mode='circular')\n x_padded = F.pad(x, [1, 2, 1, 1, 0, 1], mode='circular')\n expect = F.conv3d(x_padded, module.weight, module.bias, padding='valid')\n self.assertEqual(expect, module(x))\n self.assertEqual(x.size(), expect.size())\n\n # Test connstruction with invalid padding string raises\n with self.assertRaisesRegex(ValueError, 'Invalid padding string'):\n module = nn.Conv3d(in_channels=3, out_channels=33, kernel_size=10, padding='foo')\n\n # Test connstruction with same padding and strides raises\n with self.assertRaisesRegex(ValueError, \"padding='same'\"):\n module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=2)\n with self.assertRaisesRegex(ValueError, \"padding='same'\"):\n module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, 
padding='same', stride=(1, 1, 3))\n with self.assertRaisesRegex(ValueError, \"padding='same'\"):\n module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(1, 4, 1))\n with self.assertRaisesRegex(ValueError, \"padding='same'\"):\n module = nn.Conv2d(in_channels=3, out_channels=33, kernel_size=10, padding='same', stride=(5, 1, 1))\n\n def _test_alpha_dropout(self, cls, input):\n mean = input.mean()\n std = input.std()\n\n for p in [0.2, 0.5, 0.8]:\n module = cls(p)\n input_var = input.detach().clone().requires_grad_()\n output = module(input_var)\n # output mean should be close to input mean\n self.assertLess(abs(output.data.mean() - mean), 0.1)\n # output std should be close to input std\n self.assertLess(abs(output.data.std() - std), 0.1)\n output.backward(input)\n\n def test_parameters_and_named_parameters(self):\n def names(named_parameters):\n return [k for k, _ in named_parameters]\n\n l, n, s = self._create_basic_net()\n\n self.assertEqual(len(list(l.parameters())), 1)\n self.assertEqual(\n names(l.named_parameters()),\n ['layer_dummy_param'])\n\n self.assertEqual(len(list(n.parameters())), 2)\n self.assertEqual(\n names(n.named_parameters()),\n ['dummy_param', 'l1.layer_dummy_param'])\n\n self.assertEqual(len(list(n.parameters(recurse=False))), 1)\n self.assertEqual(\n names(n.named_parameters(recurse=False)),\n ['dummy_param'])\n\n self.assertEqual(len(list(s.parameters())), 2)\n self.assertEqual(\n names(s.named_parameters()),\n ['0.dummy_param', '0.l1.layer_dummy_param'])\n\n def test_buffers_and_named_buffers(self):\n def names(named_buffers):\n return [k for k, _ in named_buffers]\n\n l, n, s = self._create_basic_net()\n\n self.assertEqual(len(list(l.buffers())), 1)\n self.assertEqual(\n names(l.named_buffers()),\n ['layer_dummy_buf'])\n\n self.assertEqual(len(list(n.buffers())), 2)\n self.assertEqual(\n names(n.named_buffers()),\n ['dummy_buf', 'l1.layer_dummy_buf'])\n\n 
self.assertEqual(len(list(n.buffers(recurse=False))), 1)\n self.assertEqual(\n names(n.named_buffers(recurse=False)),\n ['dummy_buf'])\n\n self.assertEqual(len(list(s.buffers())), 2)\n self.assertEqual(\n names(s.named_buffers()),\n ['0.dummy_buf', '0.l1.layer_dummy_buf'])\n\n def test_call_supports_python_dict_output(self):\n class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.l1 = nn.Linear(10, 20)\n self.register_backward_hook(self.hook)\n self.check_backward_hook_flag = False\n\n def hook(self, module, grad_out, grad_in):\n self.check_backward_hook_flag = True\n\n def forward(self, inputs):\n return {\"output\": self.l1(inputs).sum()}\n\n net = Net()\n model_output = net(torch.randn([5, 10]))\n model_output[\"output\"].backward()\n self.assertTrue(net.check_backward_hook_flag)\n\n def test_children(self):\n l1 = nn.Linear(2, 2)\n l2 = nn.Linear(2, 2)\n l3 = nn.Linear(2, 2)\n l4 = nn.Linear(2, 2)\n subnet = nn.Sequential(l3, l4)\n s = nn.Sequential(l1, l2, l1, l2, subnet)\n self.assertEqual(list(s.children()), [l1, l2, subnet])\n\n def test_train_errors_for_invalid_mode(self):\n class SubclassNet(nn.Module):\n def __init__(self):\n super(SubclassNet, self).__init__()\n self.l1 = nn.Linear(2, 2)\n\n def forward(self, inputs):\n return self.l1(inputs)\n\n subclass_net = SubclassNet()\n sequential_net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))\n\n error_modes = [\"invalid_str\", torch.device('cpu')]\n modules_to_check = [subclass_net, sequential_net]\n\n for error_mode, module in itertools.product(error_modes, modules_to_check):\n with self.assertRaises(ValueError):\n module.train(error_mode)\n\n def test_dir(self):\n linear = nn.Linear(2, 2)\n linear._test_submodule = nn.Linear(2, 2)\n linear._test_parameter = Parameter(torch.empty(2, 2))\n linear.register_buffer('_test_buffer', torch.empty(2, 2))\n keys = dir(linear)\n self.assertIn('_test_submodule', keys)\n self.assertIn('_test_parameter', keys)\n 
self.assertIn('_test_buffer', keys)\n\n for key in keys:\n self.assertTrue(hasattr(linear, key))\n\n def test_repr(self):\n # no extra information or sub-modules\n empty_sequential = nn.Sequential()\n expected_repr_empty = 'Sequential()'\n self.assertEqual(repr(empty_sequential), expected_repr_empty)\n\n # one liner extra information\n linear = nn.Linear(1, 1)\n expected_repr_linear = 'Linear(in_features=1, out_features=1, bias=True)'\n self.assertEqual(repr(linear), expected_repr_linear)\n\n # sub-modules repr\n sequential = nn.Sequential(linear)\n expected_repr_sequential = 'Sequential(\\n' \\\n ' (0): Linear(in_features=1, out_features=1, bias=True)\\n' \\\n ')'\n self.assertEqual(repr(sequential), expected_repr_sequential)\n\n def test_dir_digit(self):\n model = nn.Sequential(nn.Linear(2, 2))\n keys = dir(model)\n self.assertNotIn('0', keys)\n\n def test_named_children(self):\n l1 = nn.Linear(2, 2)\n l2 = nn.Linear(2, 2)\n l3 = nn.Linear(2, 2)\n l4 = nn.Linear(2, 2)\n subnet = nn.Sequential(l3, l4)\n s = nn.Sequential()\n with self.assertRaises(KeyError):\n s.add_module('', l1)\n with self.assertRaises(KeyError):\n s.add_module('name.with.dot', l1)\n s.add_module('layer1', l1)\n s.add_module('layer2', l2)\n s.add_module('layer3', l1)\n s.add_module('layer4', l2)\n s.add_module('subnet', subnet)\n self.assertEqual(list(s.named_children()), [('layer1', l1), ('layer2', l2), ('subnet', subnet)])\n\n def test_modules(self):\n class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.l1 = l\n self.l2 = l\n self.param = torch.empty(3, 5)\n\n l = nn.Linear(10, 20)\n n = Net()\n s = nn.Sequential(n, n, n, n)\n self.assertEqual(list(s.modules()), [s, n, l])\n\n def test_named_modules(self):\n class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.l1 = l\n self.l2 = l\n self.param = torch.empty(3, 5)\n self.block = block\n l = nn.Linear(10, 20)\n l1 = nn.Linear(10, 20)\n l2 = nn.Linear(10, 20)\n block = nn.Sequential()\n 
block.add_module('linear1', l1)\n block.add_module('linear2', l2)\n n = Net()\n s = nn.Sequential(n, n)\n self.assertEqual(list(s.named_modules()), [('', s), ('0', n), ('0.l1', l),\n ('0.block', block), ('0.block.linear1', l1),\n ('0.block.linear2', l2)])\n # test the option to not remove duplicate module instances\n self.assertEqual(list(s.named_modules(remove_duplicate=False)), [\n ('', s), ('0', n), ('0.l1', l), ('0.l2', l),\n ('0.block', block), ('0.block.linear1', l1),\n ('0.block.linear2', l2),\n ('1', n), ('1.l1', l), ('1.l2', l),\n ('1.block', block), ('1.block.linear1', l1),\n ('1.block.linear2', l2)])\n\n def test_register_buffer_raises_error_if_name_is_not_string(self):\n m = nn.Module()\n expected_error = 'buffer name should be a string. Got '\n with self.assertRaisesRegex(TypeError, expected_error + 'int'):\n m.register_buffer(1, torch.rand(5))\n with self.assertRaisesRegex(TypeError, expected_error + 'NoneType'):\n m.register_buffer(None, torch.rand(5))\n\n def test_register_buffer_raises_error_if_attr_exists(self):\n m = nn.Module()\n m.attribute_name = 5\n with self.assertRaises(KeyError):\n m.register_buffer('attribute_name', torch.rand(5))\n\n del m.attribute_name\n m.register_parameter('attribute_name', nn.Parameter())\n with self.assertRaises(KeyError):\n m.register_buffer('attribute_name', torch.rand(5))\n\n del m.attribute_name\n m.add_module('attribute_name', nn.Module())\n with self.assertRaises(KeyError):\n m.register_buffer('attribute_name', torch.rand(5))\n\n def test_register_buffer_raises_error_if_not_tensor(self):\n m = nn.Module()\n with self.assertRaises(TypeError):\n m.register_buffer('attribute_name', 5)\n\n def test_register_buffer_allows_overwriting_with_same_name(self):\n m = nn.Module()\n buffer1 = torch.rand(5)\n buffer2 = buffer1 + 5\n buffer3 = None\n m.register_buffer('buffer_name', buffer1)\n self.assertEqual(m.buffer_name, buffer1)\n m.register_buffer('buffer_name', buffer2)\n self.assertEqual(m.buffer_name, buffer2)\n 
m.register_buffer('buffer_name', buffer3)\n self.assertEqual(m.buffer_name, buffer3)\n\n def test_get_buffer(self):\n m = nn.Module()\n buffer1 = torch.randn(2, 3)\n buffer2 = torch.randn(4, 5)\n m.register_buffer('foo', buffer1)\n m.register_buffer('bar', buffer2)\n self.assertEqual(buffer1, m.get_buffer('foo'))\n self.assertEqual(buffer2, m.get_buffer('bar'))\n\n def test_get_buffer_from_submodules(self):\n class MyModule(nn.Module):\n def __init__(self, foo, bar):\n super().__init__()\n self.sub = Sub(foo, bar)\n\n class Sub(nn.Module):\n def __init__(self, foo, bar):\n super().__init__()\n self.register_buffer('foo', foo)\n self.subsub = SubSub(bar)\n\n class SubSub(nn.Module):\n def __init__(self, bar):\n super().__init__()\n self.register_buffer('bar', bar)\n\n foo = torch.randn(2, 3)\n bar = torch.randn(4, 5)\n m = MyModule(foo, bar)\n self.assertEqual(foo, m.get_buffer('sub.foo'))\n self.assertEqual(bar, m.get_buffer('sub.subsub.bar'))\n\n def test_buffer_not_persistent(self):\n m = nn.Module()\n m.register_buffer('buf', torch.rand(5), persistent=False)\n self.assertTrue(len(list(m.buffers())) == 1)\n self.assertTrue(len(m.state_dict()) == 0)\n\n def test_buffer_not_persistent_del(self):\n m = nn.Module()\n m.register_buffer('buf', torch.rand(5), persistent=False)\n del m.buf\n self.assertTrue(len(list(m.buffers())) == 0)\n\n def test_buffer_not_persistent_overwrite(self):\n m = nn.Module()\n m.register_buffer('buf', torch.rand(5), persistent=False)\n m.register_buffer('buf', torch.rand(5))\n\n # can we overwrite a non-persistent buffer with a persistent one?\n self.assertTrue(len(list(m.buffers())) == 1)\n self.assertTrue(len(m.state_dict()) == 1)\n\n # can we overwrite a persistent buffer with a non-persistent one?\n m.register_buffer('buf', torch.rand(5), persistent=False)\n self.assertTrue(len(list(m.buffers())) == 1)\n self.assertTrue(len(m.state_dict()) == 0)\n\n def test_buffer_not_persistent_assign(self):\n m = nn.Module()\n m.register_buffer('buf', 
torch.rand(5), persistent=False)

        # Assigning None removes the buffer but if we then assign a new Tensor
        # to the same property, it should still be marked as a buffer.
        m.buf = None
        self.assertTrue(len(list(m.buffers())) == 0)
        self.assertTrue(len(m.state_dict()) == 0)
        m.buf = torch.rand(5)
        self.assertTrue(len(list(m.buffers())) == 1)
        # Re-assigned buffer keeps the non-persistent flag: still absent from state_dict.
        self.assertTrue(len(m.state_dict()) == 0)

        # Assigning a Parameter removes the buffer.
        m.buf = nn.Parameter(torch.rand(5))
        self.assertTrue(len(list(m.buffers())) == 0)
        # The Parameter, unlike the non-persistent buffer, does show up in state_dict.
        self.assertTrue(len(m.state_dict()) == 1)

    @unittest.skipIf(not TEST_NUMPY, "numpy not found")
    def test_load_state_dict_invalid(self):
        # load_state_dict must reject checkpoint values that are not torch.Tensor
        # (or Tensor-like): here a numpy array and a nested tuple of floats.
        m = torch.nn.Linear(2, 2, bias=False)

        state_dict = {'weight': np.random.randn(2, 2)}
        with self.assertRaisesRegex(RuntimeError,
                                    "expected torch.Tensor or Tensor-like object from checkpoint but received"):
            m.load_state_dict(state_dict)

        state_dict = {'weight': ((1., 1.), (2., 2.))}
        with self.assertRaisesRegex(RuntimeError,
                                    "expected torch.Tensor or Tensor-like object from checkpoint but received"):
            m.load_state_dict(state_dict)

    def test_buffer_not_persistent_load(self):
        # A non-persistent buffer is not part of state_dict, so loading an empty
        # dict must succeed without a missing-key error.
        m = nn.Module()
        m.register_buffer('buf', torch.rand(5), persistent=False)
        m.load_state_dict({})

    def test_register_parameter_raises_error_if_name_is_not_string(self):
        m = nn.Module()
        expected_error = 'parameter name should be a string. 
Got '
        # Error message should name the offending type (int / NoneType).
        with self.assertRaisesRegex(TypeError, expected_error + 'int'):
            m.register_parameter(1, nn.Parameter())
        with self.assertRaisesRegex(TypeError, expected_error + 'NoneType'):
            m.register_parameter(None, nn.Parameter())

    def test_register_parameter_raises_error_if_attr_exists(self):
        # register_parameter must refuse a name already used by a plain
        # attribute, a buffer, or a submodule (KeyError in each case).
        m = nn.Module()
        m.attribute_name = 5
        with self.assertRaises(KeyError):
            m.register_parameter('attribute_name', nn.Parameter())

        del m.attribute_name
        m.register_buffer('attribute_name', torch.rand(5))
        with self.assertRaises(KeyError):
            m.register_parameter('attribute_name', nn.Parameter())

        del m.attribute_name
        m.add_module('attribute_name', nn.Module())
        with self.assertRaises(KeyError):
            m.register_parameter('attribute_name', nn.Parameter())

    def test_register_parameter_allows_overwriting_with_same_name(self):
        # Re-registering under an existing parameter name silently replaces the
        # previous parameter — including replacement with None.
        m = nn.Module()
        param1 = nn.Parameter(torch.rand(5))
        param2 = nn.Parameter(param1.data + 5)
        param3 = None
        m.register_parameter('param_name', param1)
        self.assertEqual(m.param_name, param1)
        m.register_parameter('param_name', param2)
        self.assertEqual(m.param_name, param2)
        m.register_parameter('param_name', param3)
        self.assertEqual(m.param_name, param3)

    def test_add_module_raises_error_if_attr_exists(self):
        # add_module and its alias register_module share the same name-collision
        # behavior, so both are exercised with the same scenarios.
        methods_to_test = ['add_module', 'register_module']
        for fn in methods_to_test:
            m = nn.Module()
            m.attribute_name = 5
            with self.assertRaises(KeyError):
                getattr(m, fn)('attribute_name', nn.Module())

            del m.attribute_name
            m.register_buffer('attribute_name', torch.rand(5))
            with self.assertRaises(KeyError):
                getattr(m, fn)('attribute_name', nn.Module())

            del m.attribute_name
            m.register_parameter('attribute_name', nn.Parameter())
            with self.assertRaises(KeyError):
                getattr(m, fn)('attribute_name', nn.Module())

    @unittest.expectedFailure
    def test_getattr_with_property(self):
        # Marked expectedFailure: the desired behavior is that the AttributeError
        # raised *inside* the property surfaces with its original message
        # (naming 'something_that_doesnt_exist'), which does not happen today.
        class Model(nn.Module):
            @property
            def some_property(self):
                return self.something_that_doesnt_exist

        model = 
Model()\n\n with self.assertRaisesRegex(\n AttributeError,\n r\"'Model' object has no attribute 'something_that_doesnt_exist'\"):\n model.some_property\n\n def test_Sequential_getitem(self):\n l1 = nn.Linear(10, 20)\n l2 = nn.Linear(20, 30)\n l3 = nn.Linear(30, 40)\n l4 = nn.Linear(40, 50)\n n = nn.Sequential(l1, l2, l3, l4)\n self.assertIs(n[0], l1)\n self.assertIs(n[1], l2)\n self.assertIs(n[2], l3)\n self.assertIs(n[3], l4)\n self.assertIs(n[torch.tensor(3, dtype=torch.int64)], l4)\n self.assertEqual(n[1:], nn.Sequential(l2, l3, l4))\n self.assertEqual(n[3:], nn.Sequential(l4))\n self.assertEqual(n[:-1], nn.Sequential(l1, l2, l3))\n self.assertEqual(n[:-3], nn.Sequential(l1))\n self.assertEqual(n[::-1], nn.Sequential(l4, l3, l2, l1))\n\n def test_Sequential_setitem(self):\n l1 = nn.Linear(10, 20)\n l2 = nn.Linear(20, 30)\n l3 = nn.Linear(30, 40)\n l4 = nn.Linear(40, 50)\n n = nn.Sequential(l1, l2, l3)\n n[0] = l4\n n[-1] = l4\n n[torch.tensor(1, dtype=torch.int16)] = l1\n self.assertIs(n[0], l4)\n self.assertIs(n[1], l1)\n self.assertIs(n[2], l4)\n\n def test_Sequential_setitem_named(self):\n l1 = nn.Linear(10, 20)\n l2 = nn.Linear(20, 30)\n l3 = nn.Linear(30, 40)\n l4 = nn.Linear(40, 50)\n n = nn.Sequential(OrderedDict([\n ('linear1', l1),\n ('linear2', l2),\n ('linear3', l3),\n ]))\n\n n[0] = l4\n n[-1] = l4\n self.assertEqual(n.linear1, l4)\n self.assertEqual(n.linear3, l4)\n\n def test_Sequential_delitem(self):\n l1 = nn.Linear(10, 20)\n l2 = nn.Linear(20, 30)\n l3 = nn.Linear(30, 40)\n l4 = nn.Linear(40, 50)\n n = nn.Sequential(l1, l2, l3, l4)\n del n[-1]\n self.assertEqual(n, nn.Sequential(l1, l2, l3))\n del n[1::2]\n self.assertEqual(n, nn.Sequential(l1, l3))\n\n def test_Sequential_append(self):\n l1 = nn.Linear(10, 20)\n l2 = nn.Linear(20, 30)\n l3 = nn.Linear(30, 40)\n l4 = nn.Linear(40, 50)\n n = nn.Sequential(l1, l2, l3)\n n2 = n.append(l4)\n self.assertEqual(n, nn.Sequential(l1, l2, l3, l4))\n self.assertEqual(n2, nn.Sequential(l1, l2, l3, l4))\n 
self.assertEqual(nn.Sequential(l1).append(l2).append(l4), nn.Sequential(l1, l2, l4))\n\n def test_ModuleList(self):\n modules = [nn.ReLU(), nn.Linear(5, 5)]\n module_list = nn.ModuleList(modules)\n\n def check():\n self.assertEqual(len(module_list), len(modules))\n for m1, m2 in zip(modules, module_list):\n self.assertIs(m1, m2)\n for m1, m2 in zip(modules, module_list.children()):\n self.assertIs(m1, m2)\n for i in range(len(modules)):\n self.assertIs(module_list[i], modules[i])\n\n check()\n modules += [nn.Conv2d(3, 4, 3)]\n module_list += [modules[-1]]\n check()\n modules = modules + [nn.Conv2d(3, 4, 3, bias=False), nn.GELU()]\n module_list = module_list + nn.ModuleList(modules[-2:])\n check()\n modules.insert(1, nn.Linear(3, 2))\n module_list.insert(1, modules[1])\n check()\n modules.append(nn.Tanh())\n module_list.append(modules[-1])\n check()\n next_modules = [nn.Linear(5, 5), nn.Sigmoid()]\n modules.extend(next_modules)\n module_list.extend(next_modules)\n check()\n modules[2] = nn.Conv2d(5, 3, 2)\n module_list[2] = modules[2]\n check()\n modules[-1] = nn.Conv2d(5, 2, 1)\n module_list[-1] = modules[-1]\n check()\n idx = torch.tensor(2, dtype=torch.int32)\n modules[2] = nn.Conv2d(5, 3, 2)\n module_list[idx] = modules[2]\n self.assertIs(module_list[idx], modules[2])\n check()\n self.assertEqual(module_list[1:], nn.ModuleList(modules[1:]))\n self.assertEqual(module_list[3:], nn.ModuleList(modules[3:]))\n self.assertEqual(module_list[:-1], nn.ModuleList(modules[:-1]))\n self.assertEqual(module_list[:-3], nn.ModuleList(modules[:-3]))\n self.assertEqual(module_list[::-1], nn.ModuleList(modules[::-1]))\n del module_list[-1]\n self.assertEqual(module_list, nn.ModuleList(modules[:-1]))\n del module_list[1::2]\n self.assertEqual(module_list, nn.ModuleList(modules[:-1][0::2]))\n\n with self.assertRaises(TypeError):\n module_list += nn.ReLU()\n with self.assertRaises(TypeError):\n module_list.extend(nn.ReLU())\n\n l1 = nn.Linear(1, 2)\n l2 = nn.Linear(2, 3)\n l3 = 
nn.Linear(3, 2)\n l4 = nn.Linear(2, 3)\n subnet = nn.Sequential(l3, l4)\n s = nn.Sequential(\n OrderedDict([\n (\"layer1\", l1),\n (\"layer2\", l2),\n (\"layer3\", l3),\n (\"layer4\", l4),\n (\"subnet_layer\", subnet)\n ])\n )\n modules = list(s.modules())\n module_list = nn.ModuleList()\n module_list.extend(s.modules())\n check()\n\n # verify the right exception is thrown when trying to \"forward\" through a ModuleList\n self.assertRaises(NotImplementedError, module_list)\n self.assertRaises(NotImplementedError, module_list, torch.rand(1, 3))\n\n def test_ModuleDict(self):\n modules = OrderedDict([\n ('act', nn.ReLU()),\n ('conv', nn.Conv2d(10, 10, 5)),\n ('fc', nn.Linear(5, 5)),\n ])\n\n module_dict = nn.ModuleDict(modules)\n\n def check():\n self.assertEqual(len(module_dict), len(modules))\n for k1, m2 in zip(modules, module_dict.children()):\n self.assertIs(modules[k1], m2)\n for k1, k2 in zip(modules, module_dict):\n self.assertIs(modules[k1], module_dict[k2])\n for k in module_dict:\n self.assertIs(module_dict[k], modules[k])\n for k in module_dict.keys():\n self.assertIs(module_dict[k], modules[k])\n for k, v in module_dict.items():\n self.assertIs(modules[k], v)\n for k1, m2 in zip(modules, module_dict.values()):\n self.assertIs(modules[k1], m2)\n for k in modules.keys():\n self.assertTrue(k in module_dict)\n check()\n\n modules['conv'] = nn.Conv2d(3, 4, 3)\n module_dict['conv'] = modules['conv']\n check()\n\n next_modules = [\n ('fc2', nn.Linear(5, 5)),\n ('act', nn.Sigmoid()),\n ]\n modules.update(next_modules)\n module_dict.update(next_modules)\n check()\n\n next_modules = OrderedDict([\n ('fc3', nn.Linear(5, 5)),\n ('act2', nn.Sigmoid()),\n ])\n modules.update(next_modules)\n module_dict.update(next_modules)\n check()\n\n next_modules = {\n 'fc4': nn.Linear(5, 5),\n 'act3': nn.Sigmoid()\n }\n modules.update(next_modules.items())\n module_dict.update(next_modules)\n check()\n\n next_modules = nn.ModuleDict([\n ('fc5', nn.Linear(5, 5)),\n ('act4', 
nn.Sigmoid()),\n ])\n modules.update(next_modules)\n module_dict.update(next_modules)\n check()\n\n del module_dict['fc']\n del modules['fc']\n check()\n\n with self.assertRaises(TypeError):\n module_dict.update(nn.ReLU())\n\n with self.assertRaises(TypeError):\n module_dict.update([nn.ReLU()])\n\n with self.assertRaises(ValueError):\n module_dict.update([[nn.ReLU()]])\n\n with self.assertRaises(TypeError):\n module_dict[1] = nn.ReLU()\n\n s = nn.Sequential(modules)\n module_dict = nn.ModuleDict(s.named_children())\n check()\n\n c = module_dict.pop('conv')\n self.assertIs(c, modules['conv'])\n modules.pop('conv')\n check()\n\n module_dict.clear()\n self.assertEqual(len(module_dict), 0)\n modules.clear()\n check()\n\n # verify the right exception is thrown when trying to \"forward\" through a ModuleDict\n self.assertRaises(NotImplementedError, module_dict)\n self.assertRaises(NotImplementedError, module_dict, torch.rand(1, 3))\n\n def test_ParameterList(self):\n def make_param():\n return Parameter(torch.randn(2, 2))\n parameters = [make_param(), make_param()]\n param_list = nn.ParameterList(parameters)\n\n def check():\n self.assertEqual(len(parameters), len(param_list))\n for p1, p2 in zip(parameters, param_list):\n self.assertIs(p1, p2)\n for p1, p2 in zip(filter(lambda x: isinstance(x, Parameter), parameters), param_list.parameters()):\n self.assertIs(p1, p2)\n for i in range(len(parameters)):\n self.assertIs(parameters[i], param_list[i])\n\n check()\n parameters += [make_param()]\n param_list += [parameters[-1]]\n check()\n parameters.append(make_param())\n param_list.append(parameters[-1])\n check()\n next_params = [make_param(), make_param()]\n parameters.extend(next_params)\n param_list.extend(next_params)\n check()\n parameters[2] = make_param()\n param_list[2] = parameters[2]\n check()\n parameters[-1] = make_param()\n param_list[-1] = parameters[-1]\n check()\n idx = torch.tensor(2, dtype=torch.int32)\n parameters[2] = make_param()\n param_list[idx] = 
parameters[2]\n self.assertIs(param_list[idx], parameters[2])\n check()\n self.assertEqual(param_list[1:], nn.ParameterList(parameters[1:]))\n self.assertEqual(param_list[3:], nn.ParameterList(parameters[3:]))\n self.assertEqual(param_list[:-1], nn.ParameterList(parameters[:-1]))\n self.assertEqual(param_list[:-3], nn.ParameterList(parameters[:-3]))\n self.assertEqual(param_list[::-1], nn.ParameterList(parameters[::-1]))\n\n with self.assertRaises(TypeError):\n param_list += make_param()\n with self.assertRaises(TypeError):\n param_list.extend(make_param())\n\n l1 = nn.Linear(1, 2)\n l2 = nn.Linear(2, 3)\n l3 = nn.Linear(3, 2)\n l4 = nn.Linear(2, 3)\n subnet = nn.Sequential(l3, l4)\n s = nn.Sequential(\n OrderedDict([\n (\"layer1\", l1),\n (\"layer2\", l2),\n (\"layer3\", l3),\n (\"layer4\", l4),\n (\"subnet_layer\", subnet)\n ])\n )\n parameters = list(s.parameters())\n param_list = nn.ParameterList()\n param_list.extend(s.parameters())\n check()\n\n param_list.append(torch.rand(2, 2))\n self.assertIsInstance(param_list[-1], Parameter)\n parameters.append(param_list[-1])\n\n param_list.extend([torch.rand(2, 2), \"foo\"])\n self.assertIsInstance(param_list[-2], Parameter)\n self.assertIsInstance(param_list[-1], str)\n parameters.extend(param_list[-2:])\n\n param_list += [\"bar\", torch.rand(2, 2)]\n self.assertIsInstance(param_list[-2], str)\n self.assertIsInstance(param_list[-1], Parameter)\n parameters += param_list[-2:]\n check()\n\n def test_ParameterList_replication(self):\n # The actual replication code from DP cannot be used on CPU so doing it manually here\n def make_param():\n return Parameter(torch.randn(2, 2))\n parameters = [make_param(), make_param()]\n param_list = nn.ParameterList(parameters)\n\n new_param_list = param_list._replicate_for_data_parallel()\n\n for n, p in param_list.named_parameters():\n # Do a view here so that we can check the base later\n setattr(new_param_list, n, p.view_as(p))\n\n for p, p2 in zip(param_list, new_param_list):\n 
self.assertEqual(p, p2)\n self.assertIsNotNone(p2.grad_fn)\n self.assertIs(p2._base, p)\n\n def test_ParameterDict(self):\n parameters = OrderedDict([\n ('p1', Parameter(torch.randn(10, 10))),\n ('p2', Parameter(torch.randn(10, 10))),\n ('p3', Parameter(torch.randn(10, 10))),\n ])\n\n parameter_dict = nn.ParameterDict(parameters)\n\n def check():\n self.assertEqual(len(parameter_dict), len(parameters))\n for i, (k1, (k2, m2)) in enumerate(zip(parameters, parameter_dict.named_parameters())):\n self.assertEqual(k1, k2)\n self.assertIs(parameters[k1], m2)\n for k1, k2 in zip(parameters, parameter_dict):\n self.assertIs(parameters[k1], parameter_dict[k2])\n for k in parameter_dict:\n self.assertIs(parameter_dict[k], parameters[k])\n for k in parameter_dict.keys():\n self.assertIs(parameter_dict[k], parameters[k])\n for k, v in parameter_dict.items():\n self.assertIs(v, parameters[k])\n for k1, m2 in zip(parameters, parameter_dict.values()):\n self.assertIs(parameters[k1], m2)\n for k in parameters.keys():\n self.assertTrue(k in parameter_dict)\n\n check()\n\n parameters['p4'] = Parameter(torch.randn(10, 10))\n parameter_dict['p4'] = parameters['p4']\n check()\n\n next_parameters = [\n ('p5', Parameter(torch.randn(10, 10))),\n ('p2', Parameter(torch.randn(10, 10))),\n ]\n parameters.update(next_parameters)\n parameter_dict.update(next_parameters)\n check()\n\n next_parameters = OrderedDict([\n ('p6', Parameter(torch.randn(10, 10))),\n ('p5', Parameter(torch.randn(10, 10))),\n ])\n parameters.update(next_parameters)\n parameter_dict.update(next_parameters)\n check()\n\n next_parameters = {\n 'p8': Parameter(torch.randn(10, 10)),\n 'p7': Parameter(torch.randn(10, 10))\n }\n parameters.update(sorted(next_parameters.items()))\n parameter_dict.update(next_parameters)\n check()\n\n next_parameters = nn.ParameterDict([\n ('p10', Parameter(torch.randn(10, 10))),\n ('p9', Parameter(torch.randn(10, 10))),\n ])\n parameters.update(next_parameters)\n 
parameter_dict.update(next_parameters)\n check()\n\n del parameter_dict['p3']\n del parameters['p3']\n check()\n\n with self.assertRaises(TypeError):\n parameter_dict.update(1)\n\n with self.assertRaises(TypeError):\n parameter_dict.update([1])\n\n with self.assertRaises(ValueError):\n parameter_dict.update(Parameter(torch.randn(10, 10)))\n\n p_pop = parameter_dict.pop('p4')\n self.assertIs(p_pop, parameters['p4'])\n parameters.pop('p4')\n check()\n\n # Check reverse works\n forward = list(iter(parameter_dict))\n backward = list(reversed(parameter_dict))\n self.assertEqual(len(forward), len(backward))\n n = len(forward)\n for i in range(n):\n self.assertIs(forward[i], backward[n - i - 1])\n check()\n\n # Check copy works\n copy = parameter_dict.copy()\n\n # Check all keys are present and have shallow copied values\n for key in parameter_dict:\n self.assertTrue(key in copy)\n self.assertEqual(parameter_dict[key], copy[key])\n self.assertIs(parameter_dict[key], copy[key])\n check()\n\n parameter_dict[\"p20\"] = Parameter(torch.randn(10, 10))\n copy[\"p21\"] = Parameter(torch.randn(9, 10))\n\n self.assertTrue(\"p20\" in parameter_dict)\n self.assertFalse(\"p20\" in copy)\n self.assertFalse(\"p21\" in parameter_dict)\n self.assertTrue(\"p21\" in copy)\n parameter_dict.pop(\"p20\")\n check()\n\n p = Parameter(torch.randn(10, 10))\n parameter_dict['p12'] = p\n p_popitem = parameter_dict.popitem()\n self.assertEqual(p_popitem[0], 'p12')\n self.assertIs(p_popitem[1], p)\n check()\n\n # Unit test for set_default\n # 1. Ensure parameter is correctly inserted when\n # the key is not present in `ParameterDict`\n assert 'p11' not in parameter_dict\n assert 'p11' not in parameters\n parameters['p11'] = Parameter(torch.randn(10, 10))\n p_setdefault = parameter_dict.setdefault('p11', parameters['p11'])\n self.assertIs(p_setdefault, parameters['p11'])\n self.assertIs(p_setdefault, parameter_dict['p11'])\n check()\n # 2. 
Ensure parameter is NOT inserted when the\n # key is already present in `ParameterDict`\n p = Parameter(torch.randn(10, 10))\n self.assertFalse(parameter_dict.setdefault('p11', p) is p)\n check()\n # 3. Ensure `None` is inserted when the key is not\n # present in `Parameter` and parameter is not specified\n self.assertIs(parameter_dict.setdefault('p26'), None)\n del parameter_dict['p26']\n check()\n\n parameters2 = OrderedDict([\n ('p13', Parameter(torch.randn(10, 10))),\n ('p2', Parameter(torch.randn(10, 10))),\n ('p3', Parameter(torch.randn(10, 10))),\n ])\n parameter_dict2 = nn.ParameterDict(parameters2)\n parameters.update(parameters2)\n parameter_dict |= parameter_dict2\n check()\n\n parameters2 = OrderedDict()\n parameter_dict2 = nn.ParameterDict(parameters2)\n parameters.update(parameters2)\n parameter_dict |= parameter_dict2\n check()\n\n parameters2 = OrderedDict([\n ('p14', Parameter(torch.randn(10, 10))),\n ('p15', Parameter(torch.randn(10, 10))),\n ('p13', Parameter(torch.randn(10, 10))),\n ])\n parameter_dict2 = nn.ParameterDict(parameters2)\n parameters.update(parameters2)\n parameter_dict |= parameter_dict2\n check()\n\n # Check __or__ and __ror__ works\n parameters2 = OrderedDict([\n ('p20', Parameter(torch.randn(10, 10))),\n ('p21', Parameter(torch.randn(10, 10))),\n ('p22', Parameter(torch.randn(10, 10))),\n ])\n parameter_dict2 = nn.ParameterDict(parameters2)\n parameters.update(parameters2)\n parameter_dict = parameter_dict | parameter_dict2\n check()\n\n parameters2 = OrderedDict([\n ('p23', Parameter(torch.randn(10, 10))),\n ('p24', Parameter(torch.randn(10, 10))),\n ('p25', Parameter(torch.randn(10, 10))),\n ])\n parameter_dict2 = nn.ParameterDict(parameters2)\n parameters2.update(parameters)\n parameters = parameters2\n parameter_dict = parameter_dict2 | parameter_dict\n check()\n\n parameters['p17'] = Parameter(torch.randn(10, 10))\n parameter_dict['p17'] = parameters['p17']\n self.assertIs(parameters['p17'], parameter_dict.get('p17'))\n 
temp_param = Parameter(torch.randn(10, 10))\n self.assertIs(parameters['p17'], parameter_dict.get('p17', temp_param))\n self.assertIs(None, parameter_dict.get('p18'))\n self.assertIs(temp_param, parameter_dict.get('p18', temp_param))\n check()\n\n parameter_dict.clear()\n self.assertEqual(len(parameter_dict), 0)\n parameters.clear()\n check()\n\n parameter_dict2 = parameter_dict.fromkeys(['p19', 'p20'])\n self.assertEqual({'p19': None, 'p20': None}, parameter_dict2)\n check()\n\n parameter_dict2 = parameter_dict.fromkeys(['p19', 'p20'], temp_param)\n self.assertEqual({'p19': temp_param, 'p20': temp_param}, parameter_dict2)\n check()\n\n parameter_dict['p21'] = torch.rand(2, 2)\n self.assertIsInstance(parameter_dict['p21'], Parameter)\n parameters['p21'] = parameter_dict['p21']\n\n parameter_dict.update({'p22': torch.rand(2, 2), 'foo': 'bar'})\n self.assertIsInstance(parameter_dict['p22'], Parameter)\n self.assertIsInstance(parameter_dict['foo'], str)\n parameters['p22'] = parameter_dict['p22']\n parameters['foo'] = parameter_dict['foo']\n\n def test_ParameterDict_replication(self):\n # The actual replication code from DP cannot be used on CPU so doing it manually here\n def make_param():\n return Parameter(torch.randn(2, 2))\n parameters = {\"foo\": make_param(), \"bar\": make_param()}\n param_dict = nn.ParameterDict(parameters)\n\n new_param_dict = param_dict._replicate_for_data_parallel()\n\n for n, p in param_dict.named_parameters():\n # Do a view here so that we can check the base later\n setattr(new_param_dict, n, p.view_as(p))\n\n for (k, p), (k2, p2) in zip(param_dict.items(), new_param_dict.items()):\n self.assertEqual(k, k2)\n self.assertEqual(p, p2)\n self.assertIsNotNone(p2.grad_fn)\n self.assertIs(p2._base, p)\n\n self.assertEqual(param_dict[\"foo\"], new_param_dict[\"foo\"])\n\n def test_add_module(self):\n methods_to_test = ['add_module', 'register_module']\n for fn in methods_to_test:\n l = nn.Linear(10, 20)\n net = nn.Module()\n net.l = l\n net.l2 = 
l
            # Adding None as a module is allowed and reachable as an attribute.
            getattr(net, fn)('empty', None)
            self.assertEqual(net.l, l)
            self.assertEqual(net.l2, l)
            self.assertEqual(net.empty, None)
            getattr(net, fn)('l3', l)
            self.assertEqual(net.l3, l)
            # Re-adding under an existing name replaces the old submodule.
            l3 = nn.Linear(20, 10)
            getattr(net, fn)('l', l3)
            self.assertEqual(net.l, l3)
            # Non-module values and non-string names must raise TypeError.
            self.assertRaises(TypeError, lambda: getattr(net, fn)('x', 'non-module'))
            self.assertRaisesRegex(TypeError, 'module name should be a string. Got int',
                                   lambda: getattr(net, fn)(1, l))
            self.assertRaisesRegex(TypeError, 'module name should be a string. Got NoneType',
                                   lambda: getattr(net, fn)(None, l))

    def test_module_to_argparse(self):
        # Module.to() accepts only a restricted set of argument combinations;
        # each of the invalid call shapes below must raise TypeError.
        net = nn.Sequential(nn.Linear(3, 3))
        cpu = torch.device('cpu')
        with self.assertRaises(TypeError):
            net.to(cpu, True)
        with self.assertRaises(TypeError):
            net.to(torch.long)
        with self.assertRaises(TypeError):
            net.to(None, True)
        with self.assertRaises(TypeError):
            net.to(cpu, torch.long, True)
        with self.assertRaises(TypeError):
            net.to(cpu, dtype=torch.long, non_blocking=True)
        with self.assertRaises(TypeError):
            net.to([])
        with self.assertRaises(TypeError):
            net.to({}, non_blocking=True)
        with self.assertRaises(TypeError):
            net.to(torch.tensor(3, dtype=torch.long), non_blocking=True)
        with self.assertRaises(TypeError):
            net.to(cpu, torch.tensor(3, dtype=torch.long), non_blocking=True)

    def test_RNN_nonlinearity(self):
        # Default nonlinearity is 'tanh'; 'relu' is accepted; anything else
        # raises ValueError at construction time.
        rnn = torch.nn.RNN(1, 10)
        self.assertEqual(rnn.nonlinearity, 'tanh')

        rnn = torch.nn.RNN(1, 10, nonlinearity='relu')
        self.assertEqual(rnn.nonlinearity, 'relu')

        with self.assertRaisesRegex(ValueError, 'Unknown nonlinearity'):
            rnn = torch.nn.RNN(1, 10, nonlinearity='garbage')

    def test_module_apply_inplace_op(self):
        def add_one_inplace(t):
            # In-place add; used to mutate parameters through Module._apply.
            return t.add_(1.0)

        # Test that applying an in-place operation to a module would bump
        # the module's parameters' version counter.
        m = nn.Linear(20, 10)
        pvm = m.weight.mul(m.weight)
        m_weight_version_saved = m.weight._version
        m = 
m._apply(add_one_inplace)\n self.assertGreater(m.weight._version, m_weight_version_saved)\n with self.assertRaisesRegex(RuntimeError, \"modified by an inplace operation\"):\n pvm.backward(torch.randn(10, 20))\n\n # Test that applying an in-place operation to a module would bump\n # the module's parameters' gradients' version counter.\n m = nn.Linear(20, 10)\n m.weight.grad = torch.randn(10, 20).requires_grad_()\n pgm = m.weight.grad.mul(m.weight.grad)\n m_weight_grad_version_saved = m.weight.grad._version\n m = m._apply(add_one_inplace)\n self.assertGreater(m.weight.grad._version, m_weight_grad_version_saved)\n with self.assertRaisesRegex(RuntimeError, \"modified by an inplace operation\"):\n pgm.backward(torch.randn(10, 20))\n\n def test_overwrite_module_params_on_conversion(self):\n # Test that if the conversion function passed to `module._apply()`\n # changes the TensorImpl type of `module`'s parameters, the `module`'s\n # parameters are always overwritten, regardless of the value of\n # `torch.__future__.get_overwrite_module_params_on_conversion()`.\n m = nn.Linear(20, 10)\n m.weight.grad = torch.randn(10, 20)\n weight_ref = m.weight\n weight_grad_ref = m.weight.grad\n m = m._apply(lambda t: torch.sparse_coo_tensor(torch.zeros([2, 1]), torch.ones([1]), torch.Size([10, 20])))\n self.assertNotEqual(weight_ref.layout, m.weight.layout)\n self.assertNotEqual(weight_grad_ref.layout, m.weight.grad.layout)\n\n # Test that under the current default settings\n # (`torch.__future__.get_overwrite_module_params_on_conversion() == False`),\n # a view to a module's parameters is not pointing to the same storage as\n # its base variable after converting the module to a different dtype.\n m = nn.Linear(20, 10).float()\n mw = m.weight[:]\n m.double()\n with torch.no_grad():\n mw[0][0] = 5\n self.assertTrue(mw[0][0].dtype == torch.float)\n self.assertTrue(mw._base[0][0].dtype == torch.double)\n\n try:\n torch.__future__.set_overwrite_module_params_on_conversion(True)\n\n # Test 
that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,\n # a view to a module's parameters is still pointing to the same storage as\n # its base variable after converting the module to a different dtype.\n m = nn.Linear(20, 10).float()\n mw = m.weight[:]\n m.double()\n with torch.no_grad():\n mw[0][0] = 5\n self.assertTrue(mw[0][0] == mw._base[0][0])\n\n # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,\n # `float_module.double()` doesn't preserve previous references to\n # `float_module`'s parameters or gradients.\n m = nn.Linear(20, 10).float()\n m.weight.grad = torch.randn(10, 20).float()\n weight_ref = m.weight\n weight_grad_ref = m.weight.grad\n m.double()\n self.assertNotEqual(weight_ref.dtype, m.weight.dtype)\n self.assertNotEqual(weight_grad_ref.dtype, m.weight.grad.dtype)\n\n def add_one_inplace(t):\n return t.add_(1.0)\n\n # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,\n # applying an in-place operation to a module would bump the module's\n # original parameters' version counter.\n m = nn.Linear(20, 10)\n pvm = m.weight.mul(m.weight)\n weight_ref = m.weight\n m_weight_version_saved = weight_ref._version\n m = m._apply(add_one_inplace)\n # Test that the in-place operation bumps the original parameter's version counter\n self.assertGreater(weight_ref._version, m_weight_version_saved)\n with self.assertRaisesRegex(RuntimeError, \"modified by an inplace operation\"):\n pvm.backward(torch.randn(10, 20))\n\n # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,\n # applying an in-place operation to a module would bump the module's\n # original parameters' gradients' version counter.\n m = nn.Linear(20, 10)\n m.weight.grad = torch.randn(10, 20).requires_grad_()\n pgm = m.weight.grad.mul(m.weight.grad)\n weight_grad_ref = m.weight.grad\n m_weight_grad_version_saved = weight_grad_ref._version\n m = m._apply(add_one_inplace)\n 
self.assertGreater(weight_grad_ref._version, m_weight_grad_version_saved)\n with self.assertRaisesRegex(RuntimeError, \"modified by an inplace operation\"):\n pgm.backward(torch.randn(10, 20))\n\n # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,\n # applying an out-of-place operation to a module doesn't bump\n # the module's original parameters' version counter.\n m = nn.Linear(20, 10)\n weight_ref = m.weight\n m_weight_version_saved = weight_ref._version\n m = m._apply(lambda t: torch.randn(t.shape))\n self.assertEqual(weight_ref._version, m_weight_version_saved)\n\n # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,\n # applying an out-of-place operation to a module doesn't bump\n # the module's original parameters' gradients' version counter.\n m = nn.Linear(20, 10)\n m.weight.grad = torch.randn(10, 20).requires_grad_()\n weight_grad_ref = m.weight.grad\n m_weight_grad_version_saved = weight_grad_ref._version\n m = m._apply(lambda t: torch.randn(t.shape))\n self.assertEqual(weight_grad_ref._version, m_weight_grad_version_saved)\n finally:\n torch.__future__.set_overwrite_module_params_on_conversion(False)\n\n def test_type(self):\n l = nn.Linear(10, 20)\n net = nn.Module()\n net.l = l\n net.l2 = l\n net.add_module('empty', None)\n net.register_buffer('indices', torch.LongTensor(1))\n net.float()\n self.assertIsInstance(l.weight.data, torch.FloatTensor)\n self.assertIsInstance(l.bias.data, torch.FloatTensor)\n self.assertIsInstance(net.indices, torch.LongTensor)\n net.double()\n self.assertIsInstance(l.weight.data, torch.DoubleTensor)\n self.assertIsInstance(l.bias.data, torch.DoubleTensor)\n self.assertIsInstance(net.indices, torch.LongTensor)\n net.to(torch.half)\n self.assertIsInstance(l.weight.data, torch.HalfTensor)\n self.assertIsInstance(l.bias.data, torch.HalfTensor)\n self.assertIsInstance(net.indices, torch.LongTensor)\n if TEST_CUDA:\n net.float().cuda()\n 
self.assertIsInstance(l.weight.data, torch.cuda.FloatTensor)\n self.assertIsInstance(l.bias.data, torch.cuda.FloatTensor)\n self.assertIsInstance(net.indices, torch.cuda.LongTensor)\n net.cpu()\n self.assertIsInstance(l.weight.data, torch.FloatTensor)\n self.assertIsInstance(l.bias.data, torch.FloatTensor)\n self.assertIsInstance(net.indices, torch.LongTensor)\n net.to(\"cuda\", torch.double, True)\n self.assertIsInstance(l.weight.data, torch.cuda.DoubleTensor)\n self.assertIsInstance(l.bias.data, torch.cuda.DoubleTensor)\n self.assertIsInstance(net.indices, torch.cuda.LongTensor)\n net.to(torch.empty(1, device=\"cuda:0\", dtype=torch.half))\n self.assertIsInstance(l.weight.data, torch.cuda.HalfTensor)\n self.assertIsInstance(l.bias.data, torch.cuda.HalfTensor)\n self.assertIsInstance(net.indices, torch.cuda.LongTensor)\n net.to(torch.device(\"cpu\"), non_blocking=True)\n self.assertIsInstance(l.weight.data, torch.HalfTensor)\n self.assertIsInstance(l.bias.data, torch.HalfTensor)\n self.assertIsInstance(net.indices, torch.LongTensor)\n net.to(torch.float)\n self.assertIsInstance(l.weight.data, torch.FloatTensor)\n self.assertIsInstance(l.bias.data, torch.FloatTensor)\n net.to(torch.DoubleTensor(1))\n self.assertIsInstance(l.weight.data, torch.DoubleTensor)\n self.assertIsInstance(l.bias.data, torch.DoubleTensor)\n if TEST_CUDA:\n net.to(device='cuda', dtype=torch.float)\n self.assertIsInstance(l.weight.data, torch.cuda.FloatTensor)\n self.assertIsInstance(l.bias.data, torch.cuda.FloatTensor)\n\n def test_non_leaf_parameters(self):\n l1 = nn.Linear(10, 10)\n l2 = nn.Linear(10, 10)\n\n def assign_weight():\n l2.weight = l1.weight + 2\n\n self.assertRaises(TypeError, assign_weight)\n # This should work though\n l2.weight = Parameter(torch.randn(10, 10))\n\n def test_clip_grad_norm(self):\n l = nn.Linear(10, 10)\n max_norm = 2\n\n def compute_norm(norm_type):\n norm_type = float(norm_type)\n if norm_type != inf:\n total_norm = 0\n for p in l.parameters():\n total_norm 
+= p.grad.data.abs().pow(norm_type).sum()\n return pow(total_norm, 1. / norm_type)\n else:\n return max(p.grad.data.abs().max() for p in l.parameters())\n\n def compare_scaling(grads):\n p_scale = [p.grad.data.div(g).view(-1) for p, g in zip(l.parameters(), grads)]\n scale = torch.cat(p_scale)\n self.assertEqual(scale.std(), 0)\n return scale[0]\n\n grads = torch.arange(1., 101).view(10, 10), torch.ones(10).div(1000)\n for norm_type in [0.5, 1.5, 2, 4, 'inf']:\n for p, g in zip(l.parameters(), grads):\n p._grad = g.clone().view_as(p.data)\n norm_before = compute_norm(norm_type)\n norm = clip_grad_norm_(l.parameters(), max_norm, norm_type=norm_type)\n norm_after = compute_norm(norm_type)\n self.assertEqual(norm, norm_before)\n self.assertEqual(norm_after, max_norm)\n self.assertLessEqual(norm_after, norm_before)\n compare_scaling(grads)\n\n # Small gradients should be left unchanged\n grads = torch.rand(10, 10).div(10000), torch.ones(10).div(500)\n for norm_type in [0.5, 1.5, 2, 4, 'inf']:\n for p, g in zip(l.parameters(), grads):\n p.grad.data.copy_(g)\n norm_before = compute_norm(norm_type)\n norm = clip_grad_norm_(l.parameters(), max_norm, norm_type=norm_type)\n norm_after = compute_norm(norm_type)\n self.assertEqual(norm, norm_before)\n self.assertEqual(norm_before, norm_after)\n self.assertLessEqual(norm_after, max_norm)\n scale = compare_scaling(grads)\n self.assertEqual(scale, 1)\n\n # Should accept a single Tensor as input\n p1, p2 = torch.randn(10, 10), torch.randn(10, 10)\n g = torch.arange(1., 101).view(10, 10)\n p1._grad = g.clone()\n p2._grad = g.clone()\n for norm_type in [0.5, 1.5, 2, 4, 'inf']:\n clip_grad_norm_(p1, max_norm, norm_type=norm_type)\n clip_grad_norm_([p2], max_norm, norm_type=norm_type)\n self.assertEqual(p1.grad, p2.grad)\n\n def test_clip_grad_value(self):\n l = nn.Linear(10, 10)\n clip_value = 2.5\n\n grad_w, grad_b = torch.arange(-50., 50).view(10, 10).div_(5), torch.ones(10).mul_(2)\n for grad_list in [[grad_w, grad_b], [grad_w, 
None]]:\n for p, g in zip(l.parameters(), grad_list):\n p._grad = g.clone().view_as(p.data) if g is not None else g\n\n clip_grad_value_(l.parameters(), clip_value)\n for p in filter(lambda p: p.grad is not None, l.parameters()):\n self.assertLessEqual(p.grad.data.max(), clip_value)\n self.assertGreaterEqual(p.grad.data.min(), -clip_value)\n\n # Should accept a single Tensor as input\n p1, p2 = torch.randn(10, 10), torch.randn(10, 10)\n g = torch.arange(-50., 50).view(10, 10).div_(5)\n p1._grad = g.clone()\n p2._grad = g.clone()\n clip_grad_value_(p1, clip_value)\n clip_grad_value_([p2], clip_value)\n self.assertEqual(p1.grad, p2.grad)\n\n def test_parameters_to_vector(self):\n conv1 = nn.Conv2d(3, 10, 5)\n fc1 = nn.Linear(10, 20)\n model = nn.Sequential(conv1, fc1)\n\n vec = parameters_to_vector(model.parameters())\n self.assertEqual(vec.size(0), 980)\n\n def test_vector_to_parameters(self):\n conv1 = nn.Conv2d(3, 10, 5)\n fc1 = nn.Linear(10, 20)\n model = nn.Sequential(conv1, fc1)\n\n vec = torch.arange(0., 980)\n vector_to_parameters(vec, model.parameters())\n\n sample = next(model.parameters())[0, 0, 0]\n self.assertTrue(torch.equal(sample.data, vec.data[:5]))\n\n # FIXME: Rewrite this test using functions not depending on LAPACK\n # and remove the `@skipIfNoLapack` (see #70995)\n # torch/nn/utils/parametrize\n @skipIfNoLapack\n def test_register_and_remove_parametrization(self):\n r\"\"\"Test that it is possible to add a few parametrizations\n on a parameter or a buffer and that removing them restores the initial state\n It also tests that backpropagating through them works as expected\n \"\"\"\n # Define a couple matrix parametrizations\n class Skew(nn.Module):\n def forward(self, X):\n X = X.tril(-1)\n return X - X.T\n\n class Orthogonal(nn.Module):\n def forward(self, X):\n # Cayley map\n # If X is skew-symmetric it returns an orthogonal matrix\n Id = torch.eye(X.size(0), device=X.device)\n # We call contiguous because solve returns a tensor with strides 
    # FIXME: Rewrite this test using functions not depending on LAPACK
    # and remove the `@skipIfNoLapack` (see #70995)
    # torch/nn/utils/parametrize
    @skipIfNoLapack
    def test_register_and_remove_parametrization(self):
        r"""Test that it is possible to add a few parametrizations
        on a parameter or a buffer and that removing them restores the initial state
        It also tests that backpropagating through them works as expected
        """
        # Define a couple matrix parametrizations
        class Skew(nn.Module):
            def forward(self, X):
                X = X.tril(-1)
                return X - X.T

        class Orthogonal(nn.Module):
            def forward(self, X):
                # Cayley map
                # If X is skew-symmetric it returns an orthogonal matrix
                Id = torch.eye(X.size(0), device=X.device)
                # We call contiguous because solve returns a tensor with strides that are Fortran-contiguous
                # and autograd raises a performance warning.
                # This happens when we remove the parametrization with leave_parametrized=True,
                # which does a set_ with a non-contiguous tensor while the gradient is contiguous
                return torch.linalg.solve(Id + X, Id - X).contiguous()

        class Resize(nn.Module):
            def forward(self, X):
                return X[[0]]

        class NoResize(nn.Module):
            def forward(self, X):
                return X

        # Define a couple vector parametrizations
        class FirstZero(nn.Module):
            def forward(self, x):
                return torch.cat([x.new_zeros(1), x[1:]])

        class LastZero(nn.Module):
            def forward(self, x):
                return torch.cat([x[:-1], x.new_zeros(1)])

        model = nn.Linear(8, 8)
        initial_weight_id = id(model.weight)
        initial_bias_id = id(model.bias)
        initial_model = deepcopy(model)

        # Test unsafe flag
        with self.assertRaisesRegex(ValueError, "Registering a parametrization may not change the shape of the tensor"):
            parametrize.register_parametrization(model, "weight", Resize())  # default unsafe = False
            model(torch.ones(8, 8))

        # One parametrization with unsafe=True
        parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertNotIn("weight", model._parameters)
        A = model.weight
        self.assertTrue(A.shape[0] == 1)
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.weight, initial_model.weight)
        self.assertEqual(id(model.weight), initial_weight_id)
        self.assertEqual(model.__class__, nn.Linear)

        # Two parametrizations with unsafe=True
        parametrize.register_parametrization(model, "weight", Resize(), unsafe=True)
        parametrize.register_parametrization(model, "weight", NoResize(), unsafe=False)
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertNotIn("weight", model._parameters)
        A = model.weight
        self.assertTrue(A.shape[0] == 1)
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.weight, initial_model.weight)
        self.assertEqual(id(model.weight), initial_weight_id)
        self.assertEqual(model.__class__, nn.Linear)

        # Test unsafe flag doesn't change expected behavior
        parametrize.register_parametrization(model, "weight", Skew(), unsafe=True)
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertNotIn("weight", model._parameters)
        # Result should be skew-symmetric
        A = model.weight
        self.assertEqual(A, -A.T)
        # Remove and check consistency
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.weight, initial_model.weight)
        self.assertEqual(id(model.weight), initial_weight_id)
        self.assertEqual(model.__class__, nn.Linear)

        # Test one parametrization
        parametrize.register_parametrization(model, "weight", Skew())
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertNotIn("weight", model._parameters)
        # Result should be skew-symmetric
        A = model.weight
        self.assertEqual(A, -A.T)
        # Remove and check consistency
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.weight, initial_model.weight)
        self.assertEqual(id(model.weight), initial_weight_id)
        self.assertEqual(model.__class__, nn.Linear)

        # Test two parametrizations at the same time and removing them
        parametrize.register_parametrization(model, "weight", Skew())
        parametrize.register_parametrization(model, "weight", Orthogonal())
        # Result should be orthogonal
        X = model.weight
        Id = torch.eye(X.size(0), device=X.device)
        self.assertEqual(X.T @ X, Id)
        # Structure tests
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertIn("weight", model.parametrizations)
        self.assertNotIn("weight", model._parameters)
        # Remove
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertEqual(model.weight, initial_model.weight)
        self.assertEqual(id(model.weight), initial_weight_id)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.__class__, nn.Linear)

        # Add everything
        parametrize.register_parametrization(model, "weight", Skew())
        parametrize.register_parametrization(model, "weight", Orthogonal())
        parametrize.register_parametrization(model, "bias", FirstZero())
        parametrize.register_parametrization(model, "bias", LastZero())

        # Basic tests
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertTrue(parametrize.is_parametrized(model, "bias"))
        self.assertEqual(model.bias[0].item(), 0.)
        self.assertEqual(model.bias[-1].item(), 0.)
        self.assertEqual(len(list(model.parameters())), 2)  # Nothing weird has happened
        # Should not throw

        sgd = torch.optim.SGD(model.parameters(), lr=0.01)

        weight_copy = model.weight.clone()
        bias_copy = model.bias.clone()
        sgd.zero_grad()
        (model.weight.T @ model.bias).sum().backward()
        sgd.step()
        self.assertNotEqual(model.weight, weight_copy)
        self.assertNotEqual(model.bias, bias_copy)

        # Remove first parametrization.
        # Check that the model is still parametrized and so is the second parameter
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertTrue(parametrize.is_parametrized(model))  # Still parametrized
        self.assertFalse(parametrize.is_parametrized(model, "weight"))  # Parametrization removed
        self.assertTrue(parametrize.is_parametrized(model, "bias"))  # Still parametrized
        self.assertEqual(model.bias[0].item(), 0.)  # Still parametrized
        self.assertEqual(model.bias[-1].item(), 0.)  # Still parametrized
        self.assertNotEqual(model.weight, initial_model.weight)  # Has been updated
        self.assertEqual(id(model.weight), initial_weight_id)  # Keeps the same id
        self.assertEqual(len(list(model.parameters())), 2)  # Nothing weird has happened
        # Should not throw
        weight_copy = model.weight.clone()
        bias_copy = model.bias.clone()
        sgd.zero_grad()
        (model.weight.T @ model.bias).sum().backward()
        sgd.step()
        self.assertNotEqual(model.weight, weight_copy)
        self.assertNotEqual(model.bias, bias_copy)

        # Remove the second parametrization.
        # Check that the module is not parametrized
        parametrize.remove_parametrizations(model, "bias", leave_parametrized=False)
        self.assertFalse(parametrize.is_parametrized(model))  # Not parametrized
        self.assertNotEqual(model.bias, initial_model.bias)  # Has been updated
        self.assertNotEqual(model.bias[0].item(), 0.)  # Not parametrized
        self.assertNotEqual(model.bias[-1].item(), 0.)  # Not parametrized
        self.assertEqual(id(model.bias), initial_bias_id)  # Keeps the same id
        self.assertFalse(hasattr(model, "parametrizations"))  # Not parametrized the module
        self.assertEqual(model.__class__, nn.Linear)  # Restores the previous class
        self.assertEqual(len(list(model.parameters())), 2)  # Nothing weird has happened

        # Should not throw things are updated
        weight_copy = model.weight.clone()
        bias_copy = model.bias.clone()
        sgd.zero_grad()
        (model.weight.T @ model.bias).sum().backward()
        sgd.step()
        self.assertNotEqual(model.weight, weight_copy)
        self.assertNotEqual(model.bias, bias_copy)

        # Test leave_parametrized=True
        for _ in range(2):
            parametrize.register_parametrization(model, "weight", Skew())
            parametrize.register_parametrization(model, "weight", Orthogonal())
            parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
            # We didn't change the dtype nor had multiple inputs, so the id should be the same
            self.assertEqual(id(model.weight), initial_weight_id)
            self.assertEqual(id(model.bias), initial_bias_id)

            # Should not throw. Things are updated
            weight_copy = model.weight.clone()
            bias_copy = model.bias.clone()
            sgd.zero_grad()
            (model.weight.T @ model.bias).sum().backward()
            sgd.step()
            self.assertNotEqual(model.weight, weight_copy)
            self.assertNotEqual(model.bias, bias_copy)
    def test_register_and_remove_nested_parametrization(self):
        r"""Test that it is possible to nest the parametrizations
        meaning that the original param is parametrized again
        """
        class Skew(nn.Module):
            def forward(self, X):
                X = X.tril(-1)
                return X - X.T

        model = nn.Linear(8, 8)
        # Add top level parametrization
        parametrize.register_parametrization(model, "weight", Skew())
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertNotIn("weight", model._parameters)
        # Result should be skew-symmetric
        A = model.weight
        self.assertEqual(A, -A.T)

        # Add nested parametrization: parametrize the "original" tensor
        # held inside the ParametrizationList itself
        param_mod = model.parametrizations.weight
        self.assertFalse(hasattr(param_mod, "parametrizations"))
        self.assertFalse(parametrize.is_parametrized(param_mod))
        self.assertFalse(parametrize.is_parametrized(param_mod, "original"))

        parametrize.register_parametrization(param_mod, "original", Skew())
        self.assertTrue(hasattr(param_mod, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(param_mod))
        self.assertTrue(parametrize.is_parametrized(param_mod, "original"))
        self.assertNotIn("original", param_mod._parameters)
        # Result should be skew-symmetric
        A = param_mod.original
        self.assertEqual(A, -A.T)

        # Remove nested param and check consistency
        parametrize.remove_parametrizations(param_mod, "original", leave_parametrized=False)
        self.assertFalse(hasattr(param_mod, "parametrizations"))
        self.assertEqual(param_mod.__class__, parametrize.ParametrizationList)

        # Remove top level and check consistency
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.__class__, nn.Linear)
    def test_register_and_remove_buffer_parametrization(self):
        r"""Test that it is possible to add and remove parametrizations on buffers"""
        # Define a couple vector parametrizations
        class FirstZero(nn.Module):
            def forward(self, x):
                return torch.cat([x.new_zeros(1), x[1:]])

        class LastZero(nn.Module):
            def forward(self, x):
                return torch.cat([x[:-1], x.new_zeros(1)])

        model = nn.Linear(8, 8)

        # Instantiate parametrizations on buffers. It should work as expected
        # (turn `bias` from a Parameter into a buffer first)
        delattr(model, "bias")
        model.register_buffer("bias", torch.ones(8))
        parametrize.register_parametrization(model, "bias", FirstZero())
        parametrize.register_parametrization(model, "bias", LastZero())
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "bias"))
        self.assertEqual(model.bias[0].item(), 0.)
        self.assertEqual(model.bias[-1].item(), 0.)
        self.assertTrue((model.bias[1:-1] == torch.ones(6)).all())
        # Only `weight` is a Parameter; the parametrized buffer is not counted
        self.assertEqual(len(list(model.parameters())), 1)

        # Remove parametrizations on buffers. It should work as expected
        parametrize.remove_parametrizations(model, "bias", leave_parametrized=True)
        self.assertFalse(parametrize.is_parametrized(model))
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertEqual(model.bias[0].item(), 0.)
        self.assertEqual(model.bias[-1].item(), 0.)
        self.assertTrue((model.bias[1:-1] == torch.ones(6)).all())
        self.assertEqual(len(list(model.parameters())), 1)
    # FIXME: Rewrite this test using functions not depending on LAPACK
    # and remove the `@skipIfNoLapack` (see #70995)
    @skipIfNoLapack
    def test_serialization_parametrization(self):
        r"""Test that it is possible to serialize a parametrized model via state_dict"""
        # A stateful parametrization
        class Orthogonal(nn.Module):
            def __init__(self, n):
                super().__init__()
                self.register_buffer("id", torch.eye(n))
                self.register_buffer("B", torch.empty(n, n))
                init.orthogonal_(self.B)

            def forward(self, X):
                # Cayley map composed with the fixed orthogonal matrix B
                A = X.triu(1)
                A = A - A.T
                return self.B @ torch.linalg.solve(self.id + A, self.id - A)

        def get_model():
            model = torch.nn.Sequential(
                torch.nn.Linear(5, 5),
                torch.nn.ReLU(),
                torch.nn.Linear(5, 1),
            )

            parametrize.register_parametrization(model[0], "weight", Orthogonal(5))
            return model

        model = get_model()

        prev_weight = model[0].weight
        prev_B = model[0].parametrizations.weight[0].B

        # Round-trip through state_dict into a freshly-built model
        new_model = get_model()
        with TemporaryFileName() as fname:
            torch.save(model.state_dict(), fname)
            new_model.load_state_dict(torch.load(fname))

        # Integrity tests
        self.assertTrue(parametrize.is_parametrized(new_model[0], "weight"))
        self.assertEqual(prev_weight, new_model[0].weight)
        self.assertEqual(prev_B, new_model[0].parametrizations.weight[0].B)

        # Trying to save the whole parametrized model raises
        with self.assertRaisesRegex(RuntimeError, "state_dict"):
            with TemporaryFileName() as fname:
                torch.save(model, fname)
    # FIXME: Rewrite this test using functions not depending on LAPACK
    # and remove the `@skipIfNoLapack` (see #70995)
    @skipIfNoLapack
    def test_initialization_parametrization(self):
        r"""Test that it is possible to initialize a parametrization when it
        implements a `right_inverse` method
        """
        class Skew(nn.Module):
            def forward(self, X):
                A = X.triu(1)
                return A - A.T

            def is_skew(self, A):
                return torch.allclose(A, -A.T, atol=1e-6)

            def right_inverse(self, X):
                if not self.is_skew(X):
                    raise ValueError("The matrix is not skew-symmetric.")
                return X.triu(1)

        # Implements a Cayley map where right_inverse is not quite the inverse of forward
        class Orthogonal(nn.Module):
            def __init__(self, n):
                super().__init__()
                self.register_buffer("B", torch.eye(n))

            def forward(self, X):
                Id = torch.eye(X.size(0))
                return self.B @ torch.linalg.solve(Id + X, Id - X)

            def is_orthogonal(self, X):
                Id = torch.eye(X.size(0))
                return torch.allclose(X.T @ X, Id, atol=1e-4)

            def right_inverse(self, X):
                if not self.is_orthogonal(X):
                    raise ValueError("The input is not orthogonal.")
                # cayley(0) == Id, so B @ cayley(0) == B
                self.B = X
                return torch.zeros_like(X)

        N = 5
        model = nn.Linear(N, N)
        # Register the skew-symmetric constraint. The result is now skew-symmetric
        skew = Skew()
        # Make the weight skew-symmetric before registering the parametrization
        with torch.no_grad():
            model.weight.set_(skew(model.weight))
        parametrize.register_parametrization(model, "weight", skew)
        X = torch.rand(N, N)
        # X is not skew-symmetric, so it throws an error
        with self.assertRaises(ValueError):
            model.weight = X
        # Make X skew-symmetric
        X = X - X.T
        model.weight = X
        self.assertEqual(model.parametrizations.weight.original, X.triu(1))
        self.assertEqual(model.weight, X)

        # Having several parametrizations registered should work in the same way
        parametrize.register_parametrization(model, "weight", Orthogonal(N))
        # Register now the Cayley map. The result is now orthogonal
        X = torch.rand(N, N)
        # X is not orthogonal, so it throws an error
        with self.assertRaises(ValueError):
            model.weight = X
        init.orthogonal_(X)
        model.weight = X
        self.assertEqual(model.weight, X)
        self.assertEqual(model.parametrizations.weight.original, torch.zeros_like(X))
    def test_errors_unparametrized_tensor_parametrization(self):
        """Exercise every error path of register/remove_parametrizations on a
        module whose tensor is not yet parametrized, and check the weight is
        left untouched afterwards.
        """
        # Test errors when registering a parametrization on an unparametrized tensor
        module = nn.Linear(3, 4)
        weight_init = module.weight.clone()

        class Identity(nn.Module):
            def forward(self, x):
                return x

        # Register a parametrization on a non-existing parameter throws
        with self.assertRaisesRegex(ValueError, "does not have a parameter"):
            parametrize.register_parametrization(module, "foo", Identity())
        self.assertFalse(parametrize.is_parametrized(module))

        # Removing parametrizations from an unparametrized tensor throws
        with self.assertRaisesRegex(ValueError, "does not have a parametrization"):
            parametrize.remove_parametrizations(module, "bias")
        self.assertFalse(parametrize.is_parametrized(module))

        # A correct parametrization with several outputs
        class Sum(nn.Module):
            def forward(self, x, y):
                return x + y

            def right_inverse(self, z):
                return z, torch.zeros_like(z)

        parametrize.register_parametrization(module, "weight", Sum())
        # Cannot remove a parametrization with several outputs with `leave_parametrized=False`
        with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
            parametrize.remove_parametrizations(module, "weight", leave_parametrized=False)
        parametrize.remove_parametrizations(module, "weight", leave_parametrized=True)

        # A parametrization with an incorrect number of outputs
        class WrongNumberParams(nn.Module):
            def forward(self, x, y, z):
                return x + y + z

            def right_inverse(self, w):
                return w, torch.zeros_like(w)

        # Makes param(*param.right_inverse(X)) fail
        with self.assertRaisesRegex(TypeError, "positional argument"):
            parametrize.register_parametrization(module, "weight", WrongNumberParams())
        self.assertFalse(parametrize.is_parametrized(module))

        # A parametrization with a right_inverse that does not return a Tensor or Sequence[Tensor]
        class WrongRightInverse(Identity):
            def right_inverse(self, z):
                return None

        # right_inverse should return a Tensor or a Sequence[Tensor]
        with self.assertRaisesRegex(ValueError, "Tensor or a Sequence of"):
            parametrize.register_parametrization(module, "weight", WrongRightInverse())
        self.assertFalse(parametrize.is_parametrized(module))

        # If it's a sequence, it must to be a sequence of tensors
        class WrongRightInverseSequence(nn.Module):
            def forward(self, x, y):
                return x

            def right_inverse(self, z):
                return None, z

        with self.assertRaisesRegex(ValueError, "of the sequence with type"):
            parametrize.register_parametrization(module, "weight", WrongRightInverseSequence())
        self.assertFalse(parametrize.is_parametrized(module))

        # A parametrization from one tensor to one tensor that changes the dtype
        class ChangeDtypeInverse(nn.Module):
            def forward(self, x):
                return x.float()

            def right_inverse(self, w):
                return w.bool()

        # For parametrizations that return one tensor, right_inverse may not change the dtype
        with self.assertRaisesRegex(ValueError, "outputs one tensor, it may not change the dtype"):
            parametrize.register_parametrization(module, "weight", ChangeDtypeInverse())
        self.assertFalse(parametrize.is_parametrized(module))

        # Doesn't return a tensor
        class NotTensor(nn.Module):
            def forward(self, x):
                return 2

        # Forward must return a tensor
        with self.assertRaisesRegex(ValueError, "must return a tensor"):
            parametrize.register_parametrization(module, "weight", NotTensor())
        self.assertFalse(parametrize.is_parametrized(module))

        # A parametrization from one tensor to one tensor that changes the dtype
        class ChangeDtype(nn.Module):
            def forward(self, x):
                return x.bool()

        # forward should not change the initial dtype
        with self.assertRaisesRegex(ValueError, "may not change the dtype"):
            parametrize.register_parametrization(module, "weight", ChangeDtype())
        self.assertFalse(parametrize.is_parametrized(module))

        # Change shape
        class ChangeShape(nn.Module):
            def forward(self, x):
                return x[:-1]

        # forward should not change the original shape
        with self.assertRaisesRegex(ValueError, "may not change the shape"):
            parametrize.register_parametrization(module, "weight", ChangeShape())
        self.assertFalse(parametrize.is_parametrized(module))

        # Many to one that changes dtype
        class ChangeDtypeMulti(nn.Module):
            def forward(self, x, y):
                return (x + y).bool()

            def right_inverse(self, w):
                return w, w + 1

        # forward should not change the original shape even for parametrizations with many inputs
        with self.assertRaisesRegex(ValueError, "may not change the dtype"):
            parametrize.register_parametrization(module, "weight", ChangeDtypeMulti())
        self.assertFalse(parametrize.is_parametrized(module))

        # Returning a sequence of size one, although weird, it's correct
        class SequenceLen1(nn.Module):
            def forward(self, x):
                return x

            def right_inverse(self, w):
                return (w,)

        parametrize.register_parametrization(module, "weight", SequenceLen1())
        self.assertTrue(hasattr(module.parametrizations.weight, "original0"))
        self.assertFalse(hasattr(module.parametrizations.weight, "original1"))
        _ = module.weight  # Does not throw
        self.assertTrue(parametrize.is_parametrized(module))
        parametrize.remove_parametrizations(module, "weight", leave_parametrized=True)

        # None of the operations above should have altered the weight
        self.assertFalse(parametrize.is_parametrized(module))
        self.assertEqual(module.weight, weight_init)
    def test_errors_parametrized_tensor_parametrization(self):
        """Check that registering an invalid parametrization on an already
        parametrized tensor raises and leaves the existing parametrization intact.
        """
        # Test errors when registering a parametrization on a parametrized tensor

        class Identity(nn.Module):
            def forward(self, x):
                return x

        module = nn.Linear(3, 4)
        parametrize.register_parametrization(module, "weight", Identity())

        # Has to return a tensor
        class WrongReturn(nn.Module):
            def forward(self, x):
                return x, x

        with self.assertRaisesRegex(ValueError, "must return a tensor"):
            parametrize.register_parametrization(module, "weight", WrongReturn())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

        # Cannot change dtype
        class ChangeDtype(nn.Module):
            def forward(self, x):
                return x.bool()

        with self.assertRaisesRegex(ValueError, "may not change the dtype"):
            parametrize.register_parametrization(module, "weight", ChangeDtype())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

        # Cannot change shape
        class ChangeShape(nn.Module):
            def forward(self, x):
                return x[:-1]

        with self.assertRaisesRegex(ValueError, "may not change the shape"):
            parametrize.register_parametrization(module, "weight", ChangeShape())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

        # The following checks are mostly due to bugs in the code of the parametrization

        # right_inverse has to return a tensor
        class WrongReturnInverse(Identity):
            def right_inverse(self, x):
                return x, x

        with self.assertRaisesRegex(ValueError, "right_inverse must return a tensor"):
            parametrize.register_parametrization(module, "weight", WrongReturnInverse())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

        # Cannot change dtype
        class ChangeDtypeInverse(Identity):
            def right_inverse(self, x):
                return x.bool()

        with self.assertRaisesRegex(ValueError, "must have the same dtype"):
            parametrize.register_parametrization(module, "weight", ChangeDtypeInverse())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))

        # Cannot change shape
        class ChangeShapeInverse(Identity):
            def right_inverse(self, x):
                return x[:-1]

        with self.assertRaisesRegex(ValueError, "must have the same shape"):
            parametrize.register_parametrization(module, "weight", ChangeShapeInverse())
        self.assertTrue(parametrize.is_parametrized(module))
        self.assertEqual(len(module.parametrizations.weight), 1)
        self.assertTrue(isinstance(module.parametrizations.weight[0], Identity))
    # FIXME: Rewrite this test using functions not depending on LAPACK
    # and remove the `@skipIfNoLapack` (see #70995)
    @skipIfNoLapack
    def test_multiple_inputs_parametrization(self):
        """Test parametrizations whose right_inverse returns several tensors
        (several `originalN` parameters), including stacking a one-input
        parametrization on top and removing everything.
        """
        # A parametrization with several outputs
        class RankOne(nn.Module):
            def forward(self, x, y):
                # Form a rank-1 matrix from a pair of vectors
                return x.unsqueeze(-1) @ y.unsqueeze(-2)

            def right_inverse(self, Y):
                # We project the given matrix onto the rank 1 matrices
                U, S, Vh = torch.linalg.svd(Y, full_matrices=False)
                # S is ordered in a decreasing way.
                s0_sqrt = S[0].sqrt().unsqueeze(-1)
                return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt

        # Simple parametrisation
        class Double(nn.Module):
            def forward(self, x):
                return 2.0 * x

            def right_inverse(self, w):
                return 0.5 * w

        model = nn.Linear(3, 3)
        # Test one parametrization
        parametrize.register_parametrization(model, "weight", RankOne())
        self.assertTrue(hasattr(model, "parametrizations"))
        self.assertTrue(parametrize.is_parametrized(model))
        self.assertTrue(parametrize.is_parametrized(model, "weight"))
        self.assertTrue(hasattr(model.parametrizations.weight, "original0"))
        self.assertIn("original0", model.parametrizations.weight._parameters)
        self.assertTrue(hasattr(model.parametrizations.weight, "original1"))
        self.assertIn("original1", model.parametrizations.weight._parameters)
        self.assertFalse(parametrize.is_parametrized(model, "bias"))
        self.assertNotIn("weight", model._parameters)
        # Result should be rank 1
        self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)

        with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
            # Cannot remove a parametrization with multiple inputs and not leave it parametrized
            parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        # Remove parametrization and check consistency
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.__class__, nn.Linear)
        self.assertFalse(parametrize.is_parametrized(model))
        self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
        self.assertIn("weight", model._parameters)

        # Registering parametrizations with one input on top of one with multiple inputs should work
        init_weight = model.weight.clone()
        parametrize.register_parametrization(model, "weight", RankOne())
        # Projecting a rank 1 matrix onto the matrices of rank one does not change the matrix
        self.assertEqual(init_weight, model.weight)
        parametrize.register_parametrization(model, "weight", Double())
        # The matrix now is twice the initial matrix
        self.assertEqual(2.0 * init_weight, model.weight)
        # Multiplying by a scalar does not change the rank
        self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)

        # The model has now three parameters
        self.assertEqual(len(list(model.parameters())), 3)

        sgd = torch.optim.SGD(model.parameters(), lr=0.1)

        # Test backward. Should not throw
        for _ in range(2):
            sgd.zero_grad()
            loss = (model.weight.T @ model.bias).sum()
            loss.backward()
            sgd.step()

        # Same drill as before, removing should work as expected
        with self.assertRaisesRegex(ValueError, "leave_parametrized=False"):
            # Cannot remove a parametrization with multiple inputs and not leave it parametrized
            parametrize.remove_parametrizations(model, "weight", leave_parametrized=False)
        # Remove parametrization and check consistency
        parametrize.remove_parametrizations(model, "weight", leave_parametrized=True)
        self.assertFalse(hasattr(model, "parametrizations"))
        self.assertEqual(model.__class__, nn.Linear)
        self.assertFalse(parametrize.is_parametrized(model))
        self.assertEqual(torch.linalg.matrix_rank(model.weight).item(), 1)
        self.assertIn("weight", model._parameters)

        # The model has now two parameters
        self.assertEqual(len(list(model.parameters())), 2)

        # Test backward. Should not throw
        sgd = torch.optim.SGD(model.parameters(), lr=0.1)
        for _ in range(2):
            sgd.zero_grad()
            loss = (model.weight.T @ model.bias).sum()
            loss.backward()
            sgd.step()
    # FIXME: Rewrite this test using functions not depending on LAPACK
    # and remove the `@skipIfNoLapack` (see #70995)
    @skipIfNoLapack
    def test_caching_parametrization(self):
        r"""Test the caching system of a parametrization"""
        # Define a couple matrix parametrizations
        class Skew(nn.Module):
            def forward(self, X):
                X = X.tril(-1)
                return X - X.T

        class Orthogonal(nn.Module):
            def forward(self, X):
                Id = torch.eye(X.size(0), device=X.device)
                return torch.linalg.solve(Id + X, Id - X)

        model = nn.Linear(5, 5)
        parametrize.register_parametrization(model, "weight", Skew())
        parametrize.register_parametrization(model, "weight", Orthogonal())

        # Test that the caching system works: inside `cached()` repeated
        # accesses return the very same tensor object
        with parametrize.cached():
            X = model.weight
            Y = model.weight
            self.assertEqual(id(X), id(Y))

    # FIXME: Rewrite this test using functions not depending on LAPACK
    # and remove the `@skipIfNoLapack` (see #70995)
    @skipIfNoLapack
    def test_caching_parametrization_with_transfer_parametrizations_and_params(self):
        r"""Test that transferring parametrizations doesn't cause issues with caching"""
        class Skew(nn.Module):
            def forward(self, X):
                X = X.tril(-1)
                return X - X.T

        class Orthogonal(nn.Module):
            def forward(self, X):
                Id = torch.eye(X.size(0), device=X.device)
                return torch.linalg.solve(Id + X, Id - X)

        model = nn.Linear(5, 5)
        parametrize.register_parametrization(model, "weight", Skew())
        parametrize.register_parametrization(model, "weight", Orthogonal())

        to_model = nn.Linear(5, 5)
        parametrize.transfer_parametrizations_and_params(model, to_model)

        with parametrize.cached():
            X = model.weight
            Y = model.weight
            self.assertEqual(id(X), id(Y))

            A = to_model.weight
            B = to_model.weight
            self.assertEqual(id(A), id(B))

            # test that the results are distinct objects for each module
            self.assertNotEqual(id(A), id(X))
test that the results are distinct objects for each module\n self.assertNotEqual(id(A), id(X))\n\n def test_parametrization_same_training_mode(self):\n r\"\"\"Test training mode updated on parametrization registration\"\"\"\n class Identity(nn.Module):\n def forward(self, X):\n return X\n\n module = nn.Linear(4, 4)\n module.eval()\n parametrize.register_parametrization(module, \"weight\", Identity())\n self.assertFalse(module.parametrizations.weight[0].training)\n module.train()\n parametrize.register_parametrization(module, \"weight\", Identity().eval())\n self.assertTrue(module.parametrizations.weight[0].training)\n self.assertTrue(module.parametrizations.weight[1].training)\n\n def test_type_before_parametrizations(self):\n r\"\"\"Test that type_before_parametrizations always retrieves original type\"\"\"\n\n class Identity(nn.Module):\n def forward(self, X):\n return X\n\n model = nn.Linear(5, 5)\n original_type = type(model)\n self.assertTrue(\n parametrize.type_before_parametrizations(model) == original_type\n )\n parametrize.register_parametrization(model, \"weight\", Identity())\n self.assertTrue(\n parametrize.type_before_parametrizations(model) == original_type\n )\n\n def test_transfer_parametrizations_and_params(self):\n r\"\"\"Test that all parametrizations and their associated parameters are transferred.\"\"\"\n\n class AddOne(nn.Module):\n def forward(self, x):\n return x + 1.0\n\n class Double(nn.Module):\n def forward(self, x):\n return 2.0 * x\n\n def right_inverse(self, x):\n return 0.5 * x\n\n class MinusOne(nn.Module):\n def forward(self, x):\n return x - 1.0\n\n model = nn.Linear(5, 5)\n parametrize.register_parametrization(model, \"weight\", AddOne())\n parametrize.register_parametrization(model, \"weight\", Double())\n parametrize.register_parametrization(model, \"weight\", MinusOne())\n hold_weight = model.weight\n\n to_model = nn.qat.Linear(\n 5, 5, qconfig=torch.ao.quantization.get_default_qconfig()\n )\n 
parametrize.transfer_parametrizations_and_params(model, to_model)\n\n # checks that final and original value are correct and the to_model is parametrized\n self.assertTrue(torch.nn.utils.parametrize.is_parametrized(to_model, \"weight\"))\n self.assertEqual(model.weight, to_model.weight)\n self.assertEqual(\n model.parametrizations.weight.original,\n to_model.parametrizations.weight.original,\n )\n\n # check that the transfer didn't affect the original value\n self.assertEqual(hold_weight, model.weight)\n\n # testing that changes to one set of parametrizations do not affect the other\n parametrize.remove_parametrizations(to_model, \"weight\")\n self.assertFalse(torch.nn.utils.parametrize.is_parametrized(to_model, \"weight\"))\n self.assertTrue(torch.nn.utils.parametrize.is_parametrized(model, \"weight\"))\n\n # also test that parameters that don't exist in to_model get transferred\n model.test_param = Parameter(torch.randn(5, 5))\n\n self.assertTrue(not hasattr(to_model, \"test_param\"))\n parametrize.register_parametrization(model, \"test_param\", Double())\n hold_test_param = model.test_param\n parametrize.transfer_parametrizations_and_params(model, to_model, \"test_param\")\n\n # check that previously missing params got transferred correctly\n self.assertEqual(model.test_param, to_model.test_param)\n self.assertEqual(\n model.parametrizations.test_param.original,\n to_model.parametrizations.test_param.original,\n )\n\n # check that the new transfer didn't change the value for the from_module\n self.assertEqual(hold_test_param, model.test_param)\n\n def test_transfer_parametrizations_and_params_right_inverse(self):\n r\"\"\"Test that all parametrizations and their associated parameters are transferred.\"\"\"\n\n class Double(nn.Module):\n def forward(self, x):\n return 2.0 * x\n\n def right_inverse(self, x):\n return 0.5 * x\n\n model = nn.Linear(5, 5)\n parametrize.register_parametrization(model, \"weight\", Double())\n hold_weight = model.weight\n\n to_model = 
nn.qat.Linear(\n 5, 5, qconfig=torch.ao.quantization.get_default_qconfig()\n )\n parametrize.transfer_parametrizations_and_params(model, to_model)\n\n # check that transfer occurs successfully\n self.assertEqual(model.weight, to_model.weight)\n self.assertEqual(\n model.parametrizations.weight.original,\n to_model.parametrizations.weight.original,\n )\n\n # check that transfer doesn't affect the from_model weight\n self.assertEqual(hold_weight, model.weight)\n\n def test_transfer_parametrizations_and_params_single_param(self):\n r\"\"\"Test that all parametrizations and their associated parameters are transferred.\"\"\"\n\n class AddOne(nn.Module):\n def forward(self, x):\n return x + 1.0\n\n class Double(nn.Module):\n def forward(self, x):\n return 2.0 * x\n\n class MinusOne(nn.Module):\n def forward(self, x):\n return x - 1.0\n\n model = nn.Linear(5, 5, bias=True)\n parametrize.register_parametrization(model, \"weight\", AddOne())\n parametrize.register_parametrization(model, \"weight\", Double())\n parametrize.register_parametrization(model, \"weight\", MinusOne())\n parametrize.register_parametrization(model, \"bias\", AddOne())\n parametrize.register_parametrization(model, \"bias\", Double())\n parametrize.register_parametrization(model, \"bias\", MinusOne())\n\n to_model = nn.qat.Linear(\n 5, 5, bias=True, qconfig=torch.ao.quantization.get_default_qconfig()\n )\n parametrize.transfer_parametrizations_and_params(model, to_model, \"weight\")\n\n # check that weight and only weight was transferred\n self.assertEqual(model.weight, to_model.weight)\n self.assertEqual(\n model.parametrizations.weight.original,\n to_model.parametrizations.weight.original,\n )\n self.assertTrue(\"bias\" not in to_model.parametrizations)\n\n # FIXME: Rewrite this test using functions not depending on LAPACK\n # and remove the `@skipIfNoLapack` (see #70995)\n @skipIfNoLapack\n def test_transfer_parametrizations_and_params_many_to_one(self):\n # A parametrization with several outputs\n 
class RankOne(nn.Module):\n def forward(self, x, y):\n # Form a rank-1 matrix from a pair of vectors\n return x.unsqueeze(-1) @ y.unsqueeze(-2)\n\n def right_inverse(self, Y):\n # We project the given matrix onto the rank 1 matrices\n U, S, Vh = torch.linalg.svd(Y, full_matrices=False)\n # S is ordered in a decreasing way.\n s0_sqrt = S[0].sqrt().unsqueeze(-1)\n return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt\n\n class Double(nn.Module):\n def forward(self, x):\n return 2.0 * x\n\n model = nn.Linear(3, 3)\n parametrize.register_parametrization(model, \"weight\", RankOne())\n parametrize.register_parametrization(model, \"weight\", Double())\n hold_weight = model.weight\n\n to_model = nn.qat.Linear(\n 3, 3, qconfig=torch.ao.quantization.get_default_qconfig()\n )\n\n parametrize.transfer_parametrizations_and_params(model, to_model)\n\n # checks that final and original value are correct and the to_model is parametrized\n self.assertTrue(torch.nn.utils.parametrize.is_parametrized(to_model, \"weight\"))\n self.assertEqual(model.weight, to_model.weight)\n self.assertEqual(\n model.parametrizations.weight.original0,\n to_model.parametrizations.weight.original0,\n )\n self.assertEqual(\n model.parametrizations.weight.original1,\n to_model.parametrizations.weight.original1,\n )\n\n # check that the transfer didn't affect the original value\n self.assertEqual(hold_weight, model.weight)\n\n # testing that changes to one set of parametrizations do not affect the other\n model.test_param = Parameter(torch.randn(3, 3))\n\n self.assertTrue(not hasattr(to_model, \"test_param\"))\n parametrize.register_parametrization(model, \"test_param\", RankOne())\n hold_test_param = model.test_param\n parametrize.transfer_parametrizations_and_params(model, to_model, \"test_param\")\n\n # also check that previously missing params got transferred correctly\n self.assertEqual(model.test_param, to_model.test_param)\n self.assertEqual(\n model.parametrizations.test_param.original0,\n 
to_model.parametrizations.test_param.original0,
        )
        self.assertEqual(
            model.parametrizations.test_param.original1,
            to_model.parametrizations.test_param.original1,
        )

        # check that the new transfer didn't change the value for the from_module
        self.assertEqual(hold_test_param, model.test_param)

    # torch/nn/utils/prune.py
    @unittest.skipIf(not TEST_NUMPY, "numpy not found")
    def test_validate_pruning_amount_init(self):
        r"""Test the first util function that validates the pruning
        amount requested by the user the moment the pruning method
        is initialized. This test checks that the expected errors are
        raised whenever the amount is invalid.
        The original function runs basic type checking + value range checks.
        It doesn't check the validity of the pruning amount with
        respect to the size of the tensor to prune. That's left to
        `_validate_pruning_amount`, tested below.
        """
        # neither float not int should raise TypeError
        with self.assertRaises(TypeError):
            prune._validate_pruning_amount_init(amount="I'm a string")

        # float not in [0, 1] should raise ValueError
        with self.assertRaises(ValueError):
            prune._validate_pruning_amount_init(amount=1.1)
        with self.assertRaises(ValueError):
            prune._validate_pruning_amount_init(amount=20.)

        # negative int should raise ValueError
        with self.assertRaises(ValueError):
            prune._validate_pruning_amount_init(amount=-10)

        # all these should pass without errors because they're valid amounts
        # (floats in [0, 1] are fractions; non-negative ints are unit counts)
        prune._validate_pruning_amount_init(amount=0.34)
        prune._validate_pruning_amount_init(amount=1500)
        prune._validate_pruning_amount_init(amount=0)
        prune._validate_pruning_amount_init(amount=0.)
        prune._validate_pruning_amount_init(amount=1)
        prune._validate_pruning_amount_init(amount=1.)
        # no-op sentinel: reaching this line without an exception is the pass
        self.assertTrue(True)

    @unittest.skipIf(not TEST_NUMPY, "numpy not found")
    def test_validate_pruning_amount(self):
        r"""Tests the second util function that validates the pruning
        amount requested by the user, this time with respect to the size
        of the tensor to prune. The rationale is that if the pruning amount,
        converted to absolute value of units to prune, is larger than
        the number of units in the tensor, then we expect the util function
        to raise a value error.
        """
        # if amount is int and amount > tensor_size, raise ValueError
        with self.assertRaises(ValueError):
            prune._validate_pruning_amount(amount=20, tensor_size=19)

        # amount is a float so this should not raise an error
        # (fractions are valid for any tensor size, including 0)
        prune._validate_pruning_amount(amount=0.3, tensor_size=0)

        # this is okay
        prune._validate_pruning_amount(amount=19, tensor_size=20)
        prune._validate_pruning_amount(amount=0, tensor_size=0)
        prune._validate_pruning_amount(amount=1, tensor_size=1)
        # no-op sentinel: reaching this line without an exception is the pass
        self.assertTrue(True)

    @unittest.skipIf(not TEST_NUMPY, "numpy not found")
    def test_compute_nparams_to_prune(self):
        r"""Test that requested pruning `amount` gets translated into the
        correct absolute number of units to prune.
        """
        self.assertEqual(
            prune._compute_nparams_toprune(amount=0, tensor_size=15),
            0
        )
        self.assertEqual(
            prune._compute_nparams_toprune(amount=10, tensor_size=15),
            10
        )
        # if 1 is int, means 1 unit
        self.assertEqual(
            prune._compute_nparams_toprune(amount=1, tensor_size=15),
            1
        )
        # if 1. is float, means 100% of units
        self.assertEqual(
            prune._compute_nparams_toprune(amount=1., tensor_size=15),
            15
        )
        # 0.4 of 17 units -> expected 7 units (0.4 * 17 = 6.8, rounded up here)
        self.assertEqual(
            prune._compute_nparams_toprune(amount=0.4, tensor_size=17),
            7
        )

    def test_random_pruning_sizes(self):
        r"""Test that the new parameters and buffers created by the pruning
        method have the same size as the input tensor to prune. 
These, in\n fact, correspond to the pruned version of the tensor itself, its\n mask, and its original copy, so the size must match.\n \"\"\"\n # fixturize test\n # TODO: add other modules\n modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]\n names = ['weight', 'bias']\n\n for m in modules:\n for name in names:\n with self.subTest(m=m, name=name):\n original_tensor = getattr(m, name)\n\n prune.random_unstructured(m, name=name, amount=0.1)\n # mask has the same size as tensor being pruned\n self.assertEqual(\n original_tensor.size(),\n getattr(m, name + '_mask').size()\n )\n # 'orig' tensor has the same size as the original tensor\n self.assertEqual(\n original_tensor.size(),\n getattr(m, name + '_orig').size()\n )\n # new tensor has the same size as the original tensor\n self.assertEqual(\n original_tensor.size(),\n getattr(m, name).size()\n )\n\n def test_random_pruning_orig(self):\n r\"\"\"Test that original tensor is correctly stored in 'orig'\n after pruning is applied. Important to make sure we don't\n lose info about the original unpruned parameter.\n \"\"\"\n # fixturize test\n # TODO: add other modules\n modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]\n names = ['weight', 'bias']\n\n for m in modules:\n for name in names:\n with self.subTest(m=m, name=name):\n\n # tensor prior to pruning\n original_tensor = getattr(m, name)\n prune.random_unstructured(m, name=name, amount=0.1)\n self.assertEqual(\n original_tensor,\n getattr(m, name + '_orig')\n )\n\n def test_random_pruning_new_weight(self):\n r\"\"\"Test that module.name now contains a pruned version of\n the original tensor obtained from multiplying it by the mask.\n \"\"\"\n # fixturize test\n # TODO: add other modules\n modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]\n names = ['weight', 'bias']\n\n for m in modules:\n for name in names:\n with self.subTest(m=m, name=name):\n # tensor prior to pruning\n original_tensor = getattr(m, name)\n prune.random_unstructured(m, name=name, amount=0.1)\n # weight = 
weight_orig * weight_mask\n self.assertEqual(\n getattr(m, name),\n getattr(m, name + '_orig')\n * getattr(m, name + '_mask').to(\n dtype=original_tensor.dtype\n ),\n )\n\n def test_identity_pruning(self):\n r\"\"\"Test that a mask of 1s does not change forward or backward.\n \"\"\"\n input_ = torch.ones(1, 5)\n m = nn.Linear(5, 2)\n y_prepruning = m(input_) # output prior to pruning\n\n # compute grad pre-pruning and check it's equal to all ones\n y_prepruning.sum().backward()\n old_grad_weight = m.weight.grad.clone() # don't grab pointer!\n self.assertEqual(old_grad_weight, torch.ones_like(m.weight))\n old_grad_bias = m.bias.grad.clone()\n self.assertEqual(old_grad_bias, torch.ones_like(m.bias))\n\n # remove grads\n m.zero_grad()\n\n # force the mask to be made of all 1s\n prune.identity(m, name=\"weight\")\n\n # with mask of 1s, output should be identical to no mask\n y_postpruning = m(input_)\n self.assertEqual(y_prepruning, y_postpruning)\n\n # with mask of 1s, grad should be identical to no mask\n y_postpruning.sum().backward()\n self.assertEqual(old_grad_weight, m.weight_orig.grad)\n self.assertEqual(old_grad_bias, m.bias.grad)\n\n # calling forward twice in a row shouldn't change output\n y1 = m(input_)\n y2 = m(input_)\n self.assertEqual(y1, y2)\n\n def test_random_pruning_0perc(self):\n r\"\"\"Test that a mask of 1s does not change forward or backward.\n \"\"\"\n input_ = torch.ones(1, 5)\n m = nn.Linear(5, 2)\n y_prepruning = m(input_) # output prior to pruning\n\n # compute grad pre-pruning and check it's equal to all ones\n y_prepruning.sum().backward()\n old_grad_weight = m.weight.grad.clone() # don't grab pointer!\n self.assertEqual(old_grad_weight, torch.ones_like(m.weight))\n old_grad_bias = m.bias.grad.clone()\n self.assertEqual(old_grad_bias, torch.ones_like(m.bias))\n\n # remove grads\n m.zero_grad()\n\n # force the mask to be made of all 1s\n with mock.patch(\n \"torch.nn.utils.prune.RandomUnstructured.compute_mask\"\n ) as compute_mask:\n 
compute_mask.return_value = torch.ones_like(m.weight)\n prune.random_unstructured(m, name='weight', amount=0.9) # amount won't count\n\n # with mask of 1s, output should be identical to no mask\n y_postpruning = m(input_)\n self.assertEqual(y_prepruning, y_postpruning)\n\n # with mask of 1s, grad should be identical to no mask\n y_postpruning.sum().backward()\n self.assertEqual(old_grad_weight, m.weight_orig.grad)\n self.assertEqual(old_grad_bias, m.bias.grad)\n\n # calling forward twice in a row shouldn't change output\n y1 = m(input_)\n y2 = m(input_)\n self.assertEqual(y1, y2)\n\n def test_random_pruning(self):\n input_ = torch.ones(1, 5)\n m = nn.Linear(5, 2)\n\n # define custom mask to assign with mock\n mask = torch.ones_like(m.weight)\n mask[1, 0] = 0\n mask[0, 3] = 0\n\n # check grad is zero for masked weights\n with mock.patch(\n \"torch.nn.utils.prune.RandomUnstructured.compute_mask\"\n ) as compute_mask:\n compute_mask.return_value = mask\n prune.random_unstructured(m, name='weight', amount=0.9)\n\n y_postpruning = m(input_)\n y_postpruning.sum().backward()\n # weight_orig is the parameter, so it's the tensor that will accumulate the grad\n self.assertEqual(m.weight_orig.grad, mask) # all 1s, except for masked units\n self.assertEqual(m.bias.grad, torch.ones_like(m.bias))\n\n # make sure that weight_orig update doesn't modify [1, 0] and [0, 3]\n old_weight_orig = m.weight_orig.clone()\n # update weights\n learning_rate = 1.\n for p in m.parameters():\n p.data.sub_(p.grad.data * learning_rate)\n # since these are pruned, they should not be updated\n self.assertEqual(old_weight_orig[1, 0], m.weight_orig[1, 0])\n self.assertEqual(old_weight_orig[0, 3], m.weight_orig[0, 3])\n\n def test_random_pruning_forward(self):\n r\"\"\"check forward with mask (by hand).\n \"\"\"\n input_ = torch.ones(1, 5)\n m = nn.Linear(5, 2)\n\n # define custom mask to assign with mock\n mask = torch.zeros_like(m.weight)\n mask[1, 0] = 1\n mask[0, 3] = 1\n\n with mock.patch(\n 
"torch.nn.utils.prune.RandomUnstructured.compute_mask"
        ) as compute_mask:
            compute_mask.return_value = mask
            prune.random_unstructured(m, name='weight', amount=0.9)

            # NOTE(review): relies on the all-ones input and the two surviving
            # mask entries set up above — each output is one weight plus bias.
            yhat = m(input_)
            self.assertEqual(yhat[0, 0], m.weight_orig[0, 3] + m.bias[0])
            self.assertEqual(yhat[0, 1], m.weight_orig[1, 0] + m.bias[1])

    def test_remove_pruning_forward(self):
        r"""Remove pruning and check forward is unchanged from previous
        pruned state.
        """
        input_ = torch.ones(1, 5)
        m = nn.Linear(5, 2)

        # define custom mask to assign with mock
        mask = torch.ones_like(m.weight)
        mask[1, 0] = 0
        mask[0, 3] = 0

        # check grad is zero for masked weights
        with mock.patch(
            "torch.nn.utils.prune.RandomUnstructured.compute_mask"
        ) as compute_mask:
            compute_mask.return_value = mask
            prune.random_unstructured(m, name='weight', amount=0.9)

        y_postpruning = m(input_)

        # make the pruning permanent: weight becomes the masked tensor
        prune.remove(m, 'weight')

        y_postremoval = m(input_)
        self.assertEqual(y_postpruning, y_postremoval)

    def test_pruning_id_consistency(self):
        r"""Test that pruning doesn't change the id of the parameters, which
        would otherwise introduce issues with pre-existing optimizers that
        point to old parameters.
        """
        m = nn.Linear(5, 2, bias=False)

        tensor_id = id(list(m.parameters())[0])

        # the sole parameter must keep its identity through prune + remove
        prune.random_unstructured(m, name="weight", amount=0.9)
        self.assertEqual(tensor_id, id(list(m.parameters())[0]))

        prune.remove(m, "weight")
        self.assertEqual(tensor_id, id(list(m.parameters())[0]))

    def test_random_pruning_pickle(self):
        r"""Pruned modules must survive a pickle round-trip and keep their type."""
        modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]
        names = ['weight', 'bias']

        for m in modules:
            for name in names:
                with self.subTest(m=m, name=name):
                    prune.random_unstructured(m, name=name, amount=0.1)
                    m_new = pickle.loads(pickle.dumps(m))
                    self.assertIsInstance(m_new, type(m))

    def test_multiple_pruning_calls(self):
        # if you call pruning twice, the hook becomes a PruningContainer
        m = nn.Conv3d(2, 2, 2)
        
prune.l1_unstructured(m, name='weight', amount=0.1)\n weight_mask0 = m.weight_mask # save it for later sanity check\n\n # prune again\n prune.ln_structured(m, name='weight', amount=0.3, n=2, dim=0)\n hook = next(iter(m._forward_pre_hooks.values()))\n self.assertIsInstance(\n hook,\n torch.nn.utils.prune.PruningContainer\n )\n # check that container._tensor_name is correctly set no matter how\n # many pruning methods are in the container\n self.assertEqual(hook._tensor_name, 'weight')\n\n # check that the pruning container has the right length\n # equal to the number of pruning iters\n self.assertEqual(len(hook), 2) # m.weight has been pruned twice\n\n # check that the entries of the pruning container are of the expected\n # type and in the expected order\n self.assertIsInstance(hook[0], torch.nn.utils.prune.L1Unstructured)\n self.assertIsInstance(hook[1], torch.nn.utils.prune.LnStructured)\n\n # check that all entries that are 0 in the 1st mask are 0 in the\n # 2nd mask too\n self.assertTrue(torch.all(m.weight_mask[weight_mask0 == 0] == 0))\n\n # prune again\n prune.ln_structured(m, name='weight', amount=0.1, n=float('inf'), dim=1)\n # check that container._tensor_name is correctly set no matter how\n # many pruning methods are in the container\n hook = next(iter(m._forward_pre_hooks.values()))\n self.assertEqual(hook._tensor_name, 'weight')\n\n def test_pruning_container(self):\n # create an empty container\n container = prune.PruningContainer()\n container._tensor_name = 'test'\n self.assertEqual(len(container), 0)\n\n p = prune.L1Unstructured(amount=2)\n p._tensor_name = 'test'\n\n # test adding a pruning method to a container\n container.add_pruning_method(p)\n\n # test error raised if tensor name is different\n q = prune.L1Unstructured(amount=2)\n q._tensor_name = 'another_test'\n with self.assertRaises(ValueError):\n container.add_pruning_method(q)\n\n # test that adding a non-pruning method object to a pruning container\n # raises a TypeError\n with 
self.assertRaises(TypeError):\n container.add_pruning_method(10)\n with self.assertRaises(TypeError):\n container.add_pruning_method('ugh')\n\n def test_pruning_container_compute_mask(self):\n r\"\"\"Test `compute_mask` of pruning container with a known `t` and\n `default_mask`. Indirectly checks that Ln structured pruning is\n acting on the right axis.\n \"\"\"\n # create an empty container\n container = prune.PruningContainer()\n container._tensor_name = 'test'\n\n # 1) test unstructured pruning\n # create a new pruning method\n p = prune.L1Unstructured(amount=2)\n p._tensor_name = 'test'\n # add the pruning method to the container\n container.add_pruning_method(p)\n\n # create tensor to be pruned\n t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)\n # create prior mask by hand\n default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])\n # since we are pruning the two lowest magnitude units, the outcome of\n # the calculation should be this:\n expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])\n computed_mask = container.compute_mask(t, default_mask)\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(expected_mask, computed_mask)\n\n # 2) test structured pruning\n q = prune.LnStructured(amount=1, n=2, dim=0)\n q._tensor_name = 'test'\n container.add_pruning_method(q)\n # since we are pruning the lowest magnitude one of the two rows, the\n # outcome of the calculation should be this:\n expected_mask = torch.tensor([[0, 0, 0, 0], [1, 1, 0, 1]])\n computed_mask = container.compute_mask(t, default_mask)\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(expected_mask, computed_mask)\n\n # 2) test structured pruning, along another axis\n r = prune.LnStructured(amount=1, n=2, dim=1)\n r._tensor_name = 'test'\n container.add_pruning_method(r)\n # since we are pruning the lowest magnitude of the four columns, the\n # outcome of the calculation should be this:\n expected_mask = torch.tensor([[0, 1, 1, 0], [0, 1, 0, 1]])\n computed_mask = container.compute_mask(t, default_mask)\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(expected_mask, computed_mask)\n\n def test_l1_unstructured_pruning(self):\n r\"\"\"Test that l1 unstructured pruning actually removes the lowest\n entries by l1 norm (by hand). It also checks that applying l1\n unstructured pruning more than once respects the previous mask.\n \"\"\"\n m = nn.Linear(4, 2)\n # modify its weight matrix by hand\n m.weight = torch.nn.Parameter(\n torch.tensor(\n [[1, 2, 3, 4], [-4, -3, -2, -1]], dtype=torch.float32\n )\n )\n\n prune.l1_unstructured(m, 'weight', amount=2)\n expected_weight = torch.tensor([[0, 2, 3, 4], [-4, -3, -2, 0]],\n dtype=m.weight.dtype)\n self.assertEqual(expected_weight, m.weight)\n\n # check that pruning again removes the next two smallest entries\n prune.l1_unstructured(m, 'weight', amount=2)\n expected_weight = torch.tensor([[0, 0, 3, 4], [-4, -3, 0, 0]],\n dtype=m.weight.dtype)\n self.assertEqual(expected_weight, m.weight)\n\n def test_l1_unstructured_pruning_with_importance_scores(self):\n r\"\"\"Test that l1 unstructured pruning actually removes the lowest\n entries of importance scores and not the parameter by l1 norm (by hand).\n It also checks that applying l1 unstructured pruning more than once\n respects the previous mask.\n \"\"\"\n m = nn.Linear(4, 2)\n # modify its weight matrix by hand\n m.weight = torch.nn.Parameter(\n torch.tensor(\n [[1, 2, 3, 4], [-4, -3, -2, -1]], dtype=torch.float32\n )\n )\n importance_scores = torch.tensor(\n [[4, 2, 
1, 3], [-3, -1, -2, -4]], dtype=torch.float32\n )\n\n prune.l1_unstructured(m, 'weight', amount=2, importance_scores=importance_scores)\n expected_weight = torch.tensor([[1, 2, 0, 4], [-4, 0, -2, -1]],\n dtype=m.weight.dtype)\n self.assertEqual(expected_weight, m.weight)\n\n # check that pruning again removes two entries of m.weight that are colocated with\n # the next two smallest absolute values of importance scores.\n prune.l1_unstructured(m, 'weight', amount=2, importance_scores=importance_scores)\n expected_weight = torch.tensor([[1, 0, 0, 4], [-4, 0, 0, -1]],\n dtype=m.weight.dtype)\n self.assertEqual(expected_weight, m.weight)\n\n def test_unstructured_pruning_same_magnitude(self):\n r\"\"\"Since it may happen that the tensor to prune has entries with the\n same exact magnitude, it is important to check that pruning happens\n consistenly based on the bottom % of weights, and not by threshold,\n which would instead kill off *all* units with magnitude = threshold.\n \"\"\"\n AMOUNT = 0.2\n p = prune.L1Unstructured(amount=AMOUNT)\n # create a random tensors with entries in {-2, 0, 2}\n t = 2 * torch.randint(low=-1, high=2, size=(10, 7))\n nparams_toprune = prune._compute_nparams_toprune(AMOUNT, t.nelement())\n\n computed_mask = p.compute_mask(t, default_mask=torch.ones_like(t))\n nparams_pruned = torch.sum(computed_mask == 0)\n self.assertEqual(nparams_toprune, nparams_pruned)\n\n def test_random_structured_pruning_amount(self):\n AMOUNT = 0.6\n AXIS = 2\n p = prune.RandomStructured(amount=AMOUNT, dim=AXIS)\n t = 2 * torch.randint(low=-1, high=2, size=(5, 4, 2)).to(\n dtype=torch.float32\n )\n nparams_toprune = prune._compute_nparams_toprune(AMOUNT, t.shape[AXIS])\n\n computed_mask = p.compute_mask(t, default_mask=torch.ones_like(t))\n # check that 1 column is fully prune, the others are left untouched\n remaining_axes = [_ for _ in range(len(t.shape)) if _ != AXIS]\n per_column_sums = sorted(\n torch.sum(computed_mask == 0, axis=remaining_axes)\n )\n assert 
per_column_sums == [0, 20]

    def test_ln_structured_pruning(self):
        r"""Check Ln structured pruning by hand.
        """
        m = nn.Conv2d(3, 1, 2)
        m.weight.data = torch.tensor(
            [[[[1., 2.], [1., 2.5]],
              [[0.5, 1.], [0.1, 0.1]],
              [[-3., -5.], [0.1, -1.]]]]
        )
        # expected effect of pruning 1 of the 3 channels by L2-norm
        # (channel 1 has the smallest L2 norm of the three above)
        expected_mask_axis1 = torch.ones_like(m.weight)
        expected_mask_axis1[:, 1] = 0.

        prune.ln_structured(m, 'weight', amount=1, n=2, dim=1)
        self.assertEqual(expected_mask_axis1, m.weight_mask)

        # expected effect of pruning 1 of the 2 columns along axis -1 by L1-norm
        # NOTE: this aliases (not copies) the axis-1 mask and zeroes more entries
        # in place — the second pruning call composes with the first mask.
        expected_mask_axis3 = expected_mask_axis1
        expected_mask_axis3[:, :, :, 0] = 0.

        prune.ln_structured(m, 'weight', amount=1, n=1, dim=-1)
        self.assertEqual(expected_mask_axis3, m.weight_mask)

    def test_ln_structured_pruning_importance_scores(self):
        r"""Check Ln structured pruning by hand.
        """
        m = nn.Conv2d(3, 1, 2)
        m.weight.data = torch.tensor(
            [[[[1., 2.], [1., 2.5]],
              [[0.5, 1.], [0.1, 0.1]],
              [[-3., -5.], [0.1, -1.]]]]
        )
        # scores deliberately rank the channels differently from the weights,
        # so the mask must follow the scores, not the weight magnitudes
        importance_scores = torch.tensor(
            [[[[10., 1.], [10., 1.]],
              [[30., 3.], [30., 3.]],
              [[-20., -2.], [-20., -2.]]]]
        )
        # expected effect of pruning 1 of the 3 channels by L2-norm
        expected_mask_axis1 = torch.ones_like(m.weight)
        expected_mask_axis1[:, 0] = 0.

        prune.ln_structured(m, 'weight', amount=1, n=2, dim=1, importance_scores=importance_scores)
        self.assertEqual(expected_mask_axis1, m.weight_mask)

        # expected effect of pruning 1 of the 2 columns along axis -1 by L1-norm
        # (same in-place aliasing/composition pattern as in the test above)
        expected_mask_axis3 = expected_mask_axis1
        expected_mask_axis3[:, :, :, 1] = 0.

        prune.ln_structured(m, 'weight', amount=1, n=1, dim=-1, importance_scores=importance_scores)
        self.assertEqual(expected_mask_axis3, m.weight_mask)

    def test_remove_pruning(self):
        r"""`prune.remove` removes the hook and the reparametrization
        and makes the pruning final in the original parameter.
        """
        modules = [nn.Linear(5, 7), 
nn.Conv3d(2, 2, 2)]\n names = ['weight', 'bias']\n\n for m in modules:\n for name in names:\n with self.subTest(m=m, name=name):\n # first prune\n prune.random_unstructured(m, name, amount=0.5)\n self.assertIn(name + \"_orig\", dict(m.named_parameters()))\n self.assertIn(name + \"_mask\", dict(m.named_buffers()))\n self.assertNotIn(name, dict(m.named_parameters()))\n self.assertTrue(hasattr(m, name))\n pruned_t = getattr(m, name)\n\n # then remove pruning\n prune.remove(m, name)\n self.assertIn(name, dict(m.named_parameters()))\n self.assertNotIn(name + \"_orig\", dict(m.named_parameters()))\n self.assertNotIn(name + \"_mask\", dict(m.named_buffers()))\n final_t = getattr(m, name)\n\n self.assertEqual(pruned_t, final_t)\n\n def test_remove_pruning_exception(self):\n r\"\"\"Removing from an unpruned tensor throws an assertion error\n \"\"\"\n modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]\n names = ['weight', 'bias']\n\n for m in modules:\n for name in names:\n with self.subTest(m=m, name=name):\n # check that the module isn't pruned\n self.assertFalse(prune.is_pruned(m))\n # since it isn't pruned, pruning can't be removed from it\n with self.assertRaises(ValueError):\n prune.remove(m, name)\n\n\n def test_global_pruning(self):\n r\"\"\"Test that global l1 unstructured pruning over 2 parameters removes\n the `amount=4` smallest global weights across the 2 parameters.\n \"\"\"\n m = nn.Linear(4, 2)\n n = nn.Linear(3, 1)\n # modify the weight matrices by hand\n m.weight = torch.nn.Parameter(\n torch.tensor([[1, 2, 3, 4], [-4, -3, -2, -1]]).to(\n dtype=torch.float32)\n )\n n.weight = torch.nn.Parameter(\n torch.tensor([[0, 0.1, -2]]).to(\n dtype=torch.float32)\n )\n\n params_to_prune = (\n (m, 'weight'),\n (n, 'weight'),\n )\n\n # prune the 4 smallest weights globally by L1 magnitude\n prune.global_unstructured(\n params_to_prune,\n pruning_method=prune.L1Unstructured,\n amount=4\n )\n\n expected_mweight = torch.tensor([[0, 2, 3, 4], [-4, -3, -2, 0]],\n 
dtype=m.weight.dtype)\n self.assertEqual(expected_mweight, m.weight)\n\n expected_nweight = torch.tensor([[0, 0, -2]]).to(dtype=n.weight.dtype)\n self.assertEqual(expected_nweight, n.weight)\n\n def test_global_pruning_importance_scores(self):\n r\"\"\"Test that global l1 unstructured pruning over 2 parameters removes\n the `amount=4` smallest global weights across the 2 parameters.\n \"\"\"\n m = nn.Linear(4, 2)\n n = nn.Linear(3, 1)\n # modify the weight matrices by hand\n m.weight = torch.nn.Parameter(\n torch.tensor([[1, 2, 3, 4], [-4, -3, -2, -1]]).to(\n dtype=torch.float32)\n )\n m_importance_scores = torch.tensor(\n [[4, 2, 1, 3], [-3, -1, -2, -4]], dtype=torch.float32\n )\n n.weight = torch.nn.Parameter(\n torch.tensor([[0, 0.1, -2]]).to(\n dtype=torch.float32)\n )\n n_importance_scores = torch.tensor([[0, 10., -0.2]]).to(dtype=torch.float32)\n\n params_to_prune = (\n (m, 'weight'),\n (n, 'weight'),\n )\n importance_scores = {\n (m, 'weight'): m_importance_scores,\n (n, 'weight'): n_importance_scores,\n }\n\n # prune the 4 smallest weights globally by L1 magnitude\n prune.global_unstructured(\n params_to_prune,\n pruning_method=prune.L1Unstructured,\n amount=4,\n importance_scores=importance_scores,\n )\n\n expected_m_weight = torch.tensor([[1, 2, 0, 4], [-4, 0, -2, -1]],\n dtype=m.weight.dtype)\n self.assertEqual(expected_m_weight, m.weight)\n\n expected_n_weight = torch.tensor([[0, 0.1, 0]]).to(dtype=n.weight.dtype)\n self.assertEqual(expected_n_weight, n.weight)\n\n def test_custom_from_mask_pruning(self):\n r\"\"\"Test that the CustomFromMask is capable of receiving\n as input at instantiation time a custom mask, and combining it with\n the previous default mask to generate the correct final mask.\n \"\"\"\n # new mask\n mask = torch.tensor([[0, 1, 1, 0], [0, 0, 1, 1]])\n # old mask\n default_mask = torch.tensor([[0, 0, 0, 0], [1, 1, 1, 1]])\n\n # some tensor (not actually used)\n t = torch.rand_like(mask.to(dtype=torch.float32))\n\n p = 
prune.CustomFromMask(mask=mask)\n\n computed_mask = p.compute_mask(t, default_mask)\n expected_mask = torch.tensor([[0, 0, 0, 0], [0, 0, 1, 1]]).to(\n dtype=t.dtype\n )\n\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(computed_mask, expected_mask)\n\n def test_pruning_rollback(self):\n r\"\"\"Test that if something fails when the we try to compute the mask,\n then the model isn't left in some intermediate half-pruned state.\n The try/except statement in `apply` should handle rolling back\n to the previous state before pruning began.\n \"\"\"\n modules = [nn.Linear(5, 7), nn.Conv3d(2, 2, 2)]\n names = ['weight', 'bias']\n\n for m in modules:\n for name in names:\n with self.subTest(m=m, name=name):\n\n with mock.patch(\n \"torch.nn.utils.prune.L1Unstructured.compute_mask\"\n ) as compute_mask:\n compute_mask.side_effect = Exception('HA!')\n with self.assertRaises(Exception):\n prune.l1_unstructured(m, name=name, amount=0.9)\n\n self.assertTrue(\n name in dict(m.named_parameters())\n )\n self.assertFalse(\n name + '_mask' in dict(m.named_buffers())\n )\n self.assertFalse(\n name + '_orig' in dict(m.named_parameters())\n )\n\n def test_pruning_serialization_model(self):\n # create a model\n model = torch.nn.Sequential(\n torch.nn.Linear(10, 10),\n torch.nn.ReLU(),\n torch.nn.Linear(10, 1),\n )\n # check that everything looks normal before pruning\n self.assertNotIn('0.weight_orig', model.state_dict())\n self.assertNotIn('0.weight_mask', model.state_dict())\n self.assertIn('0.weight', model.state_dict())\n\n # prune one of its parameters\n prune.l1_unstructured(module=model[0], name='weight', amount=0.9)\n\n # check that the original weight and the new mask are present\n self.assertIn('0.weight_orig', model.state_dict())\n self.assertIn('0.weight_mask', model.state_dict())\n self.assertNotIn('0.weight', model.state_dict())\n self.assertTrue(hasattr(model[0], 'weight'))\n\n pruned_weight = model[0].weight\n\n with 
TemporaryFileName() as fname:\n torch.save(model, fname)\n new_model = torch.load(fname)\n\n # check that the original weight and the new mask are present\n self.assertIn('0.weight_orig', new_model.state_dict())\n self.assertIn('0.weight_mask', new_model.state_dict())\n self.assertNotIn('0.weight', new_model.state_dict())\n self.assertTrue(hasattr(new_model[0], 'weight'))\n\n self.assertEqual(pruned_weight, new_model[0].weight)\n\n def test_pruning_serialization_state_dict(self):\n # create a model\n model = torch.nn.Sequential(\n torch.nn.Linear(10, 10),\n torch.nn.ReLU(),\n torch.nn.Linear(10, 1),\n )\n # check that everything looks normal before pruning\n self.assertNotIn('0.weight_orig', model.state_dict())\n self.assertNotIn('0.weight_mask', model.state_dict())\n self.assertIn('0.weight', model.state_dict())\n\n # prune one of its parameters\n prune.l1_unstructured(module=model[0], name='weight', amount=0.9)\n\n # check that the original weight and the new mask are present\n self.assertIn('0.weight_orig', model.state_dict())\n self.assertIn('0.weight_mask', model.state_dict())\n self.assertNotIn('0.weight', model.state_dict())\n self.assertTrue(hasattr(model[0], 'weight'))\n\n pruned_weight = model[0].weight\n\n # make pruning permanent and restore parameter names as in base\n # architecture\n prune.remove(module=model[0], name='weight')\n\n # check that the original weight and the new mask are no longer present\n self.assertNotIn('0.weight_orig', model.state_dict())\n self.assertNotIn('0.weight_mask', model.state_dict())\n self.assertIn('0.weight', model.state_dict())\n\n # save the state dict of model and reload it into new_model\n new_model = torch.nn.Sequential(\n torch.nn.Linear(10, 10),\n torch.nn.ReLU(),\n torch.nn.Linear(10, 1),\n )\n with TemporaryFileName() as fname:\n torch.save(model.state_dict(), fname)\n new_model.load_state_dict(torch.load(fname))\n\n # check that the original weight and the new mask are not present in\n # new_model either.\n 
self.assertNotIn('0.weight_orig', new_model.state_dict())\n self.assertNotIn('0.weight_mask', new_model.state_dict())\n self.assertIn('0.weight', new_model.state_dict())\n\n self.assertEqual(pruned_weight, new_model[0].weight)\n\n def test_prune(self):\n # create a new pruning method\n p = prune.L1Unstructured(amount=2)\n # create tensor to be pruned\n t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)\n # create prior mask by hand\n default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])\n # since we are pruning the two lowest magnitude units, the outcome of\n # the calculation should be this:\n expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])\n pruned_tensor = p.prune(t, default_mask)\n self.assertEqual(t * expected_mask, pruned_tensor)\n\n def test_prune_importance_scores(self):\n # create a new pruning method\n p = prune.L1Unstructured(amount=2)\n # create tensor to be pruned\n t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)\n importance_scores = torch.tensor(\n [[1, 2, 3, 4], [1.5, 1.6, 1.7, 1.8]]\n ).to(dtype=torch.float32)\n # create prior mask by hand\n default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])\n # since we are pruning the two lowest magnitude units, the outcome of\n # the calculation should be this:\n expected_mask = torch.tensor([[0, 1, 1, 0], [0, 1, 0, 1]])\n pruned_tensor = p.prune(t, default_mask, importance_scores=importance_scores)\n self.assertEqual(t * expected_mask, pruned_tensor)\n\n def test_prune_importance_scores_mimic_default(self):\n # create a new pruning method\n p = prune.L1Unstructured(amount=2)\n # create tensor to be pruned\n t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).to(dtype=torch.float32)\n # create prior mask by hand\n default_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 1]])\n # since we are pruning the two lowest magnitude units, the outcome of\n # the calculation should be this:\n expected_mask = torch.tensor([[0, 0, 1, 0], [1, 1, 0, 1]])\n 
pruned_tensor_without_importance_scores = p.prune(t, default_mask)\n pruned_tensor_with_importance_scores = p.prune(t, default_mask, importance_scores=t)\n self.assertEqual(pruned_tensor_without_importance_scores, pruned_tensor_with_importance_scores)\n self.assertEqual(t * expected_mask, pruned_tensor_without_importance_scores)\n\n def test_rnn_pruning(self):\n l = torch.nn.LSTM(32, 32)\n # This Module has 4 parameters called:\n # 'weight_ih_l0', 'weight_hh_l0', 'bias_ih_l0', 'bias_hh_l0'\n\n # Pruning one of them causes one of the weights to become a tensor\n prune.l1_unstructured(l, 'weight_ih_l0', 0.5)\n assert (\n sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights])\n == 3\n )\n\n # Removing the pruning reparametrization restores the Parameter\n prune.remove(l, 'weight_ih_l0')\n assert (\n sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights])\n == 4\n )\n\n # Make sure that, upon removal of the reparametrization, the\n # `._parameters` and `.named_parameters` contain the right params.\n # Specifically, the original weight ('weight_ih_l0') should be placed\n # back in the parameters, while the reparametrization component\n # ('weight_ih_l0_orig') should be removed.\n assert 'weight_ih_l0' in l._parameters\n assert l._parameters['weight_ih_l0'] is not None\n assert 'weight_ih_l0_orig' not in l._parameters\n assert 'weight_ih_l0' in dict(l.named_parameters())\n assert dict(l.named_parameters())['weight_ih_l0'] is not None\n assert 'weight_ih_l0_orig' not in dict(l.named_parameters())\n\n\n def test_rnn_weight_norm(self):\n def check_weight_norm(l, name, num_params):\n # This Module has 4 or 5 parameters called:\n # 'weight_ih_l0', 'weight_hh_l0', 'bias_ih_l0', 'bias_hh_l0', weight_hr_l0\n\n # Applying weight norm on one of them causes it to become a tensor\n l = torch.nn.utils.weight_norm(l, name=name)\n self.assertEqual(\n sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]),\n num_params - 1,\n )\n\n # Removing the weight 
norm reparametrization restores the Parameter\n l = torch.nn.utils.remove_weight_norm(l, name=name)\n self.assertEqual(\n sum([isinstance(p, torch.nn.Parameter) for p in l._flat_weights]),\n num_params,\n )\n\n # Make sure that, upon removal of the reparametrization, the\n # `._parameters` and `.named_parameters` contain the right params.\n # Specifically, the original weight ('weight_ih_l0') should be placed\n # back in the parameters, while the reparametrization components\n # ('weight_ih_l0_v' and 'weight_ih_l0_g') should be removed.\n self.assertTrue(name in l._parameters)\n self.assertIsNotNone(l._parameters[name])\n self.assertTrue(name + '_v' not in l._parameters)\n self.assertTrue(name + '_g' not in l._parameters)\n self.assertTrue(name in dict(l.named_parameters()))\n self.assertIsNotNone(dict(l.named_parameters())[name])\n self.assertTrue(name + '_v' not in dict(l.named_parameters()))\n self.assertTrue(name + '_g' not in dict(l.named_parameters()))\n\n check_weight_norm(torch.nn.LSTM(32, 32), 'weight_ih_l0', 4)\n check_weight_norm(torch.nn.LSTM(32, 32, proj_size=16), 'weight_hr_l0', 5)\n\n\n def test_weight_norm(self):\n input = torch.randn(3, 5)\n m = nn.Linear(5, 7)\n expected_output = m(input)\n\n # add weight normalization\n m = torch.nn.utils.weight_norm(m)\n self.assertEqual(m.weight_v.size(), m.weight.size())\n self.assertEqual(m.weight_g.size(), (7, 1))\n self.assertEqual(m(input), expected_output)\n\n # remove weight norm\n m = torch.nn.utils.remove_weight_norm(m)\n self.assertFalse(hasattr(m, 'weight_g'))\n self.assertFalse(hasattr(m, 'weight_v'))\n self.assertEqual(m(input), expected_output)\n\n # test with dim=1\n m = torch.nn.utils.weight_norm(m, dim=1)\n self.assertEqual(m.weight_v.size(), m.weight.size())\n self.assertEqual(m.weight_g.size(), (1, 5))\n self.assertEqual(m(input), expected_output)\n\n # test with dim=None\n m = nn.Linear(5, 7)\n expected_output = m(input)\n m = torch.nn.utils.weight_norm(m, dim=None)\n 
self.assertEqual(m(input), expected_output)\n\n with self.assertRaisesRegex(RuntimeError, 'register two weight_norm hooks'):\n m = torch.nn.utils.weight_norm(m)\n m = torch.nn.utils.weight_norm(m)\n\n def test_parameterlistdict_setting_attributes(self):\n with warnings.catch_warnings(record=True) as w:\n mod = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))\n self.assertTrue(len(w) == 0)\n\n with warnings.catch_warnings(record=True) as w:\n mod.train()\n mod.eval()\n self.assertTrue(len(w) == 0)\n\n with warnings.catch_warnings(record=True) as w:\n mod = nn.ParameterDict({\"a\": nn.Parameter(torch.rand(2)), \"b\": nn.Parameter(torch.rand(2))})\n self.assertTrue(len(w) == 0)\n\n with warnings.catch_warnings(record=True) as w:\n mod.train()\n mod.eval()\n self.assertTrue(len(w) == 0)\n\n def test_parameterlistdict_pickle(self):\n m = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))\n with warnings.catch_warnings(record=True) as w:\n m = pickle.loads(pickle.dumps(m))\n self.assertTrue(len(w) == 0)\n\n # Test whether loading from older checkpoints works without triggering warnings\n m = nn.ParameterList(map(nn.Parameter, [torch.rand(2), torch.rand(2)]))\n del m._forward_pre_hooks, m._state_dict_hooks, m._load_state_dict_pre_hooks, m._non_persistent_buffers_set\n with warnings.catch_warnings(record=True) as w:\n m = pickle.loads(pickle.dumps(m))\n self.assertTrue(len(w) == 0)\n\n m = nn.ParameterDict({\"a\": nn.Parameter(torch.rand(2)), \"b\": nn.Parameter(torch.rand(2))})\n with warnings.catch_warnings(record=True) as w:\n m = pickle.loads(pickle.dumps(m))\n self.assertTrue(len(w) == 0)\n\n # Test whether loading from older checkpoints works without triggering warnings\n m = nn.ParameterDict({\"a\": nn.Parameter(torch.rand(2)), \"b\": nn.Parameter(torch.rand(2))})\n del m._forward_pre_hooks, m._state_dict_hooks, m._load_state_dict_pre_hooks, m._non_persistent_buffers_set\n with warnings.catch_warnings(record=True) as w:\n m = 
pickle.loads(pickle.dumps(m))\n self.assertTrue(len(w) == 0)\n\n def test_weight_norm_pickle(self):\n m = torch.nn.utils.weight_norm(nn.Linear(5, 7))\n m = pickle.loads(pickle.dumps(m))\n self.assertIsInstance(m, nn.Linear)\n\n def test_spectral_norm(self):\n input = torch.randn(3, 5)\n m = nn.Linear(5, 7)\n m = torch.nn.utils.spectral_norm(m)\n\n self.assertEqual(m.weight_u.size(), torch.Size([m.weight.size(0)]))\n # weight_orig should be trainable\n self.assertTrue(hasattr(m, 'weight_orig'))\n self.assertTrue('weight_orig' in m._parameters)\n # weight_u should be just a reused buffer\n self.assertTrue(hasattr(m, 'weight_u'))\n self.assertTrue('weight_u' in m._buffers)\n self.assertTrue('weight_v' in m._buffers)\n # weight should be a plain attribute, not counted as a buffer or a param\n self.assertFalse('weight' in m._buffers)\n self.assertFalse('weight' in m._parameters)\n # it should also be sharing storage as `weight_orig`\n self.assertEqual(m.weight_orig.storage(), m.weight.storage())\n self.assertEqual(m.weight_orig.size(), m.weight.size())\n self.assertEqual(m.weight_orig.stride(), m.weight.stride())\n\n m = torch.nn.utils.remove_spectral_norm(m)\n self.assertFalse(hasattr(m, 'weight_orig'))\n self.assertFalse(hasattr(m, 'weight_u'))\n # weight should be converted back as a parameter\n self.assertTrue(hasattr(m, 'weight'))\n self.assertTrue('weight' in m._parameters)\n\n with self.assertRaisesRegex(RuntimeError, 'register two spectral_norm hooks'):\n m = torch.nn.utils.spectral_norm(m)\n m = torch.nn.utils.spectral_norm(m)\n\n # test correctness in training/eval modes and cpu/multi-gpu settings\n for apply_dp in (True, False):\n if apply_dp:\n if not TEST_MULTIGPU:\n continue\n device = torch.device('cuda:0')\n\n def maybe_wrap(m):\n return torch.nn.DataParallel(m, [0, 1])\n else:\n device = torch.device('cpu')\n\n def maybe_wrap(m):\n return m\n\n for requires_grad in (True, False):\n m = nn.Linear(3, 4).to(device)\n 
m.weight.requires_grad_(requires_grad)\n m = torch.nn.utils.spectral_norm(m)\n wrapped_m = maybe_wrap(m)\n self.assertTrue(hasattr(m, 'weight_u'))\n u0 = m.weight_u.clone()\n v0 = m.weight_v.clone()\n\n # TEST TRAINING BEHAVIOR\n\n # assert that u and v are updated\n input = torch.randn(2, 3, device=device)\n out = wrapped_m(input)\n self.assertNotEqual(u0, m.weight_u)\n self.assertNotEqual(v0, m.weight_v)\n\n # assert that backprop reaches weight_orig\n # can't use gradcheck because the function changes as we\n # activate through it in training mode\n if requires_grad:\n torch.autograd.grad(out.sum(), m.weight_orig)\n\n # test backward works with multiple forwards\n # it uses training mode so we need to reset `u` and `v` vectors\n # to same value at beginning for finite difference test to pass\n saved_u = m.weight_u.clone()\n saved_v = m.weight_v.clone()\n\n def fn(input):\n m.weight_u.data.copy_(saved_u)\n m.weight_v.data.copy_(saved_v)\n out0 = wrapped_m(input)\n out1 = wrapped_m(input)\n return out0 + out1\n\n gradcheck(fn, (input.clone().requires_grad_(),), check_batched_grad=False)\n\n # test removing\n pre_remove_out = wrapped_m(input)\n m = torch.nn.utils.remove_spectral_norm(m)\n self.assertEqual(wrapped_m(input), pre_remove_out)\n\n m = torch.nn.utils.spectral_norm(m)\n for _ in range(3):\n pre_remove_out = wrapped_m(input)\n m = torch.nn.utils.remove_spectral_norm(m)\n self.assertEqual(wrapped_m(input), pre_remove_out)\n\n # TEST EVAL BEHAVIOR\n\n m = torch.nn.utils.spectral_norm(m)\n wrapped_m(input)\n last_train_out = wrapped_m(input)\n last_train_u = m.weight_u.clone()\n last_train_v = m.weight_v.clone()\n wrapped_m.zero_grad()\n wrapped_m.eval()\n\n eval_out0 = wrapped_m(input)\n # assert eval gives same result as last training iteration\n self.assertEqual(eval_out0, last_train_out)\n # assert doing more iteartion in eval don't change things\n self.assertEqual(eval_out0, wrapped_m(input))\n self.assertEqual(last_train_u, m.weight_u)\n 
self.assertEqual(last_train_v, m.weight_v)\n\n # FIXME: the code below is flaky when executed with DataParallel\n # see https://github.com/pytorch/pytorch/issues/13818\n if apply_dp:\n continue\n\n # test backward works with multiple forwards in mixed training\n # and eval modes\n # it uses training mode so we need to reset `u` and `v` vectors\n # to same value at beginning for finite difference test to pass\n saved_u = m.weight_u.clone()\n saved_v = m.weight_v.clone()\n\n def fn(input):\n m.weight_u.data.copy_(saved_u)\n m.weight_v.data.copy_(saved_v)\n wrapped_m.train()\n out0 = wrapped_m(input)\n wrapped_m.eval()\n out1 = wrapped_m(input)\n wrapped_m.train()\n out2 = wrapped_m(input)\n wrapped_m.eval()\n out3 = wrapped_m(input)\n return out0 + out1 + out2 + out3\n\n gradcheck(fn, (input.clone().requires_grad_(),))\n\n # assert that backprop reaches weight_orig in eval\n if requires_grad:\n def fn(weight):\n return wrapped_m(input)\n\n gradcheck(fn, (m.weight_orig,))\n\n def test_new_spectral_norm(self):\n input = torch.randn(3, 5)\n m = nn.Linear(5, 7)\n m = torch.nn.utils.parametrizations.spectral_norm(m)\n spectral_norm_m = m.parametrizations.weight[0]\n\n self.assertEqual(spectral_norm_m._u.size(), torch.Size([m.weight.size(0)]))\n\n # .parametrizations.weight.original should be trainable\n self.assertTrue(hasattr(m.parametrizations.weight, 'original'))\n self.assertTrue('original' in m.parametrizations.weight._parameters)\n\n # u should be just a reused buffer\n self.assertTrue(hasattr(spectral_norm_m, '_u'))\n self.assertTrue('_u' in spectral_norm_m._buffers)\n self.assertTrue('_v' in spectral_norm_m._buffers)\n\n # weight should be a plain attribute, not counted as a buffer or a param\n self.assertIsNotNone(m.weight)\n self.assertFalse('weight' in m._buffers)\n self.assertFalse('weight' in m._parameters)\n\n # it should also be sharing storage as `weight_orig`\n # self.assertEqual(m.parametrizations.weight.original.storage(), m.weight.storage())\n 
self.assertEqual(m.parametrizations.weight.original.size(), m.weight.size())\n self.assertEqual(m.parametrizations.weight.original.stride(), m.weight.stride())\n\n m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')\n\n # spectral_norm is the only parametrization\n self.assertFalse(hasattr(m, 'parametrizations'))\n self.assertTrue('weight' in m._parameters)\n\n # We can register spectral_norm multiple times on the same parameter\n # and on multiple parameters in the same module\n m = torch.nn.utils.parametrizations.spectral_norm(m, 'weight')\n m = torch.nn.utils.parametrizations.spectral_norm(m, 'weight')\n m = torch.nn.utils.parametrizations.spectral_norm(m, 'bias')\n\n # If we remove the parametrization on bias, weight is still parametrized\n # Removing a parametrization runs forward in eval mode if leave_parametrized=True\n m = torch.nn.utils.parametrize.remove_parametrizations(m, 'bias')\n self.assertTrue('bias' in m._parameters)\n self.assertTrue(hasattr(m, 'parametrizations'))\n self.assertFalse('weight' in m._parameters)\n\n m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')\n # Neither weight and bias are parametrized\n self.assertFalse(hasattr(m, 'parametrizations'))\n self.assertTrue('weight' in m._parameters)\n self.assertFalse(torch.nn.utils.parametrize.is_parametrized(m))\n\n # test correctness in training/eval modes and cpu/multi-gpu settings\n for apply_dp in (True, False):\n if apply_dp:\n if not TEST_MULTIGPU:\n continue\n device = torch.device('cuda:0')\n\n def maybe_wrap(m):\n return torch.nn.DataParallel(m, [0, 1])\n else:\n device = torch.device('cpu')\n\n def maybe_wrap(m):\n return m\n\n for requires_grad in (True, False):\n def get_modules():\n m = nn.Linear(3, 4).to(device)\n m.weight.requires_grad_(requires_grad)\n m = torch.nn.utils.parametrizations.spectral_norm(m)\n wrapped_m = maybe_wrap(m)\n spectral_norm_m = m.parametrizations.weight[0]\n return m, wrapped_m, spectral_norm_m\n\n input = torch.randn(2, 
3, device=device)\n\n m, wrapped_m, spectral_norm_m = get_modules()\n\n self.assertTrue(hasattr(spectral_norm_m, '_u'))\n u0 = spectral_norm_m._u.clone()\n v0 = spectral_norm_m._v.clone()\n\n # TEST TRAINING BEHAVIOR\n\n # We perform GD first to modify the initial matrix\n opt = torch.optim.SGD(wrapped_m.parameters(), lr=0.1)\n\n opt.zero_grad()\n wrapped_m(input).sum().backward()\n opt.step()\n\n out = wrapped_m(input)\n if requires_grad:\n # run forward again and assert that u and v are updated\n self.assertNotEqual(u0, spectral_norm_m._u)\n self.assertNotEqual(v0, spectral_norm_m._v)\n\n # assert that backprop reaches original weight\n # can't use gradcheck because the function changes as we\n # activate through it in training mode\n if requires_grad:\n torch.autograd.grad(out.sum(), m.parametrizations.weight.original)\n\n # test backward works with multiple forwards\n # it uses training mode so we need to reset `u` and `v` vectors\n # to same value at beginning for finite difference test to pass\n saved_u = spectral_norm_m._u.clone()\n saved_v = spectral_norm_m._v.clone()\n\n def fn(input):\n spectral_norm_m._u.data.copy_(saved_u)\n spectral_norm_m._v.data.copy_(saved_v)\n out0 = wrapped_m(input)\n out1 = wrapped_m(input)\n return out0 + out1\n\n # Make sure we can compute gradients wrt to all the parameters in the case\n # of double forward\n fn(input.clone().requires_grad_()).sum().backward()\n gradcheck(fn, (input.clone().requires_grad_(),), check_batched_grad=False)\n\n # test removing\n # spectral norm module needs to be in eval mode if we'd like to\n # avoid doing another power iteration\n m, wrapped_m, _ = get_modules()\n pre_remove_out = wrapped_m(input)\n m.eval()\n m = torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')\n self.assertEqual(wrapped_m(input), pre_remove_out)\n\n torch.nn.utils.parametrizations.spectral_norm(m)\n for _ in range(3):\n pre_remove_out = wrapped_m(input)\n m.eval()\n m = 
torch.nn.utils.parametrize.remove_parametrizations(m, 'weight')\n self.assertEqual(wrapped_m(input), pre_remove_out)\n\n # TEST EVAL BEHAVIOR\n m, wrapped_m, spectral_norm_m = get_modules()\n wrapped_m(input)\n last_train_out = wrapped_m(input)\n last_train_u = spectral_norm_m._u.clone()\n last_train_v = spectral_norm_m._v.clone()\n wrapped_m.zero_grad()\n wrapped_m.eval()\n\n eval_out0 = wrapped_m(input)\n # assert eval gives same result as last training iteration\n self.assertEqual(eval_out0, last_train_out)\n # assert doing more iteartion in eval don't change things\n self.assertEqual(eval_out0, wrapped_m(input))\n self.assertEqual(last_train_u, spectral_norm_m._u)\n self.assertEqual(last_train_v, spectral_norm_m._v)\n\n # FIXME: the code below is flaky when executed with DataParallel\n # see https://github.com/pytorch/pytorch/issues/13818\n if apply_dp:\n continue\n\n # test backward works with multiple forwards in mixed training\n # and eval modes\n # it uses training mode so we need to reset `u` and `v` vectors\n # to same value at beginning for finite difference test to pass\n saved_u = spectral_norm_m._u.clone()\n saved_v = spectral_norm_m._v.clone()\n\n def fn(input):\n spectral_norm_m._u.data.copy_(saved_u)\n spectral_norm_m._v.data.copy_(saved_v)\n wrapped_m.train()\n out0 = wrapped_m(input)\n wrapped_m.eval()\n out1 = wrapped_m(input)\n wrapped_m.train()\n out2 = wrapped_m(input)\n wrapped_m.eval()\n out3 = wrapped_m(input)\n return out0 + out1 + out2 + out3\n\n gradcheck(fn, (input.clone().requires_grad_(),))\n\n # assert that backprop reaches weight_orig in eval\n if requires_grad:\n def fn(weight):\n return wrapped_m(input)\n\n gradcheck(fn, (m.parametrizations.weight.original,))\n\n def test_new_spectral_norm_load_state_dict(self):\n for activate_times in (0, 3):\n inp = torch.randn(2, 3)\n m = nn.Linear(3, 5)\n snm = torch.nn.utils.parametrizations.spectral_norm(m)\n snm.train()\n\n for _ in range(activate_times):\n snm(inp)\n\n state_dict = 
deepcopy(snm.state_dict())\n self.assertEqual({\n 'parametrizations.weight.original',\n 'bias',\n 'parametrizations.weight.0._v',\n 'parametrizations.weight.0._u'\n }, set(state_dict.keys()))\n\n # test that non-strict loading works\n non_strict_state_dict = deepcopy(state_dict)\n non_strict_state_dict['nonsense'] = 'nonsense'\n with self.assertRaisesRegex(RuntimeError, r'Unexpected key\\(s\\) in state_dict: \"nonsense\"'):\n snm.load_state_dict(non_strict_state_dict, strict=True)\n snm.load_state_dict(non_strict_state_dict, strict=False)\n del non_strict_state_dict['parametrizations.weight.original']\n snm.load_state_dict(non_strict_state_dict, strict=False)\n del non_strict_state_dict['parametrizations.weight.0._u']\n snm.load_state_dict(non_strict_state_dict, strict=False)\n del non_strict_state_dict['parametrizations.weight.0._v']\n snm.load_state_dict(non_strict_state_dict, strict=False)\n non_strict_state_dict['weight'] = snm.weight.detach().clone() # set W as a buffer\n snm.load_state_dict(non_strict_state_dict, strict=False)\n del non_strict_state_dict._metadata['parametrizations.weight.0'] # remove metadata info\n snm.load_state_dict(non_strict_state_dict, strict=False)\n del non_strict_state_dict['weight'] # remove W buffer\n snm.load_state_dict(non_strict_state_dict, strict=False)\n del non_strict_state_dict['bias']\n snm.load_state_dict(non_strict_state_dict, strict=False)\n\n # normal state_dict\n\n # test that re-wrapping does not matter\n m = torch.nn.utils.parametrize.remove_parametrizations(snm, 'weight')\n snm = torch.nn.utils.parametrizations.spectral_norm(m)\n\n snm.load_state_dict(state_dict)\n with torch.no_grad():\n snm.eval()\n out0_eval = snm(inp)\n snm.train()\n out1_train = snm(inp)\n out2_train = snm(inp)\n snm.eval()\n out3_eval = snm(inp)\n\n # test that re-wrapping does not matter\n m = torch.nn.utils.parametrize.remove_parametrizations(snm, 'weight')\n snm = torch.nn.utils.parametrizations.spectral_norm(m)\n\n # Test normal loading\n 
snm.load_state_dict(state_dict)\n with torch.no_grad():\n snm.eval()\n self.assertEqual(out0_eval, snm(inp))\n snm.train()\n self.assertEqual(out1_train, snm(inp))\n self.assertEqual(out2_train, snm(inp))\n snm.eval()\n self.assertEqual(out3_eval, snm(inp))\n\n @skipIfNoLapack\n def test_spectral_norm_load_state_dict(self):\n inp = torch.randn(2, 3)\n for activate_times in (0, 3):\n # Test backward compatibility\n # At version None -> 1: weight becomes not a buffer and v vector becomes a buffer\n m = nn.Linear(3, 5)\n snm = torch.nn.utils.spectral_norm(m)\n snm.train()\n for _ in range(activate_times):\n snm(inp)\n\n version_latest_ref_state_dict = deepcopy(snm.state_dict())\n self.assertEqual({'weight_orig', 'bias', 'weight_u', 'weight_v'}, set(version_latest_ref_state_dict.keys()))\n\n # test that non-strict loading works\n non_strict_state_dict = deepcopy(version_latest_ref_state_dict)\n non_strict_state_dict['nonsense'] = 'nonsense'\n with self.assertRaisesRegex(RuntimeError, r'Unexpected key\\(s\\) in state_dict: \"nonsense\"'):\n snm.load_state_dict(non_strict_state_dict, strict=True)\n snm.load_state_dict(non_strict_state_dict, strict=False)\n del non_strict_state_dict['weight_orig']\n snm.load_state_dict(non_strict_state_dict, strict=False)\n del non_strict_state_dict['weight_u']\n snm.load_state_dict(non_strict_state_dict, strict=False)\n del non_strict_state_dict['weight_v']\n snm.load_state_dict(non_strict_state_dict, strict=False)\n non_strict_state_dict['weight'] = snm.weight.detach().clone() # set W as a buffer\n snm.load_state_dict(non_strict_state_dict, strict=False)\n del non_strict_state_dict._metadata['']['spectral_norm'] # remove metadata info\n snm.load_state_dict(non_strict_state_dict, strict=False)\n del non_strict_state_dict['weight'] # remove W buffer\n snm.load_state_dict(non_strict_state_dict, strict=False)\n del non_strict_state_dict['bias']\n snm.load_state_dict(non_strict_state_dict, strict=False)\n\n # craft a version None 
state_dict\n version_none_state_dict = deepcopy(version_latest_ref_state_dict)\n self.assertIn('spectral_norm', version_none_state_dict._metadata[''])\n del version_none_state_dict._metadata['']['spectral_norm'] # remove metadata info\n del version_none_state_dict['weight_v'] # remove v vector\n version_none_state_dict['weight'] = snm.weight.detach().clone() # set W as a buffer\n\n # normal state_dict\n for version_latest_with_metadata in [True, False]:\n version_latest_state_dict = deepcopy(version_latest_ref_state_dict)\n\n if not version_latest_with_metadata:\n # We want to still load a user-crafted state_dict, one without metadata\n del version_latest_state_dict._metadata['']['spectral_norm']\n\n # test that re-wrapping does not matter\n m = torch.nn.utils.remove_spectral_norm(snm)\n snm = torch.nn.utils.spectral_norm(m)\n\n snm.load_state_dict(version_latest_ref_state_dict)\n with torch.no_grad():\n snm.eval()\n out0_eval = snm(inp)\n snm.train()\n out1_train = snm(inp)\n out2_train = snm(inp)\n snm.eval()\n out3_eval = snm(inp)\n\n # test that re-wrapping does not matter\n m = torch.nn.utils.remove_spectral_norm(snm)\n snm = torch.nn.utils.spectral_norm(m)\n\n snm.load_state_dict(version_none_state_dict)\n if activate_times > 0:\n # since in loading version None state dict, we assume that the\n # values in the state dict have gone through at lease one\n # forward, we only test for equivalence when activate_times > 0.\n with torch.no_grad():\n snm.eval()\n self.assertEqual(out0_eval, snm(inp))\n snm.train()\n self.assertEqual(out1_train, snm(inp))\n self.assertEqual(out2_train, snm(inp))\n snm.eval()\n self.assertEqual(out3_eval, snm(inp))\n\n # test that re-wrapping does not matter\n m = torch.nn.utils.remove_spectral_norm(snm)\n snm = torch.nn.utils.spectral_norm(m)\n\n # Test normal loading\n snm.load_state_dict(version_latest_state_dict)\n with torch.no_grad():\n snm.eval()\n self.assertEqual(out0_eval, snm(inp))\n snm.train()\n 
self.assertEqual(out1_train, snm(inp))\n self.assertEqual(out2_train, snm(inp))\n snm.eval()\n self.assertEqual(out3_eval, snm(inp))\n\n def test_spectral_norm_dim(self):\n inp = torch.randn(2, 3, 10, 12)\n m = nn.ConvTranspose2d(3, 4, (5, 6))\n m = torch.nn.utils.spectral_norm(m)\n # this should not run into incompatible shapes\n x = m(inp)\n # check that u refers to the same dimension\n self.assertEqual(m.weight_u.shape, m.weight_orig[0, :, 0, 0].shape)\n\n def test_new_spectral_norm_dim(self):\n inp = torch.randn(2, 3, 10, 12)\n m = nn.ConvTranspose2d(3, 4, (5, 6))\n m = torch.nn.utils.parametrizations.spectral_norm(m)\n snm = m.parametrizations.weight[0]\n # this should not run into incompatible shapes\n x = m(inp)\n # check that u refers to the same dimension\n self.assertEqual(snm._u.shape, m.parametrizations.weight.original[0, :, 0, 0].shape)\n\n def test_spectral_norm_forward(self):\n input = torch.randn(3, 5)\n m = nn.Linear(5, 7)\n m = torch.nn.utils.spectral_norm(m)\n # naive forward\n _weight, _bias, _u = m.weight_orig, m.bias, m.weight_u\n _weight_mat = _weight.view(_weight.size(0), -1)\n _v = torch.mv(_weight_mat.t(), _u)\n _v = F.normalize(_v, dim=0, eps=1e-12)\n _u = torch.mv(_weight_mat, _v)\n _u = F.normalize(_u, dim=0, eps=1e-12)\n _weight.data /= torch.dot(_u, torch.matmul(_weight_mat, _v))\n out_hat = torch.nn.functional.linear(input, _weight, _bias)\n expect_out = m(input)\n self.assertEqual(expect_out, out_hat)\n\n def test_new_spectral_norm_forward(self):\n input = torch.randn(3, 5)\n m = nn.Linear(5, 7)\n m = torch.nn.utils.parametrizations.spectral_norm(m)\n snm = m.parametrizations.weight[0]\n # naive forward\n _weight = m.parametrizations.weight.original\n _bias, _v = m.bias, snm._v\n _weight_mat = _weight.view(_weight.size(0), -1)\n _u = torch.mv(_weight_mat, _v)\n _u = F.normalize(_u, dim=0, eps=1e-12)\n _v = torch.mv(_weight_mat.t(), _u)\n _v = F.normalize(_v, dim=0, eps=1e-12)\n _weight.data /= torch.dot(_u, torch.matmul(_weight_mat, 
_v))\n out_hat = torch.nn.functional.linear(input, _weight, _bias)\n expect_out = m(input)\n self.assertEqual(expect_out, out_hat)\n\n def test_spectral_norm_pickle(self):\n m = torch.nn.utils.spectral_norm(nn.Linear(5, 7))\n m = pickle.loads(pickle.dumps(m))\n self.assertIsInstance(m, nn.Linear)\n\n @skipIfNoLapack\n def test_orthogonal_parametrization(self):\n # Orthogonal implements 6 algorithms (3x parametrizations times 2 options of use_trivialization)\n\n def assert_is_orthogonal(X):\n n, k = X.size(-2), X.size(-1)\n if n < k:\n X = X.mT\n n, k = k, n\n Id = torch.eye(k, dtype=X.dtype, device=X.device).expand(*(X.size()[:-2]), k, k)\n eps = 10 * n * torch.finfo(X.dtype).eps\n torch.testing.assert_allclose(X.mH @ X, Id, atol=eps, rtol=0.)\n\n\n def assert_weight_allclose_Q(weight, W):\n # Test that weight is equal to the Q part of the QR decomposition of W\n # (or of its transpose if the matrix is wide)\n wide_matrix = W.size(-2) < W.size(-1)\n if wide_matrix:\n W = W.mT\n Q, R = torch.linalg.qr(W)\n Q *= R.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2)\n if wide_matrix:\n Q = Q.mT\n torch.testing.assert_allclose(Q, weight, atol=1e-5, rtol=0.)\n\n\n for shape, dtype, use_linear in product(((4, 4), (5, 3), (3, 5)), # square/ tall / wide\n (torch.float32, torch.complex64),\n (True, False)):\n # Conv2d does not support complex yet\n if not use_linear:\n continue\n\n if use_linear:\n input = torch.randn(3, shape[0], dtype=dtype)\n else:\n input = torch.randn(2, 2, shape[0] + 2, shape[1] + 1, dtype=dtype)\n\n for parametrization, use_trivialization in product((\"matrix_exp\", \"cayley\", \"householder\"),\n (False, True)):\n # right_inverse for Cayley and matrix_exp not implemented for use_trivialization=False\n # See Note [right_inverse expm cayley]\n can_initialize = use_trivialization or parametrization == \"householder\"\n\n # We generate them every time to always start with fresh weights\n if use_linear:\n m = nn.Linear(*shape, dtype=dtype)\n else:\n m = 
nn.Conv2d(2, 3, shape, dtype=dtype)\n\n # We do not support householder for complex inputs\n # See Note [Householder complex]\n w_init = m.weight.clone()\n if parametrization == \"householder\" and m.weight.is_complex():\n msg = \"householder parametrization does not support complex tensors\"\n with self.assertRaisesRegex(ValueError, msg):\n torch.nn.utils.parametrizations.orthogonal(m,\n \"weight\",\n parametrization,\n use_trivialization=use_trivialization)\n continue\n\n wide_matrix = w_init.size(-2) < w_init.size(-1)\n torch.nn.utils.parametrizations.orthogonal(m,\n \"weight\",\n parametrization,\n use_trivialization=use_trivialization)\n # Forwards works as expected\n self.assertEqual(w_init.shape, m.weight.shape)\n assert_is_orthogonal(m.weight)\n if can_initialize:\n assert_weight_allclose_Q(m.weight, w_init)\n\n # Intializing with a given orthogonal matrix works\n X = torch.randn_like(m.weight)\n if wide_matrix:\n X = X.mT\n w_new = torch.linalg.qr(X).Q\n if wide_matrix:\n w_new = w_new.mT\n if can_initialize:\n m.weight = w_new\n torch.testing.assert_allclose(w_new, m.weight, atol=1e-5, rtol=0.)\n else:\n msg = \"assign to the matrix exponential or the Cayley parametrization\"\n with self.assertRaisesRegex(NotImplementedError, msg):\n m.weight = w_new\n\n # Intializing with a non-orthogonal matrix makes m.weight be the Q part of the given matrix\n w_new = torch.randn_like(m.weight)\n if can_initialize:\n m.weight = w_new\n assert_weight_allclose_Q(m.weight, w_new)\n else:\n msg = \"assign to the matrix exponential or the Cayley parametrization\"\n with self.assertRaisesRegex(NotImplementedError, msg):\n m.weight = w_new\n\n opt = torch.optim.SGD(m.parameters(), lr=0.1)\n for _ in range(2):\n opt.zero_grad()\n m(input).norm().backward()\n grad = m.parametrizations.weight.original.grad\n self.assertIsNotNone(grad)\n # We do not update the upper triangular part of the matrix if tall tril if wide\n if grad.size(-2) >= grad.size(-1):\n zeros_grad = 
grad.triu(1)
                    else:
                        zeros_grad = grad.tril(-1)
                    self.assertEqual(zeros_grad, torch.zeros_like(zeros_grad))
                    # The gradient in the diagonal can only be imaginary because a skew-Hermitian
                    # matrix has imaginary diagonal
                    diag_grad = grad.diagonal(dim1=-2, dim2=-1)
                    if grad.is_complex():
                        diag_grad = diag_grad.real
                    self.assertEqual(diag_grad, torch.zeros_like(diag_grad))
                    opt.step()
                    # After each optimizer step the parametrized weight must remain orthogonal.
                    assert_is_orthogonal(m.weight)

    @skipIfNoLapack
    def test_orthogonal_errors(self):
        # Invalid arguments to orthogonal() must raise ValueError, and a
        # registered parametrization must reject assignment of a matrix whose
        # shape does not match the parametrized weight.
        m = nn.Linear(3, 4)
        with self.assertRaisesRegex(ValueError, "has to be one of"):
            torch.nn.utils.parametrizations.orthogonal(m, "weight", "foo")

        with self.assertRaisesRegex(ValueError, "Expected a matrix"):
            torch.nn.utils.parametrizations.orthogonal(m, "bias")

        torch.nn.utils.parametrizations.orthogonal(m, "weight")
        with self.assertRaisesRegex(ValueError, "matrices of shape"):
            m.weight = torch.randn(5, 5)
        # Leave the module unparametrized again.
        torch.nn.utils.parametrize.remove_parametrizations(m, "weight")


    def test_threshold_int(self):
        # F.threshold must also work on integer tensors.
        x = torch.tensor([-3, -2, -1, 0, 1, 2, 3])
        expected = torch.tensor([99, 99, 99, 99, 1, 2, 3])
        self.assertEqual(F.threshold(x, 0, 99), expected)

    def test_threshold_bfloat16(self):
        # bfloat16 thresholding must match float32 thresholding rounded to
        # bfloat16, including inf / -inf / nan thresholds.
        x = torch.randn(100)
        for threshold in [0, -0.5, 0.5, float('inf'), float('-inf'), float('nan')]:
            expected = F.threshold(x, threshold, 0).bfloat16().float()
            res_bf16 = F.threshold(x.bfloat16(), threshold, 0).float()
            self.assertEqual(res_bf16, expected)

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_embedding_max_norm_unsorted_repeating_indices(self):
        # max_norm renormalization must give the same result on CPU and CUDA
        # even when the lookup indices are unsorted and heavily repeated.
        def create_embedding(device):
            # Seed RNG so we get the same Embedding each time
            torch.manual_seed(0)
            return torch.nn.Embedding(
                num_embeddings=20,
                embedding_dim=64,
                max_norm=1.0).to(device)

        ix = torch.arange(2, device='cpu', dtype=torch.long).repeat(2000)
        out_cpu = create_embedding('cpu')(ix)

        ix = ix.to('cuda')
        out = create_embedding('cuda')(ix)
self.assertEqual(out.cpu(), out_cpu)

    def test_embedding_sparse_basic(self):
        # sparse=True embeddings must produce a sparse weight gradient with the
        # same shape as the dense weight.
        embedding = nn.Embedding(10, 20, sparse=True)
        input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long)
        embedding(input).sum().backward()
        self.assertTrue(embedding.weight.grad.is_sparse)
        self.assertEqual(embedding.weight.grad.shape, embedding.weight.shape)

    def test_embedding_sparse_empty_tensor(self):
        # Sparse gradients must also work for an empty index tensor and for
        # zero-sized embedding tables.
        embedding = nn.Embedding(0, 0, sparse=True)
        input = torch.tensor([], dtype=torch.int64)
        embedding(input).sum().backward()
        self.assertTrue(embedding.weight.grad.is_sparse)
        self.assertEqual(embedding.weight.grad.shape, embedding.weight.shape)

        embedding = nn.Embedding(10, 0, sparse=True)
        input = torch.LongTensor([[0, 2, 4, 5], [4, 3, 0, 9]])
        embedding(input).sum().backward()
        self.assertTrue(embedding.weight.grad.is_sparse)
        self.assertEqual(embedding.weight.grad.shape, embedding.weight.shape)

    def test_move_sparse_half_embedding(self):
        # Moving a sparse embedding across dtypes/devices must keep its
        # metadata (embedding_dim / num_embeddings) intact.
        embedding = nn.Embedding(10, 3, sparse=True)
        self.assertEqual(embedding.weight.device.type, 'cpu')
        # NOTE(review): float64 here implies this suite runs with a double
        # default dtype — confirm against the test-class setup (not visible here).
        self.assertEqual(embedding.weight.dtype, torch.float64)
        embedding.to(torch.float16)
        self.assertEqual(embedding.weight.dtype, torch.float16)
        self.assertEqual(embedding.embedding_dim, 3)
        self.assertEqual(embedding.num_embeddings, 10)

        if torch.cuda.is_available():
            embedding.to('cuda')
            self.assertEqual(embedding.weight.device.type, 'cuda')
            embedding.to('cpu')
            self.assertEqual(embedding.weight.device.type, 'cpu')

    def test_embedding_max_norm(self):
        # The same row fetched twice must be identical, and every output row
        # must be renormalized to L2 norm <= max_norm.
        embedding = nn.Embedding(22, 5, max_norm=1.0)
        input = torch.tensor([2, 8, 8, 6], dtype=torch.long)
        output = embedding(input)
        self.assertEqual(output[1], output[2])
        self.assertTrue(output.data.norm(p=2, dim=1).le(1).all())

    def test_embedding_from_pretrained(self):
        # from_pretrained copies the given matrix and lookups return its rows.
        a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
        embedding = nn.Embedding.from_pretrained(a)
        self.assertEqual(a, embedding.weight.data)

        input = torch.LongTensor([0, 1])
        output = embedding(input)
        self.assertEqual(a, output)

    def test_embedding_bag_from_pretrained(self):
        a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
        embedding = nn.EmbeddingBag.from_pretrained(a)
        self.assertEqual(a, embedding.weight)

        # offsets = [0, 1] -> one bag per row, so the output equals the table.
        input = torch.tensor([0, 1], dtype=torch.long)
        output = embedding(input, torch.arange(input.size(0)))
        self.assertEqual(a, output)

    def test_embedding_from_pretrained_padding_idx(self):
        # The padding row passed to from_pretrained must be preserved verbatim.
        padding_idx = 2
        padding_vec = torch.ones(3) * 7
        embeddings = torch.rand(4, 3, requires_grad=True)
        with torch.no_grad():
            embeddings[padding_idx] = padding_vec
        embedding_nn = nn.Embedding.from_pretrained(embeddings, padding_idx=padding_idx)
        self.assertEqual(embedding_nn.weight[padding_idx], padding_vec)

    def test_embedding_bag_from_pretrained_padding_idx(self):
        padding_idx = 2
        embeddings = torch.rand(4, 3, requires_grad=True)
        embedding_nn = nn.EmbeddingBag.from_pretrained(embeddings, padding_idx=padding_idx)
        self.assertEqual(embedding_nn.weight, embeddings)

    def test_embedding_from_pretrained_options(self):
        # Keyword options forwarded by from_pretrained must take effect; the
        # a.ne(...) assertion shows max_norm renormalized the stored weight.
        a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
        opts = {
            "max_norm": 2.,
            "norm_type": .5,
            "scale_grad_by_freq": False,
            "sparse": True
        }
        embedding = nn.Embedding.from_pretrained(a, **opts)
        input = torch.LongTensor([0, 1])
        output = embedding(input)
        # test output and that weight matrix was renormalized
        self.assertEqual(a, output)
        self.assertTrue(a.ne(torch.arange(1, 7, dtype=a.dtype).view(2, 3)).all())
        self.assertTrue(output.data.norm(p=opts["norm_type"], dim=1).le(opts["max_norm"]).all())

    def test_embedding_functional(self):
        # F.embedding must agree with the nn.Embedding module, with and
        # without padding_idx.
        a = torch.tensor([
            [1, 3, 2],
            [0, 2, 1]
        ], dtype=torch.long)
        embeddings = torch.rand(4, 3, requires_grad=True)

        embed_old = torch.nn.Embedding(4, 3)
        embed_old.weight.data = embeddings.data
        res_old = embed_old(a)

        res_F = F.embedding(a, embeddings)
        self.assertEqual(res_old, res_F)

        embed_old = torch.nn.Embedding(4,
3)
        embed_old = embed_old.from_pretrained(embeddings, padding_idx=2)
        res_old = embed_old(a)
        res_F = F.embedding(a, embeddings, padding_idx=2)

        self.assertEqual(res_old, res_F)

    def test_embedding_bag_functional(self):
        # F.embedding_bag must agree with the nn.EmbeddingBag module, with and
        # without padding_idx.
        a = torch.tensor([
            [1, 3, 2],
            [0, 2, 1]
        ], dtype=torch.long)
        embeddings = torch.rand(4, 3, requires_grad=True)

        embed_old = torch.nn.EmbeddingBag(4, 3)
        embed_old.weight = torch.nn.Parameter(embeddings)
        res_old = embed_old(a)

        res_F = F.embedding_bag(a, embeddings)
        self.assertEqual(res_old, res_F)

        embed_old = torch.nn.EmbeddingBag(4, 3)
        embed_old = embed_old.from_pretrained(embeddings, padding_idx=2)
        res_old = embed_old(a)
        res_F = F.embedding_bag(a, embeddings, padding_idx=2)

        self.assertEqual(res_old, res_F)

    # Make sure that error is thrown if padding_idx is out of bounds
    def test_embedding_bag_padding_idx_error(self):
        a = torch.tensor([
            [1, 3, 2],
            [0, 2, 1]
        ], dtype=torch.long)
        num_embeddings = 4
        num_features = 3
        embeddings = torch.rand(num_embeddings, num_features, requires_grad=True)

        functional_err_msg = r'padding_idx must be within the number of embeddings'
        module_err_msg = r'padding_idx must be within num_embeddings'

        # padding_idx is valid in [-num_embeddings, num_embeddings); anything
        # outside must raise from both the functional and the module form.
        for padding_idx in range(-(num_embeddings + 2), (num_embeddings + 2)):
            if (padding_idx < -num_embeddings) or (padding_idx >= num_embeddings):
                with self.assertRaisesRegex(RuntimeError, functional_err_msg):
                    F.embedding_bag(a, embeddings, padding_idx=padding_idx)
                with self.assertRaisesRegex(AssertionError, module_err_msg):
                    torch.nn.EmbeddingBag(num_embeddings, num_features, padding_idx=padding_idx)
            else:
                F.embedding_bag(a, embeddings, padding_idx=padding_idx)
                torch.nn.EmbeddingBag(num_embeddings, num_features, padding_idx=padding_idx)

    @unittest.skipUnless('fbgemm' in torch.backends.quantized.supported_engines,
                         'Linear_FP16_weight requires FBGEMM. FBGEMM is only optimized for CPUs'
                         ' with instruction set support avx2 or newer.')
    def test_fb_fc_packed(self):
        # fbgemm fp16 packed linear must match a plain numpy X @ W.T + b
        # reference within a loose (fp16-level) tolerance.
        X = np.random.rand(16, 16).astype(np.float32) - 0.5
        W = np.random.rand(16, 16).astype(np.float32) - 0.5
        b = np.random.rand(16).astype(np.float32) - 0.5

        def fc_op(X, W, b):
            # Dense reference: X @ W.T + b.
            return np.dot(X, W.T) + b

        x_tensor = torch.tensor(X)
        w_tensor = torch.tensor(W)
        b_tensor = torch.tensor(b)
        packed_w_tensor = torch.fbgemm_pack_gemm_matrix_fp16(w_tensor)
        actual_output = torch.fbgemm_linear_fp16_weight(x_tensor, packed_w_tensor, b_tensor)
        expected_output = fc_op(X, W, b)
        torch.testing.assert_close(torch.from_numpy(expected_output), actual_output.cpu(), atol=1e-3, rtol=1e-3)

    def test_embeddingbag_from_pretrained(self):
        # from_pretrained copies the weights; the default mode averages the bag.
        a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
        embeddingbag = nn.EmbeddingBag.from_pretrained(a)
        self.assertEqual(a, embeddingbag.weight.data)

        input = torch.LongTensor([[0, 1]])
        output = embeddingbag(input)
        self.assertEqual(a.mean(0, keepdim=True), output)

    def test_embeddingbag_from_pretrained_options(self):
        # Keyword options (max_norm / norm_type / mode="max") forwarded by
        # from_pretrained must take effect; a.ne(...) shows renormalization.
        a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
        opts = {
            "max_norm": 2.,
            "norm_type": .5,
            "scale_grad_by_freq": False,
            "mode": "max",
            "sparse": False
        }
        embeddingbag = nn.EmbeddingBag.from_pretrained(a, **opts)

        input = torch.LongTensor([[0, 1]])
        output = embeddingbag(input)
        self.assertEqual(a.max(0, keepdim=True)[0], output)
        self.assertTrue(a.ne(torch.arange(1, 7, dtype=a.dtype).view(2, 3)).all())
        self.assertTrue(a.norm(p=opts["norm_type"], dim=1).le(opts["max_norm"]).all())

    def test_AlphaDropout(self):
        # generate random tensor with zero mean and unit std
        input = torch.randn(5000)
        self._test_alpha_dropout(nn.AlphaDropout, input)

    def test_FeatureAlphaDropout(self):
        # Random feature-map sizes; the shared helper checks dropout statistics.
        b = random.randint(1, 5)
        w = random.randint(1, 5)
        h = random.randint(1, 5)
        d = random.randint(1, 2)
        num_features = 1000
        input = torch.randn(num_features, b, d, w, h)
self._test_alpha_dropout(nn.FeatureAlphaDropout, input)\n\n # no batch dims\n input = torch.randn(50, 20, 64, 64)\n self._test_alpha_dropout(nn.FeatureAlphaDropout, input)\n\n def test_pad_scalar_error(self):\n inputs = torch.tensor(0., requires_grad=True)\n self.assertRaises(RuntimeError, lambda: F.pad(inputs, (1, 1)))\n self.assertRaises(RuntimeError, lambda: F.pad(inputs, (1,)))\n\n @unittest.skipIf(not TEST_NUMPY, \"numpy not found\")\n @parametrize_test(\"average_attn_weights\", [True, False])\n def test_multihead_attention(self, average_attn_weights):\n def _scaled_dot_attn_ref(Q, K, V, dims, unseen_mask=None, key_padding_mask=None,\n average_attn_weights=average_attn_weights):\n \"\"\" Numpy-based reference implementation of scaled dot attention\n for testing\"\"\"\n\n QKT = _batchmatmul(\n Q,\n np.transpose(K, axes=[0, 1, 3, 2])\n / np.sqrt(dims[3], dtype=np.float32), # divide by sqrt(d_head)\n )\n b1, b2, s1, s2 = QKT.shape\n if unseen_mask is not None or key_padding_mask is not None:\n # assert s1 == s2\n for i in range(b1):\n for j in range(b2):\n for m in range(s1):\n for n in range(s2):\n if unseen_mask is not None and unseen_mask[m][n] == 0:\n QKT[i, j, m, n] = -np.inf\n if key_padding_mask is not None and key_padding_mask[i][n]:\n QKT[i, j, m, n] = -np.inf\n\n reference = _softmax(QKT)\n ref_attn_weight = reference\n if average_attn_weights:\n ref_attn_weight = np.sum(ref_attn_weight, axis=1) / b2\n reference = _batchmatmul(reference, V)\n return reference, ref_attn_weight\n\n def _batchmatmul(a, b): # batchmatmul over 4 dim matrix\n \"\"\" Numpy-based batch matrix multiply over 4 dim matrix\"\"\"\n assert a.shape[0] == b.shape[0]\n assert a.shape[1] == b.shape[1]\n retval = np.zeros(\n (a.shape[0], a.shape[1], a.shape[2], b.shape[3]), dtype=np.float32\n )\n for i in range(a.shape[0]):\n for j in range(a.shape[1]):\n retval[i, j, :, :] = np.matmul(a[i, j, :, :], b[i, j, :, :])\n return retval\n\n def _softmax(x): # softmax over 4 dim matrix\n \"\"\" 
Numpy-based reference softmax over 4 dim matrix\"\"\"\n np.seterr(invalid='ignore')\n output = np.zeros(x.shape, dtype=np.float64)\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n for k in range(x.shape[2]):\n x_curr = x[i, j, k, :]\n e_x = np.exp(x_curr - np.amax(x_curr))\n output[i, j, k, :] = e_x / np.sum(e_x)\n return output\n\n def _split_heads_ref(X, dims, nheads, d_head):\n X_split = np.reshape(X, dims[:2] + [nheads, d_head])\n X_split_transposed = np.transpose(X_split, [0, 2, 1, 3])\n reference = np.reshape(X_split_transposed, [dims[0], nheads, dims[1], d_head])\n return reference\n\n def _combine_heads_ref(X, dims, nheads, d_head):\n X_transposed = np.transpose(X, [0, 2, 1, 3])\n reference = np.reshape(X_transposed, dims[:2] + [nheads * d_head])\n return reference\n\n def _fc(X, X_weight, X_bias):\n X_fc_b = X_bias.detach().numpy()\n X_fc_w = X_weight.detach().numpy()\n return np.matmul(X, np.transpose(X_fc_w)) + X_fc_b\n\n def _create_src_lengths_mask(batch_size, src_lengths):\n \"\"\"\n Generate boolean mask to prevent attention beyond the end of source\n Inputs:\n batch_size : int\n src_lengths : [batch_size] of sentence lengths\n Outputs:\n [batch_size, max_src_len]\n \"\"\"\n max_srclen = src_lengths.max()\n src_indices = torch.arange(0, max_srclen).unsqueeze(0).to(src_lengths)\n src_indices = src_indices.expand(batch_size, max_srclen)\n src_lengths = src_lengths.unsqueeze(dim=1).expand(batch_size, max_srclen)\n # returns [batch_size, max_seq_len]\n return (src_indices < src_lengths).int().detach()\n\n def _multihead_attn_test_helper(add_key_padding_mask=False, add_bias_kv=False, add_zero_attn=False,\n saved_kv=False, same_embed_dim=False, byte_mask=False,\n average_attn_weights=average_attn_weights):\n for _ in range(100):\n batch_sz, seq_len = [random.randint(2, 10) for r in range(2)]\n d_head = random.randint(3, 10)\n nheads = random.randint(3, 10)\n d_model = d_head * nheads\n if same_embed_dim:\n kv_dim = d_model\n else:\n kv_dim = 
random.randint(5, 20)\n dims = [batch_sz, seq_len, kv_dim]\n\n saved_k = None\n saved_k_tensor = None\n saved_v = None\n saved_v_tensor = None\n if saved_kv:\n saved_k = np.random.rand(batch_sz * nheads, seq_len, d_head)\n saved_k_tensor = torch.from_numpy(saved_k).to(torch.get_default_dtype())\n saved_v = np.random.rand(batch_sz * nheads, seq_len, d_head)\n saved_v_tensor = torch.from_numpy(saved_v).to(torch.get_default_dtype())\n\n key_padding_mask = None\n key_padding_mask_tensor = None\n if add_key_padding_mask:\n seq_mask = np.random.randint(0, 2, (1, seq_len))\n key_padding_mask = (np.repeat(seq_mask, batch_sz, axis=0) == 1)\n key_padding_mask_tensor = torch.from_numpy(key_padding_mask)\n if byte_mask:\n key_padding_mask_tensor = key_padding_mask_tensor.byte()\n decoder_state = np.random.rand(batch_sz, d_model)\n K = np.random.rand(*dims)\n V = K\n Q = np.expand_dims(decoder_state, 1)\n attn_mask = np.random.randint(0 , 2, size=(1, seq_len))\n attn_mask_tensor = torch.from_numpy(attn_mask).float()\n if byte_mask:\n attn_mask_tensor = (attn_mask_tensor == 0).byte()\n else:\n attn_mask_tensor.masked_fill_(attn_mask_tensor == 0, float('-inf'))\n attn_mask_tensor.masked_fill_(attn_mask_tensor > 0, float('0.0'))\n attn_mask_tensor = attn_mask_tensor.double()\n\n decoder_state_tensor = torch.from_numpy(decoder_state).to(torch.get_default_dtype())\n source_hid_tensor = torch.from_numpy(K).to(torch.get_default_dtype()).transpose(0, 1)\n\n multihead_attn_module = MultiheadAttention(d_model, nheads,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n kdim=kv_dim, vdim=kv_dim)\n\n if add_bias_kv:\n bias_k = multihead_attn_module.bias_k.detach().numpy()\n bias_v = multihead_attn_module.bias_v.detach().numpy()\n else:\n bias_k = None\n bias_v = None\n\n _Q = decoder_state_tensor.unsqueeze(1).transpose(0, 1)\n _V = source_hid_tensor\n _K = source_hid_tensor\n\n if multihead_attn_module._qkv_same_embed_dim:\n result, result_weight = 
torch.nn.functional.multi_head_attention_forward(\n _Q, _K, _V,\n d_model, nheads,\n multihead_attn_module.in_proj_weight, multihead_attn_module.in_proj_bias,\n multihead_attn_module.bias_k, multihead_attn_module.bias_v,\n multihead_attn_module.add_zero_attn, multihead_attn_module.dropout,\n multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias,\n multihead_attn_module.training, key_padding_mask_tensor, True, attn_mask_tensor,\n static_k=saved_k_tensor, static_v=saved_v_tensor,\n average_attn_weights=average_attn_weights)\n else:\n result, result_weight = torch.nn.functional.multi_head_attention_forward(\n _Q, _K, _V,\n d_model, nheads,\n None, multihead_attn_module.in_proj_bias,\n multihead_attn_module.bias_k, multihead_attn_module.bias_v,\n multihead_attn_module.add_zero_attn, multihead_attn_module.dropout,\n multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias,\n multihead_attn_module.training, key_padding_mask_tensor, True, attn_mask_tensor,\n True, multihead_attn_module.q_proj_weight,\n multihead_attn_module.k_proj_weight, multihead_attn_module.v_proj_weight,\n static_k=saved_k_tensor, static_v=saved_v_tensor,\n average_attn_weights=average_attn_weights)\n\n result = result.squeeze(0).detach().numpy()\n\n if multihead_attn_module._qkv_same_embed_dim:\n q_proj_weight = multihead_attn_module.in_proj_weight[:d_model]\n k_proj_weight = multihead_attn_module.in_proj_weight[d_model:(d_model * 2)]\n v_proj_weight = multihead_attn_module.in_proj_weight[(d_model * 2):]\n else:\n q_proj_weight = multihead_attn_module.q_proj_weight\n k_proj_weight = multihead_attn_module.k_proj_weight\n v_proj_weight = multihead_attn_module.v_proj_weight\n\n Q_fc = _fc(Q, q_proj_weight, multihead_attn_module.in_proj_bias[:d_model])\n K_fc = _fc(K, k_proj_weight, multihead_attn_module.in_proj_bias[d_model:(d_model * 2)])\n V_fc = _fc(V, v_proj_weight, multihead_attn_module.in_proj_bias[(d_model * 2):])\n\n if add_bias_kv:\n K_fc = 
np.concatenate((K_fc, np.repeat(bias_k, K_fc.shape[0], axis=0)), axis=1)\n V_fc = np.concatenate((V_fc, np.repeat(bias_v, V_fc.shape[0], axis=0)), axis=1)\n if attn_mask is not None:\n attn_mask = np.concatenate((attn_mask, np.ones([1, 1])), axis=1)\n if key_padding_mask is not None:\n key_padding_mask = np.concatenate((key_padding_mask, np.full((batch_sz, 1), False, dtype=bool)), axis=1)\n dims[1] += 1\n Q_split = _split_heads_ref(\n Q_fc, [batch_sz, 1, d_model], nheads, d_head\n )\n\n if saved_k is not None:\n K_split = np.reshape(saved_k, [dims[0], nheads, dims[1], d_head])\n else:\n K_split = _split_heads_ref(K_fc, dims, nheads, d_head)\n\n if saved_v is not None:\n V_split = np.reshape(saved_v, [dims[0], nheads, dims[1], d_head])\n else:\n V_split = _split_heads_ref(V_fc, dims, nheads, d_head)\n\n if add_zero_attn:\n dims[1] += 1\n K_split = np.concatenate((K_split, np.zeros([K_split.shape[0], K_split.shape[1], 1, K_split.shape[3]])), axis=2)\n V_split = np.concatenate((V_split, np.zeros([V_split.shape[0], V_split.shape[1], 1, V_split.shape[3]])), axis=2)\n\n if attn_mask is not None:\n attn_mask = np.concatenate((attn_mask, np.ones([1, 1])), axis=1)\n\n if key_padding_mask is not None:\n key_padding_mask = np.concatenate((key_padding_mask, np.full((batch_sz, 1), False, dtype=bool)), axis=1)\n attn_heads, ref_attn_weight = _scaled_dot_attn_ref(\n Q=Q_split,\n K=K_split,\n V=V_split,\n dims=Q_split.shape,\n unseen_mask=attn_mask,\n key_padding_mask=key_padding_mask\n )\n combined_attn_heads = _combine_heads_ref(\n X=attn_heads, dims=[batch_sz, 1], nheads=nheads, d_head=d_head\n )\n\n reference = _fc(combined_attn_heads, multihead_attn_module.out_proj.weight, multihead_attn_module.out_proj.bias)\n reference = np.squeeze(reference, axis=1)\n\n # result = reference\n self.assertEqual(tuple(result.shape), (batch_sz, d_model))\n np.testing.assert_allclose(result, reference, atol=1e-5)\n\n # result_weight = ref_attn_weight\n result_weight = 
result_weight.detach().numpy()\n self.assertEqual(tuple(result_weight.shape), tuple(ref_attn_weight.shape))\n np.testing.assert_allclose(result_weight, ref_attn_weight, atol=1e-5)\n\n def test_multihead_attn_add_bias_kv():\n _multihead_attn_test_helper(add_bias_kv=True)\n\n def test_multihead_attn_add_zero_attn():\n _multihead_attn_test_helper(add_zero_attn=True)\n\n def test_multihead_attn_no_masking():\n _multihead_attn_test_helper()\n\n def test_multihead_attn_key_padding_mask():\n _multihead_attn_test_helper(add_key_padding_mask=True)\n\n def test_multihead_attn_saved_kv():\n _multihead_attn_test_helper(saved_kv=True)\n\n def test_multihead_attn_add_bias_kv_zero_attn():\n _multihead_attn_test_helper(add_key_padding_mask=True, add_bias_kv=True,\n add_zero_attn=True)\n\n def test_multihead_attn_all_arguments1():\n _multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True, saved_kv=True)\n\n def test_multihead_attn_all_arguments2():\n _multihead_attn_test_helper(add_key_padding_mask=True, add_bias_kv=True,\n add_zero_attn=True, saved_kv=True)\n\n def test_multihead_attn_all_arguments3():\n _multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True,\n saved_kv=True, same_embed_dim=True)\n\n def test_multihead_attn_all_arguments4():\n _multihead_attn_test_helper(add_key_padding_mask=True, add_zero_attn=True,\n saved_kv=True, same_embed_dim=True, byte_mask=True)\n\n test_multihead_attn_add_zero_attn() # Test MultiheadAttention with add_zero_attn\n test_multihead_attn_add_bias_kv() # Test MultiheadAttention with add_bias_kv\n test_multihead_attn_no_masking() # Test MultiheadAttention without masking\n test_multihead_attn_key_padding_mask() # Test MultiheadAttention with src lengths\n test_multihead_attn_saved_kv() # Test MultiheadAttention with static kv.\n test_multihead_attn_add_bias_kv_zero_attn() # Test MultiheadAttention with bias_kv and zero_attn.\n test_multihead_attn_all_arguments1() # Test MultiheadAttention with all the 
argument.\n with self.assertRaisesRegex(AssertionError, \"bias cannot be added to static key.\"):\n test_multihead_attn_all_arguments2() # Test MultiheadAttention with all the argument.\n test_multihead_attn_all_arguments3() # Test MultiheadAttention with all the argument.\n test_multihead_attn_all_arguments4() # Test MultiheadAttention with all the argument.\n\n def test_multihead_attn_3d_attn_mask(self):\n embed_dim = 8\n num_heads = 4\n batch_size = 8\n src_len = 3\n tgt_len = 2\n\n query = torch.rand(batch_size, tgt_len, embed_dim) # [N, T, D]\n key = torch.rand(batch_size, src_len, embed_dim) # [N, S, D]\n value = key # [N, S, D]\n attn_mask = torch.randint(0, 2, (batch_size, tgt_len, src_len)).float() # [N, T, S]\n attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))\n\n mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads)\n\n # Generate 3D results\n attn_mask_3d = torch.repeat_interleave(attn_mask, num_heads, dim=0) # [N * H, T, S]\n output_3d = mta_model(query.transpose(0, 1), key.transpose(0, 1), value.transpose(0, 1), attn_mask=attn_mask_3d)[0]\n output_3d = output_3d.transpose(0, 1) # [N, T, D]\n\n for i in range(0, batch_size):\n output_2d = mta_model(query[i].unsqueeze(0).transpose(0, 1),\n key[i].unsqueeze(0).transpose(0, 1),\n value[i].unsqueeze(0).transpose(0, 1),\n attn_mask=attn_mask[i])[0]\n\n # output_2d in shape of [T, 1, D]\n self.assertEqual(output_3d[i].unsqueeze(0).transpose(0, 1), output_2d)\n\n def test_multihead_attn_no_bias(self):\n embed_dim = 8\n num_heads = 4\n mha = torch.nn.MultiheadAttention(embed_dim, num_heads, bias=False)\n\n # Verify that bias=False applies to both in and out projection layers.\n self.assertIsNone(mha.in_proj_bias)\n self.assertIsNone(mha.out_proj.bias)\n\n def _test_multihead_attn_invalid_shape_impl(self, mha):\n # Batched (3D) query cases\n query = torch.randn(3, 3, 3)\n key = torch.randn(3, 3, 3)\n value = torch.randn(3, 3, 3)\n\n msg = \"expected 
`key` and `value` to be 3-D but found 2-D and 3-D tensors respectively\"\n # 3D query, 2D key and 3D value\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, torch.randn(3, 3), value)\n\n msg = \"expected `key` and `value` to be 3-D but found 3-D and 2-D tensors respectively\"\n # 3D query, 3D key and 2D value\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, torch.randn(3, 3))\n\n msg = \"expected `key_padding_mask` to be `None` or 2-D but found 1-D tensor instead\"\n # 3D query, 3D key, 3D value and 1D key_padding_mask\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, value, key_padding_mask=torch.tensor([False, True, True], dtype=torch.bool))\n\n msg = \"expected `attn_mask` to be `None`, 2-D or 3-D but found 1-D tensor instead\"\n # 3D query, 3D key, 3D value and 1D attn_mask\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, value, attn_mask=torch.tensor([False, True, True], dtype=torch.bool))\n\n # Unbatched (2D) query cases\n query = torch.randn(3, 3)\n key = torch.randn(3, 3)\n value = torch.randn(3, 3)\n\n msg = \"expected `key` and `value` to be 2-D but found 3-D and 2-D tensors respectively\"\n # 2D query, 3D key and 2D value\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, torch.randn(3, 3, 3), value)\n\n msg = \"expected `key` and `value` to be 2-D but found 2-D and 3-D tensors respectively\"\n # 2D query, 3D key and 2D value\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, torch.randn(3, 3, 3))\n\n msg = \"expected `key_padding_mask` to be `None` or 1-D but found 2-D tensor instead\"\n # 2D query, 2D key, 2D value and 1D key_padding_mask\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, value, key_padding_mask=torch.tensor([[False, True, True] * 2], dtype=torch.bool))\n\n msg = \"expected `attn_mask` to be `None`, 2-D or 3-D but found 1-D tensor instead\"\n # 2D query, 2D key, 2D value and 1D attn_mask\n with 
self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, value, attn_mask=torch.tensor([False, True, True], dtype=torch.bool))\n\n msg = r\"Expected `attn_mask` shape to be \\(3, 3, 3\\)\"\n # 2D query, 2D key, 2D value and 3D incorrect attn_mask\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, value, attn_mask=torch.randn(4, 3, 3).bernoulli_().to(torch.bool))\n\n def test_multihead_attn_invalid_shape(self):\n mha = torch.nn.MultiheadAttention(3, 3)\n self._test_multihead_attn_invalid_shape_impl(mha)\n # Give the test a chance to hit the fast path. (Right now, it\n # won't, but gating may be less restricted in the future.)\n with torch.no_grad():\n self._test_multihead_attn_invalid_shape_impl(mha.eval())\n\n @torch.no_grad()\n def test_multihead_attn_fast_path_invalid_shape(self):\n mha = torch.nn.MultiheadAttention(3, 3, batch_first=True).eval()\n\n # Batched (3D) query cases\n query = torch.randn(3, 3, 3)\n key = torch.randn(3, 3, 3)\n value = torch.randn(3, 3, 3)\n\n # Currently, this case will just go to the slow path and get\n # the usual message because it fails the requirement to be\n # batched.\n msg = \"expected `key` and `value` to be 3-D but found 2-D and 3-D tensors respectively\"\n # 3D query, 2D key and 3D value\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, torch.randn(3, 3), value, need_weights=False)\n\n # Currently, this case will just go to the slow path and get\n # the usual message because it fails the requirement to be\n # batched.\n msg = \"expected `key` and `value` to be 3-D but found 3-D and 2-D tensors respectively\"\n # 3D query, 3D key and 2D value\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, torch.randn(3, 3), need_weights=False)\n\n msg = \"expected `key_padding_mask` to be `None` or 2-D but found 1-D tensor instead\"\n # 3D query, 3D key, 3D value and 1D key_padding_mask\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, value, 
key_padding_mask=torch.tensor([False, True, True], dtype=torch.bool), need_weights=False)\n\n msg = \"expected `attn_mask` to be `None`, 2-D or 3-D but found 1-D tensor instead\"\n # 3D query, 3D key, 3D value and 1D attn_mask\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, value, attn_mask=torch.tensor([False, True, True], dtype=torch.bool), need_weights=False)\n\n # Unbatched (2D) query cases\n # NOTE: error messages are the same as regular path because the fast path doesn't support 2D.\n query = torch.randn(3, 3)\n key = torch.randn(3, 3)\n value = torch.randn(3, 3)\n\n msg = \"expected `key` and `value` to be 2-D but found 3-D and 2-D tensors respectively\"\n # 2D query, 3D key and 2D value\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, torch.randn(3, 3, 3), value)\n\n msg = \"expected `key` and `value` to be 2-D but found 2-D and 3-D tensors respectively\"\n # 2D query, 3D key and 2D value\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, torch.randn(3, 3, 3))\n\n msg = \"expected `key_padding_mask` to be `None` or 1-D but found 2-D tensor instead\"\n # 2D query, 2D key, 2D value and 1D key_padding_mask\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, value, key_padding_mask=torch.tensor([[False, True, True] * 2], dtype=torch.bool))\n\n msg = \"expected `attn_mask` to be `None`, 2-D or 3-D but found 1-D tensor instead\"\n # 2D query, 2D key, 2D value and 1D attn_mask\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, value, attn_mask=torch.tensor([False, True, True], dtype=torch.bool))\n\n msg = r\"Expected `attn_mask` shape to be \\(3, 3, 3\\)\"\n # 2D query, 2D key, 2D value and 3D incorrect attn_mask\n with self.assertRaisesRegex(AssertionError, msg):\n mha(query, key, value, attn_mask=torch.randn(4, 3, 3).bernoulli_().to(torch.bool))\n\n def test_multihead_attn_nested_tensor_outside_fast_path(self):\n mha = torch.nn.MultiheadAttention(3, 3, 
batch_first=True).eval()
        nt = torch.nested_tensor([torch.randn(3, 3)])
        # One tested platform (linux-bionic-py3.7-clang) has a torch_function for one
        # or more of these. Take advantage of that to test the torch_function bailout.
        has_torch_func = torch.overrides.has_torch_function(
            (nt, mha.in_proj_weight, mha.in_proj_bias, mha.out_proj.weight, mha.out_proj.bias))
        if has_torch_func:
            msg = "MultiheadAttention does not support NestedTensor.*argument has_torch_function"
        else:
            msg = ("MultiheadAttention does not support NestedTensor outside of its fast path.*grad is " +
                   "enabled and.*or biases requires_grad")
        with self.assertRaisesRegex(AssertionError, msg):
            mha(nt, nt, nt)

        if has_torch_func:
            # Just give up, they're all going to fail with the same message.
            return

        # With grad disabled (no_grad / inference_mode) the nested-tensor call is allowed.
        with torch.no_grad():
            mha(nt, nt, nt)
        with torch.inference_mode():
            mha(nt, nt, nt)
        nt = torch.nested_tensor([torch.randn(3, 3, requires_grad=False)])
        nt.requires_grad = False
        # Inputs not requiring grad is not enough: the module parameters still do.
        with self.assertRaisesRegex(AssertionError, msg):
            mha(nt, nt, nt)
        mha.in_proj_weight.requires_grad = False
        mha.in_proj_bias.requires_grad = False
        mha.out_proj.weight.requires_grad = False
        mha.out_proj.bias.requires_grad = False
        mha(nt, nt, nt)

    def test_normalize(self):
        # F.normalize must pass gradcheck for p=1 / p=2 over different dims,
        # including a 0-d input.
        inputs = torch.randn(1, 3, 4, 4, requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (inputs,)))
        self.assertTrue(gradcheck(lambda x: F.normalize(x, p=2, dim=-2), (inputs,)))

        inputs = torch.randn((), requires_grad=True)
        self.assertTrue(gradcheck(lambda x: F.normalize(x, p=1, dim=-1), (inputs,)))

    def test_adaptive_pooling_input_size(self):
        # Adaptive pooling modules must reject inputs with too few dimensions.
        for numel in (2, 3):
            for pool_type in ('Max', 'Avg'):
                cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
                module_cls = getattr(nn, cls_name)
                output_size = (2,) * numel
                module = module_cls(output_size)

                input = torch.randn(output_size)
                self.assertRaises(ValueError, lambda: module(input))

    def test_adaptive_pooling_size_none(self):
        # A None entry in output_size keeps that input dimension's size.
        for numel in (2, 3):
            for pool_type in ('Max', 'Avg'):
                cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
                module_cls = getattr(nn, cls_name)
                output_size = (2,) * (numel - 1) + (None,)
                module = module_cls(output_size)

                input = torch.randn((4,) * (numel + 1))
                output = module(input)
                self.assertEqual(output.size(), (4,) + (2,) * (numel - 1) + (4,))

    @unittest.skipIf(TEST_WITH_UBSAN, "signed integer overflow error with UBSAN")
    def test_adaptive_pooling_size_overflow(self):
        # 0x0x3fffffffffffffff * 2 * 2 = 0xfffffffffffffffc = -4 as int64_t
        # Tensor::numel() return int64_t, so following check that negative allocs are correctly handled
        self.assertRaises(
            RuntimeError,
            lambda: torch.nn.AdaptiveMaxPool1d(0x3fffffffffffffff)(torch.empty([2, 2, 2])))

    def test_adaptive_pooling_avg_nhwc(self):
        # Channels-last (NHWC) adaptive average pooling must match the
        # contiguous reference in both forward output and input gradient.
        device_list = ['cpu']
        if TEST_CUDA:
            device_list.append('cuda')

        for device in device_list:
            input = torch.randint(1, 10, (4, 8, 8, 8), dtype=torch.float32).to(device)
            input = input.contiguous(memory_format=torch.channels_last).requires_grad_()
            grad = torch.randint(1, 10, (4, 8, 7, 7), dtype=torch.float32).to(device)
            pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)

            ref_input = input.detach().clone().contiguous().requires_grad_(True)
            ref_grad = grad.detach().clone().contiguous()
            ref_pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)

            out = pool(input)
            out.backward(grad)
            ref_out = ref_pool(ref_input)
            ref_out.backward(ref_grad)

            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(ref_out.is_contiguous())
            self.assertEqual(out, ref_out)
            self.assertEqual(input.grad, ref_input.grad)

    def test_adaptive_pooling_avg_nhwc_non_contiguous(self):
        # Same as above, but with a channel-sliced (non-contiguous) input/grad.
        device_list = ['cpu']
        if TEST_CUDA:
            device_list.append('cuda')

        for device in device_list:
            input = torch.randint(1, 10, (4, 8, 8, 8), dtype=torch.float32).to(device)
            input = input.contiguous(memory_format=torch.channels_last)
            input = input[:, ::2, :, :].requires_grad_()
            grad = torch.randint(1, 10, (4, 8, 7, 7), dtype=torch.float32).to(device)
            grad = grad[:, ::2, :, :]
            pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)

            ref_input = input.detach().clone().contiguous().requires_grad_(True)
            ref_grad = grad.detach().clone().contiguous()
            ref_pool = torch.nn.AdaptiveAvgPool2d((7, 7)).to(device)

            out = pool(input)
            out.backward(grad)
            ref_out = ref_pool(ref_input)
            ref_out.backward(ref_grad)

            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(ref_out.is_contiguous())
            self.assertEqual(out, ref_out)
            self.assertEqual(input.grad, ref_input.grad)

    def test_adaptive_pooling_bfloat16(self):
        # bfloat16 adaptive avg/max pooling (contiguous and channels_last) must
        # stay within a loose tolerance of the float32 result, forward and backward.
        def _test_adaptive_pooling_bfloat16(self, device, mod, memory_format):
            input = torch.randint(1, 10, (3, 19, 8, 8), dtype=torch.float32)
            input = input.to(device).to(memory_format=memory_format).requires_grad_()
            pool = mod((7, 7)).to(device)

            input2 = input.detach().clone().bfloat16().requires_grad_(True)

            out = pool(input)
            out.sum().backward()
            out2 = pool(input2)
            out2.sum().backward()

            self.assertTrue(out2.is_contiguous(memory_format=memory_format))
            self.assertEqual(out2.dtype, torch.bfloat16)
            self.assertEqual(input2.grad.dtype, torch.bfloat16)
            self.assertEqual(out, out2.float(), atol=0.1, rtol=0)
            self.assertEqual(input.grad, input2.grad.float(), atol=0.1, rtol=0)

        device_list = ['cpu']
        for device in device_list:
            _test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveAvgPool2d, torch.contiguous_format)
            _test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveAvgPool2d, torch.channels_last)
            _test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveMaxPool2d, torch.contiguous_format)
            _test_adaptive_pooling_bfloat16(self, device, torch.nn.AdaptiveMaxPool2d, torch.channels_last)

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@largeTensorTest('12GB', device='cuda')\n def test_adaptive_pooling_avg_nhwc_launch_config_backward(self):\n input = torch.randint(1, 10, (1, 32, 2 ** 17 + 1, 32), dtype=torch.float32, device=\"cuda\")\n input = input.contiguous(memory_format=torch.channels_last).requires_grad_()\n grad = torch.randint(1, 10, (1, 32, 10, 32), dtype=torch.float32, device=\"cuda\")\n\n pool = torch.nn.AdaptiveAvgPool2d((10, 32)).cuda()\n\n ref_input = input.detach().clone().contiguous().requires_grad_(True)\n ref_grad = grad.detach().clone().contiguous()\n ref_pool = torch.nn.AdaptiveAvgPool2d((10, 32)).cuda()\n\n out = pool(input)\n out.backward(grad)\n ref_out = ref_pool(ref_input)\n ref_out.backward(ref_grad)\n\n self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(ref_out.is_contiguous())\n self.assertEqual(out, ref_out)\n self.assertEqual(input.grad, ref_input.grad)\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n @largeTensorTest('12GB', device='cuda')\n def test_adaptive_pooling_avg_nhwc_launch_config_forward(self):\n input = torch.randint(1, 10, (1, 32, 16, 16), dtype=torch.float32, device=\"cuda\")\n input = input.contiguous(memory_format=torch.channels_last).requires_grad_()\n pool = torch.nn.AdaptiveAvgPool2d((2 ** 17 + 1, 32)).cuda()\n\n ref_input = input.detach().clone().contiguous().requires_grad_(True)\n ref_pool = torch.nn.AdaptiveAvgPool2d((2 ** 17 + 1, 32)).cuda()\n\n out = pool(input)\n ref_out = ref_pool(ref_input)\n\n self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(ref_out.is_contiguous())\n self.assertEqual(out, ref_out)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"multi-GPU not supported\")\n # Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190\n @skipIfRocm\n def test_broadcast_double_backwards_gpu(self):\n tensors = (torch.randn(4, 4, device='cuda', requires_grad=True),\n torch.randn(4, 4, device='cuda', requires_grad=True),\n torch.randn(4, 4, 
device='cuda', requires_grad=True))\n # TODO(#50743): the following segfaults with check_batched_grad=True\n _assertGradAndGradgradChecks(self, lambda *i: Broadcast.apply((0, 1), *i), tensors,\n check_batched_grad=False)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"multi-GPU not supported\")\n def test_broadcast_not_requiring_grad(self):\n variables = [\n torch.randn(1, 2, device='cuda', requires_grad=True),\n torch.randn(1, 2, device='cuda', requires_grad=False),\n torch.randn(1, 2, device='cuda', requires_grad=False),\n torch.randn(1, 2, device='cuda', requires_grad=True),\n torch.randn(1, 2, device='cuda', requires_grad=True),\n ]\n broadcasted_variables = Broadcast.apply((0, 1), *variables)\n for output_idx, broadcasted_var in enumerate(broadcasted_variables):\n input_var = variables[output_idx % len(variables)]\n self.assertEqual(input_var.requires_grad, broadcasted_var.requires_grad)\n\n @unittest.skipIf(not TEST_MULTIGPU, \"multi-GPU not supported\")\n def test_broadcast_no_grad(self):\n x = torch.randn(1, 2, dtype=torch.float32, requires_grad=True, device='cuda')\n with torch.no_grad():\n broadcasted = Broadcast.apply((0, 1), x)\n self.assertTrue(x.requires_grad)\n for output in broadcasted:\n self.assertFalse(output.requires_grad)\n\n def test_state_dict(self):\n l = nn.Linear(5, 5)\n block = nn.Module()\n block.conv = nn.Conv2d(3, 3, 3, bias=False)\n net = nn.Module()\n net.linear1 = l\n net.linear2 = l\n net.bn = nn.BatchNorm2d(2)\n net.block = block\n net.add_module('empty', None)\n\n state_dict = net.state_dict()\n self.assertEqual(len(state_dict), 10)\n self.assertEqual(len(state_dict._metadata), 6)\n self.assertIn('', state_dict._metadata)\n self.assertIn('linear1', state_dict._metadata)\n self.assertIn('linear1.weight', state_dict)\n self.assertIn('linear1.bias', state_dict)\n self.assertIn('linear2', state_dict._metadata)\n self.assertIn('linear2.weight', state_dict)\n self.assertIn('linear2.bias', state_dict)\n self.assertIn('block', 
state_dict._metadata)\n self.assertIn('block.conv', state_dict._metadata)\n self.assertIn('block.conv.weight', state_dict)\n self.assertIn('block.conv.weight', state_dict)\n self.assertNotIn('block.conv.bias', state_dict)\n self.assertIn('bn', state_dict._metadata)\n self.assertIn('bn.weight', state_dict)\n self.assertIn('bn.bias', state_dict)\n self.assertIn('bn.running_var', state_dict)\n self.assertIn('bn.running_mean', state_dict)\n self.assertIn('bn.num_batches_tracked', state_dict)\n self.assertFalse(any(k.startswith('empty') for k in state_dict.keys()))\n for k, v in state_dict.items():\n param = net\n for component in k.split('.'):\n param = getattr(param, component)\n if isinstance(param, Parameter):\n param = param.data\n self.assertEqual(v.data_ptr(), param.data_ptr())\n\n l = nn.Linear(5, 5)\n state_dict = l.state_dict()\n self.assertEqual(len(state_dict), 2)\n self.assertEqual(len(state_dict._metadata), 1)\n self.assertIn('', state_dict._metadata)\n self.assertTrue(state_dict._metadata['']['version'] >= 0)\n self.assertEqual(state_dict['weight'].data_ptr(), l.weight.data_ptr())\n self.assertEqual(state_dict['bias'].data_ptr(), l.bias.data_ptr())\n\n # Reference https://github.com/pytorch/pytorch/pull/75507#issuecomment-1110291545\n self.assertNotWarn(lambda: l.state_dict(destination=dict()), \"Should not warn kwarg destination w/o _metadata\")\n\n def test_load_state_dict(self):\n l = nn.Linear(5, 5)\n block = nn.Module()\n block.conv1 = nn.Conv2d(3, 3, 3, bias=True)\n block.conv2 = nn.Conv2d(3, 3, 3, bias=False)\n net = nn.Module()\n net.linear1 = l\n net.linear2 = l\n net.bn = nn.BatchNorm2d(2)\n net.block = block\n net.add_module('empty', None)\n conv1_bias_dtype = block.conv1.bias.dtype\n\n state_dict = net.state_dict()\n state_dict.update({\n 'linear1.weight': torch.ones(5, 5),\n 'block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),\n 'bn.running_mean': torch.randn(2),\n })\n # Also test if a DDP state_dict can be loaded from a local 
model.\n ddp_state_dict = net.state_dict()\n ddp_state_dict.update({\n 'module.linear1.weight': torch.ones(5, 5),\n 'module.block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),\n 'module.bn.running_mean': torch.randn(2),\n })\n torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(ddp_state_dict, 'module.')\n for sd in [state_dict, ddp_state_dict]:\n incompatible_keys = net.load_state_dict(sd)\n self.assertEqual(len(incompatible_keys.missing_keys), 0)\n self.assertEqual(len(incompatible_keys.unexpected_keys), 0)\n self.assertNotIn('Incompatible', str(incompatible_keys))\n self.assertEqual(net.linear1.weight, sd['linear1.weight'])\n self.assertEqual(net.block.conv1.bias, sd['block.conv1.bias'])\n self.assertEqual(net.bn.running_mean, sd['bn.running_mean'])\n\n state_dict = net.state_dict()\n state_dict.update({'extra': torch.ones(5)})\n self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))\n incompatible_keys = net.load_state_dict(state_dict, strict=False)\n self.assertEqual(len(incompatible_keys.missing_keys), 0)\n self.assertEqual(len(incompatible_keys.unexpected_keys), 1)\n self.assertIn('extra', incompatible_keys.unexpected_keys)\n self.assertIn('Incompatible', str(incompatible_keys))\n\n state_dict = net.state_dict()\n state_dict.update({'extra.param': torch.ones(5)})\n self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))\n incompatible_keys = net.load_state_dict(state_dict, strict=False)\n self.assertEqual(len(incompatible_keys.missing_keys), 0)\n self.assertEqual(len(incompatible_keys.unexpected_keys), 1)\n self.assertIn('extra.param', incompatible_keys.unexpected_keys)\n\n state_dict = net.state_dict()\n del state_dict['linear1.weight']\n self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))\n incompatible_keys = net.load_state_dict(state_dict, strict=False)\n self.assertEqual(len(incompatible_keys.missing_keys), 1)\n self.assertEqual(len(incompatible_keys.unexpected_keys), 0)\n 
self.assertIn('linear1.weight', incompatible_keys.missing_keys)\n state_dict.update({'extra.param': torch.ones(5)})\n self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))\n incompatible_keys = net.load_state_dict(state_dict, strict=False)\n self.assertEqual(len(incompatible_keys.missing_keys), 1)\n self.assertEqual(len(incompatible_keys.unexpected_keys), 1)\n self.assertIn('linear1.weight', incompatible_keys.missing_keys)\n self.assertIn('extra.param', incompatible_keys.unexpected_keys)\n\n state_dict = net.state_dict()\n state_dict.update({'bn.running_mean': torch.rand(14, 4)}) # wrong size\n self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict))\n self.assertRaises(RuntimeError, lambda: net.load_state_dict(state_dict, strict=False))\n\n state_dict = net.state_dict()\n old_state_dict = deepcopy(state_dict)\n state_dict = {\n 'linear1.weight': torch.ones(5, 5),\n 'block.conv1.bias': torch.arange(1, 4, dtype=conv1_bias_dtype),\n 'bn.running_mean': torch.randn(2),\n 'nonexistent_key': torch.rand(3)\n }\n net.load_state_dict(state_dict, strict=False)\n self.assertEqual(net.linear1.weight, state_dict['linear1.weight'])\n self.assertEqual(net.block.conv1.bias, state_dict['block.conv1.bias'])\n self.assertEqual(net.bn.running_mean, state_dict['bn.running_mean'])\n new_state_dict = net.state_dict()\n del old_state_dict['linear1.weight']\n del old_state_dict['block.conv1.bias']\n del old_state_dict['bn.running_mean']\n for k, v, in old_state_dict.items():\n self.assertTrue(v.equal(new_state_dict[k]))\n\n def test_load_state_dict_BC(self):\n # BatchNormNd\n # Added num_batches_tracked buffer at version 2. 
For state dict with\n # earlier versions or no versions, it should provide default value of 0.\n bn = nn.BatchNorm2d(3)\n state_dict = bn.state_dict()\n del state_dict['num_batches_tracked']\n state_dict._metadata['']['version'] = 1 # version 1\n bn.load_state_dict(state_dict)\n self.assertEqual(bn.num_batches_tracked.dtype, torch.long)\n self.assertEqual(bn.num_batches_tracked.item(), 0)\n del state_dict._metadata['']['version'] # no version\n bn.load_state_dict(state_dict)\n self.assertEqual(bn.num_batches_tracked.dtype, torch.long)\n self.assertEqual(bn.num_batches_tracked.item(), 0)\n\n def test_load_state_dict_ref_cycle(self):\n # load_state_dict shouldn't cause a reference cycle involving Tensors\n import gc\n\n m = torch.nn.LSTM(16, 16, bidirectional=True)\n\n gc.collect()\n m.load_state_dict(deepcopy(m).state_dict())\n refcycles = gc.collect()\n\n self.assertEqual(refcycles, 0)\n\n def test_load_state_dict_custom(self):\n\n class CustomState(nn.Module):\n def __init__(self):\n super(CustomState, self).__init__()\n self.param = torch.nn.Parameter(torch.ones(1))\n self.sub = torch.nn.Linear(5, 5)\n\n def _save_to_state_dict(self, destination, prefix, keep_vars):\n destination[prefix + \"serialized\"] = self.param.data + 1\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata,\n strict, missing_keys, unexpected_keys,\n error_msgs):\n # skip some of the error handling\n self.param.data.copy_(state_dict[prefix + \"serialized\"] - 1)\n\n # use sequential to verify nesting\n m = nn.Sequential(CustomState())\n with torch.no_grad():\n m[0].param[0] = 10\n m[0].sub.weight[0, 0] = 555\n state_dict = m.state_dict()\n self.assertEqual(state_dict[\"0.serialized\"].item(), 11)\n self.assertIn(\"0.sub.weight\", state_dict)\n self.assertNotIn(\"0.param\", state_dict)\n del m\n mm = nn.Sequential(CustomState())\n self.assertEqual(mm[0].param[0].item(), 1)\n mm.load_state_dict(state_dict)\n self.assertEqual(mm[0].param[0].item(), 10)\n 
self.assertEqual(mm[0].sub.weight[0, 0].item(), 555)\n\n def test_extra_state(self):\n\n class SubModule(torch.nn.Module):\n def __init__(self, foo):\n super().__init__()\n self.foo = foo\n\n def get_extra_state(self):\n return {\n 'foo': self.foo\n }\n\n def set_extra_state(self, state):\n self.foo = state['foo']\n\n class MyModule(torch.nn.Module):\n def __init__(self, foo, bar):\n super().__init__()\n self.sub = SubModule(foo)\n self.bar = bar\n\n def get_extra_state(self):\n return {\n 'bar': self.bar\n }\n\n def set_extra_state(self, state):\n self.bar = state['bar']\n\n # Ensure state_dict contains the extra state by loading it into another module.\n m = MyModule(3, 'something')\n m2 = MyModule(5, 'something else')\n m2.load_state_dict(m.state_dict())\n self.assertEqual(m.state_dict(), m2.state_dict())\n self.assertEqual(m2.bar, m.bar)\n self.assertEqual(m2.sub.foo, m.sub.foo)\n\n def test_extra_state_non_dict(self):\n\n class MyModule(torch.nn.Module):\n def __init__(self, foo):\n super().__init__()\n self.foo = foo\n\n def get_extra_state(self):\n return self.foo\n\n def set_extra_state(self, state):\n self.foo = state\n\n # Test various types of extra state.\n for state in ('something', 5, MyModule(3)):\n m = MyModule(state)\n m2 = MyModule('something else')\n m2.load_state_dict(m.state_dict())\n self.assertEqual(m.state_dict(), m2.state_dict())\n self.assertEqual(m.foo, m2.foo)\n\n def test_extra_state_missing_set_extra_state(self):\n\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def get_extra_state(self):\n return {\n 'foo': 5\n }\n\n m = MyModule()\n with self.assertRaisesRegex(RuntimeError, 'Unexpected key'):\n m.load_state_dict(m.state_dict())\n\n def test_extra_state_missing_get_extra_state(self):\n\n class MyModule(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def set_extra_state(self):\n pass\n\n m = MyModule()\n with self.assertRaisesRegex(RuntimeError, 'Missing key'):\n 
m.load_state_dict(m.state_dict())\n\n def test_parameter_assignment(self):\n l = nn.Linear(5, 5)\n\n def num_params():\n return len(list(l.parameters()))\n\n self.assertEqual(num_params(), 2)\n\n new_param = Parameter(torch.randn(5, 5))\n l.param_name = new_param\n self.assertEqual(num_params(), 3)\n self.assertObjectIn(new_param, l.parameters())\n\n var = torch.randn(5, 5)\n l.var_name = var\n self.assertEqual(num_params(), 3)\n self.assertNotIn(id(var), map(id, l.parameters()))\n\n # Make sure Variables are not saved as parameters\n l.variable_attr = torch.empty(5, 5)\n self.assertEqual(num_params(), 3)\n l.param_attr = Parameter(torch.empty(5, 5))\n self.assertEqual(num_params(), 4)\n\n # It shouldn't be possible to replace a parameter with a Variable\n def assign_var():\n l.param_attr = torch.empty(5, 5)\n\n self.assertRaises(TypeError, assign_var)\n # But replacing it with None should be fine\n l.param_attr = None\n self.assertEqual(num_params(), 3)\n\n def test_assignment(self):\n l = nn.Module()\n a = nn.Parameter(torch.randn(2))\n b = nn.Parameter(torch.randn(3))\n c = nn.Parameter(torch.randn(4))\n q = nn.Linear(4, 4)\n r = nn.Linear(5, 5)\n w = nn.Linear(6, 6)\n\n def test_assignments(get_list, a, b, c):\n # Check that None can be shadowed\n l.a = None\n self.assertIsNone(l.a)\n self.assertIn('a', l.__dict__)\n l.a = a\n self.assertIs(l.a, a)\n self.assertEqual(get_list(), [a])\n self.assertNotIn('a', l.__dict__)\n\n # Assign second object\n l.b = None\n self.assertIsNone(l.b)\n self.assertIn('b', l.__dict__)\n l.b = b\n self.assertIs(l.b, b)\n self.assertEqual(get_list(), [a, b])\n self.assertNotIn('b', l.__dict__)\n\n # Remove and add the object back. Order should be unchanged.\n l.a = None\n self.assertIsNone(l.a)\n self.assertEqual(get_list(), [b])\n l.a = a\n self.assertIs(l.a, a)\n self.assertEqual(get_list(), [a, b])\n\n # Replace object with another one. 
Order should be unchanged.\n l.a = c\n self.assertIs(l.a, c)\n self.assertEqual(get_list(), [c, b])\n\n # Remove and reassign an attribute. It should appear at the end of the list now.\n del l.a\n self.assertFalse(hasattr(l, 'a'))\n l.a = a\n self.assertIs(l.a, a)\n self.assertEqual(get_list(), [b, a])\n\n test_assignments(lambda: list(l.parameters()), a, b, c)\n del l.a, l.b\n self.assertEqual(list(l.parameters()), [])\n\n test_assignments(lambda: list(l.children()), q, r, w)\n del l.a, l.b\n self.assertEqual(list(l.children()), [])\n\n buf = torch.randn(10)\n l.register_buffer('buf', buf)\n self.assertIs(l.buf, buf)\n l.buf = None\n self.assertIs(l.buf, None)\n self.assertNotIn('buf', l.__dict__) # should be stored in l._buffers\n l.buf = buf\n self.assertIn('buf', l.state_dict())\n self.assertEqual(l.state_dict()['buf'], buf)\n\n @unittest.skipIf(not TEST_CUDA, 'CUDA not available')\n def test_thnn_conv_strided_padded_dilated(self):\n for convfn, dims, transposed in (\n (torch.nn.functional.conv2d, 2, False),\n (torch.nn.functional.conv_transpose2d, 2, True),\n (torch.nn.functional.conv3d, 3, False),\n (torch.nn.functional.conv_transpose3d, 3, True)):\n for stride, padding, dilation in (\n (2, 0, 1), (1, 1, 1), (2, 1, 1), (1, 0, 2)):\n kwargs = {\"stride\": stride, \"padding\": padding, \"dilation\": dilation}\n inp_shape = (1, 2) + dims * (4,)\n weight_shape = (2, 2) + dims * (1,)\n inputs = torch.randn(inp_shape, dtype=torch.double, device=\"cuda\", requires_grad=True)\n weight = torch.randn(weight_shape, dtype=torch.double, device=\"cuda\", requires_grad=True)\n bias = torch.randn(2, dtype=torch.double, device=\"cuda\", requires_grad=True)\n with torch.backends.cudnn.flags(enabled=False):\n res = convfn(inputs, weight, bias, **kwargs)\n res_cpu = convfn(inputs.cpu(), weight.cpu(), bias.cpu(), **kwargs)\n self.assertEqual(res, res_cpu)\n with torch.backends.cudnn.flags(enabled=False):\n torch.autograd.gradcheck(\n lambda x, w, b: convfn(x, w, b, **kwargs),\n 
(inputs, weight, bias)\n )\n torch.autograd.gradcheck(\n lambda x, w, b: convfn(x, w, b, **kwargs),\n (inputs.cpu(), weight.cpu(), bias.cpu())\n )\n\n def test_Conv2d_inconsistent_types(self):\n inputs = torch.randn(4, 1, 7, 7, dtype=torch.float)\n weights = torch.randn(1, 1, 3, 3, dtype=torch.double)\n # inconsistent types should raise an exception\n self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))\n # but it should work with the same type\n nn.functional.conv2d(inputs.float(), weights.float())\n\n @unittest.skipIf(not TEST_CUDA, 'CUDA not available')\n def test_Conv2d_inconsistent_types_on_GPU_without_cudnn(self):\n inputs = torch.randn(4, 1, 7, 7, dtype=torch.float, device=\"cuda\")\n weights = torch.randn(1, 1, 3, 3, dtype=torch.double, device=\"cuda\")\n bias = torch.randn(1, dtype=torch.double, device=\"cuda\")\n\n with torch.backends.cudnn.flags(enabled=False):\n # inconsistent types should raise an exception\n self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))\n self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))\n\n # but it should work with the same type\n nn.functional.conv2d(inputs.float(), weights.float(), bias.float())\n\n def test_Conv2d_1x1(self):\n in_channels = 2\n out_channels = 2\n mod = torch.nn.Conv2d(2, 2, 1, bias=False).to(dtype=torch.double)\n input = torch.randn(1, in_channels, 5, 5, requires_grad=True, dtype=torch.double)\n for enabled in (False, True):\n with torch.backends.mkldnn.flags(enabled=enabled):\n gradcheck(F.conv2d, (input, mod.weight))\n\n def test_Conv2d_OneDNN(self):\n def run_once(group_val=24, dilation=1):\n ifm = torch.ones([1, group_val, 6, 6], dtype=torch.float32)\n weights = torch.ones([group_val, 1, 3, 3], dtype=torch.float32)\n op = torch.nn.Conv2d(\n in_channels=group_val,\n out_channels=group_val,\n kernel_size=[3, 3],\n stride=[2, 2],\n padding=[1, 1],\n dilation=[dilation, dilation],\n groups=group_val,\n 
bias=False,\n padding_mode='zeros'\n )\n\n op.weight.data = weights\n res = op(ifm)\n grad_in = torch.ones(res.shape, dtype=torch.float32)\n res.backward(grad_in)\n return op.weight.grad\n\n for gorup_val in (24, 48, 23, 25):\n for dilation in (1, 2):\n with torch.backends.mkldnn.flags(enabled=False):\n without_onednn = run_once(gorup_val, dilation)\n\n with torch.backends.mkldnn.flags(enabled=True):\n with_onednn = run_once(gorup_val, dilation)\n\n self.assertEqual(without_onednn, with_onednn)\n\n @unittest.skipIf(not TEST_CUDA, 'CUDA not available')\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_cudnn_non_contiguous(self):\n x = torch.randn(192, 16, 50).cuda()\n x = x.permute(0, 2, 1).contiguous().permute(0, 2, 1)\n m = torch.nn.Conv1d(\n in_channels=16,\n out_channels=32,\n kernel_size=2,\n bias=True).cuda()\n result = m(x)\n\n @unittest.skipIf(not TEST_CUDA, 'CUDA not available')\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_Conv2d_inconsistent_types_on_GPU_with_cudnn(self):\n inputs = torch.randn(4, 1, 7, 7, dtype=torch.float, device=\"cuda\")\n weights = torch.randn(1, 1, 3, 3, dtype=torch.double, device=\"cuda\")\n bias = torch.randn(1, dtype=torch.double, device=\"cuda\")\n\n with torch.backends.cudnn.flags(enabled=True):\n # inconsistent types should raise an exception\n self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))\n self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights.float(), bias))\n\n # but it should work with the same type\n nn.functional.conv2d(inputs.float(), weights.float(), bias.float())\n\n def test_Conv2d_missing_argument(self):\n c = nn.Conv2d(3, 3, 3)\n self.assertRaises(TypeError, lambda: c(None))\n\n def test_Conv2d_backward_twice(self):\n input = torch.randn(2, 3, 5, 5)\n c = nn.Conv2d(3, 3, 3)\n o1 = c(input)\n o1.sum().backward()\n self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',\n lambda: o1.sum().backward())\n\n\n def 
test_conv_modules_raise_error_on_incorrect_input_size(self):\n for dtype in [torch.bfloat16, torch.double, torch.float]:\n modules = [nn.Conv1d(3, 8, 3).to(dtype), nn.ConvTranspose1d(3, 8, 3).to(dtype),\n nn.Conv2d(3, 8, 3).to(dtype), nn.ConvTranspose2d(3, 8, 3).to(dtype),\n nn.Conv3d(3, 8, 3).to(dtype), nn.ConvTranspose3d(3, 8, 3).to(dtype)]\n\n invalid_input_dims = [(1, 4), (1, 4),\n (2, 5), (2, 5),\n (3, 6), (3, 6)]\n\n for invalid_dims, module in zip(invalid_input_dims, modules):\n for dims in invalid_dims:\n input = torch.empty(torch.Size((3, ) * dims))\n self.assertRaises(RuntimeError, lambda: module(input))\n\n def test_conv_shapecheck(self):\n def test(should_raise, module, input_size, dtype):\n input = torch.empty(3, *input_size).to(dtype)\n if should_raise:\n self.assertRaises(RuntimeError, lambda: module(input))\n else:\n # just run it to ensure no exception raised.\n module(input)\n\n for dtype in [torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble]:\n # Conv1d\n test(True, nn.Conv1d(1, 1, 3).to(dtype), (1, 2), dtype)\n test(True, nn.Conv1d(1, 1, 3, stride=2).to(dtype), (1, 2), dtype)\n test(False, nn.Conv1d(1, 1, 2).to(dtype), (1, 2), dtype)\n test(False, nn.Conv1d(1, 1, 2, stride=2).to(dtype), (1, 2), dtype)\n test(False, nn.Conv1d(1, 1, 3, stride=2, padding=1).to(dtype), (1, 2), dtype)\n\n # Conv2d\n test(True, nn.Conv2d(1, 1, (3, 3)).to(dtype), (1, 2, 2), dtype)\n test(False, nn.Conv2d(1, 1, (3, 3)).to(dtype), (1, 3, 3), dtype)\n test(False, nn.Conv2d(1, 1, (3, 3), padding=1).to(dtype), (1, 2, 2), dtype)\n\n # Conv3D\n test(True, nn.Conv3d(1, 1, (3, 3, 3)).to(dtype), (1, 2, 2, 2), dtype)\n test(False, nn.Conv3d(1, 1, (3, 3, 3)).to(dtype), (1, 3, 3, 3), dtype)\n test(False, nn.Conv3d(1, 1, (3, 3, 3), padding=1).to(dtype), (1, 2, 2, 2), dtype)\n\n def test_ConvTranspose2d_output_size(self):\n m = nn.ConvTranspose2d(3, 4, 3, 3, 0, 2)\n i = torch.randn(2, 3, 6, 6)\n for h in range(15, 22):\n for w in range(15, 22):\n if 18 <= h <= 20 
and 18 <= w <= 20:\n output = m(i, output_size=(h, w))\n self.assertEqual(output.size()[2:], (h, w))\n else:\n self.assertRaises(ValueError, lambda: m(i, (h, w)))\n\n def test_ConvTranspose2d_output_size_downsample_upsample(self):\n b, c, hid_c = 2, 3, 2\n for h in range(13, 24):\n for w in range(13, 17):\n for k in range(2, 5):\n for d in range(1, 5):\n for s in range(1, 4):\n for p in range(3):\n conv = nn.Conv2d(\n in_channels=c,\n out_channels=hid_c,\n kernel_size=k,\n stride=s,\n padding=p,\n dilation=d,\n )\n\n t_conv = nn.ConvTranspose2d(\n in_channels=hid_c,\n out_channels=c,\n kernel_size=k,\n stride=s,\n padding=p,\n dilation=d,\n )\n\n i = torch.randn(b, c, h, w)\n\n out = t_conv(conv(i), output_size=i.shape)\n\n self.assertEqual(out.size()[2:], i.size()[2:])\n\n def test_ConvTranspose3d_correct_output_size(self):\n # Check that ConvTranspose3d can take a 5d output_size.\n m = nn.ConvTranspose3d(2, 2, 2)\n i = torch.rand(1, 2, 1, 1, 1)\n out = m(i, output_size=(1, 2, 2, 2, 2))\n\n @unittest.skipIf(not TEST_CUDA, 'CUDA not available')\n def test_ConvTranspose2d_half_cublas_gemm(self):\n with torch.backends.cudnn.flags(enabled=False):\n inputs = torch.randn(1, 1, 16, 16, device='cuda', dtype=torch.half)\n deconv = nn.ConvTranspose2d(\n 1, 1, 3, stride=2, padding=1, output_padding=1).cuda().half()\n output = deconv(inputs)\n output.mean().backward()\n\n # For https://github.com/pytorch/pytorch/pull/1273\n # Almost identical to the above `test_Conv2d_naive_groups`\n def test_Conv2d_groups_nobias(self):\n dev_dtypes = [(\"cpu\", torch.float)]\n if TEST_CUDA:\n dev_dtypes += [(\"cuda\", torch.float), (\"cuda\", torch.half)]\n if AMPERE_OR_ROCM:\n dev_dtypes += [(\"cuda\", torch.bfloat16)]\n for device, dtype in dev_dtypes:\n m = nn.Conv2d(4, 4, kernel_size=3, groups=2, bias=False).to(device, dtype)\n i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)\n output = m(i)\n grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)\n 
            output.backward(grad_output)

            # Split the grouped conv into two independent per-group convs and
            # verify the grouped forward/backward results channel-slice by slice.
            m1 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
            m1.weight.data.copy_(m.weight.data[:2])
            i1 = i.data[:, :2].contiguous().requires_grad_(True)
            output1 = m1(i1)
            output1.backward(grad_output[:, :2].contiguous())

            m2 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
            m2.weight.data.copy_(m.weight.data[2:])
            i2 = i.data[:, 2:].contiguous().requires_grad_(True)
            output2 = m2(i2)
            output2.backward(grad_output[:, 2:].contiguous())

            self.assertEqual(output, torch.cat([output1, output2], 1))
            self.assertEqual(i.grad.data,
                             torch.cat([i1.grad.data, i2.grad.data], 1),
                             atol=dtype2prec_DONTUSE[dtype], rtol=0)
            self.assertEqual(m.weight.grad.data,
                             torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                             atol=1e-1 if dtype == torch.half else dtype2prec_DONTUSE[dtype], rtol=0)

    # Almost identical to the above `test_Conv2d_naive_groups`
    # Covering special case when group > 1, input-channel / group < 16 and output-channel is multiple of 16
    # See also https://github.com/pytorch/pytorch/pull/18463#issuecomment-476563686
    # and https://github.com/pytorch/pytorch/pull/18463#issuecomment-477001024
    def test_Conv2d_groups_nobias_v2(self):
        torch.manual_seed(123)
        dev_dtypes = [("cpu", torch.float)]
        if TEST_CUDA:
            dev_dtypes += [("cuda", torch.float), ("cuda", torch.half)]
        if AMPERE_OR_ROCM:
            dev_dtypes += [("cuda", torch.bfloat16)]
        for device, dtype in dev_dtypes:
            # Grouped conv: 4 in-channels, 16 out-channels, 2 groups
            # (2 in / 8 out per group).
            m = nn.Conv2d(4, 16, kernel_size=3, groups=2, bias=False).to(device, dtype)
            i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
            output = m(i)
            grad_output = torch.randn(2, 16, 4, 4, device=device, dtype=dtype)
            output.backward(grad_output)

            # Reference: two ungrouped convs, one per group.
            m1 = nn.Conv2d(2, 8, kernel_size=3, bias=False).to(device, dtype)
            m1.weight.data.copy_(m.weight.data[:8])
            i1 = i.data[:, :2].contiguous().requires_grad_(True)
            output1 = m1(i1)
            output1.backward(grad_output[:, :8].contiguous())

            m2 = nn.Conv2d(2, 8, kernel_size=3, bias=False).to(device, dtype)
            m2.weight.data.copy_(m.weight.data[8:])
            i2 = i.data[:, 2:].contiguous().requires_grad_(True)
            output2 = m2(i2)
            output2.backward(grad_output[:, 8:].contiguous())

            self.assertEqual(output, torch.cat([output1, output2], 1))
            self.assertEqual(i.grad.data,
                             torch.cat([i1.grad.data, i2.grad.data], 1),
                             atol=dtype2prec_DONTUSE[dtype], rtol=0)
            self.assertEqual(m.weight.grad.data,
                             torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                             atol=1e-1 if dtype == torch.half else dtype2prec_DONTUSE[dtype], rtol=0)

    # CPU-only test for group conv3d fast implementation using bmm
    # See: https://github.com/pytorch/pytorch/pull/36355
    def test_Conv3d_groups_nobias(self):
        torch.manual_seed(123)
        m = nn.Conv3d(4, 16, kernel_size=3, groups=2, bias=False).to("cpu", torch.float)
        i = torch.randn(2, 4, 6, 6, 6, device="cpu", dtype=torch.float, requires_grad=True)
        output = m(i)
        grad_output = torch.randn(2, 16, 4, 4, 4, device="cpu", dtype=torch.float)
        output.backward(grad_output)

        # Reference computation: the same weights applied as two separate
        # ungrouped 3d convs, one per group.
        m1 = nn.Conv3d(2, 8, kernel_size=3, bias=False).to("cpu", torch.float)
        m1.weight.data.copy_(m.weight.data[:8])
        i1 = i.data[:, :2].contiguous().requires_grad_(True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :8].contiguous())

        m2 = nn.Conv3d(2, 8, kernel_size=3, bias=False).to("cpu", torch.float)
        m2.weight.data.copy_(m.weight.data[8:])
        i2 = i.data[:, 2:].contiguous().requires_grad_(True)
        output2 = m2(i2)
        output2.backward(grad_output[:, 8:].contiguous())

        self.assertEqual(output, torch.cat([output1, output2], 1))
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         atol=dtype2prec_DONTUSE[torch.float], rtol=0)
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                         atol=dtype2prec_DONTUSE[torch.float], rtol=dtype2prec_DONTUSE[torch.float])

    # Same group-conv3d split-and-compare check as above, but with bias terms
    # included, so bias gradients are verified as well.
    def test_Conv3d_groups_wbias(self):
        torch.manual_seed(123)
        m = nn.Conv3d(4, 16, kernel_size=3, groups=2, bias=True).to("cpu", torch.float)
        i = torch.randn(2, 4, 6, 6, 6, device="cpu", dtype=torch.float, requires_grad=True)
        output = m(i)
        grad_output = torch.randn(2, 16, 4, 4, 4, device="cpu", dtype=torch.float)
        output.backward(grad_output)

        m1 = nn.Conv3d(2, 8, kernel_size=3, bias=True).to("cpu", torch.float)
        m1.weight.data.copy_(m.weight.data[:8])
        m1.bias.data.copy_(m.bias.data[:8])
        i1 = i.data[:, :2].contiguous().requires_grad_(True)
        output1 = m1(i1)
        output1.backward(grad_output[:, :8].contiguous())

        m2 = nn.Conv3d(2, 8, kernel_size=3, bias=True).to("cpu", torch.float)
        m2.weight.data.copy_(m.weight.data[8:])
        m2.bias.data.copy_(m.bias.data[8:])
        i2 = i.data[:, 2:].contiguous().requires_grad_(True)
        output2 = m2(i2)
        output2.backward(grad_output[:, 8:].contiguous())

        self.assertEqual(output, torch.cat([output1, output2], 1))
        self.assertEqual(i.grad.data,
                         torch.cat([i1.grad.data, i2.grad.data], 1),
                         atol=dtype2prec_DONTUSE[torch.float],
                         rtol=dtype2prec_DONTUSE[torch.float])
        self.assertEqual(m.weight.grad.data,
                         torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
                         atol=dtype2prec_DONTUSE[torch.float],
                         rtol=dtype2prec_DONTUSE[torch.float])
        self.assertEqual(m.bias.grad.data,
                         torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
                         atol=dtype2prec_DONTUSE[torch.float], rtol=dtype2prec_DONTUSE[torch.float])


    # MaxUnpool2d with an explicit output_size: too-large targets must raise,
    # sizes within the valid range must be accepted.
    def test_MaxUnpool2d_output_size(self):
        m = nn.MaxPool2d(3, stride=2, return_indices=True)
        mu = nn.MaxUnpool2d(3, stride=2)
        big_t = torch.rand(1, 1, 6, 6)
        big_t[0][0][4][4] = 100
        output_big, indices_big = m(big_t)
        self.assertRaises(RuntimeError, lambda: mu(output_big, indices_big))

        small_t = torch.rand(1, 1, 5, 5)
        for i in range(0, 4, 2):
            for j in range(0, 4, 2):
                small_t[:, :, i, j] = 100
        output_small, indices_small = m(small_t)
        for h in range(3, 10):
            for w in range(3, 10):
                if 4 <= h <= 6 and 4 <= w <= 6:
                    size = (h, w)
                    if h 
== 6:
                        # Also exercise the 4-element (N, C, H, W) size form.
                        size = (1, 1) + size

                    mu(output_small, indices_small, output_size=size)
                else:
                    self.assertRaises(ValueError, lambda: mu(output_small, indices_small, (h, w)))

    # Channels-last (NHWC) MaxUnpool2d on CPU must match the contiguous path,
    # for both the forward result and the input gradients.
    def test_max_unpool2d_nhwc_cpu(self):
        input = torch.randn(2, 10, 9, 9).float().cpu()
        input = input.contiguous(memory_format=torch.channels_last)
        ref_input = input.clone().contiguous()

        pool = nn.MaxPool2d(3, stride=2, return_indices=True).cpu()
        ref_pool = nn.MaxPool2d(3, stride=2, return_indices=True).cpu()

        out, ind = pool(input)
        ref_out, ref_ind = ref_pool(ref_input)
        out.requires_grad_()
        ref_out.requires_grad_()

        unpool = nn.MaxUnpool2d(3, stride=2).cpu()
        ref_unpool = nn.MaxUnpool2d(3, stride=2).cpu()

        upout = unpool(out, ind)
        ref_upout = ref_unpool(ref_out, ref_ind)

        grad = torch.randn(upout.size()).float().cpu()
        grad = grad.contiguous(memory_format=torch.channels_last)
        ref_grad = grad.clone().contiguous()

        upout.backward(grad)
        ref_upout.backward(ref_grad)

        # Output must preserve the channels-last memory format.
        self.assertTrue(upout.is_contiguous(memory_format=torch.channels_last))
        self.assertTrue(ref_upout.is_contiguous())
        self.assertTrue(torch.allclose(upout, ref_upout))
        self.assertTrue(torch.allclose(out.grad, ref_out.grad))

    # deepcopy of a module must produce an independent copy whose parameters
    # do not alias the original.
    def test_container_copy(self):
        class Model(nn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.linear = nn.Linear(4, 5)

            def forward(self, input):
                return self.linear(input)

        input = torch.randn(2, 4)

        model = Model()
        model_cp = deepcopy(model)
        self.assertEqual(model(input).data, model_cp(input).data)

        # Mutating the copy must not affect the original.
        model_cp.linear.weight.data[:] = 2
        self.assertNotEqual(model(input).data, model_cp(input).data)

    def test_RNN_cell(self):
        # this is just a smoke test; these modules are implemented through
        # autograd so no Jacobian test is needed
        for module in (nn.RNNCell, nn.GRUCell):
            for bias in (True, False):
                input = torch.randn(3, 10)
                hx = torch.randn(3, 20)
                cell = module(10, 20, bias=bias)
                for _ in range(6):
                    hx = cell(input, hx)

                hx.sum().backward()

    # Mismatched input feature size (11 vs the cell's 10) must raise.
    def test_RNN_cell_forward_input_size(self):
        input = torch.randn(3, 11)
        hx = torch.randn(3, 20)
        for module in (nn.RNNCell, nn.GRUCell):
            cell = module(10, 20)
            self.assertRaises(Exception, lambda: cell(input, hx))

    # Mismatched hidden state size (21 vs the cell's 20) must raise.
    def test_RNN_cell_forward_hidden_size(self):
        input = torch.randn(3, 10)
        hx = torch.randn(3, 21)
        cell_shared_param = (10, 20)
        for cell in (nn.RNNCell(*cell_shared_param, nonlinearity="relu"),
                     nn.RNNCell(*cell_shared_param, nonlinearity="tanh"),
                     nn.GRUCell(*cell_shared_param)):
            self.assertRaises(Exception, lambda: cell(input, hx))

    # hidden_size == 0 is legal and must produce an empty (3, 0) output.
    def test_RNN_cell_forward_zero_hidden_size(self):
        input = torch.randn(3, 10)
        hx = torch.randn(3, 0)
        cell_shared_param = (10, 0)
        for cell in (nn.RNNCell(*cell_shared_param, nonlinearity="relu"),
                     nn.RNNCell(*cell_shared_param, nonlinearity="tanh"),
                     nn.GRUCell(*cell_shared_param)):
            self.assertEqual(cell(input, hx).shape, torch.Size([3, 0]))

    def _test_loss_equal_input_target_shape(self, cast):
        # Tests losses whose inputs should have the same size.
        losses = {
            'mse_loss': lambda x, y: F.mse_loss(x, y),
            'l1_loss': lambda x, y: F.l1_loss(x, y),
            'smooth_l1_loss': lambda x, y: F.smooth_l1_loss(x, y),
            'huber_loss': lambda x, y: F.huber_loss(x, y),
            'kl_div': lambda x, y: F.kl_div(x, y),
            'poisson_nll_loss': lambda x, y: F.poisson_nll_loss(x, y),
        }

        # (3, 5) vs (5, 3): shapes differ, every loss above must raise.
        input = cast(torch.randn(3, 5))
        target = cast(torch.randn(5, 3))
        for _name, fn in losses.items():
            self.assertRaises(Exception, lambda: fn(input, target))

    def test_loss_equal_input_target_shape(self):
        self._test_loss_equal_input_target_shape(lambda x: x)

    # mse_loss with broadcastable but unequal shapes must emit exactly one
    # size-mismatch warning.
    def test_mse_loss_size_warning(self):
        i = torch.randn((10, 1), requires_grad=True)
        t = torch.randn((10,))
        with warnings.catch_warnings(record=True) as w:
            # Ensure warnings are being shown
            warnings.simplefilter("always")
            # Trigger Warning
            F.mse_loss(i, t)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertIn('Please ensure they have the same size.', str(w[0]))

    # poisson_nll_loss: 'none'/'sum'/'mean' reductions agree with the manual
    # per-element formula; an unknown reduction string must raise.
    def test_poisson_nll_loss_reduction_modes(self):
        input = torch.tensor([0.5, 1.5, 2.5])
        target = torch.tensor([1., 2., 3.])
        component_wise_loss = torch.exp(input) - target * input
        self.assertEqual(component_wise_loss,
                         F.poisson_nll_loss(input, target, reduction='none'))
        self.assertEqual(torch.sum(component_wise_loss),
                         F.poisson_nll_loss(input, target, reduction='sum'))
        self.assertEqual(torch.mean(component_wise_loss),
                         F.poisson_nll_loss(input, target, reduction='mean'))
        with self.assertRaisesRegex(ValueError, 'is not valid'):
            F.poisson_nll_loss(input, target, reduction='total')

    # gaussian_nll_loss: same reduction-mode contract, checked against the
    # closed-form 0.5 * (log(var) + (input - target)^2 / var).
    def test_gaussian_nll_loss_reduction_modes(self):
        input = torch.tensor([[0.5, 1.5, 2.5], [2., 4., 6.]])
        target = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
        var = torch.tensor([[0.5, 1., 1.5], [1., 1.5, 2.]])
        component_wise_loss = 0.5 * (torch.log(var) + (input - target)**2 / var)
        self.assertEqual(component_wise_loss,
                         F.gaussian_nll_loss(input, target, var, reduction='none'))
        self.assertEqual(torch.sum(component_wise_loss),
                         F.gaussian_nll_loss(input, target, var, reduction='sum'))
        self.assertEqual(torch.mean(component_wise_loss),
                         F.gaussian_nll_loss(input, target, var, reduction='mean'))
        with self.assertRaisesRegex(ValueError, 'is not valid'):
            F.gaussian_nll_loss(input, target, var, reduction='total')

    # Broadcasting of target and var against the full-sized input must give
    # the same result as fully materialized operands.
    def test_gaussian_nll_loss_broadcasting(self):
        input = torch.tensor([[0.5, 1.5, 2.5], [2., 4., 6.]])
        target_full = torch.tensor([[1., 2., 3.], [1., 2., 3.]])
        target_part = torch.tensor([[1., 2., 3.]])
        var_full = torch.tensor([[0.5, 0.5, 0.5], [1.5, 1.5, 1.5]])
        var_part1 = torch.tensor([[0.5], [1.5]])
        var_part2 = torch.tensor([0.5, 1.5])
        component_wise_loss = 0.5 * (torch.log(var_full) + (input - target_full)**2 / var_full)
        self.assertEqual(component_wise_loss,
                         F.gaussian_nll_loss(input, target_part, var_full,
                                             reduction='none'))
        self.assertEqual(component_wise_loss,
                         F.gaussian_nll_loss(input, target_full, var_part1, reduction='none'))
        self.assertEqual(component_wise_loss,
                         F.gaussian_nll_loss(input, target_full, var_part2, reduction='none'))
        self.assertEqual(component_wise_loss,
                         F.gaussian_nll_loss(input, target_part, var_part1, reduction='none'))
        self.assertEqual(component_wise_loss,
                         F.gaussian_nll_loss(input, target_part, var_part2, reduction='none'))

    # Invalid `var` arguments: wrong (non-broadcastable) size and negative
    # entries must each raise a ValueError with a specific message.
    def test_gaussian_nll_loss_args(self):
        input = torch.randn(3, 5)
        with self.assertRaisesRegex(ValueError, 'var is of incorrect size'):
            target = torch.randn(3, 5)
            var = torch.ones(3, 3)
            torch.nn.functional.gaussian_nll_loss(input, target, var)
        with self.assertRaisesRegex(ValueError, 'var has negative entry/entries'):
            var = -1 * torch.ones(3, 5)
            torch.nn.functional.gaussian_nll_loss(input, target, var)

    # 'batchmean' reduction must equal the 'sum' reduction divided by the
    # batch size.
    def test_KLDivLoss_batch_mean(self):
        input_shape = (2, 5)
        log_prob1 = F.log_softmax(torch.randn(input_shape), 1)
        prob2 = F.softmax(torch.randn(input_shape), 1)

        loss = nn.KLDivLoss(reduction='batchmean')
        l = loss(log_prob1, prob2)

        loss_none_reduce = nn.KLDivLoss(reduction='sum')(log_prob1, prob2)
        expected = loss_none_reduce / input_shape[0]

        self.assertEqual(l, expected)

    # Same 'batchmean' check with log-space targets (log_target=True).
    def test_KLDivLoss_batch_mean_log_target(self):
        input_shape = (2, 5)
        log_prob1 = F.log_softmax(torch.randn(input_shape), 1)
        log_prob2 = F.log_softmax(torch.randn(input_shape), 1)

        loss = nn.KLDivLoss(reduction='batchmean', log_target=True)
        l = loss(log_prob1, log_prob2)

        loss_none_reduce = nn.KLDivLoss(reduction='sum', log_target=True)(log_prob1, log_prob2)
        expected = loss_none_reduce / input_shape[0]

        self.assertEqual(l, expected)

    # ctc_loss must reject floating-point input/target length tensors.
    def test_CTCLoss_typechecks(self):
        target_lengths = torch.tensor([30, 25, 20])
        input_lengths = torch.tensor([50, 50, 50])
        targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int)
        log_probs = torch.randn(50, 3, 15, dtype=torch.float).log_softmax(2)
        with self.assertRaises(RuntimeError):
            _input_lengths = input_lengths.to(dtype=torch.float)
            torch.nn.functional.ctc_loss(log_probs, targets, _input_lengths, target_lengths)
        with self.assertRaises(RuntimeError):
            target_lengths = target_lengths.to(dtype=torch.float)
            torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)

    # Target lengths exceeding the target tensor's second dimension must raise
    # (CUDA path).
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_CTCLoss_lengthchecks_cuda(self):
        target_lengths = [30, 25, 20]
        input_lengths = [50, 50, 50]
        targets = torch.randint(1, 15, (3, 29), dtype=torch.long, device='cuda')
        log_probs = torch.randn(50, 3, 15, dtype=torch.float, device='cuda').log_softmax(2)
        with self.assertRaises(RuntimeError):
            torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)

    # Same length check on the CPU path.
    def test_CTCLoss_lengthchecks_cpu(self):
        target_lengths = [30, 25, 20]
        input_lengths = [50, 50, 50]
        targets = torch.randint(1, 15, (3, 29), dtype=torch.int)
        log_probs = torch.randn(50, 3, 15, dtype=torch.float).log_softmax(2)
        with self.assertRaises(RuntimeError):
            torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)

    # CPU and (non-cudnn) CUDA ctc_loss must agree on very long targets, for
    # both the loss value and its gradient.
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_CTCLoss_long_targets(self):
        input_length = 4000
        vocab_size = 3
        batch_size = 4
        target_length = 1200

        log_probs = torch.randn(input_length, batch_size, vocab_size).log_softmax(2).requires_grad_()
        targets = torch.randint(low=1, high=vocab_size - 1, size=(batch_size, target_length), dtype=torch.long)
        input_lengths = batch_size * [input_length]
        target_lengths = batch_size * [target_length]

        res_cpu = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths,
                                               reduction='sum', zero_infinity=True)
        grad_out = torch.randn_like(res_cpu)
        grad_cpu, = torch.autograd.grad(res_cpu, log_probs, grad_out)

        # Disable cudnn so the native CUDA kernel is exercised.
        with torch.backends.cudnn.flags(enabled=False):
            res_gpu = torch.nn.functional.ctc_loss(log_probs.cuda(), targets.cuda(), input_lengths, target_lengths,
                                                   reduction='sum', zero_infinity=True)
            grad_gpu, = torch.autograd.grad(res_gpu, log_probs, grad_out.cuda())
        self.assertEqual(res_cpu, res_gpu, atol=1e-4, rtol=0)
        self.assertEqual(grad_cpu, grad_gpu, atol=1e-4, rtol=0)

    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_CTCLoss_critical_target_len(self):
        # cudnn has an unexpected problem with target length 256, see issue #53505
        N = 1
        S = 256
        C = 10
        T = 500
        target = torch.randint(low=1, high=C, size=(S,), dtype=torch.int)
        input_lengths = torch.full(size=(N,), fill_value=T, dtype=torch.int)
        target_lengths = torch.tensor(S, dtype=torch.int)
        inp = torch.randn(T, N, C, dtype=torch.float, device='cuda').log_softmax(2).requires_grad_()
        with cudnn.flags(enabled=True):
            res_gpu = torch.nn.functional.ctc_loss(inp, target, input_lengths, target_lengths, reduction='none')
        res_cpu = torch.nn.functional.ctc_loss(inp.cpu(), target, input_lengths, target_lengths, reduction='none')
        self.assertEqual(res_cpu, res_gpu, atol=1e-3, rtol=0)

    # zero_infinity=True: a target longer than the input (60 > 50) would yield
    # an infinite loss; verify CUDA/CPU agreement and NaN-free gradients.
    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_CTCLoss_zero_infinity(self):
        target_lengths = [60, 25, 20]
        input_lengths = [50, 50, 50]
        targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int, device='cuda')
        log_probs = torch.randn(50, 3, 15, dtype=torch.float, device='cuda').log_softmax(2).requires_grad_()
        res = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths,
                                           reduction='sum', zero_infinity=True)
        with torch.backends.cudnn.flags(enabled=False):
            res2 = torch.nn.functional.ctc_loss(log_probs, targets.cuda().long(), input_lengths, target_lengths,
                                                reduction='sum', zero_infinity=True)
        res_cpu = torch.nn.functional.ctc_loss(log_probs.cpu(), targets.cpu(), input_lengths, target_lengths,
                                               reduction='sum', zero_infinity=True)

        self.assertEqual(res2, res, atol=1e-4, rtol=0)
self.assertEqual(res_cpu, res.cpu(), atol=1e-4, rtol=0)\n g1, = torch.autograd.grad(res, log_probs)\n g2, = torch.autograd.grad(res2, log_probs)\n g3, = torch.autograd.grad(res_cpu, log_probs)\n self.assertEqual(g2, g3, atol=1e-4, rtol=0)\n self.assertEqual(g1, g2, atol=1e-4, rtol=0)\n self.assertTrue((g1 == g1).all().item()) # check that we don't have NaN\n\n def test_RNN_cell_no_broadcasting(self):\n def test(cell_module, input, hx, input_size, hidden_size):\n cell = cell_module(input_size, hidden_size)\n self.assertRaises(RuntimeError, lambda: cell(input, hx))\n\n def test_all(hidden_size, bad_hx, good_hx, input_size, input):\n test(nn.RNNCell, input, bad_hx, input_size, hidden_size)\n test(nn.GRUCell, input, bad_hx, input_size, hidden_size)\n test(nn.LSTMCell, input, (bad_hx, good_hx), input_size, hidden_size)\n test(nn.LSTMCell, input, (good_hx, bad_hx), input_size, hidden_size)\n\n hidden_size = 20\n input_size = 10\n input = torch.randn(3, input_size)\n bad_hx = torch.randn(1, hidden_size)\n good_hx = torch.randn(3, hidden_size)\n\n # Test hidden/input batch size broadcasting\n test_all(hidden_size, bad_hx, good_hx, input_size, input)\n\n # Test hx's hidden_size vs module's hidden_size broadcasting\n bad_hx = torch.randn(3, 1)\n test_all(hidden_size, bad_hx, good_hx, input_size, input)\n\n # Test input's input_size vs module's input_size broadcasting\n bad_input = torch.randn(3, 1)\n test_all(hidden_size, good_hx, good_hx, input_size, bad_input)\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n def test_native_dropout_corner_case(self):\n for train in [True, False]:\n for p in [0.0, 1.0]:\n for device in [\"cuda\", \"cpu\"]:\n x = torch.randn(5).to(device=device).requires_grad_()\n x_ref = x.detach().requires_grad_()\n o = torch.native_dropout(x, p, train)[0]\n o_ref = torch.dropout(x_ref, p, train)\n o.sum().backward()\n o_ref.sum().backward()\n assert(o.equal(o_ref))\n assert(x.grad.equal(x_ref.grad))\n\n def test_invalid_dropout_p(self):\n v = 
torch.ones(1)\n self.assertRaises(ValueError, lambda: nn.Dropout(-0.1))\n self.assertRaises(ValueError, lambda: nn.Dropout(1.1))\n self.assertRaises(ValueError, lambda: nn.Dropout2d(-0.1))\n self.assertRaises(ValueError, lambda: nn.Dropout2d(1.1))\n self.assertRaises(ValueError, lambda: nn.Dropout3d(-0.1))\n self.assertRaises(ValueError, lambda: nn.Dropout3d(1.1))\n self.assertRaises(ValueError, lambda: F.dropout(v, -0.1))\n self.assertRaises(ValueError, lambda: F.dropout(v, 1.1))\n\n def test_pad_sequence(self):\n def pad(tensor, length):\n return torch.cat(\n [tensor.data, tensor.data.new(\n length - tensor.size(0), *tensor.size()[1:]).zero_()])\n\n # single dimensional\n a = torch.tensor([1, 2, 3])\n b = torch.tensor([4, 5])\n c = torch.tensor([6])\n\n # batch_first = true\n expected = torch.tensor([[4, 5, 0], [1, 2, 3], [6, 0, 0]])\n padded = rnn_utils.pad_sequence([b, a, c], True)\n self.assertEqual(padded, expected)\n\n # batch_first = false\n padded = rnn_utils.pad_sequence([b, a, c])\n self.assertEqual(padded, expected.transpose(0, 1))\n\n # pad with non-zero value\n expected = torch.tensor([[4, 5, 1], [1, 2, 3], [6, 1, 1]])\n padded = rnn_utils.pad_sequence([b, a, c], True, 1)\n self.assertEqual(padded, expected)\n\n # Test pad sorted sequence\n expected = torch.tensor([[1, 2, 3], [4, 5, 0], [6, 0, 0]])\n padded = rnn_utils.pad_sequence([a, b, c], True)\n self.assertEqual(padded, expected)\n\n # more dimensions\n maxlen = 9\n for num_dim in (0, 1, 2, 3):\n sequences = []\n trailing_dims = [4] * num_dim\n for i in range(1, maxlen + 1):\n seq_len = i * i\n sequences.append(torch.rand(seq_len, 5, *trailing_dims))\n random.shuffle(sequences)\n expected = []\n for seq in sequences:\n expected.append(pad(seq, maxlen * maxlen))\n # batch first = true\n expected = torch.stack(expected)\n padded = rnn_utils.pad_sequence(sequences, True)\n self.assertEqual(padded, expected)\n\n # batch first = false\n padded = rnn_utils.pad_sequence(sequences)\n 
self.assertEqual(padded, expected.transpose(0, 1))\n\n def test_unpad_sequence(self):\n\n # single dimensional\n a = torch.tensor([1, 2, 3])\n b = torch.tensor([4, 5])\n c = torch.tensor([6])\n sequences = [a, b, c]\n\n lengths = torch.as_tensor([v.size(0) for v in sequences])\n for batch_first in [True, False]:\n padded_sequences = rnn_utils.pad_sequence(sequences, batch_first=batch_first)\n unpadded_sequences = rnn_utils.unpad_sequence(padded_sequences, lengths, batch_first=batch_first)\n self.assertEqual(sequences, unpadded_sequences)\n\n # more dimensions\n maxlen = 9\n for num_dim in (0, 1, 2, 3):\n sequences = []\n trailing_dims = [4] * num_dim\n for i in range(1, maxlen + 1):\n seq_len = i * i\n sequences.append(torch.rand(seq_len, 5, *trailing_dims))\n random.shuffle(sequences)\n\n lengths = torch.as_tensor([v.size(0) for v in sequences])\n padded_sequences = rnn_utils.pad_sequence(sequences, batch_first=batch_first)\n unpadded_sequences = rnn_utils.unpad_sequence(padded_sequences, lengths, batch_first=batch_first)\n self.assertEqual(sequences, unpadded_sequences)\n\n def test_pack_sequence(self):\n def _compatibility_test(sequences, lengths, batch_first, enforce_sorted=False):\n padded = rnn_utils.pad_sequence(sequences, batch_first)\n packed = rnn_utils.pack_sequence(sequences, enforce_sorted)\n unpacked = rnn_utils.pad_packed_sequence(packed, batch_first)\n self.assertEqual(padded, unpacked[0])\n pack_padded = rnn_utils.pack_padded_sequence(\n padded, lengths, batch_first, enforce_sorted)\n self.assertEqual(packed, pack_padded)\n\n # single dimensional\n a = torch.tensor([1, 2, 3])\n b = torch.tensor([4, 5])\n c = torch.tensor([6])\n packed = rnn_utils.pack_sequence([a, b, c], enforce_sorted=False)\n expected = torch.tensor([1, 4, 6, 2, 5, 3])\n self.assertEqual(packed.batch_sizes, [3, 2, 1])\n self.assertEqual(packed.data.data, expected)\n self.assertEqual(packed.sorted_indices, [0, 1, 2])\n self.assertEqual(packed.unsorted_indices, [0, 1, 2])\n\n 
packed_unsorted = rnn_utils.pack_sequence([b, c, a], enforce_sorted=False)\n self.assertEqual(packed_unsorted.batch_sizes, [3, 2, 1])\n self.assertEqual(packed_unsorted.data.data, expected)\n self.assertEqual(packed_unsorted.sorted_indices, [2, 0, 1])\n self.assertEqual(packed_unsorted.unsorted_indices, [1, 2, 0])\n\n # single dimensional, enforce_sorted = True\n packed_enforce_sorted = rnn_utils.pack_sequence([a, b, c], enforce_sorted=True)\n self.assertEqual(packed_enforce_sorted.batch_sizes, [3, 2, 1])\n self.assertEqual(packed_enforce_sorted.data.data, expected)\n self.assertTrue(packed_enforce_sorted.sorted_indices is None)\n self.assertTrue(packed_enforce_sorted.unsorted_indices is None)\n\n with self.assertRaisesRegex(RuntimeError, 'must be sorted in decreasing order'):\n rnn_utils.pack_sequence([b, c, a], enforce_sorted=True)\n\n with self.assertRaisesRegex(RuntimeError, 'You can pass `enforce_sorted=False`'):\n rnn_utils.pack_sequence([b, c, a], enforce_sorted=True)\n\n # more dimensions\n maxlen = 9\n for num_dim in (0, 1, 2, 3):\n sequences = []\n lengths = []\n trailing_dims = [4] * num_dim\n for i in range(maxlen, 0, -1):\n seq_len = i * i\n lengths.append(seq_len)\n sequences.append(torch.rand(seq_len, 5, *trailing_dims))\n unsorted_sequences = [s.clone() for s in sequences]\n random.shuffle(unsorted_sequences)\n unsorted_sequences_lengths = [t.size(0) for t in unsorted_sequences]\n\n # compatibility with other utilities\n for batch_first in (True, False):\n for enforce_sorted in (True, False):\n _compatibility_test(sequences, lengths, batch_first, enforce_sorted)\n _compatibility_test(unsorted_sequences, unsorted_sequences_lengths,\n batch_first)\n\n def test_unpack_sequence(self):\n\n # single dimensional\n a = torch.tensor([1, 2, 3])\n b = torch.tensor([4, 5])\n c = torch.tensor([6])\n sequences = [a, b, c]\n\n packed_sequences = rnn_utils.pack_sequence(sequences, enforce_sorted=False)\n unpacked_sequences = 
rnn_utils.unpack_sequence(packed_sequences)\n self.assertEqual(sequences, unpacked_sequences)\n\n # more dimensions\n maxlen = 9\n for num_dim in (0, 1, 2, 3):\n sequences = []\n trailing_dims = [4] * num_dim\n for i in range(1, maxlen + 1):\n seq_len = i * i\n sequences.append(torch.rand(seq_len, 5, *trailing_dims))\n random.shuffle(sequences)\n\n packed_sequences = rnn_utils.pack_sequence(sequences, enforce_sorted=False)\n unpacked_sequences = rnn_utils.unpack_sequence(packed_sequences)\n self.assertEqual(sequences, unpacked_sequences)\n\n def test_pack_padded_sequence(self):\n def generate_test_case(sorted_lengths, should_shuffle):\n def pad(tensor, length):\n return torch.cat([tensor, tensor.new(length - tensor.size(0), *tensor.size()[1:]).zero_()])\n\n max_length = sorted_lengths[0]\n batch_sizes = [sum(map(bool, filter(lambda x: x >= i, sorted_lengths)))\n for i in range(1, max_length + 1)]\n offset = 0\n padded = torch.cat([pad(i * 100 + torch.arange(1., 5 * l + 1).view(l, 1, 5), max_length)\n for i, l in enumerate(sorted_lengths, 1)], 1)\n expected_data = [[torch.arange(1., 6) + (i + 1) * 100 + 5 * n for i in range(batch_size)]\n for n, batch_size in enumerate(batch_sizes)]\n expected_data = list(itertools.chain.from_iterable(expected_data))\n expected_data = torch.stack(expected_data, dim=0)\n\n if should_shuffle:\n # Shuffle the padded sequence to create an unsorted sequence\n permutation = list(range(len(sorted_lengths)))\n random.shuffle(permutation)\n\n unsorted_indices = torch.tensor(permutation)\n padded = padded.index_select(1, unsorted_indices)\n lengths = torch.tensor(sorted_lengths).index_select(0, unsorted_indices)\n else:\n unsorted_indices = None\n lengths = sorted_lengths\n\n return padded.requires_grad_(), lengths, expected_data, batch_sizes, unsorted_indices\n\n test_cases = [\n # sorted_lengths, should_shuffle\n [[10, 8, 4, 2, 2, 2, 1], False],\n [[11, 10, 8, 6, 4, 3, 1], False],\n [[11, 10, 8, 6, 4, 3, 1], True],\n ]\n\n for test_case, 
batch_first in itertools.product(test_cases, (True, False)):\n sorted_lengths, should_shuffle = test_case\n padded, lengths, expected_data, batch_sizes, unsorted_indices = generate_test_case(\n sorted_lengths, should_shuffle)\n\n src = padded\n if batch_first:\n src = src.transpose(0, 1)\n\n # check output\n packed = rnn_utils.pack_padded_sequence(src, lengths, batch_first=batch_first,\n enforce_sorted=not should_shuffle)\n self.assertEqual(packed.data.data, expected_data)\n self.assertEqual(packed.batch_sizes, batch_sizes)\n self.assertEqual(packed.unsorted_indices, unsorted_indices)\n\n # test inverse\n unpacked, unpacked_len = rnn_utils.pad_packed_sequence(packed, batch_first=batch_first)\n self.assertEqual(unpacked, src)\n self.assertEqual(unpacked_len, lengths)\n\n # check grad\n if padded.grad is not None:\n padded.grad.data.zero_()\n grad_output = unpacked.data.clone().normal_()\n unpacked.backward(grad_output)\n if batch_first:\n grad_output.transpose_(0, 1)\n for i, l in enumerate(lengths):\n self.assertEqual(padded.grad.data[:l, i], grad_output[:l, i])\n if l < 10:\n self.assertEqual(padded.grad.data[l:, i].abs().sum(), 0)\n\n # test error messages\n with self.assertRaisesRegex(RuntimeError, 'You can pass `enforce_sorted=False`'):\n packed = rnn_utils.pack_padded_sequence(torch.randn(3, 3), [1, 3, 2])\n with self.assertRaisesRegex(RuntimeError, 'empty tensor'):\n packed = rnn_utils.pack_padded_sequence(torch.randn(0, 0), [])\n\n def test_LSTM_cell(self):\n # this is just a smoke test; these modules are implemented through\n # autograd so no Jacobian test is needed\n for bias in (True, False):\n input = torch.randn(3, 10)\n hx = torch.randn(3, 20)\n cx = torch.randn(3, 20)\n lstm = nn.LSTMCell(10, 20, bias=bias)\n for _ in range(6):\n hx, cx = lstm(input, (hx, cx))\n\n (hx + cx).sum().backward()\n\n def test_LSTM_cell_forward_input_size(self):\n input = torch.randn(3, 11)\n hx = torch.randn(3, 20)\n cx = torch.randn(3, 20)\n lstm = nn.LSTMCell(10, 20)\n 
self.assertRaises(Exception, lambda: lstm(input, (hx, cx)))\n\n def test_LSTM_cell_forward_hidden_size(self):\n input = torch.randn(3, 10)\n hx = torch.randn(3, 21)\n cx = torch.randn(3, 20)\n lstm = nn.LSTMCell(10, 20)\n self.assertRaises(Exception, lambda: lstm(input, (hx, cx)))\n self.assertRaises(Exception, lambda: lstm(input, (cx, hx)))\n\n\n @unittest.skipIf(not TEST_CUDA, 'CUDA not available')\n def test_pack_sequence_batch_sizes_throw(self):\n with self.assertRaisesRegex(ValueError, r\"batch_sizes should always be on CPU\"):\n m = nn.LSTM(3, 4, bidirectional=True, num_layers=2).to('cuda')\n a = torch.rand(5, 3, device='cuda')\n b = torch.tensor([1, 1, 1, 1, 1], device='cuda')\n input = nn.utils.rnn.PackedSequence(a, b)\n\n def test_Transformer_cell(self):\n # this is just a smoke test; these modules are implemented through\n # autograd so no Jacobian test is needed\n d_model = 512\n nhead = 16\n num_encoder_layers = 4\n num_decoder_layers = 3\n dim_feedforward = 256\n dropout = 0.3\n bsz = 8\n seq_length = 35\n tgt_length = 15\n for batch_first, src_size, tgt_size in zip((True, False),\n [(bsz, seq_length, d_model),\n (seq_length, bsz, d_model)],\n [(bsz, tgt_length, d_model),\n (tgt_length, bsz, d_model)]):\n transformer = nn.Transformer(d_model, nhead, num_encoder_layers, num_decoder_layers,\n dim_feedforward, dropout, batch_first=batch_first)\n src = torch.randn(src_size)\n src_mask = transformer.generate_square_subsequent_mask(seq_length).double()\n tgt = torch.randn(tgt_size)\n tgt_mask = transformer.generate_square_subsequent_mask(tgt_length).double()\n memory_mask = torch.randn(tgt_length, seq_length).double()\n src_key_padding_mask = torch.rand(bsz, seq_length) >= 0.5\n tgt_key_padding_mask = torch.rand(bsz, tgt_length) >= 0.5\n memory_key_padding_mask = torch.rand(bsz, seq_length) >= 0.5\n\n output = transformer(src, tgt,\n src_mask=src_mask,\n tgt_mask=tgt_mask,\n memory_mask=memory_mask,\n src_key_padding_mask=src_key_padding_mask,\n 
                                 tgt_key_padding_mask=tgt_key_padding_mask,
                                 memory_key_padding_mask=memory_key_padding_mask)
            output.sum().backward()

    def test_transformerdecoderlayer(self):
        # this is a deterministic test for TransformerDecoderLayer
        # All weights are set to a fixed cos() pattern so the hard-coded
        # reference outputs below are reproducible across runs.
        d_model = 4
        nhead = 2
        dim_feedforward = 16
        dropout = 0.0
        bsz = 2
        seq_length = 5
        tgt_length = 3

        for batch_first in (False, True):
            # Permute only when batch_first so both layouts share one set of
            # reference tensors.
            def perm_fn(x):
                return x.transpose(1, 0) if batch_first else x

            model = nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
                                               batch_first=batch_first)

            # set constant weights of the model
            for idx, p in enumerate(model.parameters()):
                x = p.data
                sz = x.view(-1).size(0)
                shape = x.shape
                x = torch.cos(torch.arange(0, sz).float().view(shape))
                p.data.copy_(x)

            # deterministic input
            decoder_input = torch.tensor([[[20., 30., 40., 50.]]])
            memory_input = torch.tensor([[[60., 70., 80., 90.]]])
            result = model(decoder_input, memory_input)
            ref_output = torch.tensor([[[2.314351, 0.094805, -0.671322, 0.101977]]])
            result = result.detach().numpy()
            ref_output = ref_output.detach().numpy()
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            np.testing.assert_allclose(result, ref_output, atol=1e-5)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                                  [[11., 12., 13., 14.]]]))
            memory_input = torch.tensor([[[1., 2., 3., 4.]]])
            result = model(decoder_input, memory_input)
            result = result.detach().numpy()
            ref_output = perm_fn(torch.tensor([[[2.422245, 0.051716, -0.606338, -0.024756]],
                                               [[2.422245, 0.051716, -0.606338, -0.024756]]]))
            ref_output = ref_output.detach().numpy()
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            np.testing.assert_allclose(result, ref_output, atol=1e-5)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                                  [[5., 6., 7., 8.]]]))
            memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                                 [[11., 12., 13., 14.]]]))
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.343536, 0.085561, -0.654954, 0.074991]],
                                               [[2.343536, 0.085561, -0.654954, 0.074991]]]))
            result = result.detach().numpy()
            ref_output = ref_output.detach().numpy()
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            np.testing.assert_allclose(result, ref_output, atol=1e-5)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                                   [0.2678, 0.3677, 0.4459, 0.7166]],
                                                  [[0.8100, 0.3716, 0.4096, 0.1976],
                                                   [0.6958, 0.8844, 0.6081, 0.8315]],
                                                  [[0.0494, 0.9343, 0.5955, 0.3830],
                                                   [0.5404, 0.3464, 0.9378, 0.6200]]]))
            memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                                  [0.5387, 0.1655, 0.3565, 0.0471]],
                                                 [[0.8335, 0.2799, 0.5031, 0.2947],
                                                  [0.1402, 0.0318, 0.7636, 0.1346]],
                                                 [[0.6333, 0.9344, 0.1376, 0.9938],
                                                  [0.8924, 0.2872, 0.6692, 0.2944]],
                                                 [[0.9897, 0.6915, 0.3154, 0.1733],
                                                  [0.8645, 0.3513, 0.3064, 0.0767]],
                                                 [[0.8117, 0.2366, 0.4838, 0.7881],
                                                  [0.3718, 0.4945, 0.9511, 0.0864]]]))
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                                [2.431935, 0.028907, -0.599809, -0.072488]],
                                               [[2.428457, 0.027053, -0.602275, -0.073462],
                                                [2.431970, 0.029387, -0.599789, -0.071621]],
                                               [[2.431934, 0.028196, -0.599802, -0.073809],
                                                [2.432306, 0.028858, -0.599542, -0.072846]]]))
            result = result.detach().numpy()
            ref_output = ref_output.detach().numpy()
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            np.testing.assert_allclose(result, ref_output, atol=1e-5)

            # key_padding_mask
            # An all-False mask must not change the output.
            key_padding_mask = torch.zeros(2, 3) == 1
            result = model(decoder_input, memory_input, tgt_key_padding_mask=key_padding_mask)
            ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                                [2.431935, 0.028907, -0.599809, -0.072488]],
                                               [[2.428457, 0.027053, -0.602275, -0.073462],
                                                [2.431970, 0.029387, -0.599789, -0.071621]],
                                               [[2.431934, 0.028196, -0.599802, -0.073809],
                                                [2.432306, 0.028858, -0.599542, -0.072846]]]))
            result = result.detach().numpy()
            ref_output = ref_output.detach().numpy()
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            np.testing.assert_allclose(result, ref_output, atol=1e-5)

            # key_padding_mask
            # Masking target positions changes the expected values.
            key_padding_mask[0, 2] = 1
            key_padding_mask[1, 1] = 1
            key_padding_mask[1, 2] = 1
            result = model(decoder_input, memory_input, tgt_key_padding_mask=key_padding_mask)
            ref_output = perm_fn(torch.tensor([[[2.430025, 0.027643, -0.601164, -0.073476],
                                                [2.4323, 0.029375, -0.599553, -0.071881]],
                                               [[2.428523, 0.026838, -0.602226, -0.07391],
                                                [2.432634, 0.029842, -0.599318, -0.071253]],
                                               [[2.432278, 0.028152, -0.599555, -0.074139],
                                                [2.432659, 0.029244, -0.599294, -0.072382]]]))
            result = result.detach().numpy()
            ref_output = ref_output.detach().numpy()
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            np.testing.assert_allclose(result, ref_output, atol=1e-5)

            # memory_key_padding_mask
            # An all-False memory mask must not change the output.
            key_padding_mask = torch.zeros(2, 5) == 1
            result = model(decoder_input, memory_input, memory_key_padding_mask=key_padding_mask)
            ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],
                                                [2.431935, 0.028907, -0.599809, -0.072488]],
                                               [[2.428457, 0.027053, -0.602275, -0.073462],
                                                [2.431970, 0.029387, -0.599789, -0.071621]],
                                               [[2.431934, 0.028196, -0.599802, -0.073809],
                                                [2.432306, 0.028858, -0.599542, -0.072846]]]))
            result = result.detach().numpy()
            ref_output = ref_output.detach().numpy()
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            np.testing.assert_allclose(result, ref_output, atol=1e-5)

            # memory_key_padding_mask
            key_padding_mask[0, 4] = 1
            key_padding_mask[1, 3] = 1
            key_padding_mask[1, 4] = 1
            result = model(decoder_input, memory_input, memory_key_padding_mask=key_padding_mask)
            ref_output = perm_fn(torch.tensor([[[2.429757, 0.027358, -0.601351, -0.073816],
                                                [2.432692, 0.028583, -0.599263, -0.073634]],
                                               [[2.428247, 0.02662, -0.602419, -0.074123],
                                                [2.432657, 0.029055, -0.599293, -0.072732]],
                                               [[2.431515, 0.027687, -0.600096, -0.074459],
                                                [2.433075, 0.028543, -0.598987, -0.073985]]]))
            result = result.detach().numpy()
            ref_output = ref_output.detach().numpy()
            self.assertEqual(tuple(result.shape), tuple(ref_output.shape))
            np.testing.assert_allclose(result, ref_output, atol=1e-5)

    def test_transformerdecoderlayer_gelu(self):
        # this is a deterministic test for TransformerDecoderLayer with gelu activation
        # The gelu activation is passed in all three accepted forms: string,
        # functional, and module.
        d_model = 4
        nhead = 2
        dim_feedforward = 16
        dropout = 0.0
        bsz = 2
        seq_length = 5
        tgt_length = 3

        for activation, batch_first in product(('gelu', F.gelu, nn.GELU()), (True, False)):
            def perm_fn(x):
                return x.transpose(1, 0) if batch_first else x

            model = nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout,
                                               activation, batch_first=batch_first)

            # set constant weights of the model
            for idx, p in enumerate(model.parameters()):
                x = p.data
                sz = x.view(-1).size(0)
                shape = x.shape
                x = torch.cos(torch.arange(0, sz).float().view(shape))
                p.data.copy_(x)

            # deterministic input
            decoder_input = torch.tensor([[[20., 30., 40., 50.]]])
            memory_input = torch.tensor([[[60., 70., 80., 90.]]])
            result = model(decoder_input, memory_input)
            ref_output = torch.tensor([[[2.306435, 0.095946, -0.675796, 0.10687]]])
            torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                                  [[11., 12., 13., 14.]]]))
            memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]]))
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.415448, 0.054389, -0.610932, -0.0156613]],
                                               [[2.415448, 0.054389, -0.610932, -0.0156613]]]))
            torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],
                                                  [[5., 6., 7., 8.]]]))
            memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],
                                                 [[11., 12., 13., 14.]]]))
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.338531, 0.087709, -0.65776, 0.080646]],
                                               [[2.338531, 0.087709, -0.65776, 0.080646]]]))
            torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)

            # deterministic input
            decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],
                                                   [0.2678, 0.3677, 0.4459, 0.7166]],
                                                  [[0.8100, 0.3716, 0.4096, 0.1976],
                                                   [0.6958, 0.8844, 0.6081, 0.8315]],
                                                  [[0.0494, 0.9343, 0.5955, 0.3830],
                                                   [0.5404, 0.3464, 0.9378, 0.6200]]]))
            memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],
                                                  [0.5387, 0.1655, 0.3565, 0.0471]],
                                                 [[0.8335, 0.2799, 0.5031, 0.2947],
                                                  [0.1402, 0.0318, 0.7636, 0.1346]],
                                                 [[0.6333, 0.9344, 0.1376, 0.9938],
                                                  [0.8924, 0.2872, 0.6692, 0.2944]],
                                                 [[0.9897, 0.6915, 0.3154, 0.1733],
                                                  [0.8645, 0.3513, 0.3064, 0.0767]],
                                                 [[0.8117, 0.2366, 0.4838, 0.7881],
                                                  [0.3718, 0.4945, 0.9511, 0.0864]]]))
            result = model(decoder_input, memory_input)
            ref_output = perm_fn(torch.tensor([[[2.42049104, 0.03443088, -0.60793706, -0.05436271],
                                                [2.42210631, 0.03546578, -0.60679895, -0.05357488]],
                                               [[2.41907674, 0.0336104, -0.60892977, -0.05490462],
                                                [2.42216881, 0.03586554, -0.6067524, -0.05289126]],
                                               [[2.42205716, 0.03488046, -0.60683681, -0.05460596],
                                                [2.42240309, 0.0354595, -0.60659063, -0.05378816]]]))
            torch.testing.assert_close(result, ref_output, rtol=1e-5, atol=0)

    def test_transformerencoder(self):
        # Build a small TransformerEncoderLayer with fixed weights for
        # deterministic comparisons.
        def get_a_test_layer(use_cuda, activation, batch_first=False):
            d_model = 4
            nhead = 2
            dim_feedforward = 16
            dropout = 0.0
            device = torch.device("cuda" if use_cuda else "cpu")

            layer = nn.TransformerEncoderLayer(
                d_model,
                nhead,
                dim_feedforward=dim_feedforward,
                dropout=dropout,
                activation=activation,
                batch_first=batch_first).to(device)

            with torch.no_grad():
                # set constant 
weights of the model\n for idx, p in enumerate(layer.parameters()):\n x = p.data\n sz = x.view(-1).size(0)\n shape = x.shape\n x = torch.cos(torch.arange(0, sz).float().view(shape))\n p.data.copy_(x)\n\n return layer\n\n # this is a deterministic test for TransformerEncoder\n activation = F.relu\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n def _test(batch_first, training):\n def perm_fn(x):\n return x.transpose(1, 0) if batch_first else x\n\n encoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,\n batch_first=batch_first)\n\n model = nn.TransformerEncoder(encoder_layer, 1).to(device)\n if not training:\n model = model.eval()\n\n # deterministic input\n encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],\n [0.5387, 0.1655, 0.3565, 0.0471]],\n [[0.8335, 0.2799, 0.5031, 0.2947],\n [0.1402, 0.0318, 0.7636, 0.1346]],\n [[0.6333, 0.9344, 0.1376, 0.9938],\n [0.8924, 0.2872, 0.6692, 0.2944]],\n [[0.9897, 0.6915, 0.3154, 0.1733],\n [0.8645, 0.3513, 0.3064, 0.0767]],\n [[0.8117, 0.2366, 0.4838, 0.7881],\n [0.3718, 0.4945, 0.9511, 0.0864]]]\n )).to(device)\n result = model(encoder_input)\n ref_output = perm_fn(torch.tensor([[[2.428589, 0.020835, -0.602055, -0.085249],\n [2.427987, 0.021213, -0.602496, -0.084103]],\n [[2.424689, 0.019155, -0.604793, -0.085672],\n [2.413863, 0.022211, -0.612486, -0.072490]],\n [[2.433774, 0.021598, -0.598343, -0.087548],\n [2.425104, 0.019748, -0.604515, -0.084839]],\n [[2.436185, 0.022682, -0.596625, -0.087261],\n [2.433556, 0.021891, -0.598509, -0.086832]],\n [[2.416246, 0.017512, -0.610712, -0.082961],\n [2.422901, 0.024187, -0.606178, -0.074929]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n\n # all 0\n mask = torch.zeros([2, 5]).to(device) == 1\n result = model(encoder_input, src_key_padding_mask=mask)\n 
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n mask[0, 1] = 1\n mask[1, 3] = 1\n mask[1, 4] = 1\n result = model(encoder_input, src_key_padding_mask=mask)\n ref_output = perm_fn(torch.tensor([[[2.429026, 0.020793, -0.601741, -0.085642],\n [2.428811, 0.021445, -0.601912, -0.084252]],\n [[2.425009, 0.019155, -0.604566, -0.085899],\n [2.415408, 0.02249, -0.611415, -0.073]],\n [[2.434199, 0.021682, -0.598039, -0.087699],\n [2.42598, 0.019941, -0.603896, -0.085091]],\n [[2.436457, 0.022736, -0.59643, -0.08736],\n [2.434021, 0.022093, -0.598179, -0.08679]],\n [[2.416531, 0.017498, -0.610513, -0.083181],\n [2.4242, 0.024653, -0.605266, -0.074959]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n\n # test case 2, multiple layers no norm\n model = nn.TransformerEncoder(encoder_layer, 2).to(device)\n if not training:\n model = model.eval()\n result = model(encoder_input, src_key_padding_mask=mask)\n ref_output = perm_fn(torch.tensor([[[2.419051, 0.017446, -0.608738, -0.085003],\n [2.419102, 0.017452, -0.608703, -0.085026]],\n [[2.419043, 0.017445, -0.608744, -0.084999],\n [2.419052, 0.017446, -0.608738, -0.085004]],\n [[2.419067, 0.017448, -0.608727, -0.085010],\n [2.419098, 0.017452, -0.608706, -0.085024]],\n [[2.419072, 0.017449, -0.608724, -0.085012],\n [2.419119, 0.017455, -0.608691, -0.085034]],\n [[2.419019, 0.017442, -0.608761, -0.084989],\n [2.419075, 0.017449, -0.608722, -0.085014]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n\n model = nn.TransformerEncoder(encoder_layer, 6).to(device)\n if not training:\n model = model.eval()\n result = model(encoder_input, src_key_padding_mask=mask)\n ref_output = perm_fn(torch.tensor([[[2.419101, 0.017453, -0.608703, 
-0.085025],\n [2.419101, 0.017453, -0.608704, -0.085025]],\n [[2.419101, 0.017453, -0.608703, -0.085025],\n [2.419101, 0.017453, -0.608704, -0.085025]],\n [[2.419101, 0.017453, -0.608703, -0.085025],\n [2.419101, 0.017453, -0.608704, -0.085025]],\n [[2.419101, 0.017453, -0.608703, -0.085025],\n [2.419101, 0.017453, -0.608704, -0.085025]],\n [[2.419101, 0.017453, -0.608703, -0.085025],\n [2.419101, 0.017453, -0.608704, -0.085025]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n\n # test case 3, multiple layers with norm\n # d_model = 4\n norm = nn.LayerNorm(4)\n model = nn.TransformerEncoder(encoder_layer, 2, norm=norm).to(device)\n if not training:\n model = model.eval()\n result = model(encoder_input, src_key_padding_mask=mask)\n ref_output = perm_fn(torch.tensor([[[1.695949, -0.357635, -0.893077, -0.445238],\n [1.695955, -0.357639, -0.893050, -0.445266]],\n [[1.695948, -0.357634, -0.893082, -0.445233],\n [1.695950, -0.357635, -0.893077, -0.445238]],\n [[1.695951, -0.357636, -0.893069, -0.445246],\n [1.695955, -0.357639, -0.893052, -0.445264]],\n [[1.695952, -0.357636, -0.893066, -0.445249],\n [1.695957, -0.357641, -0.893041, -0.445276]],\n [[1.695946, -0.357632, -0.893095, -0.445220],\n [1.695952, -0.357637, -0.893065, -0.445251]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n\n model = nn.TransformerEncoder(encoder_layer, 6, norm=norm).to(device)\n if not training:\n model = model.eval()\n result = model(encoder_input, src_key_padding_mask=mask)\n ref_output = perm_fn(torch.tensor([[[1.695955, -0.357639, -0.893051, -0.445265],\n [1.695955, -0.357639, -0.893051, -0.445265]],\n [[1.695955, -0.357639, -0.893051, -0.445265],\n [1.695955, -0.357639, -0.893051, -0.445265]],\n [[1.695955, -0.357639, -0.893051, -0.445265],\n [1.695955, -0.357639, -0.893051, 
-0.445265]],\n [[1.695955, -0.357639, -0.893051, -0.445265],\n [1.695955, -0.357639, -0.893051, -0.445265]],\n [[1.695955, -0.357639, -0.893051, -0.445265],\n [1.695955, -0.357639, -0.893051, -0.445265]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n for batch_first in (True, False):\n for training in (True, False):\n # Fast path requires inference mode.\n if training:\n cm = contextlib.nullcontext()\n else:\n cm = torch.no_grad()\n with cm:\n _test(batch_first, training)\n\n def test_transformerdecoder(self):\n def get_a_test_layer(use_cuda, activation, batch_first=False):\n d_model = 4\n nhead = 2\n dim_feedforward = 16\n dropout = 0.0\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n layer = nn.TransformerDecoderLayer(\n d_model,\n nhead,\n dim_feedforward=dim_feedforward,\n dropout=dropout,\n activation=activation,\n batch_first=batch_first).to(device)\n\n with torch.no_grad():\n # set constant weights of the model\n for idx, p in enumerate(layer.parameters()):\n x = p.data\n sz = x.view(-1).size(0)\n shape = x.shape\n x = torch.cos(torch.arange(0, sz).float().view(shape))\n p.data.copy_(x)\n\n return layer\n\n # this is a deterministic test for TransformerDecoder\n for batch_first in (False, True):\n def perm_fn(x):\n return x.transpose(1, 0) if batch_first else x\n activation = F.relu\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n decoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,\n batch_first=batch_first)\n\n model = nn.TransformerDecoder(decoder_layer, 1).to(device)\n\n # deterministic input\n decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)\n memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)\n result = model(decoder_input, memory_input)\n ref_output = torch.tensor(\n [[[2.314351, 0.094805, -0.671322, 0.101977]]]).to(device)\n 
self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)\n\n # deterministic input\n decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],\n [[11., 12., 13., 14.]]])).to(device)\n memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]])).to(device)\n result = model(decoder_input, memory_input)\n ref_output = perm_fn(torch.tensor([[[2.422245, 0.051716, -0.606338, -0.024756]],\n [[2.422245, 0.051716, -0.606338, -0.024756]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)\n\n # deterministic input\n decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],\n [[5., 6., 7., 8.]]])).to(device)\n memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],\n [[11., 12., 13., 14.]]])).to(device)\n result = model(decoder_input, memory_input)\n ref_output = perm_fn(torch.tensor([[[2.343536, 0.085561, -0.654954, 0.074991]],\n [[2.343536, 0.085561, -0.654954, 0.074991]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)\n\n # deterministic input\n decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],\n [0.2678, 0.3677, 0.4459, 0.7166]],\n [[0.8100, 0.3716, 0.4096, 0.1976],\n [0.6958, 0.8844, 0.6081, 0.8315]],\n [[0.0494, 0.9343, 0.5955, 0.3830],\n [0.5404, 0.3464, 0.9378, 0.6200]]]\n )).to(device)\n memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],\n [0.5387, 0.1655, 0.3565, 0.0471]],\n [[0.8335, 0.2799, 0.5031, 0.2947],\n [0.1402, 0.0318, 0.7636, 0.1346]],\n [[0.6333, 0.9344, 0.1376, 0.9938],\n [0.8924, 0.2872, 0.6692, 0.2944]],\n [[0.9897, 0.6915, 0.3154, 0.1733],\n [0.8645, 0.3513, 0.3064, 0.0767]],\n [[0.8117, 0.2366, 0.4838, 0.7881],\n [0.3718, 0.4945, 0.9511, 0.0864]]]\n )).to(device)\n result = model(decoder_input, memory_input)\n ref_output = 
perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],\n [2.431935, 0.028907, -0.599809, -0.072488]],\n [[2.428457, 0.027053, -0.602275, -0.073462],\n [2.431970, 0.029387, -0.599789, -0.071621]],\n [[2.431934, 0.028196, -0.599802, -0.073809],\n [2.432306, 0.028858, -0.599542, -0.072846]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n\n # key_padding_mask\n key_padding_mask = torch.zeros(2, 3).to(device) == 1\n result = model(decoder_input, memory_input,\n tgt_key_padding_mask=key_padding_mask)\n ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, -0.073096],\n [2.431935, 0.028907, -0.599809, -0.072488]],\n [[2.428457, 0.027053, -0.602275, -0.073462],\n [2.431970, 0.029387, -0.599789, -0.071621]],\n [[2.431934, 0.028196, -0.599802, -0.073809],\n [2.432306, 0.028858, -0.599542, -0.072846]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n\n # key_padding_mask\n key_padding_mask[0, 2] = 1\n key_padding_mask[1, 1] = 1\n key_padding_mask[1, 2] = 1\n result = model(decoder_input, memory_input,\n tgt_key_padding_mask=key_padding_mask)\n ref_output = perm_fn(torch.tensor([[[2.430025, 0.027643, -0.601164, -0.073476],\n [2.4323, 0.029375, -0.599553, -0.071881]],\n [[2.428523, 0.026838, -0.602226, -0.07391],\n [2.432634, 0.029842, -0.599318, -0.071253]],\n [[2.432278, 0.028152, -0.599555, -0.074139],\n [2.432659, 0.029244, -0.599294, -0.072382]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n\n # memory_key_padding_mask\n key_padding_mask = torch.zeros(2, 5).to(device) == 1\n result = model(decoder_input, memory_input,\n memory_key_padding_mask=key_padding_mask)\n ref_output = perm_fn(torch.tensor([[[2.430065, 0.027862, -0.601136, 
-0.073096],\n [2.431935, 0.028907, -0.599809, -0.072488]],\n [[2.428457, 0.027053, -0.602275, -0.073462],\n [2.431970, 0.029387, -0.599789, -0.071621]],\n [[2.431934, 0.028196, -0.599802, -0.073809],\n [2.432306, 0.028858, -0.599542, -0.072846]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n\n # memory_key_padding_mask\n key_padding_mask[0, 4] = 1\n key_padding_mask[1, 3] = 1\n key_padding_mask[1, 4] = 1\n result = model(decoder_input,\n memory_input,\n memory_key_padding_mask=key_padding_mask)\n ref_output = perm_fn(torch.tensor([[[2.429757, 0.027358, -0.601351, -0.073816],\n [2.432692, 0.028583, -0.599263, -0.073634]],\n [[2.428247, 0.02662, -0.602419, -0.074123],\n [2.432657, 0.029055, -0.599293, -0.072732]],\n [[2.431515, 0.027687, -0.600096, -0.074459],\n [2.433075, 0.028543, -0.598987, -0.073985]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n\n # multiple layers no norm\n model = nn.TransformerDecoder(decoder_layer, 2).to(device)\n\n # deterministic input\n decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)\n memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)\n result = model(decoder_input, memory_input)\n ref_output = torch.tensor(\n [[[2.31316, 0.0950293, -0.671995, 0.102802]]]).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)\n\n # multiple layers no norm\n model = nn.TransformerDecoder(decoder_layer, 6).to(device)\n\n # deterministic input\n decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],\n [0.2678, 0.3677, 0.4459, 0.7166]],\n [[0.8100, 0.3716, 0.4096, 0.1976],\n [0.6958, 0.8844, 0.6081, 0.8315]],\n [[0.0494, 0.9343, 0.5955, 0.3830],\n [0.5404, 0.3464, 0.9378, 0.6200]]]\n )).to(device)\n 
memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],\n [0.5387, 0.1655, 0.3565, 0.0471]],\n [[0.8335, 0.2799, 0.5031, 0.2947],\n [0.1402, 0.0318, 0.7636, 0.1346]],\n [[0.6333, 0.9344, 0.1376, 0.9938],\n [0.8924, 0.2872, 0.6692, 0.2944]],\n [[0.9897, 0.6915, 0.3154, 0.1733],\n [0.8645, 0.3513, 0.3064, 0.0767]],\n [[0.8117, 0.2366, 0.4838, 0.7881],\n [0.3718, 0.4945, 0.9511, 0.0864]]]\n )).to(device)\n result = model(decoder_input, memory_input)\n ref_output = perm_fn(torch.tensor([[[2.42794, 0.026164, -0.60263, -0.0747591],\n [2.43113, 0.0279516, -0.600376, -0.0736896]],\n [[2.42794, 0.026164, -0.60263, -0.0747591],\n [2.43113, 0.0279516, -0.600376, -0.0736896]],\n [[2.42794, 0.026164, -0.60263, -0.0747591],\n [2.43113, 0.0279516, -0.600376, -0.0736896]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n\n # multiple layers with norm\n # d_model = 4\n norm = nn.LayerNorm(4)\n model = nn.TransformerDecoder(decoder_layer, 2, norm=norm).to(device)\n\n # deterministic input\n decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)\n memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)\n result = model(decoder_input, memory_input)\n ref_output = torch.tensor(\n [[[1.66166, -0.326986, -1.01466, -0.320017]]]).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)\n\n # multiple layers with norm\n model = nn.TransformerDecoder(decoder_layer, 6, norm=norm).to(device)\n\n # deterministic input\n decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],\n [0.2678, 0.3677, 0.4459, 0.7166]],\n [[0.8100, 0.3716, 0.4096, 0.1976],\n [0.6958, 0.8844, 0.6081, 0.8315]],\n [[0.0494, 0.9343, 0.5955, 0.3830],\n [0.5404, 0.3464, 0.9378, 0.6200]]]\n )).to(device)\n memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],\n 
[0.5387, 0.1655, 0.3565, 0.0471]],\n [[0.8335, 0.2799, 0.5031, 0.2947],\n [0.1402, 0.0318, 0.7636, 0.1346]],\n [[0.6333, 0.9344, 0.1376, 0.9938],\n [0.8924, 0.2872, 0.6692, 0.2944]],\n [[0.9897, 0.6915, 0.3154, 0.1733],\n [0.8645, 0.3513, 0.3064, 0.0767]],\n [[0.8117, 0.2366, 0.4838, 0.7881],\n [0.3718, 0.4945, 0.9511, 0.0864]]]\n )).to(device)\n result = model(decoder_input, memory_input)\n ref_output = perm_fn(torch.tensor([[[1.69559, -0.357291, -0.894741, -0.443553],\n [1.69571, -0.357363, -0.894154, -0.444196]],\n [[1.69559, -0.357291, -0.894741, -0.443553],\n [1.69571, -0.357363, -0.894154, -0.444196]],\n [[1.69559, -0.357291, -0.894741, -0.443553],\n [1.69571, -0.357363, -0.894154, -0.444196]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n\n # gelu activation test cases\n activation = \"gelu\"\n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n decoder_layer = get_a_test_layer(use_cuda=use_cuda, activation=activation,\n batch_first=batch_first)\n\n model = nn.TransformerDecoder(decoder_layer, 1).to(device)\n\n # deterministic input\n decoder_input = torch.tensor([[[20., 30., 40., 50.]]]).to(device)\n memory_input = torch.tensor([[[60., 70., 80., 90.]]]).to(device)\n result = model(decoder_input, memory_input)\n ref_output = torch.tensor([[[2.306435, 0.095946, -0.675796, 0.10687]]]).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-3)\n\n # deterministic input\n decoder_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],\n [[11., 12., 13., 14.]]])).to(device)\n memory_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]]])).to(device)\n result = model(decoder_input, memory_input)\n ref_output = perm_fn(torch.tensor([[[2.415448, 0.054389, -0.610932, -0.0156613]],\n [[2.415448, 0.054389, -0.610932, 
-0.0156613]]])).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)\n\n # deterministic input\n decoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],\n [[5., 6., 7., 8.]]])).to(device)\n memory_input = perm_fn(torch.tensor([[[9., 10., 11., 12.]],\n [[11., 12., 13., 14.]]])).to(device)\n result = model(decoder_input, memory_input)\n ref_output = perm_fn(torch.tensor([[[2.338531, 0.087709, -0.65776, 0.080646]],\n [[2.338531, 0.087709, -0.65776, 0.080646]]])).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-4)\n\n # deterministic input\n decoder_input = perm_fn(torch.tensor([[[0.4517, 0.6793, 0.5313, 0.0034],\n [0.2678, 0.3677, 0.4459, 0.7166]],\n [[0.8100, 0.3716, 0.4096, 0.1976],\n [0.6958, 0.8844, 0.6081, 0.8315]],\n [[0.0494, 0.9343, 0.5955, 0.3830],\n [0.5404, 0.3464, 0.9378, 0.6200]]]\n )).to(device)\n memory_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],\n [0.5387, 0.1655, 0.3565, 0.0471]],\n [[0.8335, 0.2799, 0.5031, 0.2947],\n [0.1402, 0.0318, 0.7636, 0.1346]],\n [[0.6333, 0.9344, 0.1376, 0.9938],\n [0.8924, 0.2872, 0.6692, 0.2944]],\n [[0.9897, 0.6915, 0.3154, 0.1733],\n [0.8645, 0.3513, 0.3064, 0.0767]],\n [[0.8117, 0.2366, 0.4838, 0.7881],\n [0.3718, 0.4945, 0.9511, 0.0864]]]\n )).to(device)\n result = model(decoder_input, memory_input)\n ref_output = perm_fn(torch.tensor([[[2.42049104, 0.03443088, -0.60793706, -0.05436271],\n [2.42210631, 0.03546578, -0.60679895, -0.05357488]],\n [[2.41907674, 0.0336104, -0.60892977, -0.05490462],\n [2.42216881, 0.03586554, -0.6067524, -0.05289126]],\n [[2.42205716, 0.03488046, -0.60683681, -0.05460596],\n [2.42240309, 0.0354595, -0.60659063, -0.05378816]]]\n )).to(device)\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n torch.testing.assert_close(result, ref_output, rtol=1e-7, atol=1e-5)\n\n 
@unittest.skipIf(not (TEST_CUDNN and TEST_MULTIGPU), 'CUDNN or multi-gpu not available')\n def test_cudnn_rnn_dropout_states_device(self):\n rnn = nn.RNN(10, 20, num_layers=2, dropout=.5)\n device = 1\n input = torch.randn(5, 4, 10).cuda(device)\n rnn.cuda(device)\n hx = torch.randn(2, 4, 20).cuda(device)\n output = rnn(input, hx)\n\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n @skipIfRocm\n def test_cudnn_weight_format(self):\n rnns = [\n nn.LSTM(10, 20, batch_first=True),\n nn.LSTM(10, 20, batch_first=True, proj_size=10),\n nn.GRU(10, 20, batch_first=True),\n nn.RNN(10, 20, batch_first=True)\n ]\n first_warn = True\n for rnn in rnns:\n rnn.cuda()\n input = torch.randn(5, 4, 10, requires_grad=True, device=\"cuda\")\n hx = torch.randn(1, 5, 20, requires_grad=True, device=\"cuda\")\n all_vars = [input, hx] + list(rnn.parameters())\n if isinstance(rnn, nn.LSTM):\n # LSTM with projections has different hx size\n if rnn.proj_size > 0:\n hx = torch.randn(1, 5, 10, requires_grad=True, device=\"cuda\")\n all_vars[1] = hx\n cx = torch.randn(1, 5, 20, requires_grad=True, device=\"cuda\")\n all_vars[2:2] = [cx]\n hx = (hx, cx)\n\n output = rnn(input, hx)\n output[0].sum().backward()\n grads = [v.grad.data.clone() for v in all_vars]\n for v in all_vars:\n v.grad.data.zero_()\n\n # Weights will no longer view onto the same chunk of memory\n weight = all_vars[4]\n weight_data = weight.data.clone()\n with torch.no_grad():\n weight.set_(weight_data)\n\n for _ in range(2):\n with warnings.catch_warnings(record=True) as w:\n output_noncontig = rnn(input, hx)\n if first_warn:\n self.assertEqual(len(w), 1)\n self.assertIn('weights are not part of single contiguous chunk of memory', w[0].message.args[0])\n first_warn = False\n warnings.resetwarnings()\n output_noncontig[0].sum().backward()\n grads_noncontig = [v.grad.data.clone() for v in all_vars]\n for v in all_vars:\n v.grad.data.zero_()\n self.assertEqual(output, output_noncontig)\n self.assertEqual(grads_noncontig, 
grads)\n\n # Make sure these still share storage\n weight_data[:] = 4\n self.assertEqual(weight_data, all_vars[4].data)\n\n @unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')\n def test_cudnn_weight_tying(self):\n rnns = [\n nn.LSTM(10, 20, batch_first=True, bidirectional=True),\n nn.LSTM(10, 20, batch_first=True, bidirectional=True, proj_size=10),\n nn.GRU(10, 20, batch_first=True, bidirectional=True),\n nn.RNN(10, 20, batch_first=True, bidirectional=True)\n ]\n for rnn in rnns:\n rnn.bias_ih_l0_reverse = rnn.bias_ih_l0\n rnn.cuda()\n input = torch.randn(5, 4, 10, requires_grad=True, device=\"cuda\")\n hx = torch.randn(2, 5, 20, requires_grad=True, device=\"cuda\")\n all_vars = [input, hx] + list(rnn.parameters())\n opt = torch.optim.SGD(rnn.parameters(), lr=0.1)\n opt.zero_grad()\n if isinstance(rnn, nn.LSTM):\n # LSTM with projections has different hx size\n if rnn.proj_size > 0:\n hx = torch.randn(2, 5, 10, requires_grad=True, device=\"cuda\")\n all_vars[1] = hx\n cx = torch.randn(2, 5, 20, requires_grad=True, device=\"cuda\")\n all_vars[2:2] = [cx]\n hx = (hx, cx)\n\n with warnings.catch_warnings(record=True) as w:\n output = rnn(input, hx)\n output[0].sum().backward()\n\n opt.step()\n with warnings.catch_warnings(record=True) as w:\n output_cuda = rnn(input, hx)\n rnn.cpu()\n hx = (hx[0].cpu(), hx[1].cpu()) if isinstance(rnn, nn.LSTM) else hx.cpu()\n output_cpu = rnn(input.cpu(), hx)\n self.assertEqual(output_cuda, output_cpu)\n\n def test_transformer_args_check(self):\n model_name = 'Transformer'\n d_model = 128\n nhead = 4\n num_encoder_layers = 2\n num_decoder_layers = 3\n dim_feedforward = 65\n dropout = 0.3\n bsz = 3\n seq_len = 35\n tgt_len = 15\n activations = [F.relu, F.gelu]\n\n wrong_bsz = 7\n wrong_d_model = 63\n wrong_nhead = 5\n wrong_activation = \"abc\"\n\n def test(encoder_input_shape, decoder_input_shape,\n src_mask_len=None, tgt_mask_len=None, memory_mask_size=None,\n src_key_padding_mask_size=None, tgt_key_padding_mask_size=None,\n 
memory_key_padding_mask_size=None):\n encoder_input = torch.randn(encoder_input_shape)\n decoder_input = torch.randn(decoder_input_shape)\n model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers,\n num_decoder_layers, dim_feedforward, dropout)\n\n if src_mask_len is not None:\n src_mask = model.generate_square_subsequent_mask(src_mask_len)\n else:\n src_mask = None\n\n if tgt_mask_len is not None:\n tgt_mask = model.generate_square_subsequent_mask(tgt_mask_len)\n else:\n tgt_mask = None\n\n if memory_mask_size is not None:\n memory_task = torch.rand(memory_mask_size)\n else:\n memory_task = None\n\n if src_key_padding_mask_size is not None:\n src_key_padding_mask = torch.rand(src_key_padding_mask_size) >= 0.5\n else:\n src_key_padding_mask = None\n\n if tgt_key_padding_mask_size is not None:\n tgt_key_padding_mask = torch.rand(tgt_key_padding_mask_size) >= 0.5\n else:\n tgt_key_padding_mask = None\n\n if memory_key_padding_mask_size is not None:\n memory_key_padding_mask = torch.rand(memory_key_padding_mask_size) >= 0.5\n else:\n memory_key_padding_mask = None\n\n with self.assertRaises(RuntimeError):\n model(encoder_input, decoder_input,\n src_mask=src_mask,\n tgt_mask=tgt_mask,\n memory_mask=memory_task,\n src_key_padding_mask=src_key_padding_mask,\n tgt_key_padding_mask=tgt_key_padding_mask,\n memory_key_padding_mask=memory_key_padding_mask)\n\n\n correct_encoder_input_shape = (seq_len, bsz, d_model)\n correct_decoder_input_shape = (tgt_len, bsz, d_model)\n\n def update_shape(shape, dim, new_dim_size):\n new_shape = list(shape)\n new_shape[dim] = new_dim_size\n return tuple(new_shape)\n\n # Incorrect encoder_input batch size\n encoder_input_shape = update_shape(correct_encoder_input_shape, 1, wrong_bsz)\n decoder_input_shape = correct_decoder_input_shape\n test(encoder_input_shape, decoder_input_shape)\n\n # Incorrect decoder_input batch size\n encoder_input_shape = correct_encoder_input_shape\n decoder_input_shape = 
update_shape(correct_decoder_input_shape, 1, wrong_bsz)\n test(encoder_input_shape, decoder_input_shape)\n\n # Incorrect encoder_input input size\n encoder_input_shape = update_shape(correct_encoder_input_shape, 2, wrong_d_model)\n decoder_input_shape = correct_decoder_input_shape\n test(encoder_input_shape, decoder_input_shape)\n\n # Incorrect decoder_input input size\n encoder_input_shape = correct_encoder_input_shape\n decoder_input_shape = update_shape(correct_decoder_input_shape, 2, wrong_d_model)\n test(encoder_input_shape, decoder_input_shape)\n\n # Incorrect nhead\n encoder_input_shape = correct_encoder_input_shape\n decoder_input_shape = correct_decoder_input_shape\n with self.assertRaises(AssertionError):\n model = getattr(nn, model_name)(d_model, wrong_nhead, num_encoder_layers,\n num_decoder_layers, dim_feedforward, dropout)\n\n # Incorrect src_mask\n encoder_input_shape = correct_encoder_input_shape\n decoder_input_shape = correct_decoder_input_shape\n wrong_src_mask_size = seq_len + 1\n test(encoder_input_shape, decoder_input_shape, src_mask_len=wrong_src_mask_size)\n\n # Incorrect tgt_mask\n encoder_input_shape = correct_encoder_input_shape\n decoder_input_shape = correct_decoder_input_shape\n wrong_tgt_mask_size = tgt_len + 1\n test(encoder_input_shape, decoder_input_shape, tgt_mask_len=wrong_tgt_mask_size)\n\n # Incorrect memory_mask\n encoder_input_shape = correct_encoder_input_shape\n decoder_input_shape = correct_decoder_input_shape\n wrong_tgt_mask_size = tgt_len + 1\n test(encoder_input_shape, decoder_input_shape,\n memory_mask_size=(wrong_tgt_mask_size, wrong_src_mask_size))\n\n # Incorrect src_key_padding_mask\n encoder_input_shape = correct_encoder_input_shape\n decoder_input_shape = correct_decoder_input_shape\n with self.assertRaises(AssertionError):\n test(encoder_input_shape, decoder_input_shape,\n src_key_padding_mask_size=(wrong_bsz, wrong_src_mask_size))\n\n # Incorrect tgt_key_padding_mask\n encoder_input_shape = 
correct_encoder_input_shape
        decoder_input_shape = correct_decoder_input_shape
        with self.assertRaises(AssertionError):
            test(encoder_input_shape, decoder_input_shape,
                 tgt_key_padding_mask_size=(wrong_bsz, wrong_tgt_mask_size))

        # Incorrect memory_key_padding_mask
        encoder_input_shape = correct_encoder_input_shape
        decoder_input_shape = correct_decoder_input_shape
        with self.assertRaises(AssertionError):
            test(encoder_input_shape, decoder_input_shape,
                 memory_key_padding_mask_size=(wrong_bsz, wrong_src_mask_size))

        # Correct activations
        for activation in activations:
            model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers, num_decoder_layers,
                                            dim_feedforward, dropout, activation)
        # Incorrect activation
        with self.assertRaises(RuntimeError):
            model = getattr(nn, model_name)(d_model, nhead, num_encoder_layers, num_decoder_layers,
                                            dim_feedforward, dropout, wrong_activation)

    def test_transformer_layer_args_check(self):
        """Constructor-level checks for Transformer encoder/decoder layers:
        every supported activation builds cleanly, an unknown activation
        string raises RuntimeError."""
        model_names = ['TransformerEncoderLayer', 'TransformerDecoderLayer']
        d_model = 128
        nhead = 4
        dim_feedforward = 65
        dropout = 0.3
        bsz = 3
        seq_len = 35
        tgt_len = 15
        activations = [F.relu, F.gelu]

        wrong_activation = "abc"

        encoder_input_shape = (seq_len, bsz, d_model)
        decoder_input_shape = (tgt_len, bsz, d_model)

        encoder_input = torch.randn(encoder_input_shape)
        decoder_input = torch.randn(decoder_input_shape)

        # Correct activations: construction must succeed for both layer types.
        for model_name in model_names:
            for activation in activations:
                model = getattr(nn, model_name)(d_model, nhead, dim_feedforward,
                                                dropout, activation)
        # Incorrect activation
        for model_name in model_names:
            with self.assertRaises(RuntimeError):
                model = getattr(nn, model_name)(d_model, nhead, dim_feedforward,
                                                dropout, wrong_activation)

    def test_rnn_args_check(self):
        """Shape validation for RNN/GRU/LSTM forward: any mismatched input or
        hidden dimension must raise RuntimeError."""
        input_size = 3
        hidden_size = 5
        num_layers = 2
        batch_size = 4
        seq_len = 6
        num_directions = 1
        bad_size = 7  # prime number so that no size can divide it.

        def test(input_shape, hidden_shape, mode):
            # Each (input, hidden) pair produced for this shape combo must fail.
            for input, hidden in get_inputs(input_shape, hidden_shape, mode):
                model = getattr(nn, mode)(input_size, hidden_size, num_layers)
                self.assertRaises(RuntimeError, lambda: model(input, hidden))

        correct_input_shape = (seq_len, batch_size, input_size)
        correct_hidden_shape = (num_layers * num_directions, batch_size, hidden_size)

        def update_shape(shape, dim, new_dim_size):
            # Return `shape` with dimension `dim` replaced by `new_dim_size`.
            new_shape = list(shape)
            new_shape[dim] = new_dim_size
            return tuple(new_shape)

        def get_inputs(input_shape, hidden_shape, mode):
            '''returns list( tuple(input, hidden) )
            where input, hidden are inputs to a model'''
            input = torch.randn(input_shape)
            hidden = torch.randn(hidden_shape)
            if mode != 'LSTM':
                return [(input, hidden)]
            if hidden_shape == correct_hidden_shape:
                return [(input, (hidden, hidden))]
            # For LSTM with a bad hidden shape, exercise the bad tensor in
            # both the h and c slots of the state tuple.
            good_hidden = torch.randn(correct_hidden_shape)
            return [
                (input, (hidden, good_hidden)),
                (input, (good_hidden, hidden)),
            ]

        rnn_modes = ['RNN', 'GRU', 'LSTM']
        for mode in rnn_modes:
            # Incorrect input batch size
            input_shape = update_shape(correct_input_shape, 1, bad_size)
            hidden_shape = correct_hidden_shape
            test(input_shape, hidden_shape, mode)

            # Incorrect hidden batch size
            input_shape = correct_input_shape
            hidden_shape = update_shape(correct_hidden_shape, 1, bad_size)
            test(input_shape, hidden_shape, mode)

            # Incorrect input size
            input_shape = update_shape(correct_input_shape, 2, bad_size)
            hidden_shape = correct_hidden_shape
            test(input_shape, hidden_shape, mode)

            # Incorrect hidden size
            input_shape = correct_input_shape
            hidden_shape = update_shape(correct_hidden_shape, 2, bad_size)
            test(input_shape, hidden_shape, mode)

            # Incorrect hidden[0]
            input_shape = correct_input_shape
            hidden_shape = update_shape(correct_hidden_shape, 0, bad_size)
            test(input_shape, hidden_shape, mode)

    def test_projections_lstm_args_check(self):
        """Same shape validation as test_rnn_args_check, but for LSTM with
        proj_size: h-state uses proj_size, c-state uses hidden_size."""
        input_size = 3
        hidden_size = 5
        proj_size = 2
        num_layers = 2
        batch_size = 4
        seq_len = 6
        num_directions = 1
        bad_size = 7  # prime number so that no size can divide it.

        def test(input_shape, hidden_h_shape, hidden_c_shape):
            for input, hidden in get_inputs(input_shape, hidden_h_shape, hidden_c_shape):
                model = nn.LSTM(input_size, hidden_size, num_layers, proj_size=proj_size)
                self.assertRaises(RuntimeError, lambda: model(input, hidden))

        correct_input_shape = (seq_len, batch_size, input_size)
        correct_hidden_h_shape = (num_layers * num_directions, batch_size, proj_size)
        correct_hidden_c_shape = (num_layers * num_directions, batch_size, hidden_size)

        def update_shape(shape, dim, new_dim_size):
            # Return `shape` with dimension `dim` replaced by `new_dim_size`.
            new_shape = list(shape)
            new_shape[dim] = new_dim_size
            return tuple(new_shape)

        def get_inputs(input_shape, hidden_h_shape, hidden_c_shape):
            '''returns list( tuple(input, hidden) )
            where input, hidden are inputs to a model'''
            input = torch.randn(input_shape)
            hidden_h = torch.randn(hidden_h_shape)
            hidden_c = torch.randn(hidden_c_shape)
            return [(input, (hidden_h, hidden_c))]

        # Incorrect input batch size
        input_shape = update_shape(correct_input_shape, 1, bad_size)
        test(input_shape, correct_hidden_h_shape, correct_hidden_c_shape)

        # Incorrect hidden batch size
        input_shape = correct_input_shape
        hidden_h_shape = update_shape(correct_hidden_h_shape, 1, bad_size)
        hidden_c_shape = update_shape(correct_hidden_c_shape, 1, bad_size)
        test(input_shape, hidden_h_shape, hidden_c_shape)

        # Incorrect input size
        input_shape = update_shape(correct_input_shape, 2, bad_size)
        test(input_shape, correct_hidden_h_shape, correct_hidden_c_shape)

        # Incorrect hidden size
        input_shape = correct_input_shape
        hidden_h_shape = update_shape(correct_hidden_h_shape, 2, bad_size)
        hidden_c_shape = update_shape(correct_hidden_c_shape, 2, bad_size)
        test(input_shape, hidden_h_shape, hidden_c_shape)

        # Incorrect hidden[0]
        input_shape = correct_input_shape
        hidden_h_shape = update_shape(correct_hidden_h_shape, 0, bad_size)
        hidden_c_shape = update_shape(correct_hidden_c_shape, 0, bad_size)
        test(input_shape, hidden_h_shape, hidden_c_shape)

        # Incorrect proj size = hidden size
        input_shape = correct_input_shape
        hidden_h_shape = update_shape(correct_hidden_h_shape, 0, hidden_size)
        hidden_c_shape = correct_hidden_c_shape
        test(input_shape, hidden_h_shape, hidden_c_shape)

        # Incorrect proj size != hidden size
        input_shape = correct_input_shape
        hidden_h_shape = update_shape(correct_hidden_h_shape, 0, bad_size)
        hidden_c_shape = correct_hidden_c_shape
        test(input_shape, hidden_h_shape, hidden_c_shape)

        # Incorrect cell size != hidden size
        input_shape = correct_input_shape
        hidden_h_shape = correct_hidden_h_shape
        hidden_c_shape = update_shape(correct_hidden_c_shape, 0, bad_size)
        test(input_shape, hidden_h_shape, hidden_c_shape)

    @unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
    def test_rnn_check_device(self):
        """RNN/GRU/LSTM must reject inputs, hidden states and parameters that
        live on different devices with a descriptive RuntimeError."""
        input_size = 3
        hidden_size = 5
        num_layers = 2
        batch_size = 4
        seq_len = 6
        num_directions = 1

        correct_input_shape = (seq_len, batch_size, input_size)
        correct_hidden_shape = (num_layers * num_directions, batch_size, hidden_size)
        rnn_modes = ['RNN', 'GRU', 'LSTM']

        for mode in rnn_modes:
            model = getattr(nn, mode)(input_size, hidden_size, num_layers)
            input = torch.randn(correct_input_shape)
            hidden = torch.randn(correct_hidden_shape)

            # input and weights are not at the same device
            with self.assertRaisesRegex(RuntimeError,
                                        "Input and parameter tensors are not at the same device"):
                model(input.to('cuda:0'))

            # input and hiddens are not at the same device
            with self.assertRaisesRegex(RuntimeError,
                                        r"Input and hidden tensors are not at the same device"):
                if mode == 'LSTM':
                    model(input, (hidden.to('cuda:0'), hidden.to('cuda:0')))
                else:
                    model(input, (hidden.to('cuda:0')))

            # hidden tensors are not at the same CUDA device
            if mode == 
'LSTM':
                with self.assertRaisesRegex(RuntimeError,
                                            "Input and hidden tensors are not at the same device"):
                    model(input.to('cuda:0'), (hidden.to('cuda:0'), hidden.to('cuda:1')))

    @unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
    def test_projections_lstm_check_device(self):
        """Device-mismatch errors for LSTM with proj_size (h and c states have
        different feature sizes, so both are exercised)."""
        input_size = 3
        hidden_size = 5
        proj_size = 2
        num_layers = 2
        batch_size = 4
        seq_len = 6
        num_directions = 1

        correct_input_shape = (seq_len, batch_size, input_size)
        correct_hidden_h_shape = (num_layers * num_directions, batch_size, proj_size)
        correct_hidden_c_shape = (num_layers * num_directions, batch_size, hidden_size)

        model = nn.LSTM(input_size, hidden_size, num_layers, proj_size=proj_size)
        input = torch.randn(correct_input_shape)
        hidden_h = torch.randn(correct_hidden_h_shape)
        hidden_c = torch.randn(correct_hidden_c_shape)

        # input and weights are not at the same device
        with self.assertRaisesRegex(RuntimeError,
                                    "Input and parameter tensors are not at the same device"):
            model(input.to('cuda:0'))

        # input and hiddens are not at the same device
        with self.assertRaisesRegex(RuntimeError,
                                    r"Input and hidden tensors are not at the same device"):
            model(input, (hidden_h.to('cuda:0'), hidden_c.to('cuda:0')))

        # hidden tensors are not at the same CUDA device
        with self.assertRaisesRegex(RuntimeError,
                                    "Input and hidden tensors are not at the same device"):
            model(input.to('cuda:0'), (hidden_h.to('cuda:0'), hidden_c.to('cuda:1')))

    def test_rnn_initial_hidden_state(self):
        """Passing an all-zeros hidden state must be equivalent to passing no
        hidden state at all (the implicit default)."""
        rnn_modes = ['RNN', 'GRU', 'LSTM']
        for mode in rnn_modes:
            rnn = getattr(nn, mode)(30, 20, 2)
            input = torch.randn(10, 32, 30)
            hidden = torch.zeros(2, 32, 20)

            if mode == 'LSTM':
                hidden = (hidden, hidden)
            output1, hidden1 = rnn(input, hidden)
            output2, hidden2 = rnn(input)
            self.assertEqual(output1, output2)
            self.assertEqual(hidden1, hidden2)

    def test_projections_lstm_initial_hidden_state(self):
        # Same zeros-vs-default equivalence, for projected LSTM (h uses
        # proj_size=10, c uses hidden_size=20), uni- and bidirectional.
        for bidir in [False, True]:
            rnn = nn.LSTM(30, 20, 2, bidirectional=bidir, proj_size=10)
            num_dirs = 2 if bidir else 1
            input = torch.randn(10, 32, 30)
            hidden_h = torch.zeros(2 * num_dirs, 32, 10)
            hidden_c = torch.zeros(2 * num_dirs, 32, 20)
            hidden = (hidden_h, hidden_c)
            output1, hidden1 = rnn(input, hidden)
            output2, hidden2 = rnn(input)
            self.assertEqual(output1, output2)
            self.assertEqual(hidden1, hidden2)

    def test_projections_errors_on_gru_and_rnn(self):
        # proj_size is an LSTM-only constructor argument.
        error_msg = "proj_size argument is only supported for LSTM, not RNN or GRU"
        for mode in ['RNN', 'GRU']:
            with self.assertRaisesRegex(ValueError, error_msg):
                rnn = getattr(nn, mode)(30, 20, 2, proj_size=10)

    def _test_RNN_cpu_vs_cudnn(self, dropout, dtype=torch.double):
        """Run identical forward+backward passes of every RNN flavor on CPU and
        on cuDNN and compare outputs, hidden states and all gradients."""

        def forward_backward(cuda, rnn, input_val, grad_output, weights_val, hx_val, grad_hy,
                             cx_val=None, grad_cy=None):
            # Copies weights_val into rnn, runs one forward/backward on the
            # requested device, and returns tensors keyed for comparison.
            is_lstm = isinstance(rnn, nn.LSTM)

            for x_layer, y_layer in zip(rnn.all_weights, weights_val):
                for x, y in zip(x_layer, y_layer):
                    x.data.copy_(y.data)

            if isinstance(input_val, rnn_utils.PackedSequence):
                input = rnn_utils.PackedSequence(
                    input_val.data.data.requires_grad_(True), input_val.batch_sizes)
                input_var = input.data
            else:
                input = input_val.clone().requires_grad_(True)
                input_var = input
            if is_lstm:
                # Without an explicit cell state, derive c0 from h0 (+1) so the
                # two states differ.
                if cx_val is None:
                    hx = (hx_val.clone().requires_grad_(True),
                          hx_val.add(1).requires_grad_(True))
                else:
                    hx = (hx_val.clone().requires_grad_(True),
                          cx_val.add(1).requires_grad_(True))
            else:
                hx = hx_val.clone().requires_grad_(True)

            if cuda:
                rnn.cuda()
                input_var.data = input_var.data.cuda()
                if is_lstm:
                    hx[0].data = hx[0].data.cuda()
                    hx[1].data = hx[1].data.cuda()
                else:
                    hx.data = hx.data.cuda()
                grad_hy = grad_hy.cuda()
                if grad_cy is not None:
                    grad_cy = grad_cy.cuda()
                grad_output = grad_output.cuda()

            output, hy = rnn(input, hx)

            if isinstance(output, rnn_utils.PackedSequence):
                output = output.data

            if is_lstm:
                if grad_cy is None:
                    torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_hy + 1])
                else:
                    torch.autograd.backward([output, hy[0], hy[1]], [grad_output, grad_hy, grad_cy + 1])
            else:
                torch.autograd.backward([output, hy], [grad_output, grad_hy])

            return {'output': output.data,
                    'hy': hy[0].data if is_lstm else hy.data,
                    'weights': rnn.all_weights,
                    'grad_input': input_var.grad.data,
                    'grad_hx': hx[0].grad.data if is_lstm else hx.grad.data,
                    'cy': hy[1].data if is_lstm else None,
                    'grad_cx': hx[1].grad.data if is_lstm else None}

        input_size = 10
        hidden_size = 6
        proj_size = 3
        num_layers = 2
        seq_length = 7
        batch = 6

        def make_noncontig(tensor):
            # Stack-and-select yields a non-contiguous view with the same values.
            ndim = tensor.dim()
            return torch.stack([tensor.clone().zero_(), tensor], ndim).select(ndim, 1)

        def compare_cpu_gpu(outputs_cpu, outputs_gpu):
            self.assertEqual(list(outputs_cpu.keys()), list(outputs_gpu.keys()))
            for key in outputs_cpu.keys():
                if key != 'weights':
                    self.assertEqual(outputs_cpu[key], outputs_gpu[key], atol=5e-5, rtol=0, msg=key)

            # check grad weights separately, as nested dict
            for cpu_layer_weight, gpu_layer_weight in zip(outputs_cpu['weights'], outputs_gpu['weights']):
                for (cpu_weight, gpu_weight) in zip(cpu_layer_weight, gpu_layer_weight):
                    self.assertEqual(cpu_weight.grad.data, gpu_weight.grad.data, atol=5e-5, rtol=0)

        for module in (nn.RNN, nn.LSTM, nn.GRU):
            for bias, bidirectional, batch_first, contig, variable_len, lens_as_tensor \
                    in product((True, False), repeat=6):

                num_directions = 2 if bidirectional else 1
                if batch_first:
                    input_val = torch.randn(batch, seq_length, input_size, dtype=dtype)
                    grad_output = torch.randn(batch, seq_length, hidden_size * num_directions, dtype=dtype)
                else:
                    input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
                    grad_output = torch.randn(seq_length, batch, hidden_size * num_directions, dtype=dtype)

                hx_val = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
                grad_hy = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)

                if not contig:
                    grad_output = make_noncontig(grad_output)
                    grad_hy = make_noncontig(grad_hy)
                    # NOTE(review): this assigns to `input_var`, which is never
                    # read again here — `input_val` stays contiguous. Possibly
                    # intended to be `input_val = make_noncontig(input_val)`;
                    # confirm before changing, as it would alter test coverage.
                    input_var = make_noncontig(input_val)
                    hx_val = make_noncontig(hx_val)

                if variable_len:
                    lengths = [7, 5, 5, 2, 1, 1]
                    if lens_as_tensor:
                        lengths = torch.tensor(lengths, dtype=torch.long)
                    input_val = rnn_utils.pack_padded_sequence(input_val, lengths, batch_first=batch_first)
                    grad_output = rnn_utils.pack_padded_sequence(grad_output, lengths, batch_first=batch_first).data

                rnn = module(input_size,
                             hidden_size,
                             num_layers,
                             bias=bias,
                             dropout=dropout,
                             bidirectional=bidirectional,
                             batch_first=batch_first).to(dtype)

                outputs_cpu = forward_backward(
                    False, rnn, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

                rnn_gpu = module(input_size,
                                 hidden_size,
                                 num_layers,
                                 bias=bias,
                                 dropout=dropout,
                                 bidirectional=bidirectional,
                                 batch_first=batch_first).to(dtype)

                outputs_gpu = forward_backward(
                    True, rnn_gpu, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

                compare_cpu_gpu(outputs_cpu, outputs_gpu)

        # NOTE(review): `bias` and `num_directions` below carry whatever values
        # the last iteration of the product loop left behind — confirm this is
        # intentional rather than looping over them again.
        for nonlinearity in ('tanh', 'relu'):
            hx_val = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
            input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
            grad_output = torch.randn(
                seq_length, batch, hidden_size * num_directions, dtype=dtype)
            grad_hy = torch.randn(
                num_layers * num_directions, batch, hidden_size, dtype=dtype)

            rnn = nn.RNN(input_size, hidden_size, num_layers, bias=bias, nonlinearity=nonlinearity).to(dtype)
            outputs_cpu = forward_backward(False, rnn, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

            rnn_gpu = nn.RNN(input_size, hidden_size, num_layers, bias=bias, nonlinearity=nonlinearity).to(dtype)
            outputs_gpu = forward_backward(True, rnn_gpu, input_val, grad_output, rnn.all_weights, hx_val, grad_hy)

            compare_cpu_gpu(outputs_cpu, outputs_gpu)

        # checking LSTM with projections
        for bias, bidirectional, batch_first, contig, variable_len, lens_as_tensor \
                in product((True, False), repeat=6):
            num_directions = 2 if bidirectional else 1
            if batch_first:
                input_val = torch.randn(batch, seq_length, input_size, dtype=dtype)
                grad_output = torch.randn(batch, seq_length, proj_size * num_directions, dtype=dtype)
            else:
                input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
                grad_output = torch.randn(seq_length, batch, proj_size * num_directions, dtype=dtype)

            hx_val = torch.randn(num_layers * num_directions, batch, proj_size, dtype=dtype)
            cx_val = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)
            grad_hy = torch.randn(num_layers * num_directions, batch, proj_size, dtype=dtype)
            grad_cy = torch.randn(num_layers * num_directions, batch, hidden_size, dtype=dtype)

            if not contig:
                grad_output = make_noncontig(grad_output)
                grad_hy = make_noncontig(grad_hy)
                grad_cy = make_noncontig(grad_cy)
                # NOTE(review): dead store, same as in the loop above —
                # `input_val` is not made non-contiguous.
                input_var = make_noncontig(input_val)
                hx_val = make_noncontig(hx_val)
                cx_val = make_noncontig(cx_val)

            if variable_len:
                lengths = [7, 5, 5, 2, 1, 1]
                if lens_as_tensor:
                    lengths = torch.tensor(lengths, dtype=torch.long)
                input_val = rnn_utils.pack_padded_sequence(input_val, lengths, batch_first=batch_first)
                grad_output = rnn_utils.pack_padded_sequence(grad_output, lengths, batch_first=batch_first).data

            rnn = nn.LSTM(input_size,
                          hidden_size,
                          num_layers,
                          bias=bias,
                          dropout=dropout,
                          bidirectional=bidirectional,
                          batch_first=batch_first,
                          proj_size=proj_size).to(dtype)

            outputs_cpu = forward_backward(
                False, rnn, input_val, grad_output, rnn.all_weights,
                hx_val, grad_hy, cx_val, grad_cy)

            rnn_gpu = nn.LSTM(input_size,
                              hidden_size,
                              num_layers,
                              bias=bias,
                              dropout=dropout,
                              bidirectional=bidirectional,
                              batch_first=batch_first,
                              proj_size=proj_size).to(dtype)

            outputs_gpu = forward_backward(
                True, rnn_gpu, input_val, grad_output, rnn.all_weights,
                hx_val, grad_hy, cx_val, grad_cy)
            compare_cpu_gpu(outputs_cpu, outputs_gpu)

    @unittest.skipIf(not TEST_CUDNN, "needs cudnn")
    def test_RNN_cpu_vs_cudnn_no_dropout(self):
        dtype = torch.double
        self._test_RNN_cpu_vs_cudnn(0, dtype)

    @unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
    def test_RNN_cpu_vs_cudnn_with_dropout(self):
        # Because of dropout randomness, can only compare dropout=0 and dropout=1
        self._test_RNN_cpu_vs_cudnn(1)

    @unittest.skipIf(not TEST_CUDNN, "needs cudnn")
    def test_RNN_cudnn_weight_norm(self):
        """weight_norm on cuDNN LSTM weights (incl. projection weight_hr_l0)
        must not change the module's output; removing it must restore too."""
        input_size = 10
        hidden_size = 6
        num_layers = 2
        seq_length = 7
        batch = 6

        # runs on CPU to acquire expected output
        def check_weight_norm(m, name):
            input = torch.randn(seq_length, batch, input_size)
            expected_output = m(input)

            # adds weight normalization
            m = torch.nn.utils.weight_norm(m, name=name)

            # moves to CUDA
            m = m.cuda()
            input = input.cuda()

            # otherwise, subsequent warnings will be hidden, and further tests rely on them
            warnings.simplefilter("always")
            self.assertEqual(m(input), expected_output)

            # remove weight norm
            m = torch.nn.utils.remove_weight_norm(m, name=name)
            self.assertEqual(m(input), expected_output)

        check_weight_norm(nn.LSTM(input_size, hidden_size, num_layers), 'weight_hh_l0')
        check_weight_norm(nn.LSTM(input_size, hidden_size, num_layers, proj_size=3), 'weight_hr_l0')

    @unittest.skipIf(not TEST_CUDA, 'CUDA not available')
    def test_partial_flat_weights(self):
        """An LSTM with a deleted weight attribute must still move to CUDA and
        keep working once the attribute is restored."""
        input_size = 10
        hidden_size = 6
        num_layers = 2

        m = nn.LSTM(input_size, hidden_size, num_layers)
        inp = torch.randn(3, 2, 10)
        out_expected = m(inp)
        # deletes an attribute of original LSTM
        weight_orig = m.weight_hh_l0
        del m.weight_hh_l0
        self.assertFalse(hasattr(m, "weight_hh_l0"))
        # verifies that moving to CUDA with only some attributes defined
        # does not throw an error
        m.cuda()
        # 
recompute the weight and make sure that module can be used
        m.weight_hh_l0 = weight_orig.cuda()
        inp = inp.cuda()
        # otherwise, subsequent warnings will be hidden, and further tests rely on them
        warnings.simplefilter("always")
        self.assertEqual(m(inp)[0].cpu(), out_expected[0])


    @unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
    def test_RNN_dropout(self):
        # checking the assumption that cuDNN sticks dropout in between
        # RNN layers
        for p in (0, 0.276, 0.731, 1):
            for train in (True, False):
                for cuda in (True, False):
                    rnn = nn.RNN(10, 1000, 2, bias=False, dropout=p, nonlinearity='relu')
                    if cuda:
                        rnn.cuda()

                    if train:
                        rnn.train()
                    else:
                        rnn.eval()
                    # All-ones weights make the outputs exact counts, so the
                    # effect of dropout between the two layers is observable.
                    rnn.weight_ih_l0.data.fill_(1)
                    rnn.weight_hh_l0.data.fill_(1)
                    rnn.weight_ih_l1.data.fill_(1)
                    rnn.weight_hh_l1.data.fill_(1)
                    input = torch.ones(1, 1, 10)
                    hx = torch.zeros(2, 1, 1000)
                    if cuda:
                        input = input.cuda()
                        hx = hx.cuda()

                    output, hy = rnn(input, hx)
                    self.assertEqual(output.data.min(), output.data.max())
                    output_val = output.data[0][0][0]
                    if p == 0 or not train:
                        # No dropout applied: 10 inputs * 1000 layer-1 units.
                        self.assertEqual(output_val, 10000)
                    elif p == 1:
                        self.assertEqual(output_val, 0)
                    else:
                        self.assertGreater(output_val, 8000)
                        self.assertLess(output_val, 12000)
                        denorm_mod = (output_val * (1 - p)) % 10
                        self.assertLess(min(denorm_mod, 10 - denorm_mod), 1e-2)

                    self.assertEqual(hy[0].data.min(), hy[0].data.max())
                    self.assertEqual(hy[1].data.min(), hy[1].data.max())
                    self.assertEqual(hy.data[0][0][0], 10)
                    self.assertEqual(hy.data[1][0][0], output_val)

    def test_error_RNN_seq_len_zero(self):
        # checking error message when RNN has seq_len = 0
        for module in (nn.RNN, nn.LSTM, nn.GRU):
            for bidirectional in [True, False]:
                for device in get_all_device_types():
                    input = torch.ones(0, 10, 5)
                    rnn = module(5, 6, bidirectional=bidirectional)
                    if device == 'cuda':
                        rnn.cuda()
                        input = input.cuda()

                    with self.assertRaisesRegex(RuntimeError, "Expected sequence length to be larger than 0 in RNN"):
                        rnn(input)

    def test_RNN_input_size_zero(self):
        # A zero-sized batch dimension must flow through forward and backward
        # without raising.
        for module in (nn.RNN, nn.LSTM, nn.GRU):
            for device in get_all_device_types():
                input = torch.zeros((5, 0, 3))
                rnn = module(input_size=3, hidden_size=4)
                if device == 'cuda':
                    rnn.cuda()
                    input = input.cuda()
                outs = rnn(input)
                self.assertEqual(outs[0].shape, torch.Size([5, 0, 4]))
                # Check that backward does not cause a hard error
                outs[0].sum().backward()

    @unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
    def test_RNN_dropout_state(self):
        """Dropout RNNs must be reproducibly serializable: a save/load round
        trip behaves like the original (deterministic iff p==0 or eval)."""
        for p in (0, 0.1234):
            for train in (True, False):
                for cuda in (True, False):
                    rnn = nn.RNN(100, 100, 2, bias=False, dropout=p, nonlinearity='relu')
                    if cuda:
                        rnn.cuda()

                    if train:
                        rnn.train()
                    else:
                        rnn.eval()
                    input = torch.rand(1, 1, 100)
                    hx = torch.rand(2, 1, 100)
                    if cuda:
                        input = input.cuda()
                        hx = hx.cuda()

                    output1, hy1 = rnn(input, hx)
                    output2, hy2 = rnn(input, hx)

                    buf = io.BytesIO()
                    # NOTE(review): torch.save returns None, so `rnn_pickle`
                    # is always None — the variable looks unused/dead.
                    rnn_pickle = torch.save(rnn, buf)
                    buf.seek(0)
                    rnn2 = torch.load(buf)
                    rnn2.flatten_parameters()
                    output3, hy3 = rnn2(input, hx)

                    if p == 0 or not train:
                        self.assertEqual(output1, output2)
                        self.assertEqual(output1, output3)
                        self.assertEqual(hy1, hy2)
                        self.assertEqual(hy1, hy3)
                    else:
                        self.assertNotEqual(output1, output2)
                        self.assertNotEqual(output1, output3)
                        self.assertNotEqual(hy1, hy2)
                        self.assertNotEqual(hy1, hy3)

    @unittest.skipIf(not (TEST_CUDNN and (TEST_CUDNN_VERSION if TEST_CUDNN_VERSION else 0) >= 5103), "needs cudnn >= 5.1")
    def test_RNN_change_dropout(self):
        # Mutating rnn.dropout between forward calls must take effect
        # immediately (0/1/eval deterministic, otherwise stochastic).
        for train, cuda in product((True, False), repeat=2):
            rnn = nn.RNN(100, 100, 2, dropout=0, nonlinearity='relu')
            input = torch.rand(3, 2, 100)
            if cuda:
                input.data = input.data.cuda()
                rnn.cuda()

            if train:
                rnn.train()
            else:
                rnn.eval()

            prev_output = None
            for p in (0, 0.5, 0, 0.7, 0.2, 1, 0.2, 0):
                rnn.dropout = p
                output1, hy1 = rnn(input)
                output2, hy2 = rnn(input)

                if p == 0 or p == 1 or not train:
                    self.assertEqual(output1, output2)
                    self.assertEqual(hy1, hy2)
                else:
                    self.assertNotEqual(output1, output2)
                    self.assertNotEqual(hy1, hy2)

                if prev_output is not None:
                    if not train:
                        self.assertEqual(output1.data, prev_output)
                        self.assertEqual(output2.data, prev_output)
                    else:
                        self.assertNotEqual(output1.data, prev_output)
                        self.assertNotEqual(output2.data, prev_output)
                prev_output = output1.data

    def test_inplace_thnn(self):
        # In-place activations must not clobber the incoming grad_output.
        modules = [nn.ReLU, nn.ELU, nn.SELU, nn.CELU, nn.RReLU]
        for mod in modules:
            r = mod(inplace=True)
            input = torch.randn(5, 5, requires_grad=True)
            output = r(input + 0)
            grad_output = torch.randn(5, 5)
            grad_output_clone = grad_output.clone()
            output.backward(grad_output)
            self.assertEqual(grad_output, grad_output_clone)


    def test_pixel_shuffle_unshuffle(self):
        """PixelShuffle moves (C*r^2, H, W) -> (C, H*r, W*r); PixelUnshuffle
        inverts it. Valid 3D–5D inputs succeed; bad dims/factors raise."""
        def _test_pixel_shuffle_unshuffle_helper(num_input_dims, valid_channels_dim=True,
                                                 upscale_factor=None):
            # Function to imperatively ensure pixels are shuffled to the correct locations.
            # Used to validate the batch operations in pixel_shuffle.
            def _verify_pixel_shuffle(input, output, upscale_factor):
                for c in range(output.size(-3)):
                    for h in range(output.size(-2)):
                        for w in range(output.size(-1)):
                            height_idx = h // upscale_factor
                            weight_idx = w // upscale_factor
                            channel_idx = (upscale_factor * (h % upscale_factor)) + (w % upscale_factor) + \
                                          (c * upscale_factor ** 2)
                            self.assertEqual(output[..., c, h, w], input[..., channel_idx, height_idx, weight_idx])

            upscale_factor = random.randint(2, 5) if upscale_factor is None else upscale_factor
            # If valid_channels_dim=False, add 1 to make channels dim indivisible by upscale_factor ** 2.
            channels = random.randint(1, 4) * upscale_factor ** 2 + (0 if valid_channels_dim else 1)
            height = random.randint(5, 10)
            width 
requires_grad=True)

        def func(root):
            x = root.clone()
            return F.elu(x, inplace=True)

        gradcheck(func, [v])
        gradgradcheck(func, [v])

    def test_relu_inplace_on_view(self):
        # In-place relu on a narrow() view must return the view itself and
        # still pass grad/gradgrad checks through the base tensor.
        v = torch.tensor([1.0, -1.0, 1.0, -1.0], requires_grad=True)

        def func(root):
            x = root.clone()
            view = x.narrow(0, 1, 2)
            res = F.relu(view, inplace=True)
            self.assertIs(res, view)
            return x

        gradcheck(func, [v])
        gradgradcheck(func, [v])

    def test_PReLU_backward_requires_grad_false(self):
        # Backward through PReLU must not populate grad on an input with
        # requires_grad=False (only the module's own weight needs grad).
        devices = ['cpu']
        devices += ['cuda'] if TEST_CUDA else []
        for d in devices:
            m = nn.PReLU().to(d)
            x = torch.randn(2, 3, 4, 5, device=d, requires_grad=False)
            y = m(x)
            y.mean().backward()
            self.assertEqual(x.grad, None)

    def test_bce_loss_always_nonnegative(self):
        target = torch.ones(5)
        input = torch.ones(5)
        self.assertEqual((nn.BCELoss()(input, target) < 0).sum(), 0)

        target = torch.zeros(5)
        input = torch.zeros(5)
        self.assertEqual((nn.BCELoss()(input, target) < 0).sum(), 0)

    def test_bce_with_logits_raises_if_target_and_input_are_different_size(self):
        target = torch.rand(5)
        input = torch.rand(5, 1)
        with self.assertRaises(ValueError):
            nn.BCEWithLogitsLoss()(input, target)

        target = torch.rand(5, 1)
        input = torch.rand(5)
        with self.assertRaises(ValueError):
            nn.BCEWithLogitsLoss()(input, target)

    def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss(self):
        # BCEWithLogitsLoss(logits) must match BCELoss(sigmoid(logits)) for
        # default, weighted, saturated and unreduced variants.
        sigmoid = nn.Sigmoid()

        target = torch.rand(64, 4)
        output = torch.rand(64, 4) - 0.5

        self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))

        weight = torch.rand(4)
        self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))

        target = torch.zeros(4, 1, dtype=torch.float)
        output = torch.empty(4, 1, dtype=torch.float).fill_(-100)

        self.assertEqual(nn.BCEWithLogitsLoss()(output, target), nn.BCELoss()(sigmoid(output), target))

        self.assertEqual(nn.BCEWithLogitsLoss(reduction='none')(output, target),
                         nn.BCELoss(reduction='none')(sigmoid(output), target))

        weight = torch.rand(1, dtype=torch.float)
        self.assertEqual(nn.BCEWithLogitsLoss(weight)(output, target), nn.BCELoss(weight)(sigmoid(output), target))

    def test_bce_loss_input_range(self):
        # BCELoss requires probabilities; values outside [0, 1] must raise.
        bceloss = nn.BCELoss()

        target = torch.rand(25, 25)
        output_valid = torch.rand(25, 25)
        output_too_negative = output_valid - 1.0
        output_too_positive = output_valid + 1.0

        loss_valid = bceloss(output_valid, target)
        with self.assertRaisesRegex(RuntimeError, 'between 0 and 1'):
            loss_too_negative = bceloss(output_too_negative, target)
        with self.assertRaisesRegex(RuntimeError, 'between 0 and 1'):
            loss_too_positive = bceloss(output_too_positive, target)

    def test_bce_loss_size_mismatch(self):
        bceloss = nn.BCELoss()
        a = torch.rand(25)
        b = torch.rand(25, 1)
        with self.assertRaisesRegex(ValueError, r'Using a target size \('):
            bceloss(a, b)

    def test_bce_with_logits_gives_same_result_as_sigmoid_and_bce_loss_large_tensors_with_grad(self):
        # Forward values AND input gradients must agree for every reduction.
        x_size = 1024
        y_size = 256
        target = torch.rand(x_size, y_size)

        for reduction in ['none', 'mean', 'sum']:
            output_sig = torch.rand(x_size, y_size) - 0.5
            output_logits = output_sig.clone().detach()

            output_sig.requires_grad = True
            output_logits.requires_grad = True
            weight = torch.rand(y_size)

            loss_sig = nn.BCELoss(weight, reduction=reduction)(
                torch.sigmoid(output_sig), target
            )
            loss_logits = nn.BCEWithLogitsLoss(weight, reduction=reduction)(
                output_logits, target
            )

            self.assertEqual(loss_logits, loss_sig)

            if reduction == 'none':
                grad = torch.rand(x_size, y_size)
                loss_sig.backward(grad)
                loss_logits.backward(grad)
            else:
                loss_sig.backward()
                loss_logits.backward()

            self.assertEqual(output_sig.grad, output_logits.grad)

    def test_bce_with_logits_has_correct_forward_grad(self):
        # NOTE(review): the lambda parameter named `self` deliberately shadows
        # the test instance inside the lambda body; only `gradcheck` calls it.
        output = torch.randn(3, 5, requires_grad=True)
        target = torch.randn(3, 5)
        for reduction in ('sum', 'mean', 'none'):
            gradcheck(lambda self, target: nn.BCEWithLogitsLoss(reduction=reduction)(self, target),
                      (output, target), check_forward_ad=True)

    def test_bce_with_logits_has_correct_grad_at_zero(self):
        # At logits==0 and target==0, d(loss)/d(logit) == sigmoid(0) == 0.5.
        output = torch.zeros(3, 1, requires_grad=True)
        target = torch.zeros(3, 1)
        nn.BCEWithLogitsLoss(reduction='sum')(output, target).backward()
        expected_grad = torch.empty(3, 1).fill_(0.5)
        self.assertEqual(output.grad, expected_grad)

    def test_bce_with_logits_broadcasts_weights(self):
        # A (4,) or (16,1) weight must act like its (16,4) expansion.
        target = torch.rand(16, 4)
        output = torch.rand(16, 4) - 0.5

        weight = torch.rand(4)
        out1 = nn.BCEWithLogitsLoss(weight)(output, target)

        weight = weight.expand(16, 4).contiguous()
        out2 = nn.BCEWithLogitsLoss(weight)(output, target)

        self.assertEqual(out1, out2)

        weight = torch.rand(16, 1)
        out1 = nn.BCEWithLogitsLoss(weight)(output, target)

        weight = weight.expand(16, 4).contiguous()
        out2 = nn.BCEWithLogitsLoss(weight)(output, target)

        self.assertEqual(out1, out2)

    def test_bce_with_logits_ones_in_pos_weights_are_the_same_as_none(self):
        target = torch.rand(64, 4)
        output = torch.rand(64, 4) - 0.5
        pos_weight = torch.ones(64, 4)

        self.assertEqual(nn.BCEWithLogitsLoss()(output, target),
                         nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target))

    def test_bce_with_logits_broadcasts_pos_weights(self):
        target = torch.rand(64, 4)
        output = torch.rand(64, 4) - 0.5
        pos_weight = torch.rand(4)
        out1 = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)

        pos_weight1 = pos_weight.expand(1, 4)
        out2 = nn.BCEWithLogitsLoss(pos_weight=pos_weight1)(output, target)

        pos_weight2 = pos_weight.expand(64, 4)
        out3 = nn.BCEWithLogitsLoss(pos_weight=pos_weight2)(output, target)

        self.assertEqual(out1, out2)
        self.assertEqual(out1, out3)

    def test_bce_with_logits_with_pos_weight_has_correct_grad_at_zero(self):
        output = torch.zeros(3, 1, requires_grad=True)
        target = torch.zeros(3, 1)
        pos_weight = torch.ones(3, 1)
        nn.BCEWithLogitsLoss(pos_weight=pos_weight, reduction='sum')(output, target).backward()
        expected_grad = torch.empty(3, 1).fill_(0.5)
        grad = output.grad
        self.assertEqual(grad, expected_grad)

    def test_bce_with_logits_stability(self):
        # Very negative logits must not overflow to inf/nan (log-sum-exp trick).
        output = torch.tensor([0., -120.])
        target = torch.tensor([0., 1.])
        pos_weight = torch.tensor([1., 1.])

        out1 = nn.BCEWithLogitsLoss()(output, target)
        self.assertTrue(torch.isfinite(out1).all().item())

        out2 = nn.BCEWithLogitsLoss(pos_weight=pos_weight)(output, target)
        self.assertTrue(torch.isfinite(out2).all().item())

    def test_bce_loss_broadcasts_weights(self):
        sigmoid = nn.Sigmoid()
        target = torch.rand(16, 4)
        output = torch.rand(16, 4) - 0.5

        weight = torch.rand(4)
        out1 = nn.BCELoss(weight)(sigmoid(output), target)

        weight = weight.expand(16, 4).contiguous()
        out2 = nn.BCELoss(weight)(sigmoid(output), target)

        self.assertEqual(out1, out2)

        weight = torch.rand(16, 1)
        out1 = nn.BCELoss(weight)(sigmoid(output), target)

        weight = weight.expand(16, 4).contiguous()
        out2 = nn.BCELoss(weight)(sigmoid(output), target)

        self.assertEqual(out1, out2)

    def test_hardtanh_inplace_gradgrad(self):
        v = torch.randn(8, requires_grad=True)

        def func(root):
            x = root.clone()
            return F.hardtanh(x, inplace=True)

        gradcheck(func, [v])
        gradgradcheck(func, [v])

    # test hardtanh backward for large tensor
    def test_hardtanh_backward(self):
        x = torch.randn(128, 10000, requires_grad=True)
        grad = torch.randn(128, 10000)
        z = torch.zeros(128, 10000)
        y = F.hardtanh(x)
        y.backward(grad)
        # ref backward path for hardtanh
        mask = (x > -1) & (x < 1)
        x_grad_ref = torch.where(mask, grad, z)
        self.assertEqual(x.grad, x_grad_ref)

    def test_batchnorm_nhwc_cpu(self):
        """BatchNorm2d on channels_last input must match a contiguous
        reference run in outputs and in weight/bias/input gradients."""
        def helper(self, size):
            channels = size[1]
            input = torch.randn(size, dtype=torch.float32, device='cpu', requires_grad=True)
            input = 
input.contiguous(memory_format=torch.channels_last)\n input.retain_grad()\n grad = torch.randn(size, dtype=torch.float32, device='cpu')\n grad = grad.contiguous(memory_format=torch.channels_last)\n bn = nn.BatchNorm2d(channels).cpu().float()\n bn.weight.data.uniform_()\n bn.bias.data.uniform_()\n\n ref_input = input.detach().clone().contiguous().requires_grad_(True)\n ref_grad = grad.detach().clone().contiguous()\n ref_bn = nn.BatchNorm2d(channels).cpu().float()\n ref_bn.load_state_dict(bn.state_dict())\n\n out = bn(input)\n out.backward(grad)\n ref_out = ref_bn(ref_input)\n ref_out.backward(ref_grad)\n\n self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(ref_out.is_contiguous())\n self.assertEqual(out, ref_out)\n self.assertEqual(bn.weight.grad, ref_bn.weight.grad)\n self.assertEqual(bn.bias.grad, ref_bn.bias.grad)\n self.assertEqual(input.grad, ref_input.grad)\n\n helper(self, (4, 8, 10, 10))\n helper(self, (4, 1, 9, 9))\n helper(self, (4, 9, 1, 1))\n\n def test_batchnorm_non_contig_cpu(self):\n input = torch.arange(6, dtype=torch.float).reshape(1, 3, 2, 1).cpu()\n input = input.permute(0, 2, 1, 3)\n\n bn = torch.nn.BatchNorm2d(2).cpu().float().eval()\n bn.weight.data.uniform_()\n bn.bias.data.uniform_()\n\n ref_input = input.detach().clone().contiguous()\n ref_bn = nn.BatchNorm2d(2).cpu().float().eval()\n ref_bn.load_state_dict(bn.state_dict())\n\n out = bn(input)\n ref_out = ref_bn(ref_input)\n\n self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(ref_out.is_contiguous())\n self.assertEqual(out, ref_out)\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n @unittest.skipIf(not TEST_CUDNN, \"needs cudnn\")\n @skipIfRocm\n def test_batchnorm_cudnn_nhwc(self):\n def run_test(input, grad_output):\n c = input.size(1)\n mod = nn.BatchNorm2d(c).cuda().float()\n mod.weight.data.uniform_()\n mod.bias.data.uniform_()\n ref_input = input.detach().clone().contiguous().requires_grad_(True)\n 
ref_grad = grad.detach().clone().contiguous()\n ref_mod = nn.BatchNorm2d(c).cuda().float()\n ref_mod.load_state_dict(mod.state_dict())\n out = mod(input)\n out.backward(grad_output)\n ref_out = ref_mod(ref_input)\n ref_out.backward(ref_grad)\n self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(ref_out.is_contiguous())\n self.assertEqual(out, ref_out)\n self.assertEqual(mod.weight.grad, ref_mod.weight.grad)\n self.assertEqual(mod.bias.grad, ref_mod.bias.grad)\n self.assertEqual(input.grad, ref_input.grad)\n\n input = torch.randint(1, 10, (4, 8, 2, 2), dtype=torch.float32, device=\"cuda\")\n input = input.contiguous(memory_format=torch.channels_last).detach().requires_grad_()\n\n grad = torch.randint(1, 10, (4, 8, 2, 2), dtype=torch.float32, device=\"cuda\")\n grad = grad.contiguous(memory_format=torch.channels_last)\n run_test(input, grad)\n # see #42588, grad is channels_last contiguous, but grad.suggest_memory_format (rightly) return \"contiguous\"\n # not channels_last\n input = torch.randint(1, 10, (2, 8, 8, 1), dtype=torch.float32, device=\"cuda\")\n input = input.contiguous(memory_format=torch.channels_last).detach().requires_grad_()\n grad = torch.randint(1, 10, (2, 8, 8, 1), dtype=torch.float32, device=\"cuda\")\n grad = grad.permute(0, 2, 1, 3)\n run_test(input, grad)\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n def test_batchnorm_cudnn_half(self):\n # THNN\n input = torch.randint(1, 10, (2, 3, 2, 2), dtype=torch.half, device=\"cuda\", requires_grad=True)\n m = nn.BatchNorm2d(3).half().cuda()\n thnn_output = m(input)\n thnn_output.sum().backward()\n thnn_input_grad = input.grad.data.clone()\n self.assertEqualTypeString(thnn_output, input)\n # cuDNN\n if TEST_CUDNN:\n input.grad = None\n m = m.float()\n cudnn_output = m(input)\n cudnn_output.sum().backward()\n cudnn_input_grad = input.grad.data.clone()\n self.assertEqualTypeString(cudnn_output, input)\n self.assertEqual(cudnn_output, thnn_output)\n 
self.assertEqual(cudnn_input_grad, thnn_input_grad, atol=1e-3, rtol=0)\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n def test_batchnorm_nonaffine_cuda_half_input(self):\n input = torch.randn(16, 3, 24, 24, dtype=torch.half, device=\"cuda\")\n m = nn.BatchNorm2d(3, affine=False).cuda().float() # keep running stats in FP32\n output = m(input)\n self.assertEqualTypeString(output, input)\n m.eval()\n output = m(input)\n self.assertEqualTypeString(output, input)\n\n def test_batchnorm_raises_error_if_less_than_one_value_per_channel(self):\n x = torch.rand(10)[None, :, None]\n with self.assertRaises(ValueError):\n torch.nn.BatchNorm1d(10)(x)\n\n def test_batchnorm_raises_error_if_running_mean_is_not_same_size_as_input(self):\n input = torch.rand(2, 10)\n running_var = torch.rand(10)\n wrong_sizes = [9, 11]\n for size in wrong_sizes:\n with self.assertRaises(RuntimeError):\n F.batch_norm(input, torch.rand(size), running_var)\n\n def test_batchnorm_raises_error_if_running_var_is_not_same_size_as_input(self):\n input = torch.rand(2, 10)\n running_mean = torch.rand(10)\n wrong_sizes = [9, 11]\n for size in wrong_sizes:\n with self.assertRaises(RuntimeError):\n F.batch_norm(input, running_mean, torch.rand(size))\n\n def test_batchnorm_raises_error_if_weight_is_not_same_size_as_input(self):\n input = torch.rand(2, 10)\n running_mean = torch.rand(10)\n running_var = torch.rand(10)\n wrong_sizes = [9, 11]\n for size in wrong_sizes:\n with self.assertRaises(RuntimeError):\n F.batch_norm(input, running_mean, running_var, weight=Parameter(torch.rand(size)))\n\n def test_batchnorm_raises_error_if_bias_is_not_same_size_as_input(self):\n input = torch.rand(2, 10)\n running_mean = torch.rand(10)\n running_var = torch.rand(10)\n wrong_sizes = [9, 11]\n for size in wrong_sizes:\n with self.assertRaises(RuntimeError):\n F.batch_norm(input, running_mean, running_var, bias=Parameter(torch.rand(size)))\n\n def 
test_batchnorm_raises_error_if_running_var_or_running_mean_have_forward_grad(self):\n args = (\n torch.randn(3, 2, 5), # input\n torch.randn(2), # running_mean\n torch.randn(2), # running_var\n )\n kwargs = {'training': False, 'momentum': -1.2}\n fn = partial(F.batch_norm, **kwargs)\n\n for dual_indices in ((0,), (1,), (1, 2), (0, 1), (0, 1, 2),):\n tangents = tuple(torch.rand_like(x) for x in args)\n\n with fwAD.dual_level():\n duals = [fwAD.make_dual(primal, tangent) if i in dual_indices else primal\n for i, (primal, tangent) in enumerate(zip(args, tangents))]\n msg = \"batch_norm is not differentiable wrt running_mean and running_var\"\n # 0 needs to have forward grad because otherwise we won't even run batch_norm_jvp\n if (1 in dual_indices or 2 in dual_indices) and 0 in dual_indices:\n with self.assertRaisesRegex(RuntimeError, msg):\n fn(*duals)\n else:\n fn(*duals)\n\n def test_batchnorm_buffer_update_when_stats_are_not_tracked(self):\n input_size = (32, 4)\n # Instantiate BN with buffers that are not None\n bn = nn.BatchNorm1d(input_size[1], track_running_stats=True)\n # Use buffers for normalization but don't update them\n bn.track_running_stats = False\n # Store initial values\n num_batches = bn.num_batches_tracked.clone()\n running_mean = bn.running_mean.clone()\n running_var = bn.running_var.clone()\n # Forward random tensor\n _ = bn(torch.rand(input_size))\n # Ensure none of the buffers has been updated\n self.assertTrue(torch.equal(num_batches, bn.num_batches_tracked))\n self.assertTrue(torch.equal(running_mean, bn.running_mean))\n self.assertTrue(torch.equal(running_var, bn.running_var))\n\n @unittest.skipIf(not torch.cuda.is_available(), \"CUDA not available\")\n def test_batchnorm_nhwc_cuda(self):\n for dtype in (torch.half, torch.float):\n (N, C, H, W) = 2, 64, 50, 50\n model = torch.nn.BatchNorm2d(C, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n model = model.eval().cuda().to(dtype)\n inp1 = torch.randn(N, C, H, W, 
device=torch.device('cuda'), dtype=dtype)\n inp2 = inp1.contiguous(memory_format=torch.channels_last)\n out1 = model(inp1)\n out2 = model(inp2)\n self.assertTrue(torch.equal(out1, out2))\n\n def test_pairwise_distance(self):\n input1 = torch.randn(4, 4, requires_grad=True)\n input2 = torch.randn(4, 4, requires_grad=True)\n self.assertTrue(gradcheck(lambda x, y: F.pairwise_distance(x, y), (input1, input2)))\n\n # TODO: Create an OpInfo for pdist\n def test_pdist(self):\n for device, trans in itertools.product(device_(), [False, True]):\n inp = torch.randn(4, 5, dtype=torch.double, device=device, requires_grad=True)\n if trans:\n inp = inp.transpose(0, 1)\n for p in [0, 1, 2, 0.5, 1.5, 2.5, float('inf')]:\n self.assertTrue(gradcheck(lambda x: F.pdist(x, p), (inp,)))\n\n def test_pdist_zeros(self):\n \"\"\"Test that grad is still valid when dist is 0\"\"\"\n for device in device_():\n inp = torch.randn(1, 3, dtype=torch.double, device=device, requires_grad=True).repeat([2, 1])\n for p in [0, 1, 2, 0.5, 1.5, 2.5, float('inf')]:\n self.assertTrue(gradcheck(lambda x: F.pdist(x, p), (inp,)))\n\n def test_pdist_empty_row(self):\n for device in device_():\n inp = torch.randn(1, 3, dtype=torch.double, device=device, requires_grad=True)\n self.assertTrue(gradcheck(F.pdist, (inp,)))\n\n def test_pdist_empty_col(self):\n for device in device_():\n inp = torch.randn(4, 0, dtype=torch.double, device=device, requires_grad=True)\n self.assertTrue(gradcheck(F.pdist, (inp,)))\n\n @unittest.expectedFailure\n def test_pdist_cpu_gradgrad_unimplemented(self):\n inp = torch.randn(4, 5, requires_grad=True)\n gradgradcheck(F.pdist, (inp,))\n\n @unittest.expectedFailure\n def test_pdist_cuda_gradgrad_unimplemented(self):\n inp = torch.randn(4, 5, device='cuda', requires_grad=True)\n gradgradcheck(F.pdist, (inp,))\n\n # Merge into OpInfo?\n # test for backward in https://github.com/pytorch/pytorch/issues/15511\n def test_pdist_large(self):\n for device in device_():\n def func(x):\n return 
torch.pdist(x, p=2)\n\n # shape[0] should be able to be (roughly) arbitrarily large, but the kernel\n # is currently limited to smaller sizes (see issue above); this is just testing\n # a floor.\n shape = (1000, 1)\n x = torch.randn(shape, device=device).requires_grad_()\n output = torch.pdist(x, p=2)\n # just run a single backward, as gradcheck/gradgradcheck is expensive here\n output.sum().backward()\n\n def test_binary_cross_entropy_grads(self):\n import torch.nn.functional as F\n for device in device_():\n input = torch.rand(3, 3, dtype=torch.double, device=device, requires_grad=True)\n target = torch.rand(3, 3, dtype=torch.double, device=device)\n\n gradcheck(F.binary_cross_entropy, [input, target])\n gradgradcheck(F.binary_cross_entropy, [input, target])\n\n # now with diffentiable target\n target.requires_grad_(True)\n gradcheck(F.binary_cross_entropy, [input, target], check_batched_grad=False)\n # no double backward for target yet\n with self.assertRaisesRegex(RuntimeError, \"not implemented\"):\n gradgradcheck(F.binary_cross_entropy, [input, target], check_batched_grad=False)\n\n def test_cosine_embedding_loss_with_diff_type(self):\n for device in device_():\n input1 = torch.tensor([[2, 3, 4], [6, 2, 4]], dtype=torch.double, device=device)\n input2 = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)\n target = torch.tensor([1, -1], dtype=torch.int, device=device)\n expected = torch.nn.functional.cosine_embedding_loss(input1, input2, target)\n for dt1 in get_all_math_dtypes(device):\n for dt2 in get_all_math_dtypes(device):\n for dt3 in get_all_math_dtypes(device):\n # dt3 is used as dtype for target = [1, -1], so let's skip unsigned type\n if dt3 == torch.uint8:\n continue\n if dt1.is_complex or dt2.is_complex or dt3.is_complex:\n continue\n input1 = input1.to(dt1)\n input2 = input2.to(dt2)\n target = target.to(dt3)\n result = torch.nn.functional.cosine_embedding_loss(input1, input2, target)\n self.assertEqual(result.item(), 
expected.item(), atol=0.001, rtol=0)\n\n def test_kl_div_with_diff_type(self):\n for device in device_():\n input = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)\n target = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.double, device=device)\n expected = torch.nn.functional.kl_div(input, target)\n for input_dtype in get_all_math_dtypes(device):\n if input_dtype.is_complex:\n continue\n for target_dtype in [torch.float32, torch.float64, torch.float16]:\n if (torch.device(device).type == 'cpu' and target_dtype == torch.float16):\n continue\n input = input.to(input_dtype)\n target = target.to(target_dtype)\n result = torch.nn.functional.kl_div(input, target)\n self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)\n\n def test_kl_div_with_diff_type_log_target(self):\n for device in device_():\n input = torch.tensor([[2, 3, 5], [3, 2, 1]], dtype=torch.double, device=device)\n target = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=torch.double, device=device).log()\n expected = torch.nn.functional.kl_div(input, target, log_target=True)\n for input_dtype in get_all_math_dtypes(device):\n if input_dtype.is_complex:\n continue\n for target_dtype in [torch.float32, torch.float64, torch.float16]:\n if (torch.device(device).type == 'cpu' and target_dtype == torch.float16):\n continue\n input = input.to(input_dtype)\n target = target.to(target_dtype)\n result = torch.nn.functional.kl_div(input, target, log_target=True)\n self.assertEqual(result.item(), expected.item(), atol=0.001, rtol=0)\n\n def test_kl_div_log_softmax_target(self):\n for device in device_():\n a = torch.tensor([[1.0, 2, 3], [5.0, 5, 5]], device=device)\n b = torch.tensor([[1.0, 2, 3], [5.0, 5, 5]], device=device)\n self.assertEqual(\n F.kl_div(F.log_softmax(a, 1), F.log_softmax(b, 1), reduction='none', log_target=True),\n torch.zeros_like(a)\n )\n\n def test_cosine_embedding_loss_no_reduce(self):\n input1 = torch.randn(15, 10, requires_grad=True)\n input2 = 
torch.randn(15, 10, requires_grad=True)\n target = torch.randn(15).sign()\n self.assertTrue(gradcheck(lambda x, y, z: F.cosine_embedding_loss(\n x, y, z, reduction='none'), (input1, input2, target)))\n self.assertEqual(F.cosine_embedding_loss(input1, input2, target, reduction='none'),\n loss_reference_fns['CosineEmbeddingLoss'](input1, input2, target, reduction='none'))\n\n def test_cosine_embedding_loss_margin_no_reduce(self):\n input1 = torch.randn(15, 10, requires_grad=True)\n input2 = torch.randn(15, 10, requires_grad=True)\n target = torch.randn(15).sign()\n self.assertTrue(gradcheck(lambda x, y, z: F.cosine_embedding_loss(\n x, y, z, margin=0.5, reduction='none'), (input1, input2, target)))\n self.assertEqual(F.cosine_embedding_loss(input1, input2, target, margin=0.5, reduction='none'),\n loss_reference_fns['CosineEmbeddingLoss'](input1, input2, target,\n margin=0.5, reduction='none'))\n\n def test_cosine_embedding_loss_invalid_shape(self):\n input1 = torch.randn(15, 10)\n input2 = torch.randn(15, 10)\n target = torch.randn(15, 1).sign()\n\n with self.assertRaisesRegex(RuntimeError, \"1D target tensor expected\"):\n F.cosine_embedding_loss(input1, input2, target)\n\n with self.assertRaisesRegex(RuntimeError, \"1D target tensor expects 2D input tensors\"):\n F.cosine_embedding_loss(torch.randn(10), torch.randn(10), torch.randn(10))\n\n with self.assertRaisesRegex(RuntimeError, \"0D target tensor expects 1D input tensors\"):\n F.cosine_embedding_loss(torch.randn(2, 5), torch.randn(2, 5), torch.randn(()))\n\n def test_margin_ranking_loss_no_reduce(self):\n input1 = torch.randn(15).mul_(10).requires_grad_()\n input2 = torch.randn(15).mul_(10).requires_grad_()\n target = torch.randn(15).sign()\n self.assertTrue(gradcheck(lambda x, y, z: F.margin_ranking_loss(\n x, y, z, reduction='none'), (input1, input2, target)))\n self.assertEqual(F.margin_ranking_loss(input1, input2, target, reduction='none'),\n loss_reference_fns['MarginRankingLoss'](input1, input2, target, 
reduction='none'))\n\n def test_margin_ranking_loss_margin_no_reduce(self):\n input1 = torch.randn(15).mul_(10).requires_grad_()\n input2 = torch.randn(15).mul_(10).requires_grad_()\n target = torch.randn(15).sign()\n self.assertTrue(gradcheck(lambda x, y, z: F.margin_ranking_loss(\n x, y, z, margin=0.5, reduction='none'), (input1, input2, target)))\n self.assertEqual(F.margin_ranking_loss(input1, input2, target, margin=0.5, reduction='none'),\n loss_reference_fns['MarginRankingLoss'](input1, input2, target, margin=0.5, reduction='none'))\n\n def test_triplet_margin_loss(self):\n input1 = torch.randn(5, 10, requires_grad=True)\n input2 = torch.randn(5, 10, requires_grad=True)\n input3 = torch.randn(5, 10, requires_grad=True)\n self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(\n x1, x2, x3), (input1, input2, input3)))\n self.assertEqual(F.triplet_margin_loss(input1, input2, input3),\n loss_reference_fns['TripletMarginLoss'](input1, input2, input3))\n\n def test_triplet_margin_loss_swap(self):\n input1 = torch.randn(5, 10, requires_grad=True)\n input2 = torch.randn(5, 10, requires_grad=True)\n input3 = torch.randn(5, 10, requires_grad=True)\n self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(\n x1, x2, x3, swap=True), (input1, input2, input3)))\n self.assertEqual(F.triplet_margin_loss(input1, input2, input3, swap=True),\n loss_reference_fns['TripletMarginLoss'](input1, input2, input3, swap=True))\n\n def test_triplet_margin_loss_no_reduce(self):\n input1 = torch.randn(5, 10, requires_grad=True)\n input2 = torch.randn(5, 10, requires_grad=True)\n input3 = torch.randn(5, 10, requires_grad=True)\n self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(\n x1, x2, x3, reduction='none'), (input1, input2, input3)))\n self.assertEqual(F.triplet_margin_loss(input1, input2, input3, reduction='none'),\n loss_reference_fns['TripletMarginLoss'](input1, input2, input3, reduction='none'))\n\n def 
test_triplet_margin_loss_swap_no_reduce(self):\n input1 = torch.randn(5, 10, requires_grad=True)\n input2 = torch.randn(5, 10, requires_grad=True)\n input3 = torch.randn(5, 10, requires_grad=True)\n self.assertTrue(gradcheck(lambda x1, x2, x3: F.triplet_margin_loss(\n x1, x2, x3, swap=True, reduction='none'), (input1, input2, input3)))\n self.assertEqual(F.triplet_margin_loss(input1, input2, input3, swap=True, reduction='none'),\n loss_reference_fns['TripletMarginLoss'](input1, input2, input3, swap=True, reduction='none'))\n\n def test_triplet_margin_loss_invalid(self):\n input1 = torch.randn(5, 10, requires_grad=True)\n input2 = torch.randn(5, 10, requires_grad=True)\n input3 = torch.randn(5, 10, requires_grad=True)\n input_1d = torch.randn(10, requires_grad=True)\n\n with self.assertRaisesRegex(RuntimeError, \"All inputs should have same dimension\"):\n F.triplet_margin_loss(input1, input2, input_1d)\n\n with self.assertRaisesRegex(RuntimeError, \"All inputs should have same dimension\"):\n F.triplet_margin_loss(input1, input_1d, input3)\n\n with self.assertRaisesRegex(RuntimeError, \"All inputs should have same dimension\"):\n F.triplet_margin_loss(input_1d, input2, input3)\n\n def test_pointwise_loss_target_grad_none_reduction(self):\n i = torch.randn(5, 10)\n t = torch.randn(5, 10, requires_grad=True)\n self.assertEqual(F.mse_loss(i, t, reduction='none').size(), t.size())\n self.assertEqual(F.l1_loss(i, t, reduction='none').size(), t.size())\n\n def test_pointwise_loss_broadcast(self):\n losses = {\n 'mse_loss': lambda x, y, r: F.mse_loss(x, y, reduction=r),\n 'l1_loss': lambda x, y, r: F.l1_loss(x, y, reduction=r),\n 'smooth_l1_loss': lambda x, y, r: F.smooth_l1_loss(x, y, reduction=r),\n 'huber_loss': lambda x, y, r: F.huber_loss(x, y, reduction=r),\n }\n\n input = torch.randn(2, 1, requires_grad=True)\n for _name, fn in losses.items():\n for requires_grad in [True, False]:\n # When target.requires_grad=True, its impl is in Python, while the other is in 
TH.\n target = torch.randn(2, 10, requires_grad=requires_grad)\n for reduction in ['none', 'mean', 'sum']:\n l = fn(input, target, reduction)\n if reduction == 'none':\n self.assertEqual(l.size(), target.size())\n self.assertTrue(gradcheck(fn, (input, target, reduction)))\n\n # https://github.com/pytorch/pytorch/issues/27692 reports\n # that l1_loss get a wrong result for big batch size\n def test_l1_loss_correct(self):\n for dtype in [torch.float, torch.cfloat]:\n for N in range(1, 50, 10):\n input = torch.rand(N, 3, 1024, 1024, dtype=dtype)\n self.assertEqual(\n torch.nn.L1Loss()(input, torch.zeros_like(input)),\n input.abs().mean())\n\n def test_smoothl1loss_intergral_target(self):\n def _input_grad(input, target, reduction):\n output = F.smooth_l1_loss(input, target, reduction=reduction, beta=0.5)\n output.sum().backward()\n return input.grad\n\n for device, dtype, reduction in product(device_(),\n integral_types(),\n ('none', 'sum', 'mean')):\n input = torch.randn(2, 2, device=device, requires_grad=True)\n target = torch.randint(0, 9, (2, 2), device=device, dtype=dtype)\n\n input_grad_with_float_target = _input_grad(input, target.float(), reduction)\n\n input_grad = _input_grad(input.detach().clone().requires_grad_(True),\n target,\n reduction)\n self.assertEqual(input_grad, input_grad_with_float_target)\n\n def test_smoothl1loss_negative_beta_not_supported(self):\n with self.assertRaises(RuntimeError):\n F.smooth_l1_loss(torch.randn(2, 2), torch.randn(2, 2), beta=-1.0)\n\n def test_huber_loss_invalid_delta(self):\n def _test_huber_loss_delta_error_helper(delta):\n input, target = torch.randn(2, 2), torch.randn(2, 2)\n loss = torch.nn.HuberLoss(delta=delta)\n with self.assertRaises(RuntimeError):\n loss(input, target)\n\n def test_huber_loss_negative_delta():\n _test_huber_loss_delta_error_helper(delta=-0.5)\n\n def test_huber_loss_zero_delta():\n _test_huber_loss_delta_error_helper(delta=0.0)\n\n test_huber_loss_negative_delta()\n 
test_huber_loss_zero_delta()\n\n def test_cosine_similarity(self):\n # Check cosine_similarity input/output shapes\n input_size = (1, 3, 2, 1)\n expected_size = (1, 2, 1)\n input1 = torch.randn(input_size, requires_grad=True)\n input2 = torch.randn(input_size, requires_grad=True)\n self.assertEqual(F.cosine_similarity(input1, input2, dim=1).size(), expected_size)\n\n # Check numerical precision, issue #18057\n vv1 = torch.tensor(list([float(i) for i in range(84)])).unsqueeze(0)\n vv2 = torch.tensor(list([float(i) for i in range(84)])).unsqueeze(0)\n out = F.cosine_similarity(vv1, vv2)\n self.assertLessEqual(out, 1.0)\n\n # Check dividing by 0.\n # previous behavior: <x,y>/max(eps, ||x|| * ||y||)\n # current: <x/max(eps, ||x||), y/max(eps,||y||)>\n # if f(x,y) is the cosine similarity, then\n # df/dx = y/(||x|| * ||y||) - (x * <x,y> * ||y||/||x||)/(||x|| * ||y||)^2\n # the tests below check division by zero in the backward formula when\n # x := input2 = 0, y := input1 != 0.\n # For these inputs the gradient wrt x simplifies to g(x,y) := y/(||x|| * ||y||)\n # Previous test checks g(x,y) == y/eps,\n # Current test checks g(x,y) == (y/||y||)/eps.\n input1 = torch.randn(10).requires_grad_()\n input2 = torch.zeros_like(input1).requires_grad_()\n torch.cosine_similarity(input1, input2, 0).sum().backward()\n self.assertEqual(input1.grad, torch.zeros_like(input1))\n self.assertEqual(input2.grad, input1 / input1.norm() * 1e8)\n\n # Check type promotion, issue #61454\n input = torch.tensor(12.)\n out = F.cosine_similarity(input.to(torch.int8), input, dim=-1)\n self.assertEqual(out, 1.)\n\n def test_grid_sample_error_checking(self):\n input = torch.empty(1, 1, 2, 2)\n grid = torch.empty(1, 1, 1, 2)\n\n # assert no error\n F.grid_sample(input, grid, align_corners=False)\n\n with self.assertRaisesRegex(ValueError, \"but got: 'garbage'\"):\n F.grid_sample(input, grid, mode='garbage', align_corners=False)\n\n with self.assertRaisesRegex(ValueError, \"but got: 'garbage'\"):\n 
F.grid_sample(input, grid, padding_mode='garbage', align_corners=False)\n\n with self.assertRaisesRegex(RuntimeError, \"expected grid to have size 1 in last dimension\"):\n F.grid_sample(input[0], grid, align_corners=False)\n\n with self.assertRaisesRegex(RuntimeError, \"expected grid to have size 2 in last dimension\"):\n F.grid_sample(input, torch.empty(1, 1, 1, 1, 3), align_corners=False)\n\n with self.assertRaisesRegex(RuntimeError, \"expected grid and input to have same batch size\"):\n F.grid_sample(input, torch.empty(2, 1, 1, 2), align_corners=False)\n\n with self.assertRaisesRegex(RuntimeError, \"expected grid to have size 2 in last dimension\"):\n F.grid_sample(input, torch.empty(1, 1, 1, 3), align_corners=False)\n\n with self.assertRaisesRegex(RuntimeError, \"expected input to have non-empty spatial dimensions\"):\n F.grid_sample(torch.empty(1, 1, 0, 2), grid, align_corners=False)\n\n with self.assertRaisesRegex(RuntimeError, \"bicubic interpolation only supports 4D input\"):\n F.grid_sample(torch.empty(1, 1, 2, 2, 2), torch.empty(1, 1, 1, 1, 3), mode='bicubic')\n\n if TEST_CUDA:\n with self.assertRaisesRegex(RuntimeError, \"Expected all tensors to be on the same device\"):\n F.grid_sample(input.cuda(), grid, align_corners=False)\n\n def test_affine_grid_error_checking(self):\n # 2D affine\n theta = torch.empty(1, 2, 3, dtype=torch.double)\n size = torch.Size([1, 1, 2, 2])\n\n # assert no error\n F.affine_grid(theta, size, align_corners=False)\n\n # check for warning for empty span along dimension\n with warnings.catch_warnings(record=True) as w:\n # Ensure warnings are being shown\n warnings.simplefilter(\"always\")\n # Should not trigger warning\n F.affine_grid(theta, torch.Size([1, 1, 2, 1]), align_corners=False)\n # Check no warning occurs\n self.assertNotIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))\n # Should trigger warning\n F.affine_grid(theta, torch.Size([1, 1, 2, 1]), align_corners=True)\n # Check warning 
occurs\n self.assertIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))\n\n with self.assertRaisesRegex(ValueError, \"Expected theta to have floating point type\"):\n F.affine_grid(theta.int(), size, align_corners=False)\n\n with self.assertRaisesRegex(ValueError, \"Expected a batch of 2D affine matrices of shape Nx2x3\"):\n F.affine_grid(theta[0], size, align_corners=False)\n\n with self.assertRaisesRegex(ValueError, \"Expected a batch of 2D affine matrices of shape Nx2x3\"):\n F.affine_grid(theta.unsqueeze(0), size, align_corners=False)\n\n with self.assertRaisesRegex(ValueError, \"Expected a batch of 2D affine matrices of shape Nx2x3\"):\n F.affine_grid(theta.repeat(1, 2, 1), size, align_corners=False)\n\n with self.assertRaisesRegex(ValueError, \"Expected a batch of 2D affine matrices of shape Nx2x3\"):\n F.affine_grid(theta.repeat(1, 1, 2), size, align_corners=False)\n\n # 3D affine\n theta = torch.empty(1, 3, 4, dtype=torch.double)\n size = torch.Size([1, 1, 2, 2, 2])\n\n # assert no error\n F.affine_grid(theta, size, align_corners=False)\n\n # check for warning for empty span along dimension\n with warnings.catch_warnings(record=True) as w:\n # Ensure warnings are being shown\n warnings.simplefilter(\"always\")\n # Should not trigger warning\n F.affine_grid(theta, torch.Size([1, 1, 3, 2, 1]), align_corners=False)\n # Check no warning occurs\n self.assertNotIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))\n # Should trigger warning\n F.affine_grid(theta, torch.Size([1, 1, 3, 2, 1]), align_corners=True)\n # Check warning occurs\n self.assertIn('See the documentation of affine_grid for details.', ' '.join(map(str, w)))\n\n with self.assertRaisesRegex(ValueError, \"Expected a batch of 3D affine matrices of shape Nx3x4\"):\n F.affine_grid(theta[0], size, align_corners=False)\n\n with self.assertRaisesRegex(ValueError, \"Expected a batch of 3D affine matrices of shape Nx3x4\"):\n 
F.affine_grid(theta.unsqueeze(0), size, align_corners=False)\n\n with self.assertRaisesRegex(ValueError, \"Expected a batch of 3D affine matrices of shape Nx3x4\"):\n F.affine_grid(theta.repeat(1, 2, 1), size, align_corners=False)\n\n with self.assertRaisesRegex(ValueError, \"Expected a batch of 3D affine matrices of shape Nx3x4\"):\n F.affine_grid(theta.repeat(1, 1, 2), size, align_corners=False)\n\n with self.assertRaisesRegex(NotImplementedError, \"affine_grid only supports 4D and 5D sizes\"):\n F.affine_grid(theta, torch.Size([1, 2, 2]), align_corners=False)\n\n with self.assertRaisesRegex(NotImplementedError, \"affine_grid only supports 4D and 5D sizes\"):\n F.affine_grid(theta, torch.Size([1, 1, 2, 2, 2, 2]), align_corners=False)\n\n def test_grid_sample(self):\n # Backward pass of native C++ and CUDA kernels branch depending on whether input requires gradient,\n # so we test both cases.\n def test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad):\n def test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners):\n for grid_dim_contig_order in [(0, 1, 2, 3), (0, 3, 1, 2), (3, 0, 1, 2), (0, 2, 1, 3)]:\n # grid_dim_contig_order specifies the dimension order that can\n # make grid to be contiguous.\n # i.e., grid.permute(grid_dim_contig_order) is contiguous.\n # e.g., with grid_dim_contig_order=[0, 3, 1, 2], grid should be\n # initialized with contiguous tensor of shape [N, 2, H, W]\n # and permuted to [N, H, W, 2] afterwards.\n grid_shape = [N, H, W, 2]\n grid_init_shape = [grid_shape[d] for d in grid_dim_contig_order]\n grid_fwd_permute = [None, None, None, None]\n for i, d in enumerate(grid_dim_contig_order):\n grid_fwd_permute[d] = i\n\n def get_grid(device='cpu', data=None):\n if data is not None:\n assert list(data.shape) == grid_shape\n data = data.permute(grid_dim_contig_order).to(device)\n else:\n data = torch.randn(grid_init_shape, device=device)\n grid = data.permute(grid_fwd_permute)\n assert 
grid.permute(grid_dim_contig_order).is_contiguous()\n return grid\n\n input_cpu = torch.randn(C, N, IH, IW).transpose(0, 1).requires_grad_(input_requires_grad)\n grid_cpu = get_grid().requires_grad_()\n out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,\n align_corners=align_corners)\n self.assertTrue(out_cpu.size() == torch.Size([N, C, H, W]))\n\n gradients = torch.randn_like(out_cpu)\n out_cpu.backward(gradients)\n\n\n # Compare against unvectorized CPU fallback\n\n # NOTE [ grid_sample CPU fallback ]\n # grid_sample uses AVX for 2d images, but that requires 32-bit indexing for\n # 32-bit floats. So we also have a fallback that is used only for float tensors\n # requiring 64-bit indexing. That requires too much memory to run on CI, so we\n # also export the fallback and test it here to ensure feature parity with\n # the vectorized version.\n input_fallback = input_cpu.float().detach_().requires_grad_()\n grid_fallback = grid_cpu.float().detach_().requires_grad_()\n out_fallback = torch._grid_sampler_2d_cpu_fallback(\n input_fallback, grid_fallback,\n F.GRID_SAMPLE_INTERPOLATION_MODES[mode],\n F.GRID_SAMPLE_PADDING_MODES[padding_mode],\n align_corners)\n self.assertEqual(out_fallback, out_cpu.float(), atol=1e-5, rtol=5e-5)\n\n out_fallback.backward(gradients.float())\n if input_requires_grad:\n self.assertEqual(input_fallback.grad, input_cpu.grad.float(), atol=1e-4, rtol=5e-5)\n self.assertEqual(grid_fallback.grad, grid_cpu.grad.float(), atol=1e-4, rtol=5e-5)\n\n if TEST_CUDA:\n input_cuda = input_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_(input_requires_grad)\n grid_cuda = get_grid('cuda', grid_cpu.detach()).requires_grad_()\n out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,\n align_corners=align_corners)\n self.assertEqual(out_cpu, out_cuda)\n\n out_cuda.backward(gradients.cuda())\n if input_requires_grad:\n self.assertEqual(input_cpu.grad, input_cuda.grad)\n 
self.assertEqual(grid_cpu.grad, grid_cuda.grad, atol=5e-5, rtol=0)\n\n # check that zero-dimensional input strides don't error out\n base_input = torch.randn(N, C, 1, IW)\n input_cpu = base_input.expand_as(input_cuda).requires_grad_(input_requires_grad)\n out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,\n align_corners=align_corners)\n\n input_cuda = base_input.cuda().expand_as(input_cuda).requires_grad_(input_requires_grad)\n out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,\n align_corners=align_corners)\n self.assertEqual(out_cpu, out_cuda)\n\n # test same size output\n test_shape(N, C, H, W, H, W, mode, padding_mode, align_corners)\n\n # test larger output\n N = random.randint(2, 8)\n C = random.randint(2, 8)\n IH = random.randint(2, 8)\n IW = random.randint(2, 8)\n H = random.randint(IH + 1, 12)\n W = random.randint(IW + 1, 12)\n test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)\n\n # test smaller output\n N = random.randint(2, 8)\n C = random.randint(2, 8)\n IH = random.randint(2, 8)\n IW = random.randint(2, 8)\n H = random.randint(2, IH)\n W = random.randint(2, IW)\n test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)\n\n # test 1x1 inpput\n N = random.randint(2, 8)\n C = random.randint(2, 8)\n IH = 1\n IW = 1\n H = random.randint(2, 5)\n W = random.randint(2, 5)\n test_shape(N, C, IH, IW, H, W, mode, padding_mode, align_corners)\n\n # testing empty grid\n N = random.randint(2, 8)\n C = random.randint(2, 8)\n IH = random.randint(2, 8)\n IW = random.randint(2, 8)\n W = random.randint(3, IW + 2)\n test_shape(N, C, IH, IW, 0, W, mode, padding_mode, align_corners)\n\n # testing empty channel\n N = random.randint(2, 8)\n IH = random.randint(2, 8)\n IW = random.randint(2, 8)\n H = random.randint(3, IH + 2)\n W = random.randint(3, IW + 2)\n test_shape(N, 0, IH, IW, H, W, mode, padding_mode, align_corners)\n\n # testing empty batch\n C = random.randint(2, 8)\n IH = 
random.randint(2, 8)\n IW = random.randint(2, 8)\n H = random.randint(3, IH + 2)\n W = random.randint(3, IW + 2)\n test_shape(0, C, IH, IW, H, W, mode, padding_mode, align_corners)\n\n for mode in ('bilinear', 'nearest', 'bicubic'):\n for padding_mode in ('zeros', 'border', 'reflection'):\n for align_corners in (True, False):\n # test known input on CPU\n input = torch.arange(1., 11).view(1, 1, 2, 5)\n grid = torch.tensor(\n [[[-0.9, -4.1], [0, 0.2000], [1, -1], [-0.333, 1e-6], [0.5, 1.0]],\n [[-1.0, -0.5], [0, 0.3333], [1, -1], [-0.200, 1e-6], [1.5, 0.5]]]).view(1, 2, 5, 2)\n if mode == 'bilinear':\n if padding_mode == 'zeros':\n if align_corners:\n groundtruth = torch.tensor(\n [[0.0000, 6.0000000000, 5.0000, 4.8340, 9.0000],\n [2.2500, 6.3332500450, 5.0000, 5.1000, 0.0000]]).view(1, 1, 2, 5)\n else:\n groundtruth = torch.tensor(\n [[0.0000, 6.5000000000, 1.2500, 4.6675000191, 4.6250],\n [0.5000, 7.1665000916, 1.2500, 5.0000000000, 0.0000]]).view(1, 1, 2, 5)\n elif padding_mode == 'border':\n if align_corners:\n groundtruth = torch.tensor(\n [[1.2000, 6.0000000000, 5.0000, 4.8340, 9.0000],\n [2.2500, 6.3332500450, 5.0000, 5.1000, 8.7500]]).view(1, 1, 2, 5)\n else:\n groundtruth = torch.tensor(\n [[1.0000, 6.5000000000, 5.0000, 4.6675000191, 9.2500],\n [1.0000, 7.1665000916, 5.0000, 5.0000000000, 10.0000]]).view(1, 1, 2, 5)\n elif padding_mode == 'reflection':\n if align_corners:\n groundtruth = torch.tensor(\n [[3.4500, 6.0000000000, 5.0000, 4.8340, 9.0000],\n [2.2500, 6.3332500450, 5.0000, 5.1000, 7.7500]]).view(1, 1, 2, 5)\n else:\n groundtruth = torch.tensor(\n [[3.0000004768, 6.5000000000, 5.0000, 4.6675000191, 9.2500],\n [1.0000000000, 7.1665000916, 5.0000, 5.0000000000, 9.2500]]).view(1, 1, 2, 5)\n else:\n raise AssertionError(\"missing groundtruth test for padding mode '{}'\".format(padding_mode))\n elif mode == 'nearest':\n if padding_mode == 'zeros':\n if align_corners:\n groundtruth = torch.tensor(\n [[0., 8., 5., 7., 9.],\n [1., 8., 5., 8., 
0.]]).view(1, 1, 2, 5)\n else:\n groundtruth = torch.tensor(\n [[0., 8., 5., 7., 0.],\n [1., 8., 5., 8., 0.]]).view(1, 1, 2, 5)\n elif padding_mode == 'border':\n if align_corners:\n groundtruth = torch.tensor(\n [[1., 8., 5., 7., 9.],\n [1., 8., 5., 8., 10.]]).view(1, 1, 2, 5)\n else:\n groundtruth = torch.tensor(\n [[1., 8., 5., 7., 9.],\n [1., 8., 5., 8., 10.]]).view(1, 1, 2, 5)\n elif padding_mode == 'reflection':\n if align_corners:\n groundtruth = torch.tensor(\n [[1., 8., 5., 7., 9.],\n [1., 8., 5., 8., 9.]]).view(1, 1, 2, 5)\n else:\n groundtruth = torch.tensor(\n [[1., 8., 5., 7., 9.],\n [1., 8., 5., 8., 9.]]).view(1, 1, 2, 5)\n else:\n raise AssertionError(\"missing groundtruth test for padding mode '{}'\".format(padding_mode))\n elif mode == 'bicubic':\n if padding_mode == 'zeros':\n if align_corners:\n groundtruth = torch.tensor(\n [[-0.10424726, 7.1400003, 5.0000, 5.7842274, 9.0000],\n [2.4492188, 7.4814040, 5.0000, 6.0277520, 0.0000]]).view(1, 1, 2, 5)\n else:\n groundtruth = torch.tensor(\n [[0.00000, 7.6287503, 1.0625, 5.5977230, 5.3270264],\n [0.40625, 8.0288770, 1.0625, 5.9375067, -0.3515625]]).view(1, 1, 2, 5)\n elif padding_mode == 'border':\n if align_corners:\n groundtruth = torch.tensor(\n [[1.1520010, 6.0599990, 5.0000, 4.870930, 9.0000000],\n [2.1328125, 6.4258375, 5.0000, 5.076003, 8.8671875]]).view(1, 1, 2, 5)\n else:\n groundtruth = torch.tensor(\n [[0.894531, 6.6050020, 4.625, 4.7138715, 9.800781],\n [0.906250, 7.2822485, 4.625, 5.0000052, 10.00000]]).view(1, 1, 2, 5)\n elif padding_mode == 'reflection':\n if align_corners:\n groundtruth = torch.tensor(\n [[3.1822524, 6.239998, 5.0000, 4.8709273, 9.00000],\n [1.7812500, 6.703594, 5.0000, 5.0760007, 8.21875]]).view(1, 1, 2, 5)\n else:\n groundtruth = torch.tensor(\n [[2.7993753, 6.6050020, 4.25, 4.7138715, 10.269531],\n [0.8125000, 7.2822485, 4.25, 5.0000052, 9.332031]]).view(1, 1, 2, 5)\n else:\n raise AssertionError(\"missing groundtruth test for padding mode 
'{}'\".format(padding_mode))\n\n else:\n raise AssertionError(\"missing groundtruth test for interpolation mode '{}'\".format(mode))\n output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,\n align_corners=align_corners)\n self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,\n msg=\"groundtruth comparison failed for mode={}, \"\n \"padding_mode={}\".format(mode, padding_mode))\n\n # See NOTE [ grid_sample CPU fallback ]\n output = torch._grid_sampler_2d_cpu_fallback(\n input.float(), grid.float(),\n F.GRID_SAMPLE_INTERPOLATION_MODES[mode],\n F.GRID_SAMPLE_PADDING_MODES[padding_mode],\n align_corners)\n self.assertEqual(output, groundtruth.float(), atol=1e-5, rtol=0)\n\n # explicit check for gradient edge cases\n input = torch.arange(0., 5).expand((1, 1, 5, 5))\n grid = torch.tensor(\n [[[1.0, 1.0], [1.0, -1.0], [0.8, 0.8], [0.8, -0.8]],\n [[-1.0, -1.0], [-1.0, 1.0], [-0.8, -0.8], [-0.8, 0.8]]]).view(1, 2, 4, 2).requires_grad_()\n if mode == 'bilinear':\n if padding_mode == 'zeros':\n if align_corners:\n groundtruth = torch.tensor(\n [[[[-8., -8.], [-8., 0.], [2., 0.], [2., 0.]],\n [[2., 0.], [2., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)\n else:\n groundtruth = torch.tensor(\n [[[[-5., -5.], [-5., 5.], [-10., -10.], [-10., 10.]],\n [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)\n elif padding_mode == 'border':\n if align_corners:\n groundtruth = torch.tensor(\n [[[[-0., -0.], [-0., 0.], [2., 0.], [2., 0.]],\n [[0., 0.], [0., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)\n else:\n groundtruth = torch.tensor(\n [[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],\n [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)\n elif padding_mode == 'reflection':\n if align_corners:\n groundtruth = torch.tensor(\n [[[[-0., -0.], [-0., 0.], [2., 0.], [2., 0.]],\n [[0., 0.], [0., 0.], [2., 0.], [2., 0.]]]]).view(1, 2, 4, 2)\n else:\n groundtruth = torch.tensor(\n [[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],\n [[0., 0.], [0., 0.], 
[0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)\n else:\n raise AssertionError(\"missing gradient groundtruth test for padding mode '{}'\".format(padding_mode))\n elif mode == 'nearest':\n groundtruth = torch.tensor(\n [[[[-0., -0.], [-0., 0.], [-0., -0.], [-0., 0.]],\n [[0., 0.], [0., 0.], [0., 0.], [0., 0.]]]]).view(1, 2, 4, 2)\n elif mode == 'bicubic':\n if padding_mode == 'zeros':\n if align_corners:\n groundtruth = torch.tensor(\n [[[[-4.5, -6.], [-4.5, 6.], [2.725679, 0.740878], [2.725679, -0.740878]],\n [[1.5, 0.], [1.5, 0.], [1.927921, -0.05688], [1.927921, 0.05688]]]]).view(1, 2, 4, 2)\n else:\n groundtruth = torch.tensor(\n [[[[-5.859375, -5.888672], [-5.859375, 5.888672], [-5.6250, -7.5000], [-5.6250, 7.5000]],\n [[-0.234375, -0.263672], [-0.234375, 0.263672], [1.8750, 0.], [1.8750, 0.]]]]\n ).view(1, 2, 4, 2)\n elif padding_mode == 'border':\n if align_corners:\n groundtruth = torch.tensor(\n [[[[1.5, 0.], [1.5, 0.], [1.74, 0.], [1.74, 0.]],\n [[1.5, 0.], [1.5, 0.], [1.74, 0.], [1.74, 0.]]]]).view(1, 2, 4, 2)\n else:\n groundtruth = torch.tensor(\n [[[[-0.46875, 0.], [-0.46875, 0.], [1.8750, 0.], [1.8750, 0.]],\n [[-0.46875, 0.], [-0.46875, 0.], [1.8750, 0.], [1.8750, 0.]]]]).view(1, 2, 4, 2)\n elif padding_mode == 'reflection':\n if align_corners:\n groundtruth = torch.tensor(\n [[[[0., 0.], [0., 0.], [1.92, 0.], [1.92, 0.]],\n [[0., 0.], [0., 0.], [1.92, 0.], [1.92, 0.]]]]).view(1, 2, 4, 2)\n else:\n groundtruth = torch.tensor(\n [[[[0., 0.], [0., 0.], [1.875, 0.], [1.875, 0.]],\n [[0., 0.], [0., 0.], [1.875, 0.], [1.875, 0.]]]]).view(1, 2, 4, 2)\n else:\n raise AssertionError(\"missing gradient groundtruth test for padding mode '{}'\".format(padding_mode))\n else:\n raise AssertionError(\"missing gradient groundtruth test for interpolation mode '{}'\".format(mode))\n for input_requires_grad in [False, True]:\n input = input.requires_grad_(input_requires_grad)\n F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,\n 
align_corners=align_corners).sum().backward()\n self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0,\n msg=\"gradient groundtruth comparison failed for mode={}, \"\n \"padding_mode={}, input_requires_grad={}\".format(mode, padding_mode, input_requires_grad))\n grid.grad.zero_()\n\n # See NOTE [ grid_sample CPU fallback ]\n torch._grid_sampler_2d_cpu_fallback(\n input.float(), grid.float(),\n F.GRID_SAMPLE_INTERPOLATION_MODES[mode],\n F.GRID_SAMPLE_PADDING_MODES[padding_mode],\n align_corners).sum().backward()\n self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0)\n\n # do gradcheck\n N = random.randint(2, 8)\n C = random.randint(2, 6)\n H = random.randint(2, 8)\n W = random.randint(2, 8)\n input = torch.randn(N, C, H, W, requires_grad=True)\n grid = torch.randn(N, H, W, 2, requires_grad=True)\n\n for input_requires_grad in [False, True]:\n input.requires_grad_(input_requires_grad)\n self.assertTrue(gradcheck(\n lambda inp, grd: F.grid_sample(inp, grd, mode=mode, padding_mode=padding_mode,\n align_corners=align_corners),\n (input, grid)))\n test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad)\n if TEST_CUDNN:\n with cudnn.flags(enabled=False):\n test(N, C, H, W, mode, padding_mode, align_corners, input_requires_grad)\n\n def test_grid_sample_3d(self):\n # Backward pass of native C++ and CUDA kernels branch depending on whether input requires gradient,\n # so we test both cases.\n def test(N, C, D, H, W, mode, padding_mode, align_corners, input_requires_grad):\n def test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners):\n input_cpu = torch.randn(C, N, ID, IH, IW).transpose(0, 1).requires_grad_(input_requires_grad)\n grid_cpu = torch.randn(D, N, H, W, 3).transpose(0, 1).requires_grad_()\n out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,\n align_corners=align_corners)\n self.assertTrue(out_cpu.size() == torch.Size([N, C, D, H, W]))\n\n gradients = torch.randn_like(out_cpu)\n 
out_cpu.backward(gradients)\n\n if TEST_CUDA:\n input_cuda = input_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_(input_requires_grad)\n grid_cuda = grid_cpu.detach().transpose(0, 1).cuda().transpose(0, 1).requires_grad_()\n out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,\n align_corners=align_corners)\n self.assertEqual(out_cpu, out_cuda)\n\n out_cuda.backward(gradients.cuda())\n if input_requires_grad:\n self.assertEqual(input_cpu.grad, input_cuda.grad)\n self.assertEqual(grid_cpu.grad, grid_cuda.grad, atol=5e-5, rtol=0)\n\n # check that zero-dimensional input strides don't error out\n base_input = torch.randn(N, C, 1, IH, IW)\n input_cpu = base_input.expand_as(input_cuda).requires_grad_(input_requires_grad)\n grid_cpu = torch.randn(N, D, H, W, 3, requires_grad=True)\n out_cpu = F.grid_sample(input_cpu, grid_cpu, mode=mode, padding_mode=padding_mode,\n align_corners=align_corners)\n\n input_cuda = base_input.cuda().expand_as(input_cuda).requires_grad_(input_requires_grad)\n grid_cuda = grid_cpu.detach().cuda().requires_grad_()\n out_cuda = F.grid_sample(input_cuda, grid_cuda, mode=mode, padding_mode=padding_mode,\n align_corners=align_corners)\n self.assertEqual(out_cpu, out_cuda)\n\n # test same size output\n test_shape(N, C, D, H, W, D, H, W, mode, padding_mode, align_corners)\n\n # test larger output\n N = random.randint(2, 7)\n C = random.randint(2, 5)\n ID = random.randint(2, 7)\n IH = random.randint(2, 7)\n IW = random.randint(2, 7)\n D = random.randint(ID + 1, 10)\n H = random.randint(IH + 1, 10)\n W = random.randint(IW + 1, 10)\n test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)\n\n # test smaller output\n N = random.randint(2, 7)\n C = random.randint(2, 5)\n ID = random.randint(2, 7)\n IH = random.randint(2, 7)\n IW = random.randint(2, 7)\n D = random.randint(2, ID)\n H = random.randint(2, IH)\n W = random.randint(2, IW)\n test_shape(N, C, ID, IH, IW, D, H, W, mode, 
padding_mode, align_corners)\n\n # test 1x1 inpput\n N = random.randint(2, 7)\n C = random.randint(2, 7)\n ID = 1\n IH = 1\n IW = 1\n H = random.randint(2, 5)\n W = random.randint(2, 5)\n test_shape(N, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)\n\n # testing empty grid\n N = random.randint(2, 7)\n C = random.randint(2, 5)\n ID = random.randint(2, 7)\n IH = random.randint(2, 7)\n IW = random.randint(2, 7)\n D = random.randint(3, ID + 2)\n W = random.randint(3, IW + 2)\n test_shape(N, C, ID, IH, IW, D, 0, W, mode, padding_mode, align_corners)\n\n # testing empty channel\n N = random.randint(2, 7)\n ID = random.randint(2, 5)\n IH = random.randint(2, 7)\n IW = random.randint(2, 7)\n D = random.randint(3, ID + 2)\n H = random.randint(3, IH + 2)\n W = random.randint(3, IW + 2)\n test_shape(N, 0, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)\n\n # testing empty batch\n C = random.randint(2, 5)\n ID = random.randint(2, 7)\n IH = random.randint(2, 7)\n IW = random.randint(2, 7)\n D = random.randint(3, ID + 2)\n H = random.randint(3, IH + 2)\n W = random.randint(3, IW + 2)\n test_shape(0, C, ID, IH, IW, D, H, W, mode, padding_mode, align_corners)\n\n for mode in ('bilinear', 'nearest'):\n for padding_mode in ('zeros', 'border', 'reflection'):\n for align_corners in (True, False):\n # do gradcheck\n N = random.randint(2, 5)\n C = random.randint(2, 4)\n D = random.randint(2, 5)\n H = random.randint(2, 5)\n W = random.randint(2, 5)\n input = torch.randn(N, C, D, H, W, requires_grad=True)\n grid = torch.randn(N, D, H, W, 3, requires_grad=True)\n self.assertTrue(gradcheck(\n lambda inp, grid: F.grid_sample(inp, grid, mode=mode, padding_mode=padding_mode,\n align_corners=align_corners),\n (input, grid)))\n input = input.requires_grad_(False)\n self.assertTrue(gradcheck(\n lambda grid: F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,\n align_corners=align_corners),\n (grid,)))\n\n for input_requires_grad in [False, True]:\n test(N, C, D, H, 
W, mode, padding_mode, align_corners, input_requires_grad)\n\n def test_affine_grid(self):\n # test known input on CPU\n input = torch.arange(1., 7).view(1, 2, 3)\n output = F.affine_grid(input, torch.Size([1, 1, 2, 2]), align_corners=True)\n groundtruth = torch.tensor(\n [[[0., -3.], [2., 5.]], [[4., 7.], [6., 15.]]]).view(1, 2, 2, 2)\n self.assertEqual(output, groundtruth)\n output = F.affine_grid(input, torch.Size([1, 1, 2, 2]), align_corners=False)\n groundtruth = torch.tensor(\n [[[1.5, 1.5], [2.5, 5.5]], [[3.5, 6.5], [4.5, 10.5]]]).view(1, 2, 2, 2)\n self.assertEqual(output, groundtruth)\n\n for align_corners in (True, False):\n # do gradcheck\n N = random.randint(1, 8)\n C = random.randint(1, 8)\n H = random.randint(1, 8)\n W = random.randint(1, 8)\n sz = torch.Size([N, C, H, W])\n inp = torch.randn(N, 2, 3, requires_grad=True)\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\") # python2 requires this so other tests can trigger\n self.assertTrue(gradcheck(\n lambda inp: F.affine_grid(inp, sz, align_corners=align_corners),\n (inp,)))\n\n # test CPU against CUDA\n if TEST_CUDA:\n N = random.randint(1, 8)\n C = random.randint(1, 8)\n H = random.randint(1, 8)\n W = random.randint(1, 8)\n sz = torch.Size([N, C, H, W])\n for align_corners in (True, False):\n input_cpu = torch.randn(N, 2, 3, requires_grad=True)\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\") # python2 requires this so other tests can trigger\n out_cpu = F.affine_grid(input_cpu, sz, align_corners=align_corners)\n gradients = torch.randn(out_cpu.size())\n out_cpu.backward(gradients)\n input_gpu = input_cpu.detach().cuda().requires_grad_()\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\") # python2 requires this so other tests can trigger\n out_cuda = F.affine_grid(input_gpu, sz, align_corners=align_corners)\n out_cuda.backward(gradients.cuda())\n self.assertEqual(out_cpu, out_cuda)\n 
self.assertEqual(input_cpu.grad, input_gpu.grad)\n\n def test_affine_grid_3d(self):\n # test known input on CPU\n input = torch.arange(1., 13).view(1, 3, 4)\n output = F.affine_grid(input, torch.Size([1, 1, 2, 2, 2]), align_corners=True)\n groundtruth = torch.tensor(\n [[[[[-2., -10., -18.], [0., 0., 0.]], [[2., 2., 2.], [4., 12., 20.]]],\n [[[4., 4., 4.], [6., 14., 22.]], [[8., 16., 24.], [10., 26., 42.]]]]]).view(1, 2, 2, 2, 3)\n self.assertEqual(output, groundtruth)\n output = F.affine_grid(input, torch.Size([1, 1, 2, 2, 2]), align_corners=False)\n groundtruth = torch.tensor(\n [[[[[1., -1., -3.], [2., 4., 6.]], [[3., 5., 7.], [4., 10., 16.]]],\n [[[4., 6., 8.], [5., 11., 17.]], [[6., 12., 18.], [7., 17., 27.]]]]]).view(1, 2, 2, 2, 3)\n self.assertEqual(output, groundtruth)\n\n for align_corners in (True, False):\n # do gradcheck\n N = random.randint(1, 8)\n C = random.randint(1, 8)\n D = random.randint(1, 8)\n H = random.randint(1, 8)\n W = random.randint(1, 8)\n sz = torch.Size([N, C, D, H, W])\n inp = torch.randn(N, 3, 4, requires_grad=True)\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\") # python2 requires this so other tests can trigger\n self.assertTrue(gradcheck(\n lambda inp: F.affine_grid(inp, sz, align_corners=align_corners),\n (inp,)))\n\n # test CPU against CUDA\n if TEST_CUDA:\n N = random.randint(1, 8)\n C = random.randint(1, 8)\n D = random.randint(1, 8)\n H = random.randint(1, 8)\n W = random.randint(1, 8)\n sz = torch.Size([N, C, D, H, W])\n for align_corners in (True, False):\n input_cpu = torch.randn(N, 3, 4, requires_grad=True)\n with warnings.catch_warnings(record=True):\n warnings.simplefilter(\"always\") # python2 requires this so other tests can trigger\n out_cpu = F.affine_grid(input_cpu, sz, align_corners=align_corners)\n gradients = torch.randn(out_cpu.size())\n out_cpu.backward(gradients)\n input_gpu = input_cpu.detach().cuda().requires_grad_()\n with warnings.catch_warnings(record=True):\n 
warnings.simplefilter(\"always\") # python2 requires this so other tests can trigger\n out_cuda = F.affine_grid(input_gpu, sz, align_corners=align_corners)\n out_cuda.backward(gradients.cuda())\n self.assertEqual(out_cpu, out_cuda)\n self.assertEqual(input_cpu.grad, input_gpu.grad)\n\n def test_channel_shuffle(self):\n # 3D tensor\n x = torch.tensor(\n [[[1, 2],\n [5, 6],\n [9, 10],\n [13, 14],\n ]]\n )\n y_ref = torch.tensor(\n [[[1, 2],\n [9, 10],\n [5, 6],\n [13, 14],\n ]]\n )\n # ChannelsFirst\n with warnings.catch_warnings(record=True) as w:\n y = F.channel_shuffle(x, 2)\n self.assertEqual(len(w), 0)\n self.assertEqual(y, y_ref)\n # ChannelsLast not supported for 3dim\n\n # 4D tensor\n x = torch.tensor(\n [[[[1, 2],\n [3, 4]],\n [[5, 6],\n [7, 8]],\n [[9, 10],\n [11, 12]],\n [[13, 14],\n [15, 16]],\n ]]\n )\n y_ref = torch.tensor(\n [[[[1, 2],\n [3, 4]],\n [[9, 10],\n [11, 12]],\n [[5, 6],\n [7, 8]],\n [[13, 14],\n [15, 16]],\n ]]\n )\n # ChannelsFirst NCHW\n with warnings.catch_warnings(record=True) as w:\n y = F.channel_shuffle(x, 2)\n self.assertEqual(len(w), 0)\n self.assertEqual(y, y_ref)\n # ChannelsLast NHWC\n with warnings.catch_warnings(record=True) as w:\n y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last), 2)\n self.assertEqual(len(w), 0)\n y = y.contiguous(memory_format=torch.contiguous_format)\n self.assertEqual(y, y_ref)\n\n # 5D tensor\n x = torch.tensor(\n [[[[[1, 2],\n [3, 4]]],\n [[[5, 6],\n [7, 8]]],\n [[[9, 10],\n [11, 12]]],\n [[[13, 14],\n [15, 16]]],\n ]]\n )\n y_ref = torch.tensor(\n [[[[[1, 2],\n [3, 4]]],\n [[[9, 10],\n [11, 12]]],\n [[[5, 6],\n [7, 8]]],\n [[[13, 14],\n [15, 16]]],\n ]]\n )\n # ChannelsFirst NCHW\n with warnings.catch_warnings(record=True) as w:\n y = F.channel_shuffle(x, 2)\n self.assertEqual(len(w), 0)\n self.assertEqual(y, y_ref)\n # ChannelsLast NHWC\n with warnings.catch_warnings(record=True) as w:\n y = F.channel_shuffle(x.contiguous(memory_format=torch.channels_last_3d), 2)\n 
self.assertEqual(len(w), 0)\n y = y.contiguous(memory_format=torch.contiguous_format)\n self.assertEqual(y, y_ref)\n\n def test_upsamplingLinear1d(self):\n for align_corners in [True, False]:\n for recompute_scale_factor in [True, False]:\n kwargs = dict(\n mode='linear', align_corners=align_corners, recompute_scale_factor=recompute_scale_factor\n )\n # test float scale factor up & downsampling\n for scale_factor in [0.5, 1.5, 2]:\n m = nn.Upsample(scale_factor=scale_factor, **kwargs)\n in_t = torch.ones(1, 1, 2)\n out_size = int(math.floor(in_t.shape[-1] * scale_factor))\n with warnings.catch_warnings(record=True) as w:\n out_t = m(in_t)\n self.assertEqual(torch.ones(1, 1, out_size), out_t.data)\n\n input = torch.randn(1, 1, 2, requires_grad=True)\n if not recompute_scale_factor:\n gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), (input,))\n else:\n gradcheck(lambda x: F.interpolate(x, scale_factor=scale_factor, **kwargs), (input,))\n\n def test_upsamplingLinear1d_spatial_invariance(self):\n m = nn.Upsample(scale_factor=3, mode='linear', align_corners=False)\n in_t_9 = torch.zeros(1, 1, 9)\n in_t_9[:, :, :4].normal_()\n with warnings.catch_warnings(record=True) as w:\n out_t_9 = m(in_t_9)\n out_t_5 = m(in_t_9[:, :, :5])\n self.assertEqual(out_t_9[:, :, :15], out_t_5)\n\n def test_upsampling_not_recompute_scale_factor(self):\n # test output against known input: result must match opencv\n in_t = torch.arange(8.).view(1, 2, 2, 2)\n expected_out_t = torch.tensor(\n [[[[-0.32725, -0.08843, 0.37933, 0.79744],\n [0.15039, 0.38921, 0.85697, 1.27508],\n [1.08591, 1.32473, 1.79249, 2.21060],\n [1.92213, 2.16095, 2.62871, 3.04682]],\n\n [[3.67275, 3.91157, 4.37933, 4.79744],\n [4.15039, 4.38921, 4.85697, 5.27508],\n [5.08591, 5.32473, 5.79249, 6.21060],\n [5.92213, 6.16095, 6.62871, 7.04682]]]])\n if IS_PPC:\n # Both OpenCV and PyTorch give a slightly different result on PPC\n expected_out_t = torch.tensor(\n [[[[-0.32725, -0.08843, 0.37933, 0.79744],\n [0.15039, 
0.38921, 0.85697, 1.27508],\n [1.08591, 1.32473, 1.79249, 2.21060],\n [1.92212, 2.16094, 2.62870, 3.04681]],\n\n [[3.67275, 3.91157, 4.37933, 4.79743],\n [4.15039, 4.38921, 4.85697, 5.27508],\n [5.08591, 5.32473, 5.79249, 6.21059],\n [5.92212, 6.16094, 6.62870, 7.04680]]]])\n out_t = F.interpolate(in_t, scale_factor=2.3, mode='bicubic', align_corners=False, recompute_scale_factor=False)\n torch.set_printoptions(precision=5)\n self.assertEqual(out_t, expected_out_t, atol=1e-4, rtol=0)\n\n device_list = ['cpu']\n if TEST_CUDA:\n device_list.append('cuda')\n\n for align_corners in [True, False]:\n kwargs = dict(mode='bicubic', align_corners=align_corners)\n # test float scale factor up & downsampling\n for device in device_list:\n for scale_factor in [0.6, 1.6, 2.3]:\n in_t = torch.ones(2, 2, 2, 2).to(device)\n out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)\n out_size = int(math.floor(in_t.shape[-1] * scale_factor))\n self.assertEqual(torch.ones(2, 2, out_size, out_size), out_t.data, atol=1e-5, rtol=0)\n\n input = torch.randn(2, 2, 2, 2, requires_grad=True)\n gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])\n\n def test_upsamplingBilinear2d_spatial_invariance(self):\n m = nn.Upsample(scale_factor=3, mode='bilinear', align_corners=False)\n in_t_9 = torch.zeros(1, 1, 9, 9)\n in_t_9[:, :, :4, :4].normal_()\n with warnings.catch_warnings(record=True) as w:\n out_t_9 = m(in_t_9)\n out_t_5 = m(in_t_9[:, :, :5, :5])\n self.assertEqual(out_t_9[:, :, :15, :15], out_t_5)\n\n def test_upsamplingTrilinear3d(self):\n for align_corners in [True, False]:\n kwargs = dict(mode='trilinear', align_corners=align_corners)\n\n for memory_format in [torch.contiguous_format, torch.channels_last_3d]:\n # test float scale factor up & downsampling\n for scale_factor in [0.5, 1.5, 2]:\n m = nn.Upsample(scale_factor=scale_factor, **kwargs)\n in_t = torch.ones(1, 2, 2, 2, 2).contiguous(memory_format=memory_format)\n out_size = int(math.floor(in_t.shape[-1] * 
scale_factor))\n with warnings.catch_warnings(record=True) as w:\n out_t = m(in_t)\n self.assertEqual(torch.ones(1, 2, out_size, out_size, out_size), out_t.data)\n # Assert that memory format is carried through to the output\n self.assertTrue(out_t.is_contiguous(memory_format=memory_format))\n\n input = torch.randn(1, 2, 2, 2, 2, requires_grad=True)\n self.assertEqual(\n F.interpolate(input, (out_size, out_size, out_size), **kwargs),\n F.interpolate(input, scale_factor=scale_factor, **kwargs))\n gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])\n gradgradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [input])\n\n def test_upsamplingTrilinear3d_spatial_invariance(self):\n m = nn.Upsample(scale_factor=3, mode='trilinear', align_corners=False)\n in_t_9 = torch.zeros(1, 1, 9, 9, 9)\n in_t_9[:, :, :4, :4, :4].normal_()\n with warnings.catch_warnings(record=True) as w:\n out_t_9 = m(in_t_9)\n out_t_5 = m(in_t_9[:, :, :5, :5, :5])\n self.assertEqual(out_t_9[:, :, :15, :15, :15], out_t_5)\n\n def test_upsampling_small_scale(self):\n m = torch.nn.Upsample(scale_factor=0.5, mode=\"bilinear\")\n in_t = torch.arange(1, 5, dtype=torch.float64).reshape(1, 1, 2, 2)\n out_t = m(in_t)\n expected_out_t = torch.tensor([[[[2.5]]]])\n self.assertEqual(expected_out_t, out_t)\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n def test_interpolate_illegal_memory_access(self):\n in_s = 45\n out_s = 14\n\n input = torch.ones((1, 1, in_s), device='cuda', requires_grad=True)\n # note we allocated grad_output to be larger so out of bound access\n # woudl be visible in grad_input\n grad = torch.ones((1, 1, out_s * 2), device='cuda', requires_grad=True)\n grad = grad[:, :, :out_s]\n\n input_ref = input.detach().cpu().requires_grad_()\n grad_ref = grad.cpu()\n\n out = F.interpolate(input, size=(out_s,), mode='nearest')\n out.backward(grad)\n\n out_ref = F.interpolate(input_ref, size=(out_s,), mode='nearest')\n out_ref.backward(grad_ref)\n\n 
self.assertEqual(out_ref, out)\n self.assertEqual(input_ref.grad, input.grad)\n\n def test_interpolate(self):\n def _test_interpolate_helper(in_t, scale_factor, layer):\n out_size = int(math.floor(in_t.shape[-1] * scale_factor))\n dim = len(in_t.shape) - 2\n out_shape = [1, 1] + [out_size] * dim\n with warnings.catch_warnings(record=True) as w:\n out_t = layer(in_t)\n self.assertEqual(torch.ones(out_shape), out_t)\n\n self.assertEqual(\n F.interpolate(in_t, (out_size,) * dim, **kwargs),\n F.interpolate(in_t, scale_factor=scale_factor, **kwargs))\n gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [in_t], nondet_tol=GRADCHECK_NONDET_TOL)\n gradgradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [in_t], nondet_tol=GRADCHECK_NONDET_TOL)\n\n def _make_input(dim, device):\n size = [1, 1]\n size += [2] * dim\n return torch.ones(size, requires_grad=True, device=device)\n\n device_list = ['cpu']\n if TEST_CUDA:\n device_list.append('cuda')\n\n for device in device_list:\n for scale_factor in [0.5, 1.5, 2]:\n for mode in ['nearest', 'area']:\n kwargs = dict(mode=mode)\n m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)\n for input in [_make_input(1, device), _make_input(2, device), _make_input(3, device)]:\n _test_interpolate_helper(input, scale_factor, m)\n\n for align_corners in [True, False]:\n kwargs = dict(mode='linear', align_corners=align_corners)\n m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)\n _test_interpolate_helper(_make_input(1, device), scale_factor, m)\n\n kwargs = dict(mode='bilinear', align_corners=align_corners)\n m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)\n _test_interpolate_helper(_make_input(2, device), scale_factor, m)\n\n kwargs = dict(mode='bicubic', align_corners=align_corners)\n\n def m(t):\n return F.interpolate(t, scale_factor=scale_factor, **kwargs).to(device)\n _test_interpolate_helper(_make_input(2, device), scale_factor, m)\n\n kwargs = dict(mode='trilinear', 
align_corners=align_corners)\n m = nn.Upsample(scale_factor=scale_factor, **kwargs).to(device)\n _test_interpolate_helper(_make_input(3, device), scale_factor, m)\n\n def test_linear_broadcasting(self):\n m = nn.Linear(5, 8)\n inp = torch.randn(2, 3, 5)\n expected = m(inp.view(6, 5)).view(2, 3, 8)\n self.assertEqual(expected, m(inp))\n\n def test_bilinear(self):\n module = nn.Bilinear(10, 10, 8)\n input1 = torch.randn(4, 10, requires_grad=True)\n input2 = torch.randn(4, 10, requires_grad=True)\n grad_output = torch.randn(4, 8)\n\n res = module(input1, input2)\n expected = (torch.einsum(\"bi,kij,bj->bk\", input1, module.weight, input2) +\n module.bias)\n self.assertEqual(res, expected)\n grads = torch.autograd.grad(res, [module.weight, module.bias, input1, input2], grad_output)\n grads_expected = torch.autograd.grad(expected, [module.weight, module.bias, input1, input2], grad_output)\n for g, ge in zip(grads, grads_expected):\n self.assertEqual(g, ge)\n\n def test_bilinear_non_contiguous(self):\n module = nn.Bilinear(7, 7, 5)\n input1 = torch.randn(4, 7, 10, requires_grad=True)\n input2 = torch.randn(4, 7, 10, requires_grad=True)\n input1_tp = input1.transpose(1, 2)\n input2_tp = input2.transpose(1, 2)\n\n grad_output = torch.randn(4, 10, 5)\n\n def run(input1_tp, input2_tp):\n input1.grad = input2.grad = None\n output = module(input1_tp, input2_tp)\n output.backward(grad_output)\n\n return output.data, input1.grad.data, input2.grad.data\n\n out_nc, g1_nc, g2_nc = run(input1_tp, input2_tp)\n input1_tp = input1_tp.contiguous()\n input2_tp = input2_tp.contiguous()\n out, g1, g2 = run(input1_tp, input2_tp)\n\n self.assertEqual(out, out_nc)\n self.assertEqual(g1, g1_nc)\n self.assertEqual(g2, g2_nc)\n\n def test_bilinear_no_bias(self):\n module = nn.Bilinear(10, 10, 8)\n module_no_bias = nn.Bilinear(10, 10, 8, False)\n\n module.bias.data.zero_()\n module.weight.data.copy_(module_no_bias.weight)\n\n input1 = torch.randn(4, 10, requires_grad=True)\n input2 = 
torch.randn(4, 10, requires_grad=True)\n grad_output = torch.randn(4, 8)\n\n def run(net):\n input1.grad = input2.grad = None\n output = net(input1, input2)\n output.backward(grad_output)\n\n return output.data, input1.grad.data, input2.grad.data\n\n out, g1, g2 = run(module)\n out_nb, g1_nb, g2_nb = run(module_no_bias)\n\n self.assertEqual(out, out_nb)\n self.assertEqual(g1, g1_nb)\n self.assertEqual(g2, g2_nb)\n\n _assertGradAndGradgradChecks(self,\n lambda x1, x2: F.bilinear(x1, x2, module_no_bias.weight, module_no_bias.bias),\n (input1, input2))\n\n def test_bilinear_broadcasting(self):\n m = nn.Bilinear(5, 6, 8)\n input1 = torch.randn(2, 3, 5)\n input2 = torch.randn(2, 3, 6)\n expected = m(input1.view(6, 5), input2.view(6, 6)).view(2, 3, 8)\n self.assertEqual(expected, m(input1, input2))\n\n def test_conv_tbc(self):\n inp = torch.randn(9, 4, 5, requires_grad=True)\n weight = torch.randn(3, 5, 6, requires_grad=True)\n bias = torch.randn(6, requires_grad=True)\n\n gradcheck(lambda i, w, b, pad: F.conv_tbc(i, w, b, pad), (inp, weight, bias, 3))\n\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n @unittest.skipIf(not TEST_CUDNN, \"needs cudnn\")\n @skipIfRocmVersionLessThan((4, 3))\n @skipIfNotMiopenSuggestNHWC\n def test_grouped_conv_cudnn_nhwc_support(self):\n # in order to catch the hols in grouped convolution in nhwc support for earlier cudnn version\n input = torch.randn((16, 16, 8, 8), dtype=torch.float16, device=\"cuda\").to(memory_format=torch.channels_last)\n weight = torch.randn((8, 4, 3, 3), dtype=torch.float16, device=\"cuda\").to(memory_format=torch.channels_last)\n out = torch.convolution(input, weight, None, (1, 1), (1, 1), (1, 1), False, (0, 0), 4)\n input = torch.randn((16, 8, 8, 8), dtype=torch.float16, device=\"cuda\").to(memory_format=torch.channels_last)\n out_transpose = torch.convolution(input, weight, None, (1, 1), (1, 1), (1, 1), True, (0, 0), 4)\n\n @unittest.expectedFailure\n @unittest.skipIf(not TEST_CUDA, \"CUDA 
    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_cudnn_noncontiguous_weight(self):
        # Noncontiguous weights must be contiguous() before being
        # passed to cuDNN.
        # `expand` produces a stride-0 (noncontiguous) view; conv1d must give the
        # same answer whether it receives that view or its contiguous copy.
        input = torch.tensor([1, 1, 1], dtype=torch.double, device="cuda").view(1, 1, 3)
        weights1 = torch.tensor([1], dtype=torch.double, device="cuda").expand(1, 1, 2)
        weights2 = torch.tensor([1], dtype=torch.double, device="cuda").expand(1, 1, 2).contiguous()
        self.assertEqual(F.conv1d(input, weights1, bias=None, stride=2, dilation=2),
                         F.conv1d(input, weights2, bias=None, stride=2, dilation=2))
    # The six wrappers below drive run_grad_conv_test over every spatial
    # dimensionality (1d/2d/3d) and both gradient targets ('input'/'weight'),
    # pairing each F.convNd forward with its functional-grad counterpart.

    def test_grad_conv1d_input(self):
        self.run_grad_conv_test(F.conv1d, F.grad.conv1d_input, 1, 'input')

    def test_grad_conv1d_weight(self):
        self.run_grad_conv_test(F.conv1d, F.grad.conv1d_weight, 1, 'weight')

    def test_grad_conv2d_input(self):
        self.run_grad_conv_test(F.conv2d, F.grad.conv2d_input, 2, 'input')

    def test_grad_conv2d_weight(self):
        self.run_grad_conv_test(F.conv2d, F.grad.conv2d_weight, 2, 'weight')

    def test_grad_conv3d_input(self):
        self.run_grad_conv_test(F.conv3d, F.grad.conv3d_input, 3, 'input')

    def test_grad_conv3d_weight(self):
        self.run_grad_conv_test(F.conv3d, F.grad.conv3d_weight, 3, 'weight')
    def test_fold_invalid_arg(self):
        """nn.Fold must reject inputs whose sizes are inconsistent with its config."""
        # input.size(1) not divisible by \prod(kernel_size)

        fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
        with self.assertRaisesRegex(RuntimeError, r"be divisible by the product of kernel_size"):
            fold(torch.randn(1, 5, 9))

        with self.assertRaisesRegex(RuntimeError, r"be divisible by the product of kernel_size"):
            fold(torch.randn(1, 19, 9))

        # input.size(2) not matching the total number of sliding blocks

        with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
            fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3))
            fold(torch.randn(1, 6, 10))

        with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
            fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3), stride=(2, 2))
            fold(torch.randn(1, 6, 5))

        with self.assertRaisesRegex(RuntimeError, r"match the calculated number of sliding blocks"):
            fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 3), stride=(2, 2), dilation=(1, 2), padding=(2, 0))
            fold(torch.randn(1, 6, 5))  # should be 4 * 1 = 4 sliding blocks

        # dilation so large that the computed sliding-block grid is non-positive
        fold = nn.Fold(output_size=(4, 5), kernel_size=(2, 2), stride=1, dilation=8, padding=0)
        with self.assertRaisesRegex(RuntimeError, r"calculated shape of the array of sliding blocks as"):
            fold(torch.randn(1, 12, 12))
test_unfold_invalid_arg(self):\n # input wrong dimension\n\n unfold = nn.Unfold(kernel_size=(2, 3))\n with self.assertRaisesRegex(NotImplementedError, r\"Only 4D input Tensors are supported\"):\n unfold(torch.randn(1, 5, 2))\n\n # calculated output shape is too small\n\n with self.assertRaisesRegex(RuntimeError, r\"too small \\(non-positive\\)\"):\n unfold = nn.Unfold(kernel_size=(2, 3))\n unfold(torch.randn(1, 2, 2, 2))\n\n with self.assertRaisesRegex(RuntimeError, r\"too small \\(non-positive\\)\"):\n unfold = nn.Unfold(kernel_size=(5, 3), padding=(1, 1))\n unfold(torch.randn(1, 2, 2, 3))\n\n with self.assertRaisesRegex(RuntimeError, r\"too small \\(non-positive\\)\"):\n unfold = nn.Unfold(kernel_size=(1, 3), padding=(1, 1), dilation=(1, 2))\n unfold(torch.randn(1, 2, 2, 2))\n\n def test_conv_padding_mode(self):\n with self.assertRaisesRegex(ValueError, \"padding_mode must be one of\"):\n nn.Conv2d(3, 3, 3, padding_mode=\"xyz\")\n\n with self.assertRaisesRegex(ValueError, \"padding_mode must be one of\"):\n nn.Conv2d(3, 3, 3, padding_mode=3)\n\n with self.assertRaisesRegex(ValueError, \"Only \\\"zeros\\\" \"):\n nn.ConvTranspose2d(3, 3, 3, padding_mode=\"reflect\")\n\n def test_softmin(self):\n x = torch.randn(2, 16)\n self.assertEqual(F.softmin(x, 1), F.softmax(-x, 1))\n self.assertEqual(F.softmin(x, 0), F.softmax(-x, 0))\n\n def test_log_softmax_cpu(self, dtype=torch.bfloat16):\n inputf = torch.rand(32, 100, device=\"cpu\", dtype=torch.float, requires_grad=True)\n input = inputf.to(dtype).detach().requires_grad_(True)\n outf = F.log_softmax(inputf, dim=-1)\n out = F.log_softmax(input, dim=-1)\n self.assertEqual(out.dtype, dtype)\n # TODO(#38095): Replace assertEqualIgnoreType. 
    def test_adaptive_log_softmax(self):
        """Exercise nn.AdaptiveLogSoftmaxWithLoss: argument validation, shapes,
        log-probability semantics, and predict()'s shortlist/cluster paths."""
        # args validation: cutoffs must be unique, increasing, and < n_classes
        with self.assertRaises(ValueError):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 15, 15], div_value=2.)

        with self.assertRaises(ValueError):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 15, 10], div_value=2.)

        with self.assertRaises(ValueError):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 25], div_value=2.)

        with self.assertRaisesRegex(ValueError, "cutoffs should be a sequence of unique,"):
            _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 20], div_value=2.)

        # not raise: a valid cutoff sequence
        _ = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 19], div_value=2.)

        # input shapes: batch dims of input and target must agree
        with self.assertRaisesRegex(RuntimeError, r"Input and target should have the same size"):
            asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
            x = torch.randn(2, 16)
            y = torch.tensor([0, 5, 10])
            asfm(x, y)

        # out-of-bound targets (20 == n_classes is one past the last valid label)
        with self.assertRaisesRegex(RuntimeError, r"Target values should be in"):
            asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
            x = torch.randn(2, 16)
            y = torch.tensor([0, 20])
            asfm(x, y)

        # cluster sizes: each tail projection halves the dim via div_value=2
        asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
        x = torch.randn(2, 16)
        y = torch.tensor([0, 17])

        self.assertEqual(asfm.head.weight.size(), (5 + 3, 16))   # 5 targets in head, 3 clusters, dimensionality 16
        self.assertEqual(asfm.tail[0][1].weight.size(), (5, 8))  # 5 targets in this cluster, dimensionality 8
        self.assertEqual(asfm.tail[1][1].weight.size(), (5, 4))
        self.assertEqual(asfm.tail[2][1].weight.size(), (5, 2))
        self.assertEqual(asfm(x, y).output.size(), (2, ))

        # test no_batch_dim support: a squeezed sample must give the same output
        asfm = nn.AdaptiveLogSoftmaxWithLoss(16, 20, [5, 10, 15], div_value=2.)
        x = torch.randn(1, 16)
        y = torch.tensor([17])
        x2 = x.squeeze(0)
        y2 = y.squeeze(0)
        self.assertEqual(asfm(x, y).output.squeeze(0), asfm(x2, y2).output)

        # log_probs actually returns log_proba: rows must exponentiate-sum to 1
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 4, [2], div_value=2.)
        x = torch.randn(4, 8)
        logprob_out = asfm.log_prob(x)

        self.assertEqual(torch.exp(logprob_out).data.sum(1), torch.ones(4))

        # forward returns the same thing as log_probs (gathered at the target),
        # and its loss equals nll_loss over the full log-probability matrix
        for v in [0, 1, 2, 3]:
            y = torch.full((4,), v, dtype=torch.long)
            out, loss = asfm(x, y)

            self.assertEqual(out, logprob_out.gather(1, y.unsqueeze(1)).squeeze())
            self.assertEqual(loss, F.nll_loss(logprob_out, y))

        # predict: steer the argmax via hand-zeroed head weights
        x = torch.randn(64, 8).abs_()

        # argmax in shortlist (cluster logits zeroed out)
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
        asfm.head.weight.data.abs_()
        asfm.head.bias.data.abs_()
        asfm.head.weight.data[asfm.shortlist_size:, :].zero_()

        out = asfm.predict(x)
        self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))

        # argmax outside of shortlist (shortlist logits zeroed out)
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
        asfm.head.weight.data.abs_()
        asfm.head.bias.data.abs_()
        asfm.head.weight.data[:asfm.shortlist_size, :].zero_()

        out = asfm.predict(x)
        self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))

        # half of the argmax in shortlist, half in clusters
        asfm = nn.AdaptiveLogSoftmaxWithLoss(8, 10, [4, 8], div_value=2., head_bias=True)
        asfm.head.weight.data.abs_()
        asfm.head.bias.data.abs_()

        x[:32, :asfm.shortlist_size].zero_()
        x[32:, asfm.shortlist_size:].zero_()

        asfm.head.weight.data[:asfm.shortlist_size, asfm.shortlist_size:].zero_()
        asfm.head.weight.data[asfm.shortlist_size:, :asfm.shortlist_size].zero_()

        out = asfm.predict(x)
        self.assertEqual(out, asfm.log_prob(x).argmax(dim=1))
See issue #38095\n self.assertEqualIgnoreType(input.grad, inputf.grad, atol=1e-1, rtol=0)\n\n def test_cross_entropy_loss_precision(self):\n # Regression test for #55657\n loss_cpu = nn.CrossEntropyLoss().cpu()\n inputf = torch.randn(128, 2, 768, 768, device=\"cpu\", dtype=torch.float)\n inputd = inputf.double()\n target = torch.randint(2, (128, 768, 768), dtype=torch.long)\n\n outf = loss_cpu(inputf, target)\n outd = loss_cpu(inputd, target)\n self.assertEqual(outf, outd, exact_dtype=False)\n\n def test_cross_entropy_loss_zero_div(self):\n # Test for issue #73165\n input_1 = torch.rand([5, 0], dtype=torch.float32)\n input_2 = torch.rand([5, 0], dtype=torch.float32)\n torch.nn.CrossEntropyLoss()(input_1, input_2)\n\n @unittest.skipIf(not torch.cuda.is_available(), \"CUDA not available\")\n def test_convert_sync_batchnorm(self):\n module = torch.nn.Sequential(\n torch.nn.BatchNorm1d(100),\n torch.nn.InstanceNorm1d(100)\n ).cuda()\n\n # necessary to have an anchor point for comparison, in case the\n # convert_sync_batchnorm updates in place\n comp_module = torch.nn.Sequential(\n torch.nn.BatchNorm1d(100),\n torch.nn.InstanceNorm1d(100)\n ).cuda()\n comp_module.load_state_dict(module.state_dict())\n\n sync_bn_module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module)\n children = list(sync_bn_module.children())\n self.assertEqual(children[0].__class__, torch.nn.SyncBatchNorm)\n self.assertEqual(children[1].__class__, torch.nn.InstanceNorm1d)\n\n for layer, converted_layer in zip(comp_module.children(), sync_bn_module.children()):\n for key in layer.state_dict().keys():\n self.assertEqual(layer.state_dict()[key].device, converted_layer.state_dict()[key].device)\n self.assertEqual(layer.state_dict()[key], converted_layer.state_dict()[key])\n\n @unittest.skipIf(not TEST_CUDA, \"CUDA not available\")\n def test_sync_batchnorm_backward_elemt(self):\n device = 'cuda'\n saved_input = torch.rand(2, 3, 2, 1, device=device)\n grad_output = torch.rand(2, 3, 2, 1, 
    @unittest.skipIf(not TEST_CUDA, "CUDA not available")
    def test_sync_batchnorm_accuracy_cuda(self):
        # The target of this test is to test the functionality and accuracy of
        # those single-GPU cuda kernels used in SyncBatchNorm
        # They are:
        # fwd: torch.batch_norm_stats, torch.batch_norm_gather_stats_with_counts, torch.batch_norm_elemt
        # bwd: torch.batch_norm_backward_reduce, torch.batch_norm_backward_elemt

        def _batch_norm_stats(data):
            # Only the mean is checked here; the invstd output is discarded.
            # Both contiguous and channels_last layouts must match the reference
            # per-channel mean computed over (N, H, W).
            mean1, _ = torch.batch_norm_stats(data, 1e-5)
            mean2, _ = torch.batch_norm_stats(data.to(memory_format=torch.channels_last), 1e-5)
            mean_ref = torch.mean(data, (0, 2, 3), keepdim=False)

            self.assertEqual(mean_ref, mean1)
            self.assertEqual(mean_ref, mean2)

        data = torch.randn(1, 96, 112, 112, dtype=torch.float, device='cuda')
        _batch_norm_stats(data)
dilation=2)\n grad_output = torch.randn(output.shape)\n\n grad_input_autograd = torch.autograd.grad(output, input, grad_output)[0]\n grad_input_functional = torch.nn.grad.conv1d_input(input.shape, weight, grad_output, dilation=2)\n self.assertEqual(grad_input_functional, grad_input_autograd)\n\n # Conv 2D\n input = torch.randn(1, 1, 5, 5, requires_grad=True)\n weight = torch.randn(1, 1, 3, 3, requires_grad=True)\n output = F.conv2d(input, weight, dilation=2)\n grad_output = torch.randn(output.shape)\n\n grad_input_autograd = torch.autograd.grad(output, input, grad_output)[0]\n grad_input_functional = torch.nn.grad.conv2d_input(input.shape, weight, grad_output, dilation=2)\n self.assertEqual(grad_input_functional, grad_input_autograd)\n\n # Conv 3D\n input = torch.randn(1, 1, 5, 5, 5, requires_grad=True)\n weight = torch.randn(1, 1, 3, 3, 3, requires_grad=True)\n output = F.conv3d(input, weight, dilation=2)\n grad_output = torch.randn(output.shape)\n\n grad_input_autograd = torch.autograd.grad(output, input, grad_output)[0]\n grad_input_functional = torch.nn.grad.conv3d_input(input.shape, weight, grad_output, dilation=2)\n self.assertEqual(grad_input_functional, grad_input_autograd)\n\n # Warning for _grad_input_padding\n with warnings.catch_warnings(record=True) as w:\n torch.nn.grad._grad_input_padding(torch.rand(1, 2, 3), [1, 2, 5], (1,), (0,), (3,))\n self.assertEqual(len(w), 1)\n\n def test_flatten(self):\n tensor_input = torch.randn(2, 1, 2, 3)\n\n # Flatten Tensor\n\n flatten = nn.Flatten(start_dim=1, end_dim=-1)\n tensor_output = flatten(tensor_input)\n self.assertEqual(tensor_output.size(), torch.Size([2, 6]))\n\n def test_unflatten(self):\n tensor_input = torch.randn(2, 50)\n\n # Unflatten Tensor (unflattened_size as a tuple of ints and list of ints)\n\n for us in ((2, 5, 5), [2, 5, 5]):\n unflatten = nn.Unflatten(dim=1, unflattened_size=us)\n tensor_output = unflatten(tensor_input)\n self.assertEqual(tensor_output.size(), torch.Size([2, 2, 5, 5]))\n\n # 
    def test_unflatten_invalid_arg(self):
        """nn.Unflatten must reject malformed unflattened_size arguments.

        With an int dim the size must be a tuple/list of ints; with a named
        (str) dim it must be a tuple of (name, size) tuples.
        """
        # Wrong type for unflattened_size (tuple of floats)

        with self.assertRaisesRegex(
                TypeError,
                r"unflattened_size must be tuple of ints, but found element of type float at pos 2"):
            nn.Unflatten(dim=1, unflattened_size=(2, 5, 5.0))

        # Wrong type for unflattened_size (list of lists and list of tuples)
        for us in ([['C', 2], ['W', 5], ['H', 5]], [('C', 2), ('W', 5), ('H', 5)]):
            with self.assertRaisesRegex(
                    TypeError,
                    r"unflattened_size must be a tuple of tuples, but found type list"):
                nn.Unflatten(dim='features', unflattened_size=us)

        # Wrong type for unflattened_size (tuple of lists)

        with self.assertRaisesRegex(
                TypeError,
                r"unflattened_size must be tuple of tuples, but found element of type list at pos 0"):
            nn.Unflatten(dim='features', unflattened_size=(['C', 2], ['W', 5], ['H', 5]))

        # Wrong type for unflattened_size (tuple of dicts)

        with self.assertRaisesRegex(
                TypeError,
                r"unflattened_size must be tuple of tuples, but found element of type dict at pos 0"):
            nn.Unflatten(dim='features', unflattened_size=({'C': 2}, {'W': 5}, {'H': 5}))
x.to('cuda')\n layer_norm = layer_norm.to('cuda')\n\n grads1 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=False)[0]\n grads2 = torch.autograd.grad(layer_norm(x).sum(), x, create_graph=True)[0]\n\n self.assertEqual(grads1, grads2, rtol=rtol, atol=atol)\n\n def test_padding_list(self):\n # Padding can be a list, or tuple (regression test for gh-54452)\n x = torch.randn(4, 8, 32, 32)\n net = torch.nn.ConvTranspose2d(8, 16, kernel_size=3, padding=[3, 3])\n y = net(x)\n\n net = torch.nn.ConvTranspose2d(8, 16, kernel_size=3, padding=(3, 3))\n y = net(x)\n\n\nclass TestNNInit(TestCase):\n def setUp(self):\n super(TestNNInit, self).setUp()\n random.seed(123)\n\n def _is_normal(self, tensor, mean, std):\n samples = tensor.view(-1).tolist()\n p_value = stats.kstest(samples, 'norm', args=(mean, std))[1]\n return p_value > 0.0001\n\n def _is_trunc_normal(self, tensor, mean, std, a, b):\n # scipy's trunc norm is suited for data drawn from N(0, 1),\n # so we need to transform our data to test it using scipy.\n z_samples = (tensor.view(-1) - mean) / std\n z_samples = z_samples.tolist()\n a0 = (a - mean) / std\n b0 = (b - mean) / std\n p_value = stats.kstest(z_samples, 'truncnorm', args=(a0, b0))[1]\n return p_value > 0.0001\n\n def _is_uniform(self, tensor, a, b):\n samples = tensor.view(-1).tolist()\n p_value = stats.kstest(samples, 'uniform', args=(a, (b - a)))[1]\n return p_value > 0.0001\n\n def _create_random_nd_tensor(self, dims, size_min, size_max):\n size = [random.randint(size_min, size_max) for _ in range(dims)]\n tensor = torch.zeros(size)\n return tensor\n\n def _random_float(self, a, b):\n return (b - a) * random.random() + a\n\n def test_calculate_gain_linear(self):\n for fn in ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose2d', 'conv_transpose2d', 'conv_transpose3d']:\n gain = init.calculate_gain(fn)\n self.assertEqual(gain, 1)\n\n def test_calculate_gain_nonlinear(self):\n for fn in ['sigmoid', 'tanh', 'relu', 'leaky_relu']:\n gain = 
init.calculate_gain(fn)\n if fn == 'sigmoid':\n self.assertEqual(gain, 1)\n elif fn == 'tanh': # 5 / 3\n self.assertEqual(gain, 1.6666666666666667)\n elif fn == 'relu': # sqrt(2)\n self.assertEqual(gain, 1.4142135623730951)\n elif fn == 'leaky_relu': # sqrt(2 / 1 + slope^2))\n self.assertEqual(gain, 1.4141428569978354)\n elif fn == 'selu':\n self.assertEqual(gain, 0.75)\n\n def test_calculate_gain_leaky_relu(self):\n for param in [None, 0, 0.01, 10]:\n gain = init.calculate_gain('leaky_relu', param)\n if param is None: # Default slope is 0.01\n self.assertEqual(gain, 1.4141428569978354)\n elif param == 0: # No slope = same gain as normal ReLU\n self.assertEqual(gain, 1.4142135623730951)\n elif param == 0.01:\n self.assertEqual(gain, 1.4141428569978354)\n elif param == 10:\n self.assertEqual(gain, 0.14071950894605836)\n\n def test_calculate_gain_leaky_relu_only_accepts_numbers(self):\n for param in [True, [1], {'a': 'b'}]:\n with self.assertRaises(ValueError):\n init.calculate_gain('leaky_relu', param)\n\n def test_calculate_gain_only_accepts_valid_nonlinearities(self):\n for n in [2, 5, 25]:\n # Generate random strings of lengths that definitely aren't supported\n random_string = ''.join([random.choice(string.ascii_lowercase) for i in range(n)])\n with self.assertRaises(ValueError):\n init.calculate_gain(random_string)\n\n @unittest.skipIf(not TEST_SCIPY, \"Scipy not found.\")\n def test_uniform(self):\n for dims in [1, 2, 4]:\n input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)\n a = self._random_float(-3, 3)\n b = a + self._random_float(1, 5)\n init.uniform_(input_tensor, a=a, b=b)\n assert self._is_uniform(input_tensor, a, b)\n\n @unittest.skipIf(not TEST_SCIPY, \"Scipy not found.\")\n def test_normal(self):\n for dims in [1, 2, 4]:\n input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)\n mean = self._random_float(-3, 3)\n std = self._random_float(1, 5)\n init.normal_(input_tensor, mean=mean, std=std)\n\n assert 
self._is_normal(input_tensor, mean, std)\n\n @unittest.skipIf(not TEST_SCIPY, \"Scipy not found.\")\n def test_trunc_normal(self):\n for dims in [1, 2, 4]:\n input_tensor = self._create_random_nd_tensor(dims, size_min=30, size_max=50)\n mean = self._random_float(-3, 3)\n std = self._random_float(.01, 1)\n a = self._random_float(mean - 2 * std, mean)\n b = self._random_float(mean, mean + 2 * std)\n init.trunc_normal_(input_tensor, mean=mean, std=std, a=a, b=b)\n\n assert self._is_trunc_normal(input_tensor, mean, std, a, b)\n\n def test_constant(self):\n for dims in [1, 2, 4]:\n input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5)\n val = self._random_float(1, 10)\n init.constant_(input_tensor, val)\n\n self.assertEqual(input_tensor, input_tensor.clone().fill_(val))\n\n def test_ones_and_zeros(self):\n for init_fn_, val in zip([init.ones_, init.zeros_], [1, 0]):\n for dims in [1, 2, 4]:\n input_tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=5)\n init_fn_(input_tensor)\n\n self.assertEqual(input_tensor, input_tensor.clone().fill_(val))\n\n def test_eye(self):\n input_tensor = self._create_random_nd_tensor(2, size_min=1, size_max=5)\n init.eye_(input_tensor)\n\n # Check every single element\n for i in range(input_tensor.size(0)):\n for j in range(input_tensor.size(1)):\n if i == j:\n assert input_tensor[i][j] == 1\n else:\n assert input_tensor[i][j] == 0\n\n def test_eye_only_works_on_2d_inputs(self):\n for dims in [1, 3]:\n with self.assertRaises(ValueError):\n tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)\n init.eye_(tensor)\n\n def test_max_unpool(self):\n # Test 1D\n output, indices = F.max_pool1d(torch.randn([1, 1, 4]), 2, stride=2, return_indices=True)\n self.assertEqual(F.max_unpool1d(output, indices, 2), F.max_unpool1d(output, indices, 2, stride=2))\n\n # Test list / tuple passed as argument to max_unpool1d\n input = torch.randn([1, 1, 5], requires_grad=True)\n output, indices = F.max_pool1d(input, 
    def test_dirac_identity(self):
        """dirac_-initialized conv filters must act as (grouped) identity maps:
        each group passes its in_c input channels through unchanged (up to the
        valid-convolution border crop) and zeroes the surplus output channels."""
        for groups in [1, 3]:
            batch, in_c, out_c, size, kernel_size = 8, 3, 9, 5, 3  # in_c, out_c must divide by groups
            eff_out_c = out_c // groups

            # Test 1D
            input_var = torch.randn(batch, in_c, size)
            filter_var = torch.zeros(eff_out_c, in_c, kernel_size)
            filter_var = torch.cat([filter_var] * groups)
            init.dirac_(filter_var, groups)
            output_var = F.conv1d(input_var, filter_var)
            input_tensor, output_tensor = input_var.data, output_var.data  # Variables do not support nonzero
            for g in range(groups):
                # Assert in_c outputs are preserved (per each group);
                # 1:-1 crops the border lost to the size-3 valid convolution
                self.assertEqual(input_tensor[:, :, 1:-1],
                                 output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :])
                # Assert extra outputs are 0
                assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :]).numel() == 0

            # Test 2D
            input_var = torch.randn(batch, in_c, size, size)
            filter_var = torch.zeros(eff_out_c, in_c, kernel_size, kernel_size)
            filter_var = torch.cat([filter_var] * groups)
            init.dirac_(filter_var, groups)
            output_var = F.conv2d(input_var, filter_var)
            input_tensor, output_tensor = input_var.data, output_var.data  # Variables do not support nonzero
            for g in range(groups):
                # Assert in_c outputs are preserved (per each group)
                self.assertEqual(input_tensor[:, :, 1:-1, 1:-1],
                                 output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :, :])
                # Assert extra outputs are 0
                assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :, :]).numel() == 0

            # Test 3D
            input_var = torch.randn(batch, in_c, size, size, size)
            filter_var = torch.zeros(eff_out_c, in_c, kernel_size, kernel_size, kernel_size)
            filter_var = torch.cat([filter_var] * groups)
            init.dirac_(filter_var, groups)
            output_var = F.conv3d(input_var, filter_var)
            input_tensor, output_tensor = input_var.data, output_var.data
            for g in range(groups):
                # Assert in_c outputs are preserved (per each group)
                self.assertEqual(input_tensor[:, :, 1:-1, 1:-1, 1:-1],
                                 output_tensor[:, eff_out_c * g:eff_out_c * g + in_c, :, :, :])
                # Assert extra outputs are 0
                assert torch.nonzero(output_tensor[:, eff_out_c * g + in_c:eff_out_c * (g + 1), :, :, :]).numel() == 0
    @unittest.skipIf(not TEST_SCIPY, "Scipy not found.")
    def test_xavier_uniform(self):
        """Statistically verify xavier_uniform_: values should be uniform on
        [-bound, bound] with bound = gain * sqrt(3) * sqrt(2 / (fan_in + fan_out))."""
        for use_gain in [True, False]:
            for dims in [2, 4]:
                input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)
                gain = 1

                if use_gain:
                    gain = self._random_float(0.1, 2)
                    init.xavier_uniform_(input_tensor, gain=gain)
                else:
                    init.xavier_uniform_(input_tensor)

                # fan_in/fan_out follow the conv convention: channel dim times
                # the receptive-field size for tensors with more than 2 dims.
                fan_in = input_tensor.size(1)
                fan_out = input_tensor.size(0)
                if input_tensor.dim() > 2:
                    fan_in *= input_tensor[0, 0].numel()
                    fan_out *= input_tensor[0, 0].numel()

                expected_std = gain * math.sqrt(2.0 / (fan_in + fan_out))
                bounds = expected_std * math.sqrt(3)
                assert self._is_uniform(input_tensor, -bounds, bounds)
test_kaiming_uniform_errors_on_inputs_smaller_than_2d(self):\n for dims in [0, 1]:\n with self.assertRaises(ValueError):\n tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)\n init.kaiming_uniform_(tensor)\n\n def test_kaiming_normal_errors_on_inputs_smaller_than_2d(self):\n for dims in [0, 1]:\n with self.assertRaises(ValueError):\n tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=1)\n init.kaiming_normal_(tensor)\n\n def test_kaiming_uniform_warning_on_0element_tensor(self):\n tensor = torch.empty(0, 1)\n with self.assertWarnsRegex(UserWarning, \"Initializing zero-element tensors is a no-op\"):\n _ = init.kaiming_uniform_(tensor)\n\n def test_kaiming_normal_warning_on_0element_tensor(self):\n tensor = torch.empty(0, 1)\n with self.assertWarnsRegex(UserWarning, \"Initializing zero-element tensors is a no-op\"):\n _ = init.kaiming_normal_(tensor)\n\n @unittest.skipIf(not TEST_SCIPY, \"Scipy not found.\")\n def test_kaiming_uniform(self):\n for use_a in [True, False]:\n for dims in [2, 4]:\n for mode in ['fan_in', 'fan_out']:\n input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)\n if use_a:\n a = self._random_float(0.1, 2)\n init.kaiming_uniform_(input_tensor, a=a, mode=mode)\n else:\n a = 0\n init.kaiming_uniform_(input_tensor, mode=mode)\n\n fan_in = input_tensor.size(1)\n fan_out = input_tensor.size(0)\n if input_tensor.dim() > 2:\n fan_in *= input_tensor[0, 0].numel()\n fan_out *= input_tensor[0, 0].numel()\n\n if mode == 'fan_in':\n n = fan_in\n else:\n n = fan_out\n\n expected_std = math.sqrt(2.0 / ((1 + a**2) * n))\n bounds = expected_std * math.sqrt(3.0)\n assert self._is_uniform(input_tensor, -bounds, bounds)\n\n @unittest.skipIf(not TEST_SCIPY, \"Scipy not found.\")\n def test_kaiming_normal(self):\n for use_a in [True, False]:\n for dims in [2, 4]:\n for mode in ['fan_in', 'fan_out']:\n input_tensor = self._create_random_nd_tensor(dims, size_min=20, size_max=25)\n if use_a:\n a = 
self._random_float(0.1, 2)\n init.kaiming_normal_(input_tensor, a=a, mode=mode)\n else:\n a = 0\n init.kaiming_normal_(input_tensor, mode=mode)\n\n fan_in = input_tensor.size(1)\n fan_out = input_tensor.size(0)\n if input_tensor.dim() > 2:\n fan_in *= input_tensor[0, 0].numel()\n fan_out *= input_tensor[0, 0].numel()\n\n if mode == 'fan_in':\n n = fan_in\n else:\n n = fan_out\n\n expected_std = math.sqrt(2.0 / ((1 + a**2) * n))\n assert self._is_normal(input_tensor, 0, expected_std)\n\n def test_sparse_only_works_on_2d_inputs(self):\n for dims in [1, 3]:\n with self.assertRaises(ValueError):\n sparsity = self._random_float(0.1, 0.9)\n tensor = self._create_random_nd_tensor(dims, size_min=1, size_max=3)\n init.sparse_(tensor, sparsity)\n\n @unittest.skipIf(not TEST_SCIPY, \"Scipy not found.\")\n def test_sparse_default_std(self):\n for use_random_std in [True, False]:\n input_tensor = self._create_random_nd_tensor(2, size_min=30, size_max=35)\n rows, cols = input_tensor.size(0), input_tensor.size(1)\n sparsity = self._random_float(0.1, 0.2)\n\n std = 0.01 # default std\n if use_random_std:\n std = self._random_float(0.01, 0.2)\n init.sparse_(input_tensor, sparsity=sparsity, std=std)\n else:\n init.sparse_(input_tensor, sparsity=sparsity)\n\n for col_idx in range(input_tensor.size(1)):\n column = input_tensor[:, col_idx]\n assert column[column == 0].nelement() >= math.ceil(sparsity * rows)\n\n assert self._is_normal(input_tensor[input_tensor != 0], 0, std)\n\n @skipIfNoLapack\n def test_orthogonal(self):\n for use_gain in [True, False]:\n for tensor_size in [[3, 4], [4, 3], [20, 2, 3, 4], [2, 3, 4, 5]]:\n input_tensor = torch.zeros(tensor_size)\n gain = 1.0\n\n if use_gain:\n gain = self._random_float(0.1, 2)\n init.orthogonal_(input_tensor, gain=gain)\n else:\n init.orthogonal_(input_tensor)\n\n rows, cols = tensor_size[0], reduce(mul, tensor_size[1:])\n flattened_tensor = input_tensor.view(rows, cols)\n if rows > cols:\n 
self.assertEqual(torch.mm(flattened_tensor.t(), flattened_tensor),
                                     torch.eye(cols) * gain ** 2, atol=1e-6, rtol=0)
                else:
                    # Wide case: rows are orthonormal instead, so Q Q^T == I * gain^2.
                    self.assertEqual(torch.mm(flattened_tensor, flattened_tensor.t()),
                                     torch.eye(rows) * gain ** 2, atol=1e-6, rtol=0)

    def test_deprecation(self):
        # Calling the non-underscore-suffixed init function (init.normal) must emit
        # a UserWarning mentioning "deprecated".
        x = torch.randn(3, 3)

        def fn():
            init.normal(x)

        with self.assertWarnsRegex(UserWarning, 'deprecated', msg='methods not suffixed with underscore should be deprecated'):
            fn()

class TestFusionEval(TestCase):
    # Numerics check for torch.nn.utils.fusion.fuse_conv_bn_eval: the fused module
    # must match running Conv2d followed by BatchNorm2d in eval mode.
    @given(X=hu.tensor(shapes=((5, 3, 5, 5),)),
           running_mean=hu.tensor(shapes=(6,)),
           running_var=hu.tensor(shapes=(6,)))
    def test_fuse_module_eval_numerics(self, X, running_mean, running_var):
        # NOTE(review): hu.tensor appears to draw (ndarray, extra) pairs — both X and
        # running_mean/var are unpacked/indexed at [0] below; confirm against hu's API.
        inputs, _ = X

        iC, oC = inputs.shape[1], len(running_mean[0])
        inputs = torch.from_numpy(inputs).to(torch.double)
        kernel_size = (3, 3)

        conv_ref = torch.nn.Conv2d(iC, oC, bias=True, kernel_size=kernel_size)
        bn_ref = torch.nn.BatchNorm2d(oC)
        # Install the hypothesis-drawn running stats so eval-mode BN is deterministic.
        bn_ref.running_mean = torch.from_numpy(running_mean[0]).to(torch.double)
        bn_ref.running_var = torch.from_numpy(running_var[0]).to(torch.double)

        conv_ref.eval()
        bn_ref.eval()

        Y_ref = bn_ref(conv_ref(inputs))
        conv_bn_fused = torch.nn.utils.fusion.fuse_conv_bn_eval(conv_ref,
                                                                bn_ref)
        Y_hat = conv_bn_fused(inputs)

        self.assertEqual(Y_ref, Y_hat, msg="Conv+BN fusion results are off")

        # Repeat with a non-affine BN (no learnable weight/bias) — fusion must still match.
        na_bn_ref = torch.nn.BatchNorm2d(oC, affine=False)
        na_bn_ref.running_mean = torch.from_numpy(running_mean[0]).to(torch.double)
        na_bn_ref.running_var = torch.from_numpy(running_var[0]).to(torch.double)
        na_bn_ref.eval()

        Y_ref = na_bn_ref(conv_ref(inputs))
        conv_na_bn_fused = torch.nn.utils.fusion.fuse_conv_bn_eval(conv_ref,
                                                                   na_bn_ref)
        Y_hat = conv_na_bn_fused(inputs)

        self.assertEqual(Y_ref, Y_hat, msg="Conv+BN(non-affine) fusion results are off")


class TestConstantPadNd(TestCase):
    def test_constant_pad_nd(self):
        # Pad a 2x2 matrix with fill value 9; pad spec [1, 2, 1, 0] pads the last
        # dim by (1 left, 2 right) and the first dim by (1 top, 0 bottom) —
        # presumably per torch.constant_pad_nd's last-dim-first convention; the
        # expected tensor (continued on the next chunk) pins this down.
        a = torch.tensor([[1, 2], [3, 4]])
        res = torch.constant_pad_nd(a, [1, 2, 1, 0], 9)
        expected 
= torch.tensor([\n [9, 9, 9, 9, 9],\n [9, 1, 2, 9, 9],\n [9, 3, 4, 9, 9]\n ])\n self.assertEqual(res, expected)\n\n def test_preserves_memory_format(self):\n nchw_tensor = torch.rand((1, 2, 5, 3))\n nchw_padded = torch.constant_pad_nd(nchw_tensor, [1, 2], 0.5)\n self.assertTrue(nchw_padded.is_contiguous(memory_format=torch.contiguous_format))\n\n nhwc_tensor = nchw_tensor.contiguous(memory_format=torch.channels_last)\n nhwc_padded = torch.constant_pad_nd(nhwc_tensor, [1, 2], 0.5)\n self.assertTrue(nhwc_padded.is_contiguous(memory_format=torch.channels_last))\n\n\nclass TestAddRelu(TestCase):\n def test_add_relu(self):\n a = torch.rand((7, 11))\n b = torch.rand((7, 11))\n a = a.float()\n b = b.float()\n a = a * -10\n a = a + 5\n add_res = a + b\n relu_res = torch.relu(add_res)\n add_relu_res = torch._VF._add_relu(a, b)\n\n self.assertEqual(add_relu_res, relu_res)\n\n def test_add_relu_broadcasting(self):\n a = torch.rand((1, 32))\n b = 1\n b_scalar = torch.ones(1, 32)\n res = torch._VF._add_relu(a, b)\n broadcasted_res = torch._VF._add_relu(a, b_scalar)\n\n self.assertEqual(broadcasted_res, res)\n\n\ndef add_test(test, decorator=None):\n def add(test_name, fn):\n if hasattr(TestNN, test_name):\n raise RuntimeError('Found two tests with the same name: ' + test_name)\n if decorator is not None:\n fn = decorator(fn)\n setattr(TestNN, test_name, fn)\n\n test_name = test.get_name()\n if not hasattr(test, 'test_cpu') or test.test_cpu:\n add(test_name, lambda self, test=test: test(self))\n cuda_test_name = test_name + '_cuda'\n # With dtype enable, it's good enough to test against three floating types\n kwargs = {}\n if 'extra_args' in get_function_arglist(test.test_cuda):\n kwargs['extra_args'] = test.extra_args\n\n if 'dtype' in get_function_arglist(test.test_cuda):\n if tf32_is_not_fp32() and test.with_tf32:\n\n def with_tf32_off(self, test=test, kwargs=kwargs):\n with tf32_off():\n test.test_cuda(self, dtype=torch.float, **kwargs)\n\n add(cuda_test_name + '_fp32', 
with_tf32_off)\n\n def with_tf32_on(self, test=test, kwargs=kwargs):\n with tf32_on(self, test.tf32_precision):\n test.test_cuda(self, dtype=torch.float, **kwargs)\n\n add(cuda_test_name + '_tf32', with_tf32_on)\n else:\n add(cuda_test_name + '_float', lambda self,\n test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.float, **kwargs))\n add(cuda_test_name + '_double', lambda self,\n test=test, kwargs=kwargs: test.test_cuda(self, dtype=torch.double, **kwargs))\n\n def test_half(self, test=test, kwargs=kwargs):\n test.test_cuda(self, dtype=torch.half, **kwargs)\n if getattr(test, 'check_half', True):\n add(cuda_test_name + '_half', test_half)\n\n def test_bfloat16(self, test=test, kwargs=kwargs):\n test.test_cuda(self, dtype=torch.bfloat16, **kwargs)\n if getattr(test, 'check_bfloat16', True):\n add(cuda_test_name + '_bfloat16', test_bfloat16)\n\n def test_cfloat(self, test=test, kwargs=kwargs):\n test.test_cuda(self, dtype=torch.cfloat, **kwargs)\n\n def test_cdouble(self, test=test, kwargs=kwargs):\n test.test_cuda(self, dtype=torch.cdouble, **kwargs)\n if getattr(test, 'check_complex', False):\n add(cuda_test_name + '_cfloat', test_cfloat)\n add(cuda_test_name + '_cdouble', test_cdouble)\n\n else:\n def with_tf32_off(self, test=test, kwargs=kwargs):\n with tf32_off():\n test.test_cuda(self, **kwargs)\n\n if tf32_is_not_fp32() and test.with_tf32:\n add(cuda_test_name + '_fp32', with_tf32_off)\n\n def with_tf32_on(self, test=test, kwargs=kwargs):\n with tf32_on(self, test.tf32_precision):\n test.test_cuda(self, **kwargs)\n\n add(cuda_test_name + '_tf32', with_tf32_on)\n else:\n add(cuda_test_name, with_tf32_off)\n\nfor test_params in module_tests + new_module_tests:\n # TODO: CUDA is not implemented yet\n if 'constructor' not in test_params:\n name = test_params.pop('module_name')\n test_params['constructor'] = getattr(nn, name)\n decorator = test_params.pop('decorator', None)\n test = NewModuleTest(**test_params)\n add_test(test, decorator)\n if 
'check_eval' in test_params:\n # create a new test that is identical but that sets module.training to False\n desc = test_params.get('desc', None)\n test_params['desc'] = 'eval' if desc is None else desc + '_eval'\n\n def gen_eval_constructor(constructor):\n def eval_constructor(*args, **kwargs):\n cons = constructor(*args, **kwargs)\n cons.training = False\n return cons\n eval_constructor.__name__ = constructor.__name__\n return eval_constructor\n\n test_params['constructor'] = gen_eval_constructor(test_params['constructor'])\n test = NewModuleTest(**test_params)\n add_test(test, decorator)\n if 'check_with_long_tensor' in test_params:\n fullname = test_params.get('fullname', None)\n if fullname:\n test_params['fullname'] = fullname + '_with_long_tensor'\n else:\n desc = test_params.get('desc', None)\n test_params['desc'] = 'with_long_tensor' if desc is None else desc + '_with_long_tensor'\n\n def double_equivalent_of_long_tensor(size):\n return torch.randint(-1000, 1000, size=size).double()\n\n def apply_to_cons(t):\n if t.is_floating_point():\n if isinstance(t, Parameter):\n return Parameter(double_equivalent_of_long_tensor(t.size()))\n elif isinstance(t, torch.Tensor):\n return double_equivalent_of_long_tensor(t.size())\n else:\n return t\n\n def gen_long_tensor_constructor(constructor):\n def long_tensor_constructor(*args, **kwargs):\n cons = constructor(*args, **kwargs)\n cons._apply(apply_to_cons)\n return cons\n long_tensor_constructor.__name__ = constructor.__name__\n return long_tensor_constructor\n\n def gen_long_tensor_input(input_size):\n def input_func():\n return double_equivalent_of_long_tensor(input_size)\n return input_func\n\n def reference_fn(i, p, m):\n # For bad reasons this would create LongTensors that requires gradients\n # Remove requires_grad to avoid this\n for p in m.parameters():\n p.requires_grad_(False)\n m._apply(lambda t: t.long())\n input = i.long()\n out = m.forward(input)\n return out\n\n test_params['constructor'] = 
gen_long_tensor_constructor(test_params['constructor'])\n test_params['input_fn'] = gen_long_tensor_input(test_params['input_size'])\n test_params['reference_fn'] = reference_fn\n test_params['check_forward_only'] = True\n # Currently we don't support conv2d/conv3d for LongTensor in CUDA\n test_params['test_cuda'] = False\n test = NewModuleTest(**test_params)\n\n add_test(test, decorator)\n\nfor test_params in criterion_tests:\n if 'constructor' not in test_params:\n name = test_params.pop('module_name')\n test_params['constructor'] = getattr(nn, name)\n test = CriterionTest(**test_params)\n decorator = test_params.pop('decorator', None)\n add_test(test, decorator)\n if 'check_sum_reduction' in test_params:\n desc = test_params.get('desc', None)\n test_params['desc'] = 'sum_reduction' if desc is None else desc + '_sum_reduction'\n\n def gen_sum_reduction_constructor(constructor):\n def sum_reduction_constructor(*args, **kwargs):\n cons = constructor(*args, reduction='sum', **kwargs)\n return cons\n sum_reduction_constructor.__name__ = constructor.__name__\n return sum_reduction_constructor\n\n test_params['constructor'] = gen_sum_reduction_constructor(test_params['constructor'])\n test = CriterionTest(**test_params)\n add_test(test, decorator)\n\n\nclass UnpoolingNet(nn.Module):\n def __init__(self, pool, unpool):\n super(UnpoolingNet, self).__init__()\n self.pool = pool\n self.unpool = unpool\n\n def forward(self, input):\n return self.unpool(*self.pool(input))\n\n\nadd_test(NewModuleTest(\n constructor=lambda: UnpoolingNet(\n nn.MaxPool1d(2, return_indices=True),\n nn.MaxUnpool1d(2)),\n input_size=(1, 1, 4),\n fullname='MaxUnpool1d_net',))\nadd_test(NewModuleTest(\n constructor=lambda: UnpoolingNet(\n nn.MaxPool2d(2, return_indices=True),\n nn.MaxUnpool2d(2)),\n input_size=(1, 1, 2, 4),\n fullname='MaxUnpool2d_net',))\nadd_test(NewModuleTest(\n constructor=lambda: UnpoolingNet(\n nn.MaxPool3d(2, return_indices=True),\n nn.MaxUnpool3d(2)),\n input_size=(1, 1, 2, 
4, 6),\n fullname='MaxUnpool3d_net',\n check_gradgrad=False,))\n\nadd_test(NewModuleTest(\n constructor=lambda: UnpoolingNet(\n nn.MaxPool1d(2, return_indices=True),\n nn.MaxUnpool1d(2)),\n input_size=(1, 4),\n reference_fn=single_batch_reference_fn,\n fullname='MaxUnpool1d_net_no_batch_dim',))\nadd_test(NewModuleTest(\n constructor=lambda: UnpoolingNet(\n nn.MaxPool2d(2, return_indices=True),\n nn.MaxUnpool2d(2)),\n input_size=(1, 2, 4),\n reference_fn=single_batch_reference_fn,\n fullname='MaxUnpool2d_net_no_batch_dim',))\n\nadd_test(NewModuleTest(\n constructor=lambda: UnpoolingNet(\n nn.MaxPool3d(2, return_indices=True),\n nn.MaxUnpool3d(2)),\n input_size=(1, 2, 4, 6),\n reference_fn=single_batch_reference_fn,\n fullname='MaxUnpool3d_net_no_batch_dim',\n check_gradgrad=False))\n\nclass _AdaptiveLogSoftmaxWithLoss(nn.AdaptiveLogSoftmaxWithLoss):\n def __call__(self, input):\n t = torch.tensor([0, 1, 4, 8]).to(input.device)\n return nn.AdaptiveLogSoftmaxWithLoss.__call__(self, input, t).output\n\nadd_test(NewModuleTest(\n constructor=lambda: _AdaptiveLogSoftmaxWithLoss(16, 10, [2, 6]),\n input_size=(4, 16),\n fullname='AdaptiveLogSoftmax',\n with_tf32=True,\n tf32_precision=0.005))\n\n\n# The following are helpers for TestNN.test_affine_*\nif torch.cuda.is_available():\n def device_():\n return ['cpu', 'cuda']\nelse:\n def device_():\n return ['cpu']\n\n\ndef angle_rad_():\n return [r * math.pi * 2 for r in [0.0, 0.5, 0.25, 0.125, random.random()]]\n\n\ndef axis_vector_():\n t = (random.random(), random.random(), random.random())\n l = sum(x ** 2 for x in t) ** 0.5\n\n return [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0), tuple(x / l for x in t)]\n\n\ndef input_size2d_():\n return [[1, 1, 3, 5], [1, 1, 3, 3], [1, 1, 4, 4], [1, 1, 3, 4]]\n\n\ndef output_size2d_():\n return [[1, 1, 5, 3], [1, 1, 3, 5], [1, 1, 4, 3], [1, 1, 5, 5], [1, 1, 6, 6]]\n\n\ndef input_size2dsq_():\n return [[1, 1, 2, 2], [1, 1, 3, 3], [1, 1, 4, 4], [1, 1, 6, 6]]\n\n\ndef 
output_size2dsq_():
    # Square NCHW output sizes paired with input_size2dsq_ for the rotate90 test.
    return [[1, 1, 2, 2], [1, 1, 3, 3], [1, 1, 4, 4], [1, 1, 5, 5], [1, 1, 6, 6]]


def input_size3d_():
    # Candidate NCDHW input sizes for the 3d affine tests.
    return [[1, 1, 2, 2, 2], [1, 1, 2, 3, 4], [1, 1, 3, 3, 3], [1, 1, 4, 4, 4], [1, 1, 3, 4, 5]]


def input_size3dsq_():
    # Cube-shaped NCDHW input sizes.
    return [[1, 1, 2, 2, 2], [1, 1, 3, 3, 3], [1, 1, 4, 4, 4], [1, 1, 6, 6, 6]]


def output_size3dsq_():
    # Cube-shaped NCDHW output sizes.
    return [[1, 1, 2, 2, 2], [1, 1, 3, 3, 3], [1, 1, 4, 4, 4], [1, 1, 5, 5, 5], [1, 1, 6, 6, 6]]


def output_size3d_():
    # General NCDHW output sizes for the 3d affine tests.
    return [[1, 1, 2, 2, 2], [1, 1, 3, 3, 3], [1, 1, 3, 4, 5], [1, 1, 4, 3, 2], [1, 1, 5, 5, 5], [1, 1, 6, 6, 6]]


def _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad):
    """Build an equivalent 2d rotation in the three forms the affine tests need.

    Returns a tuple of:
      * transform_tensor -- (1, 2, 3) float32 torch tensor (top two rows of the
        homogeneous rotation) for torch.nn.functional.affine_grid;
      * transform_ary -- 3x3 homogeneous numpy matrix composed of
        in-translate * in-scale * R^T * out-scale * out-translate, consumed by
        scipy.ndimage.affine_transform in the callers;
      * grid_ary -- same composition but with an axis-reorder in place of the
        input translate/scale, used to cross-check the sampling grid.

    input_size / output_size are NCHW lists; indices 2 and 3 are H and W.
    """
    # Pixel-center of each spatial dim ((n - 1) / 2); only indices 2, 3 are used.
    input_center = [(x - 1) / 2.0 for x in input_size]
    output_center = [(x - 1) / 2.0 for x in output_size]

    s = math.sin(angle_rad)
    c = math.cos(angle_rad)

    # Translate normalized coords to the input's pixel center.
    intrans_ary = np.array([
        [1, 0, input_center[2]],
        [0, 1, input_center[3]],
        [0, 0, 1],
    ], dtype=np.float64)

    # Scale normalized [-1, 1] coords up to input pixel units.
    inscale_ary = np.array([
        [input_center[2], 0, 0],
        [0, input_center[3], 0],
        [0, 0, 1],
    ], dtype=np.float64)

    # Homogeneous 2d rotation by angle_rad.
    rotation_ary = np.array([
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1],
    ], dtype=np.float64)

    # Scale output pixel units down to normalized coords.
    outscale_ary = np.array([
        [1.0 / output_center[2], 0, 0],
        [0, 1.0 / output_center[3], 0],
        [0, 0, 1],
    ], dtype=np.float64)

    # Translate the output's pixel center back to the origin.
    outtrans_ary = np.array([
        [1, 0, -output_center[2]],
        [0, 1, -output_center[3]],
        [0, 0, 1],
    ], dtype=np.float64)

    # Swap the x/y axes (scipy indexes (row, col); the grid uses (x, y)).
    reorder_ary = np.array([
        [0, 1, 0],
        [1, 0, 0],
        [0, 0, 1],
    ], dtype=np.float64)

    # Full output->input mapping; note the rotation enters transposed (inverse).
    transform_ary = np.dot(np.dot(np.dot(np.dot(
        intrans_ary,
        inscale_ary),
        rotation_ary.T),
        outscale_ary),
        outtrans_ary)
    grid_ary = np.dot(np.dot(np.dot(reorder_ary, rotation_ary.T), outscale_ary), outtrans_ary)

    # affine_grid expects theta of shape (N, 2, 3): drop the homogeneous row, add batch dim.
    transform_tensor = torch.from_numpy((rotation_ary)).to(device, torch.float32)
    transform_tensor = transform_tensor[:2].unsqueeze(0)

    return transform_tensor, transform_ary, grid_ary


def _buildEquivalentAffineTransforms3d(device, input_size, 
output_size, angle_rad, axis_vector):\n input_center = [(x - 1) / 2.0 for x in input_size]\n output_center = [(x - 1) / 2.0 for x in output_size]\n\n s = math.sin(angle_rad)\n c = math.cos(angle_rad)\n c1 = 1 - c\n\n intrans_ary = np.array([\n [1, 0, 0, input_center[2]],\n [0, 1, 0, input_center[3]],\n [0, 0, 1, input_center[4]],\n [0, 0, 0, 1],\n ], dtype=np.float64)\n\n inscale_ary = np.array([\n [input_center[2], 0, 0, 0],\n [0, input_center[3], 0, 0],\n [0, 0, input_center[4], 0],\n [0, 0, 0, 1],\n ], dtype=np.float64)\n\n l, m, n = axis_vector\n scipyRotation_ary = np.array([\n [l * l * c1 + c, m * l * c1 - n * s, n * l * c1 + m * s, 0],\n [l * m * c1 + n * s, m * m * c1 + c, n * m * c1 - l * s, 0],\n [l * n * c1 - m * s, m * n * c1 + l * s, n * n * c1 + c, 0],\n [0, 0, 0, 1],\n ], dtype=np.float64)\n\n z, y, x = axis_vector\n torchRotation_ary = np.array([\n [x * x * c1 + c, y * x * c1 - z * s, z * x * c1 + y * s, 0],\n [x * y * c1 + z * s, y * y * c1 + c, z * y * c1 - x * s, 0],\n [x * z * c1 - y * s, y * z * c1 + x * s, z * z * c1 + c, 0],\n [0, 0, 0, 1],\n ], dtype=np.float64)\n\n outscale_ary = np.array([\n [1.0 / output_center[2], 0, 0, 0],\n [0, 1.0 / output_center[3], 0, 0],\n [0, 0, 1.0 / output_center[4], 0],\n [0, 0, 0, 1],\n ], dtype=np.float64)\n\n outtrans_ary = np.array([\n [1, 0, 0, -output_center[2]],\n [0, 1, 0, -output_center[3]],\n [0, 0, 1, -output_center[4]],\n [0, 0, 0, 1],\n ], dtype=np.float64)\n\n reorder_ary = np.array([\n [0, 0, 1, 0],\n [0, 1, 0, 0],\n [1, 0, 0, 0],\n [0, 0, 0, 1],\n ], dtype=np.float64)\n\n transform_ary = np.dot(np.dot(np.dot(np.dot(\n intrans_ary,\n inscale_ary),\n np.linalg.inv(scipyRotation_ary)),\n outscale_ary),\n outtrans_ary)\n grid_ary = np.dot(np.dot(np.dot(reorder_ary, np.linalg.inv(scipyRotation_ary)), outscale_ary), outtrans_ary)\n\n transform_tensor = torch.from_numpy((torchRotation_ary)).to(device, torch.float32)\n transform_tensor = transform_tensor[:3].unsqueeze(0)\n\n return transform_tensor, 
transform_ary, grid_ary\n# end TestNN.test_affine_* helpers\n\n\nclass TestNNDeviceType(NNTestCase):\n def run_conv_double_back_test(self, kern, stride, padding, chan_in, chan_out, batch_size,\n inp_size, dilation, no_weight, groups=1, use_cuda=False,\n use_bias=True, dtype=torch.double):\n if use_cuda:\n device = torch.device(\"cuda\")\n else:\n device = torch.device(\"cpu\")\n\n x = torch.randn(batch_size, chan_in, inp_size, inp_size, device=device,\n dtype=dtype, requires_grad=True)\n weight = torch.randn(chan_out, chan_in // groups, kern, kern, device=device,\n dtype=dtype, requires_grad=not no_weight)\n if use_bias:\n bias = torch.randn(chan_out, device=device, dtype=dtype, requires_grad=True)\n else:\n bias = None\n\n def func(*inputs):\n if use_bias:\n lx, lweight, lbias = inputs\n else:\n lx, lweight = inputs\n lbias = None\n # We disable cudnn during forward to avoid finite difference imprecision issues\n with cudnn.flags(enabled=False):\n out = F.conv2d(lx, lweight, lbias, stride, padding, dilation, groups)\n return out\n\n if use_bias:\n inputs = x, weight, bias\n else:\n inputs = x, weight\n\n dummy_out = func(*inputs)\n grad_y = torch.randn_like(dummy_out, device=device, dtype=dtype, requires_grad=True)\n\n # Issue #15353: test mkldnn double backward, don't run gradgradcheck due\n # to imprecision issues\n if dtype == torch.float:\n g, = torch.autograd.grad(dummy_out.sum(), x, create_graph=True)\n return g.requires_grad\n\n return gradgradcheck(func, inputs, (grad_y,))\n\n def _test_dropout(self, cls, device, input, memory_format=torch.contiguous_format):\n p = 0.2\n input = input.to(device).fill_(1 - p)\n\n module = cls(p)\n input_var = input.clone(memory_format=memory_format).requires_grad_()\n output = module(input_var)\n self.assertTrue(output.is_contiguous(memory_format=memory_format))\n self.assertLess(abs(output.data.mean() - (1 - p)), 0.05)\n output.backward(input)\n self.assertTrue(input_var.grad.is_contiguous(memory_format=memory_format))\n 
self.assertLess(abs(input_var.grad.data.mean() - (1 - p)), 0.05)\n\n module = cls(p, True)\n input_var = input.clone(memory_format=memory_format).requires_grad_()\n output = module(input_var + 0)\n self.assertTrue(output.is_contiguous(memory_format=memory_format))\n self.assertLess(abs(output.data.mean() - (1 - p)), 0.05)\n output.backward(input)\n self.assertTrue(input_var.grad.is_contiguous(memory_format=memory_format))\n self.assertLess(abs(input_var.grad.data.mean() - (1 - p)), 0.05)\n\n # check eval mode doesn't change anything\n for inplace in [True, False]:\n module = cls(p, inplace).eval()\n self.assertEqual(input, module(input))\n\n # Check that these don't raise errors\n module.__repr__()\n str(module)\n\n def _test_dropout_discontiguous(self, cls, device, memory_format=torch.contiguous_format):\n # In this test, we verify that dropout preserves the layout and data for different memory formats.\n # We check whether, we get same values for the output of dropout, when the probability\n # of dropout is 0 or very close to 0.\n # Reference: https://github.com/pytorch/pytorch/issues/47176\n close_to_zero_p = 1e-10 # Should be almost zero but not zero, as for p=0 different path is taken\n for p in [0, close_to_zero_p]:\n inp = torch.ones(2, 3, 3, 3, device=device)\n inp_discontiguous = torch.empty(2, 3, 3, 6, device=device, memory_format=memory_format)[..., ::2]\n inp_discontiguous.copy_(inp)\n mod = cls(p=p)\n out = mod(inp_discontiguous)\n if p != 0: # Zero will keep strides as is based on input.\n # When prob == 0, input stride (54, 18, 6, 2) -> output stride (54, 18, 6, 2)\n # When prob != 0, input stride (54, 18, 6, 2) -> output stride (27, 9, 3, 1)\n self.assertTrue(out.is_contiguous(memory_format=memory_format))\n self.assertEqual(inp_discontiguous, out)\n\n def _test_dropout_stride_mean_preserve(self, cls, device):\n def invert_perm(p):\n d = {x: i for i, x in enumerate(p)}\n return (d[0], d[1], d[2], d[3])\n\n inp = torch.ones(2, 3, 4, 5, 
device=device)\n shifts = [(0, 0), (1, 0), (0, 1), (1, 1)]\n for perm in itertools.permutations((0, 1, 2, 3), r=4):\n for shift in shifts:\n for p in [1e-10, 0.3, 0.5, 0.7]:\n mod = cls(p=p)\n permuted_inp = inp.permute(perm).contiguous().permute(invert_perm(perm))\n permuted_inp = permuted_inp[shift[0]:, shift[1]:, :, :]\n out = mod(permuted_inp)\n\n self.assertTrue(out.permute(perm).is_contiguous())\n self.assertEqual(inp.mean(), out.mean(), rtol=0.5, atol=0.5)\n if p == 1e-10:\n self.assertEqual(permuted_inp, out)\n else:\n self.assertNotEqual(permuted_inp, out)\n\n def _test_InstanceNorm_general(self, cls, input, device, dtype=torch.float):\n # default case track_running_stats=False\n b, c = input.size(0), input.size(1)\n input_var = input.to(device=device, dtype=dtype).requires_grad_()\n\n IN = cls(c, eps=0).to(device, dtype)\n\n output = IN(input_var)\n out_reshaped = output.view(b * c, -1)\n\n mean = out_reshaped.mean(1)\n var = out_reshaped.var(1, unbiased=False)\n\n self.assertEqual(torch.abs(mean.data).mean(), 0, atol=1e-5, rtol=0)\n self.assertEqual(torch.abs(var.data).mean(), 1, atol=1e-5, rtol=0)\n\n # check that eval mode doesn't change behavior\n grad_out = torch.randn_like(output)\n res1 = output.data.clone()\n output.backward(grad_out)\n grad1 = input_var.grad.data.clone()\n\n IN.eval()\n output = IN(input_var)\n input_var.grad = None\n output.backward(grad_out)\n res2 = output.data\n grad2 = input_var.grad.data\n self.assertEqual(res1, res2)\n self.assertEqual(grad1, grad2)\n\n # If track_running_stats=True and momentum=1, running_mean/var should be\n # equal to mean/var of the input (with unbias correction)\n IN = cls(c, momentum=1, eps=0, track_running_stats=True).to(device, dtype)\n\n output = IN(input_var)\n\n input_reshaped = input_var.transpose(1, 0).reshape(c, -1)\n mean = input_reshaped.mean(1)\n\n input_reshaped = input_var.transpose(1, 0).reshape(c, b, -1)\n var = input_reshaped.var(2, unbiased=True)[:, :]\n\n 
self.assertEqual(torch.abs(mean.data - IN.running_mean).mean(), 0, atol=1e-5, rtol=0)\n self.assertEqual(torch.abs(var.data.mean(1) - IN.running_var).mean(), 0, atol=1e-5, rtol=0)\n\n # in eval mode, adding X * std to a channel in input should make the\n # corresponding channel in output have mean X\n IN.eval()\n delta = IN.running_var.sqrt() * torch.arange(c, device=device, dtype=dtype)\n delta = delta.view(-1, *[1 for _ in range(2, input.dim())])\n output = IN(input_var + delta)\n self.assertEqual(output.transpose(0, 1).reshape(c, -1).mean(1), torch.arange(c, dtype=dtype))\n\n def _test_InstanceNorm_cuda_half(self, cls, input, device):\n # THNN\n input = input.to(device=device, dtype=torch.half).random_(1, 10).requires_grad_(True)\n m = cls(input.size(1), affine=True, track_running_stats=True).to(device, torch.half)\n thnn_output = m(input)\n thnn_output.sum().backward()\n thnn_input_grad = input.grad.data.clone()\n self.assertEqualTypeString(thnn_output, input)\n # cuDNN\n if TEST_CUDNN:\n input.grad = None\n m = m.float()\n cudnn_output = m(input)\n cudnn_output.sum().backward()\n cudnn_input_grad = input.grad.data.clone()\n self.assertEqualTypeString(cudnn_output, input)\n self.assertEqual(cudnn_output, thnn_output, atol=1e-4, rtol=0)\n self.assertEqual(cudnn_input_grad, thnn_input_grad, atol=1e-3, rtol=0)\n\n def _test_LayerNorm_general(self, device, dtype=torch.float):\n for i in range(2, 6):\n shape = torch.randint(3, 6, (i,), dtype=torch.long).tolist()\n x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)\n normalized_ndim = random.randint(1, i - 1) # inclusive\n normalized_shape = shape[-normalized_ndim:]\n unnormalized_shape = shape[:-normalized_ndim]\n\n # test that LN normalizes to mean 0 and stddev 1\n ln = nn.LayerNorm(normalized_shape, eps=0).to(device, dtype)\n ln.weight.data.fill_(1)\n ln.bias.data.fill_(0)\n output = ln(x)\n out_reshaped = output.view(*(unnormalized_shape + [-1]))\n mean = out_reshaped.mean(-1)\n var = 
out_reshaped.var(-1, unbiased=False)\n\n delta = 1e-1 if dtype == torch.bfloat16 else 1e-5\n self.assertEqual(torch.abs(mean.data).mean(), 0, atol=delta, rtol=0)\n self.assertEqual(torch.abs(var.data).mean(), 1, atol=delta, rtol=0)\n\n # test that LN applies weight and bias correctly\n scale, bias = torch.empty(2).uniform_(0.2, 2).tolist()\n ln.weight.data.fill_(scale)\n ln.bias.data.fill_(bias)\n output = ln(x)\n out_reshaped = output.view(*(unnormalized_shape + [-1]))\n mean = out_reshaped.mean(-1)\n var = out_reshaped.var(-1, unbiased=False)\n self.assertEqual(torch.abs(mean.data).mean(), bias, atol=delta, rtol=0)\n self.assertEqual(torch.abs(var.data).mean(), scale ** 2, atol=delta, rtol=0)\n\n bad_norm_shape_input_shape = {\n (): (),\n (2, 3): (3,),\n (2,): (1, 2, 3),\n (10,): (2, 3),\n 10: (2, 3),\n }\n for norm_shape, input_shape in bad_norm_shape_input_shape.items():\n ln = nn.LayerNorm(norm_shape)\n input = torch.empty(input_shape, device=device, dtype=dtype).uniform_(0, 10)\n self.assertRaises(RuntimeError, lambda: ln(input))\n\n def _test_LayerNorm_cuda_half(self, device):\n input = torch.empty(2, 3, 3, 2, device=device, dtype=torch.half).random_(1, 10).requires_grad_(True)\n m = nn.LayerNorm([3, 2]).to(device, torch.half)\n output = m(input)\n output.sum().backward()\n self.assertEqualTypeString(output, input)\n\n def _test_GroupNorm_general(self, device, dtype=torch.float):\n good_shape_g = {\n (1, 2, 3, 4): 2,\n (2, 3, 10): 3,\n (3, 1, 1, 1, 2): 1,\n (2, 6, 4, 2, 2): 3,\n (1, 256, 1, 1): 32,\n }\n for shape_g, grad in product(good_shape_g.items(), [True, False]):\n shape, g = shape_g\n x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)\n x.requires_grad_(grad)\n b = shape[0]\n c = shape[1]\n\n # test that GN normalizes to mean 0 and stddev 1\n gn = nn.GroupNorm(g, c, eps=0).to(device, dtype)\n gn.weight.data.fill_(1)\n gn.bias.data.fill_(0)\n output = gn(x)\n out_reshaped = output.view(b, g, -1)\n mean = out_reshaped.mean(-1)\n var = 
out_reshaped.var(-1, unbiased=False)\n # TODO: fix numerical issue. See #44863\n self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-3, rtol=1e-3)\n self.assertEqual(torch.abs(var).mean(), 1, atol=1e-3, rtol=1e-3)\n\n output.backward(torch.randn_like(output))\n if output.is_cuda:\n torch.cuda.synchronize()\n\n # test that GN applies weight and bias correctly\n scale = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)\n bias = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)\n gn.weight.data.copy_(scale)\n gn.bias.data.copy_(bias)\n output = gn(x)\n out_reshaped = output.view(b, c, -1)\n out_normed = (out_reshaped - bias.view(c, 1)) / scale.view(c, 1)\n out_normed_reshaped = out_normed.view(b, g, -1)\n mean = out_normed_reshaped.mean(-1)\n var = out_normed_reshaped.var(-1, unbiased=False)\n # TODO: fix numerical issue. See #44863\n self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-3, rtol=1e-3)\n self.assertEqual(torch.abs(var).mean(), 1, atol=1e-3, rtol=1e-3)\n\n bad_shape_g = {\n (1, 2, 3, 4): 3,\n (2, 3, 10): 2,\n (3, 1, 1, 1, 2): 10,\n (2, 6, 4, 2, 2): 4,\n }\n for shape, g in bad_shape_g.items():\n with self.assertRaises(ValueError):\n gn = nn.GroupNorm(g, shape[1])\n\n def _test_GroupNorm_cuda_half(self):\n input = torch.zeros(2, 4, 3, 2, requires_grad=True).cuda().half().random_(1, 10)\n m = nn.GroupNorm(2, 4).to(\"cuda\", torch.half)\n output = m(input)\n output.sum().backward()\n self.assertEqualTypeString(output, input)\n\n def _test_module_empty_input(self, module, inp, check_size=True, inference=False):\n if not inference:\n inp.requires_grad_(True)\n out = module(inp)\n if not inference:\n gO = torch.rand_like(out)\n out.backward(gO)\n if check_size:\n self.assertEqual(out.size(), inp.size())\n if not inference:\n for p in module.parameters():\n if p.requires_grad:\n self.assertEqual(p.grad, torch.zeros_like(p.grad))\n self.assertEqual(inp.grad, torch.zeros_like(inp))\n\n def _test_module_empty_inputs(self, module, inputs):\n for 
_inp in inputs:\n _inp.requires_grad_(True)\n out = module(*inputs)\n gO = torch.rand_like(out)\n out.backward(gO)\n\n for p in module.parameters():\n if p.requires_grad:\n self.assertEqual(p.grad, torch.zeros_like(p.grad))\n\n for _inp in inputs:\n self.assertEqual(_inp.grad, torch.zeros_like(_inp))\n\n @unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),\n \"Scipy v1.0 and/or numpy not found\")\n @tf32_on_and_off()\n def test_affine_2d_rotate0(self, device):\n # scipy before 1.0.0 do not support homogeneous coordinate\n # scipy.ndimage.affine_transform, so we need to skip.\n input_size = [1, 1, 3, 3]\n input_ary = np.array(np.random.random(input_size), dtype=np.float32)\n output_size = [1, 1, 5, 5]\n angle_rad = 0.\n\n transform_tensor, transform_ary, offset = \\\n _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)\n\n scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(\n input_ary[0, 0],\n transform_ary,\n offset=offset,\n output_shape=output_size[2:],\n order=1,\n mode='nearest',\n prefilter=False))\n\n affine_tensor = torch.nn.functional.affine_grid(\n transform_tensor,\n torch.Size(output_size),\n align_corners=True\n )\n\n gridsample_ary = torch.nn.functional.grid_sample(\n torch.tensor(input_ary, device=device).to(device),\n affine_tensor,\n padding_mode='border',\n align_corners=True\n ).to('cpu')\n\n self.assertEqual(scipy_ary.mean(), gridsample_ary.mean())\n self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))\n\n @unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),\n \"Scipy v1.0 and/or numpy not found\")\n @tf32_on_and_off(0.001)\n def test_affine_2d_rotate90(self, device):\n # scipy before 1.0.0 do not support homogeneous coordinate\n # scipy.ndimage.affine_transform, so we need to skip.\n for input_size2dsq, output_size2dsq in \\\n itertools.product(input_size2dsq_(), output_size2dsq_()):\n input_size = input_size2dsq\n input_ary = 
np.array(np.random.random(input_size), dtype=np.float32)\n output_size = output_size2dsq\n angle_rad = 0.25 * math.pi * 2\n\n transform_tensor, transform_ary, offset = \\\n _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)\n\n scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(\n input_ary[0, 0],\n transform_ary,\n offset=offset,\n output_shape=output_size[2:],\n order=1,\n mode='nearest',\n prefilter=True))\n\n if input_size2dsq == output_size2dsq:\n self.assertEqual(scipy_ary.mean(), input_ary.mean())\n self.assertEqual(scipy_ary[0, 0], input_ary[0, 0, 0, -1])\n self.assertEqual(scipy_ary[0, -1], input_ary[0, 0, -1, -1])\n self.assertEqual(scipy_ary[-1, -1], input_ary[0, 0, -1, 0])\n self.assertEqual(scipy_ary[-1, 0], input_ary[0, 0, 0, 0])\n\n affine_tensor = torch.nn.functional.affine_grid(\n transform_tensor,\n torch.Size(output_size),\n align_corners=True\n )\n\n gridsample_ary = torch.nn.functional.grid_sample(\n torch.tensor(input_ary, device=device).to(device),\n affine_tensor,\n padding_mode='border',\n align_corners=True\n ).to('cpu')\n\n self.assertEqual(scipy_ary.mean(), gridsample_ary.mean())\n self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))\n\n @unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),\n \"Scipy v1.0 and/or numpy not found\")\n @tf32_on_and_off(0.005)\n def test_affine_2d_rotate45(self, device):\n # scipy before 1.0.0 do not support homogeneous coordinate\n # scipy.ndimage.affine_transform, so we need to skip.\n input_size = [1, 1, 3, 3]\n input_ary = np.array(np.zeros(input_size), dtype=np.float32)\n input_ary[0, 0, 0, :] = 0.5\n input_ary[0, 0, 2, 2] = 1.0\n output_size = [1, 1, 3, 3]\n angle_rad = 0.125 * math.pi * 2\n\n transform_tensor, transform_ary, offset = \\\n _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)\n\n scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(\n input_ary[0, 0],\n transform_ary,\n 
offset=offset,\n output_shape=output_size[2:],\n order=1,\n mode='nearest',\n prefilter=False))\n\n affine_tensor = torch.nn.functional.affine_grid(\n transform_tensor,\n torch.Size(output_size),\n align_corners=True\n )\n\n gridsample_ary = torch.nn.functional.grid_sample(\n torch.tensor(input_ary, device=device).to(device),\n affine_tensor,\n padding_mode='border',\n align_corners=True\n ).to('cpu')\n\n self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))\n\n @unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),\n \"Scipy v1.0 and/or numpy not found\")\n @tf32_on_and_off(0.005)\n def test_affine_2d_rotateRandom(self, device):\n # scipy before 1.0.0 do not support homogeneous coordinate\n # scipy.ndimage.affine_transform, so we need to skip.\n for angle_rad, input_size2d, output_size2d in \\\n itertools.product(angle_rad_(), input_size2d_(), output_size2d_()):\n\n input_size = input_size2d\n input_ary = np.array(np.random.random(input_size), dtype=np.float32).round(3)\n output_size = output_size2d\n\n input_ary[0, 0, 0, 0] = 2\n input_ary[0, 0, 0, -1] = 4\n input_ary[0, 0, -1, 0] = 6\n input_ary[0, 0, -1, -1] = 8\n\n transform_tensor, transform_ary, grid_ary = \\\n _buildEquivalentAffineTransforms2d(device, input_size, output_size, angle_rad)\n\n scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(\n input_ary[0, 0],\n transform_ary,\n output_shape=output_size[2:],\n order=1,\n mode='nearest',\n prefilter=False))\n\n affine_tensor = torch.nn.functional.affine_grid(\n transform_tensor,\n torch.Size(output_size),\n align_corners=True\n )\n\n gridsample_ary = torch.nn.functional.grid_sample(\n torch.tensor(input_ary, device=device).to(device),\n affine_tensor,\n padding_mode='border',\n align_corners=True\n ).to('cpu')\n\n affine_tensor = affine_tensor.to('cpu')\n\n for r in range(affine_tensor.size(1)):\n for c in range(affine_tensor.size(2)):\n grid_out = np.dot(grid_ary, [r, c, 1])\n 
self.assertEqual(affine_tensor[0, r, c], grid_out[:2], exact_dtype=False)\n\n self.assertEqual(scipy_ary, gridsample_ary.reshape_as(scipy_ary))\n\n @unittest.skipIf((not TEST_NUMPY) or (not TEST_SCIPY) or (scipy.__version__ < '1.0.0'),\n \"Scipy v1.0 and/or numpy not found\")\n @tf32_on_and_off(0.005)\n def test_affine_3d_rotateRandom(self, device):\n # scipy before 1.0.0 do not support homogeneous coordinate\n # scipy.ndimage.affine_transform, so we need to skip.\n for angle_rad, axis_vector, input_size3d, output_size3d in \\\n itertools.product(angle_rad_(), axis_vector_(), input_size3d_(), output_size3d_()):\n input_size = input_size3d\n input_ary = np.array(np.random.random(input_size), dtype=np.float32)\n output_size = output_size3d\n\n input_ary[0, 0, 0, 0, 0] = 2\n input_ary[0, 0, 0, 0, -1] = 3\n input_ary[0, 0, 0, -1, 0] = 4\n input_ary[0, 0, 0, -1, -1] = 5\n input_ary[0, 0, -1, 0, 0] = 6\n input_ary[0, 0, -1, 0, -1] = 7\n input_ary[0, 0, -1, -1, 0] = 8\n input_ary[0, 0, -1, -1, -1] = 9\n\n transform_tensor, transform_ary, grid_ary = \\\n _buildEquivalentAffineTransforms3d(device, input_size, output_size, angle_rad, axis_vector)\n\n scipy_ary = torch.from_numpy(scipy.ndimage.affine_transform(\n input_ary[0, 0],\n transform_ary,\n output_shape=output_size[2:],\n order=1,\n mode='nearest',\n prefilter=False))\n\n affine_tensor = torch.nn.functional.affine_grid(\n transform_tensor,\n torch.Size(output_size),\n align_corners=True\n )\n\n gridsample_ary = torch.nn.functional.grid_sample(\n torch.tensor(input_ary, device=device).to(device),\n affine_tensor,\n padding_mode='border',\n align_corners=True\n ).to('cpu')\n\n affine_tensor = affine_tensor.to('cpu')\n\n for i in range(affine_tensor.size(1)):\n for r in range(affine_tensor.size(2)):\n for c in range(affine_tensor.size(3)):\n grid_out = np.dot(grid_ary, [i, r, c, 1])\n self.assertEqual(affine_tensor[0, i, r, c], grid_out[:3], exact_dtype=False)\n\n self.assertEqual(scipy_ary, 
gridsample_ary.reshape_as(scipy_ary))\n\n\n @onlyCUDA\n @skipCUDAIfNoCudnn\n @dtypes(*floating_and_complex_types_and(torch.half, *[torch.bfloat16] if AMPERE_OR_ROCM else []))\n def test_Conv2d_deterministic_cudnn(self, device, dtype):\n inputs = torch.randn(2, 3, 5, 5, device=device, dtype=dtype, requires_grad=True)\n with cudnn.flags(enabled=True, benchmark=True, deterministic=True):\n conv1 = torch.nn.Conv2d(3, 3, 3).to(device, dtype)\n conv2 = torch.nn.Conv2d(3, 3, 3).to(device, dtype)\n conv2.bias.data.copy_(conv1.bias.data)\n conv2.weight.data.copy_(conv1.weight.data)\n out1 = conv1(inputs)\n out2 = conv2(inputs)\n self.assertEqual(out1, out2, atol=0.0, rtol=0)\n y = torch.randn(out1.size(), device=device, dtype=dtype)\n out1.backward(y)\n out2.backward(y)\n self.assertEqual(conv1.bias.grad.data, conv2.bias.grad.data, atol=0.0, rtol=0)\n self.assertEqual(conv1.weight.grad.data, conv2.weight.grad.data, atol=0.0, rtol=0)\n\n\n @onlyCUDA\n @dtypes(*floating_types_and(torch.half, *[torch.bfloat16] if AMPERE_OR_ROCM else []))\n def test_Conv2d_large_workspace(self, device, dtype):\n # These sizes require huge cuDNN workspaces. 
Make sure we choose a\n # reasonable algorithm that does not run out of memory\n sizes = [\n (1, 256, 109, 175),\n (1, 256, 80, 128),\n (1, 256, 120, 192),\n ]\n\n def run_test(benchmark):\n with torch.backends.cudnn.flags(benchmark=benchmark):\n conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).to(device, dtype)\n for size in sizes:\n x = torch.randn(size, device=device, dtype=dtype)\n out = conv(x.detach().clone().requires_grad_())\n out.backward(torch.ones_like(out))\n\n run_test(benchmark=False)\n run_test(benchmark=True)\n\n\n @onlyCUDA\n @dtypes(torch.half, torch.float)\n def test_ConvTranspose2d_large_output_padding(self, device, dtype):\n net1 = torch.nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1)\\\n .to(device=device, dtype=dtype)\n net2 = torch.nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, output_padding=1)\\\n .to(device=device, dtype=dtype)\n net3 = torch.nn.ConvTranspose2d(32, 3, kernel_size=3, stride=2, padding=1, output_padding=1)\\\n .to(device=device, dtype=dtype)\n x = torch.rand(1, 128, 6, 6, device=device, dtype=dtype, requires_grad=True)\n x = net1(x)\n x = net2(x)\n x = net3(x)\n x.backward(torch.randn_like(x))\n torch.cuda.synchronize()\n\n\n @onlyCUDA\n @tf32_on_and_off(0.01)\n @dtypes(torch.float, torch.double, torch.half)\n # Very similar to test_Conv2d_naive_groups but with special care to handle\n # the number of groups == number of input channels\n def test_Conv2d_depthwise_naive_groups(self, device, dtype):\n for depth_multiplier in [1, 2]:\n m = nn.Conv2d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to(device, dtype)\n i = torch.randn(2, 2, 6, 6, device=\"cuda\", dtype=dtype).div_(2).requires_grad_()\n output = m(i)\n grad_output = torch.randn(2, 2 * depth_multiplier, 4, 4, device=device, dtype=dtype) / 2\n output.backward(grad_output)\n\n offset = 1 * depth_multiplier\n\n m1 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)\n m1.weight.data = 
m.weight.data[:offset].clone()\n m1.bias.data = m.bias.data[:offset].clone()\n i1 = i.detach()[:, :1].clone().requires_grad_()\n output1 = m1(i1)\n output1.backward(grad_output[:, :offset].contiguous())\n\n m2 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)\n m2.weight.data.copy_(m.weight.data[offset:])\n m2.bias.data.copy_(m.bias.data[offset:])\n i2 = i.detach()[:, 1:].clone().requires_grad_()\n output2 = m2(i2)\n output2.backward(grad_output[:, offset:].contiguous())\n\n self.assertEqual(output, torch.cat([output1, output2], 1),\n atol=dtype2prec_DONTUSE[dtype], rtol=0)\n self.assertEqual(i.grad.data,\n torch.cat([i1.grad.data, i2.grad.data], 1),\n atol=dtype2prec_DONTUSE[dtype], rtol=0)\n self.assertEqual(m.bias.grad.data,\n torch.cat([m1.bias.grad.data,\n m2.bias.grad.data], 0),\n atol=dtype2prec_DONTUSE[dtype], rtol=0)\n self.assertEqual(m.weight.grad.data,\n torch.cat([m1.weight.grad.data,\n m2.weight.grad.data], 0),\n atol=dtype2prec_DONTUSE[dtype], rtol=0)\n\n @onlyCUDA\n @dtypes(torch.float, torch.double, torch.half)\n @tf32_on_and_off(0.005)\n def test_Conv3d_depthwise_naive_groups(self, device, dtype):\n for depth_multiplier in [1, 2]:\n m = nn.Conv3d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to(device, dtype)\n i = torch.randn(2, 2, 6, 6, 6, device=\"cuda\", dtype=dtype).div_(2).requires_grad_()\n output = m(i)\n grad_output = torch.randn(2, 2 * depth_multiplier, 4, 4, 4, device=device, dtype=dtype) / 2\n output.backward(grad_output)\n\n offset = 1 * depth_multiplier\n\n m1 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)\n m1.weight.data = m.weight.data[:offset].clone()\n m1.bias.data = m.bias.data[:offset].clone()\n i1 = i.detach()[:, :1].clone().requires_grad_()\n output1 = m1(i1)\n output1.backward(grad_output[:, :offset].contiguous())\n\n m2 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)\n m2.weight.data.copy_(m.weight.data[offset:])\n 
m2.bias.data.copy_(m.bias.data[offset:])\n i2 = i.detach()[:, 1:].clone().requires_grad_()\n output2 = m2(i2)\n output2.backward(grad_output[:, offset:].contiguous())\n\n self.assertEqual(output, torch.cat([output1, output2], 1),\n atol=dtype2prec_DONTUSE[dtype], rtol=0)\n self.assertEqual(i.grad.data,\n torch.cat([i1.grad.data, i2.grad.data], 1),\n atol=dtype2prec_DONTUSE[dtype], rtol=0)\n self.assertEqual(m.bias.grad.data,\n torch.cat([m1.bias.grad.data,\n m2.bias.grad.data], 0),\n atol=dtype2prec_DONTUSE[dtype], rtol=0)\n self.assertEqual(m.weight.grad.data,\n torch.cat([m1.weight.grad.data,\n m2.weight.grad.data], 0),\n atol=dtype2prec_DONTUSE[dtype], rtol=0)\n\n\n @onlyCUDA\n @dtypes(*floating_types_and(torch.half, *[torch.bfloat16] if AMPERE_OR_ROCM else []))\n def test_noncontig_conv_grad(self, device, dtype):\n # FIXME: remove after adding non-contiguous grad tests for all modules\n module = nn.Conv2d(3, 5, kernel_size=3, padding=1).to(device, dtype)\n input = torch.randn(2, 3, 10, 10, dtype=dtype, device=device, requires_grad=True)\n output = module(input)\n\n grad = torch.randn(2, 2, 5, 10, 10, dtype=dtype, device=device)[:, 1]\n assert not grad.is_contiguous()\n output.backward(grad, retain_graph=True)\n self.assertIsNotNone(input.grad)\n result = input.grad.data.clone()\n input.grad.data.zero_()\n\n output.backward(grad.contiguous())\n self.assertEqual(result, input.grad.data, atol=dtype2prec_DONTUSE[dtype], rtol=0)\n\n\n @onlyCUDA\n @dtypes(torch.float, torch.half)\n def test_batchnorm_large_batch(self, device, dtype):\n bn = nn.BatchNorm2d(1).to(device, dtype)\n data = torch.rand(880801, 1, 1, 1, device=device, dtype=dtype)\n out = bn(data).sum().backward()\n\n\n @onlyCUDA\n @dtypes(torch.double)\n def test_conv_double_backward(self, device, dtype):\n with torch.backends.cudnn.flags(deterministic=True):\n # Double backward only runs with DoubleTensor due to precision reason\n batch_size = 1\n for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 9, 
[1])]:\n for stride, padding, chan_in, chan_out, dilation in product([1], [2], [2], [3], dilations):\n no_weight = stride == 2\n result = self.run_conv_double_back_test(kern, stride,\n padding, chan_in, chan_out,\n batch_size, inp_size, dilation,\n no_weight, use_cuda=True, dtype=dtype)\n self.assertTrue(result,\n \"Conv double backward test failed with parameters:\" +\n \"\\nkern: \" + str(kern) +\n \"\\nstride: \" + str(stride) +\n \"\\npadding: \" + str(padding) +\n \"\\nchan_in: \" + str(chan_in) +\n \"\\nchan_out: \" + str(chan_out) +\n \"\\nbatch_size: \" + str(batch_size) +\n \"\\ninp_size: \" + str(inp_size) +\n \"\\ndilation: \" + str(dilation))\n\n\n def test_conv_double_backward_no_bias(self):\n kern = 3\n stride = 2\n chan_in, chan_out = 2, 4\n batch_size = 2\n inp_size = 5\n padding = 1\n dilation = 1\n no_weight = False\n use_bias = True\n result = self.run_conv_double_back_test(kern, stride,\n padding, chan_in, chan_out,\n batch_size, inp_size, dilation,\n no_weight, use_bias=use_bias)\n self.assertTrue(result,\n \"Conv double backward test failed with parameters:\" +\n \"\\nkern: \" + str(kern) +\n \"\\nstride: \" + str(stride) +\n \"\\npadding: \" + str(padding) +\n \"\\nchan_in: \" + str(chan_in) +\n \"\\nchan_out: \" + str(chan_out) +\n \"\\nbatch_size: \" + str(batch_size) +\n \"\\ninp_size: \" + str(inp_size) +\n \"\\ndilation: \" + str(dilation))\n\n\n def test_conv_double_backward_groups(self):\n kern = 3\n stride = 1\n padding = 2\n chan_in, chan_out = 2, 4\n batch_size = 2\n inp_size = 6\n dilation = 1\n no_weight = False\n groups = 2\n result = self.run_conv_double_back_test(kern, stride,\n padding, chan_in * groups, chan_out * groups,\n batch_size, inp_size, dilation,\n no_weight, groups=groups)\n self.assertTrue(result,\n \"Conv double backward test failed with parameters:\" +\n \"\\nkern: \" + str(kern) +\n \"\\nstride: \" + str(stride) +\n \"\\npadding: \" + str(padding) +\n \"\\nchan_in: \" + str(chan_in) +\n \"\\nchan_out: \" + 
str(chan_out) +\n \"\\nbatch_size: \" + str(batch_size) +\n \"\\ninp_size: \" + str(inp_size) +\n \"\\ndilation: \" + str(dilation) +\n \"\\ngroups: \" + str(groups))\n\n\n def test_conv_double_backward_stride(self):\n batch_size = 2\n\n # Cannot provide ggW when stride is > 1\n for kern, inp_size, dilations in [(3, 5, [1, 2]), (3, 7, [1])]:\n for stride, padding, chan_in, chan_out, dilation in product([2], [0, 1], [1], [2], dilations):\n no_weight = False\n self.run_conv_double_back_test(kern, stride,\n padding, chan_in, chan_out,\n batch_size, inp_size, dilation,\n no_weight)\n\n @dtypes(torch.float, torch.cfloat)\n def test_conv1d_same_padding(self, device, dtype):\n # Test padding='same' outputs the correct shape\n test_args = [\n # in_size\n range(50, 55),\n # kernel_size\n [1, 2, 3, 8],\n # dilation\n range(1, 4),\n # stride\n [1],\n ]\n for in_size, k_size, dilation, stride in itertools.product(*test_args):\n x = torch.rand(1, 1, in_size, device=device, dtype=dtype)\n y = torch.rand(1, 1, k_size, device=device, dtype=dtype)\n z = F.conv1d(x, y, padding='same', dilation=dilation, stride=stride)\n self.assertEqual(z.size(2), int(math.ceil(in_size / stride)))\n\n # Compare F.conv1d padding='same' output against manual padding\n # Without strides/dilation\n x = torch.rand(1, 1, 12, device=device, dtype=dtype)\n y = torch.rand(1, 1, 3, device=device, dtype=dtype)\n expect = F.conv1d(x, y, padding=1)\n actual = F.conv1d(x, y, padding='same')\n self.assertEqual(expect, actual)\n\n # With dilation\n x = torch.rand(1, 1, 12, device=device, dtype=dtype)\n y = torch.rand(1, 1, 4, device=device, dtype=dtype)\n expect = F.conv1d(x, y, padding=3, dilation=2)\n actual = F.conv1d(x, y, padding='same', dilation=2)\n self.assertEqual(expect, actual)\n\n # Dilation with asymmetric padding\n expect = F.conv1d(x, y, padding=5, dilation=3)[..., 1:]\n actual = F.conv1d(x, y, padding='same', dilation=3)\n self.assertEqual(expect, actual)\n\n @dtypes(torch.float, torch.cfloat)\n def 
test_conv2d_same_padding(self, device, dtype):\n if dtype is torch.cfloat:\n rtol, atol = 2e-6, 2e-6\n else:\n rtol, atol = None, None\n # Compare F.conv2d padding='same' output against manual padding\n # Without strides/dilation\n x = torch.rand(1, 1, 10, 11, device=device, dtype=dtype)\n y = torch.rand(1, 1, 4, 5, device=device, dtype=dtype)\n expect = F.conv2d(x, y, padding=(2, 2))[..., 1:, :]\n actual = F.conv2d(x, y, padding='same')\n self.assertEqual(expect, actual, rtol=rtol, atol=atol)\n\n # With dilation\n y = torch.rand(1, 1, 3, 4, device=device, dtype=dtype)\n expect = F.conv2d(x, y, padding=(2, 3), dilation=2)\n actual = F.conv2d(x, y, padding='same', dilation=2)\n self.assertEqual(expect, actual, rtol=rtol, atol=atol)\n\n # Dilation with asymmetric padding\n y = torch.rand(1, 1, 4, 4, device=device, dtype=dtype)\n expect = F.conv2d(x, y, padding=5, dilation=3)[..., 1:, 1:]\n actual = F.conv2d(x, y, padding='same', dilation=3)\n self.assertEqual(expect, actual, rtol=rtol, atol=atol)\n\n @dtypes(torch.float, torch.cfloat)\n def test_conv3d_same_padding(self, device, dtype):\n if dtype is torch.cfloat:\n rtol, atol = 2e-6, 2e-6\n else:\n rtol, atol = None, None\n # Compare F.conv3d padding='same' output against manual padding\n # Without strides/dilation\n x = torch.rand(1, 1, 10, 11, 12, device=device, dtype=dtype)\n y = torch.rand(1, 1, 1, 2, 5, device=device, dtype=dtype)\n expect = F.conv3d(x, y, padding=(0, 1, 2))[..., :, 1:, :]\n actual = F.conv3d(x, y, padding='same')\n self.assertEqual(expect, actual, rtol=rtol, atol=atol)\n\n # With dilation\n expect = F.conv3d(x, y, padding=(0, 1, 4), dilation=2)\n actual = F.conv3d(x, y, padding='same', dilation=2)\n self.assertEqual(expect, actual, rtol=rtol, atol=atol)\n\n # Dilation with asymmetric padding\n y = torch.rand(1, 1, 4, 4, 4, device=device, dtype=dtype)\n expect = F.conv3d(x, y, padding=5, dilation=3)[..., 1:, 1:, 1:]\n actual = F.conv3d(x, y, padding='same', dilation=3)\n 
self.assertEqual(expect, actual, rtol=rtol, atol=atol)\n\n @dtypes(torch.float, torch.cfloat)\n def test_conv1d_valid_padding(self, device, dtype):\n # Test F.conv1d padding='valid' is the same as no padding\n x = torch.rand(1, 1, 10, device=device, dtype=dtype)\n y = torch.rand(1, 1, 4, device=device, dtype=dtype)\n expect = F.conv1d(x, y)\n actual = F.conv1d(x, y, padding='valid')\n self.assertEqual(expect, actual)\n\n @dtypes(torch.float, torch.cfloat)\n def test_conv2d_valid_padding(self, device, dtype):\n # Test F.conv2d padding='valid' is the same as no padding\n x = torch.rand(1, 1, 1, 10, device=device, dtype=dtype)\n y = torch.rand(1, 1, 1, 4, device=device, dtype=dtype)\n expect = F.conv2d(x, y)\n actual = F.conv2d(x, y, padding='valid')\n self.assertEqual(expect, actual)\n\n @dtypes(torch.float, torch.cfloat)\n def test_conv3d_valid_padding(self, device, dtype):\n # Test F.conv3d padding='valid' is the same as no padding\n x = torch.rand(1, 1, 1, 1, 10, dtype=dtype, device=device)\n y = torch.rand(1, 1, 1, 1, 4, dtype=dtype, device=device)\n expect = F.conv3d(x, y)\n actual = F.conv3d(x, y, padding='valid')\n self.assertEqual(expect, actual)\n\n @dtypes(torch.float, torch.cfloat)\n def test_conv1d_same_padding_backward(self, device, dtype):\n # Test F.conv1d gradients work with padding='same'\n x = torch.rand(1, 1, 12, dtype=dtype, device=device, requires_grad=True)\n y = torch.rand(1, 1, 4, dtype=dtype, device=device, requires_grad=True)\n\n # Symmetric padding\n z = F.conv1d(x, y, padding=3, dilation=2)\n z.sum().backward()\n gx_expect, gy_expect = x.grad, y.grad\n x.grad, y.grad = None, None\n\n z = F.conv1d(x, y, padding='same', dilation=2)\n z.sum().backward()\n self.assertEqual(gx_expect, x.grad)\n self.assertEqual(gy_expect, y.grad)\n x.grad, y.grad = None, None\n\n # Asymmetric padding\n z = F.conv1d(x, y, padding=2)[..., 1:]\n z.sum().backward()\n gx_expect, gy_expect = x.grad, y.grad\n x.grad, y.grad = None, None\n\n z = F.conv1d(x, y, 
padding='same')\n z.sum().backward()\n self.assertEqual(gx_expect, x.grad)\n self.assertEqual(gy_expect, y.grad)\n\n @dtypes(torch.float, torch.cfloat)\n def test_conv2d_same_padding_backward(self, device, dtype):\n # Test F.conv2d gradients work with padding='same'\n x = torch.rand(1, 1, 10, 11, device=device, dtype=dtype, requires_grad=True)\n y = torch.rand(1, 1, 4, 5, device=device, dtype=dtype, requires_grad=True)\n\n # Symmetric padding\n z = F.conv2d(x, y, padding=(3, 4), dilation=2)\n z.sum().backward()\n gx_expect, gy_expect = x.grad, y.grad\n x.grad, y.grad = None, None\n\n z = F.conv2d(x, y, padding='same', dilation=2)\n z.sum().backward()\n self.assertEqual(gx_expect, x.grad)\n self.assertEqual(gy_expect, y.grad)\n x.grad, y.grad = None, None\n\n # Asymmetric padding\n y = torch.rand(1, 1, 4, 4, device=device, dtype=dtype, requires_grad=True)\n z = F.conv2d(x, y, padding=2)[..., 1:, 1:]\n z.sum().backward()\n gx_expect, gy_expect = x.grad, y.grad\n x.grad, y.grad = None, None\n\n z = F.conv2d(x, y, padding='same')\n z.sum().backward()\n self.assertEqual(gx_expect, x.grad)\n self.assertEqual(gy_expect, y.grad)\n\n @dtypes(torch.double, torch.cdouble)\n def test_conv3d_same_padding_backward(self, device, dtype):\n check_forward_ad = torch.device(device).type != 'xla'\n\n # Test F.conv3d gradients work with padding='same'\n x = torch.rand(1, 1, 1, 11, 12, dtype=dtype, device=device, requires_grad=True)\n y = torch.rand(1, 1, 1, 2, 5, dtype=dtype, device=device, requires_grad=True)\n\n # Symmetric padding\n z = F.conv3d(x, y, padding=(0, 1, 4), dilation=2)\n z.sum().backward()\n gx_expect, gy_expect = x.grad, y.grad\n x.grad, y.grad = None, None\n\n z = F.conv3d(x, y, padding='same', dilation=2)\n z.sum().backward()\n self.assertEqual(gx_expect, x.grad)\n self.assertEqual(gy_expect, y.grad)\n x.grad, y.grad = None, None\n\n gradcheck(lambda x, y: F.conv3d(x, y, padding='same', dilation=2), (x, y),\n check_forward_ad=check_forward_ad, nondet_tol=1e-5)\n if 
torch.device(device).type != 'cuda':\n # https://github.com/pytorch/pytorch/issues/70702\n gradgradcheck(lambda x, y: F.conv3d(x, y, padding='same', dilation=2), (x, y),\n check_fwd_over_rev=True)\n\n # Asymmetric padding\n y = torch.rand(1, 1, 1, 4, 4, dtype=dtype, device=device, requires_grad=True)\n z = F.conv3d(x, y, padding=2)[..., 1:, 1:]\n z.sum().backward()\n gx_expect, gy_expect = x.grad, y.grad\n x.grad, y.grad = None, None\n\n z = F.conv3d(x, y, padding='same')\n z.sum().backward()\n self.assertEqual(gx_expect, x.grad)\n self.assertEqual(gy_expect, y.grad)\n\n gradcheck(lambda x, y: F.conv3d(x, y, padding='same'), (x, y),\n check_forward_ad=check_forward_ad, nondet_tol=1e-5)\n if torch.device(device).type != 'cuda':\n # https://github.com/pytorch/pytorch/issues/70702\n gradgradcheck(lambda x, y: F.conv3d(x, y, padding='same'), (x, y),\n check_fwd_over_rev=True)\n\n @dtypes(torch.float, torch.cfloat)\n def test_conv1d_valid_padding_backward(self, device, dtype):\n # Test F.conv1d gradients work with padding='valid'\n x = torch.rand(1, 1, 10, dtype=dtype, device=device, requires_grad=True)\n y = torch.rand(1, 1, 4, dtype=dtype, device=device, requires_grad=True)\n F.conv1d(x, y, padding=0).sum().backward()\n gx_expect, gy_expect = x.grad, y.grad\n x.grad, y.grad = None, None\n\n F.conv1d(x, y, padding='valid').sum().backward()\n gx_actual, gy_actual = x.grad, y.grad\n self.assertEqual(gx_expect, gx_actual)\n self.assertEqual(gy_expect, gy_actual)\n\n @unittest.skipIf(not TEST_SCIPY, \"Scipy required for the test.\")\n @dtypes(torch.float, torch.cfloat)\n @parametrize_test(\"mode\", ('valid', 'same'))\n def test_conv1d_vs_scipy(self, device, dtype, mode):\n t = make_tensor((1, 10), device=device, dtype=dtype)\n feat_dim = t.shape[1]\n weight_even = make_tensor((1, 1, 4), device=device, dtype=dtype)\n weight_odd = make_tensor((1, 1, 5), device=device, dtype=dtype)\n\n def _test(t, weight, mode):\n # SciPy expects two 1-D inputs.\n t_a = 
t.view(-1).cpu().numpy()\n w_a = weight.view(-1).cpu().numpy()\n expected = scipy.signal.convolve(t_a, w_a, mode=mode)\n\n kwargs = {'padding': mode}\n if mode == 'same':\n # `same` padding in PyTorch conv1d is different\n # from SciPy\n p = weight.shape[2] // 2\n t = torch.nn.functional.pad(t, (p, p))\n # We have already taken care of padding\n kwargs.pop(\"padding\")\n\n # second input is flipped in SciPy's convolve\n weight_flipped = torch.flip(weight, (2,))\n actual = torch.nn.functional.conv1d(t, weight_flipped, **kwargs).squeeze(0)\n if mode == 'same':\n actual = actual[:feat_dim]\n\n self.assertEqual(actual, expected)\n\n # Global dtype for this test suite is torch.double\n # This leads to change in type-promotion\n # and conv1d outputs `complex128` for `complex64` input.\n with set_default_dtype(torch.float):\n _test(t, weight_even, mode)\n _test(t, weight_odd, mode)\n\n @unittest.skipIf(not TEST_SCIPY, \"Scipy required for the test.\")\n @dtypes(torch.float, torch.cfloat)\n @parametrize_test(\"mode\", ('valid', 'same'))\n def test_conv2d_vs_scipy(self, device, dtype, mode):\n t = make_tensor((1, 5, 10), device=device, dtype=dtype)\n weight_even = make_tensor((1, 1, 2, 4), device=device, dtype=dtype)\n weight_odd = make_tensor((1, 1, 3, 5), device=device, dtype=dtype)\n\n def _test(t, weight, mode):\n # SciPy expects two 2-D inputs.\n t_a = t.squeeze(0).cpu().numpy()\n w_a = weight.squeeze(0).squeeze(0).cpu().numpy()\n expected = scipy.signal.convolve2d(t_a, w_a, mode=mode)\n\n kwargs = {'padding': mode}\n if mode == 'same':\n # `same` padding in PyTorch conv2d is different\n # from SciPy\n left_right_pad = weight.shape[3] // 2\n top_bottom_pad = weight.shape[2] // 2\n p = (left_right_pad, left_right_pad, top_bottom_pad, top_bottom_pad)\n t = torch.nn.functional.pad(t, p)\n # We have already taken care of padding\n kwargs.pop(\"padding\")\n\n # second input is flipped in SciPy's convolve2d\n weight_flipped = torch.flip(weight, (2, 3))\n actual = 
torch.nn.functional.conv2d(t, weight_flipped, **kwargs).squeeze(0)\n if mode == 'same':\n actual = actual[:5, :10]\n\n self.assertEqual(actual, expected, rtol=2e-5, atol=5e-6)\n\n # Global dtype for this test suite is torch.double\n # This leads to change in type-promotion\n # and conv1d outputs `complex128` for `complex64` input.\n with set_default_dtype(torch.float):\n _test(t, weight_even, mode)\n _test(t, weight_odd, mode)\n\n @unittest.skipIf(not TEST_SCIPY, \"Scipy required for the test.\")\n @dtypes(torch.float, torch.cfloat)\n @parametrize_test(\"mode\", ('valid', 'same'))\n def test_conv3d_vs_scipy(self, device, dtype, mode):\n t = make_tensor((1, 5, 5, 10), device=device, dtype=dtype)\n weight_even = make_tensor((1, 1, 2, 2, 4), device=device, dtype=dtype)\n weight_odd = make_tensor((1, 1, 2, 3, 5), device=device, dtype=dtype)\n\n def _test(t, weight, mode):\n # SciPy expects two 3-D inputs.\n t_a = t.squeeze(0).cpu().numpy()\n w_a = weight.squeeze(0).squeeze(0).cpu().numpy()\n expected = scipy.signal.convolve(t_a, w_a, mode=mode)\n\n kwargs = {'padding': mode}\n if mode == 'same':\n # `same` padding in PyTorch conv3d is different\n # from SciPy\n left_right_pad = weight.shape[4] // 2\n top_bottom_pad = weight.shape[3] // 2\n front_back_pad = weight.shape[2] // 2\n p = (left_right_pad, left_right_pad, top_bottom_pad, top_bottom_pad,\n front_back_pad, front_back_pad)\n t = torch.nn.functional.pad(t, p)\n # We have already taken care of padding\n kwargs.pop(\"padding\")\n\n # second input is flipped in SciPy's convolve\n weight_flipped = torch.flip(weight, (2, 3, 4))\n actual = torch.nn.functional.conv3d(t, weight_flipped, **kwargs).squeeze(0)\n if mode == 'same':\n actual = actual[:5, :5, :10]\n\n self.assertEqual(actual, expected, rtol=2e-5, atol=5e-6)\n\n # Global dtype for this test suite is torch.double\n # This leads to change in type-promotion\n # and conv1d outputs `complex128` for `complex64` input.\n with set_default_dtype(torch.float):\n _test(t, 
weight_even, mode)\n _test(t, weight_odd, mode)\n\n @dtypes(torch.float, torch.complex64)\n def test_conv2d_valid_padding_backward(self, device, dtype):\n # Test F.conv2d gradients work with padding='valid'\n x = torch.rand(1, 1, 1, 10, device=device, dtype=dtype, requires_grad=True)\n y = torch.rand(1, 1, 1, 4, device=device, dtype=dtype, requires_grad=True)\n F.conv2d(x, y, padding=0).sum().backward()\n gx_expect, gy_expect = x.grad, y.grad\n x.grad, y.grad = None, None\n\n F.conv2d(x, y, padding='valid').sum().backward()\n gx_actual, gy_actual = x.grad, y.grad\n self.assertEqual(gx_expect, gx_actual)\n self.assertEqual(gy_expect, gy_actual)\n\n @dtypes(torch.double, torch.cdouble)\n def test_conv3d_valid_padding_backward(self, device, dtype):\n check_forward_ad = torch.device(device).type != 'xla'\n\n # Test F.conv3d gradients work with padding='valid'\n x = torch.rand(1, 1, 1, 1, 10, dtype=dtype, device=device, requires_grad=True)\n y = torch.rand(1, 1, 1, 1, 4, dtype=dtype, device=device, requires_grad=True)\n F.conv3d(x, y, padding=0).sum().backward()\n gx_expect, gy_expect = x.grad, y.grad\n x.grad, y.grad = None, None\n\n F.conv3d(x, y, padding='valid').sum().backward()\n gx_actual, gy_actual = x.grad, y.grad\n self.assertEqual(gx_expect, gx_actual)\n self.assertEqual(gy_expect, gy_actual)\n\n gradcheck(lambda x, y: F.conv3d(x, y, padding='valid'), (x, y), check_forward_ad=check_forward_ad)\n gradgradcheck(lambda x, y: F.conv3d(x, y, padding='valid'), (x, y), check_fwd_over_rev=check_forward_ad)\n\n @parametrize_test(\"N\", range(2, 4), name_fn=lambda N: 'ConvTranspose{}d'.format(N))\n def test_conv_transpose_with_output_size_and_no_batch_dim(self, device, N):\n # For inputs with no batch dim, verify output is the correct shape when output_size is set.\n # See https://github.com/pytorch/pytorch/issues/75889\n inp = torch.randn((1, 15, 13) if N == 2 else (1, 15, 13, 13), device=device)\n output_size = (1, 240, 200) if N == 2 else (1, 240, 200, 200)\n 
ConvTransposeNd = getattr(nn, 'ConvTranspose{}d'.format(N))\n m = ConvTransposeNd(1, 1, kernel_size=16, stride=16, padding=7, bias=False, device=device)\n output = m(inp, output_size=output_size)\n self.assertEqual(output.shape, output_size)\n\n @skipMeta\n @parametrize_test(\"input_shape,transposed,dilated,groups,layout,backend_expected\", [\n # === slow ===\n subtest(((2, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Slow2d),\n decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d'),\n subtest(((2, 6, 7), True, False, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),\n decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d_transposed'),\n subtest(((2, 6, 7), False, True, 3, torch.strided, torch._C._ConvBackend.SlowDilated2d),\n decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d_dilated'),\n subtest(((2, 6, 7), True, True, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),\n decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow1d_dilated_transposed'),\n subtest(((2, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Slow2d),\n decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d'),\n subtest(((2, 6, 7, 8), True, False, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),\n decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d_transposed'),\n subtest(((2, 6, 7, 8), False, True, 3, torch.strided, torch._C._ConvBackend.SlowDilated2d),\n decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d_dilated'),\n subtest(((2, 6, 7, 8), True, True, 3, torch.strided, torch._C._ConvBackend.SlowTranspose2d),\n decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow2d_dilated_transposed'),\n subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Slow3d),\n decorators=[onlyCPU, disableMkldnn], name='slow3d_cpu'),\n # CUDA 
doesn't have a slow 3D implementation, so it goes to the dilated 3D implementation instead\n subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.SlowDilated3d),\n decorators=[onlyCUDA, disablecuDNN], name='slow3d_cuda'),\n subtest(((2, 6, 7, 8, 9), True, False, 3, torch.strided, torch._C._ConvBackend.SlowTranspose3d),\n decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow3d_transposed'),\n subtest(((2, 6, 7, 8, 9), False, True, 3, torch.strided, torch._C._ConvBackend.SlowDilated3d),\n decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow3d_dilated'),\n subtest(((2, 6, 7, 8, 9), True, True, 3, torch.strided, torch._C._ConvBackend.SlowTranspose3d),\n decorators=[onlyNativeDeviceTypes, disableMkldnn, disablecuDNN], name='slow3d_dilated_transposed'),\n subtest(((0, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),\n decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch1d'),\n subtest(((2, 0, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),\n decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_channel1d'),\n subtest(((0, 0, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),\n decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch_channel1d'),\n subtest(((0, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),\n decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch2d'),\n subtest(((2, 0, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),\n decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_channel2d'),\n subtest(((0, 0, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),\n decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch_channel2d'),\n subtest(((0, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),\n decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch3d'),\n subtest(((2, 
0, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),\n decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_channel3d'),\n subtest(((0, 0, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Empty),\n decorators=[onlyNativeDeviceTypes, disableMkldnn], name='empty_batch_channel3d'),\n # === cuda ===\n # Note that disablecuDNN disables miopen as well.\n subtest(((2, 6, 7), False, False, 6, torch.strided, torch._C._ConvBackend.CudaDepthwise2d),\n decorators=[onlyCUDA, disablecuDNN], name='cuda_depthwise1d'),\n subtest(((2, 6, 7, 8), False, False, 6, torch.strided, torch._C._ConvBackend.CudaDepthwise2d),\n decorators=[onlyCUDA, disablecuDNN], name='cuda_depthwise2d'),\n subtest(((2, 6, 7, 8, 9), False, False, 6, torch.strided, torch._C._ConvBackend.CudaDepthwise3d),\n decorators=[onlyCUDA, disablecuDNN], name='cuda_depthwise3d'),\n # === cudnn ===\n subtest(((2, 6, 7), False, False, 3, torch.strided, torch._C._ConvBackend.Cudnn),\n decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn1d'),\n subtest(((2, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Cudnn),\n decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn2d'),\n subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Cudnn),\n decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn3d'),\n subtest(((2, 6, 7), True, False, 3, torch.strided, torch._C._ConvBackend.CudnnTranspose),\n decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn1d_transposed'),\n subtest(((2, 6, 7, 8), True, False, 3, torch.strided, torch._C._ConvBackend.CudnnTranspose),\n decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn2d_transposed'),\n subtest(((2, 6, 7, 8, 9), True, False, 3, torch.strided, torch._C._ConvBackend.CudnnTranspose),\n decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn3d_transposed'),\n # === miopen ===\n subtest(((2, 6, 7), 
False, False, 3, torch.strided, torch._C._ConvBackend.Miopen),\n decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen1d'),\n subtest(((2, 6, 7, 8), False, False, 3, torch.strided, torch._C._ConvBackend.Miopen),\n decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen2d'),\n subtest(((2, 6, 7, 8, 9), False, False, 3, torch.strided, torch._C._ConvBackend.Miopen),\n decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen3d'),\n subtest(((2, 6, 7), True, False, 3, torch.strided, torch._C._ConvBackend.MiopenTranspose),\n decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen1d_transposed'),\n subtest(((2, 6, 7, 8), True, False, 3, torch.strided, torch._C._ConvBackend.MiopenTranspose),\n decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen2d_transposed'),\n subtest(((2, 6, 7, 8, 9), True, False, 3, torch.strided, torch._C._ConvBackend.MiopenTranspose),\n decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen3d_transposed'),\n subtest(((2, 6, 7), False, False, 6, torch.strided, torch._C._ConvBackend.MiopenDepthwise),\n decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen_depthwise1d'),\n subtest(((2, 6, 7, 8), False, False, 6, torch.strided, torch._C._ConvBackend.MiopenDepthwise),\n decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen_depthwise2d'),\n subtest(((2, 6, 7, 8, 9), False, False, 6, torch.strided, torch._C._ConvBackend.MiopenDepthwise),\n decorators=[onlyCUDA, skipCUDAIfNoMiopen], name='miopen_depthwise3d'),\n # === mkldnn ===\n subtest(((2, 6, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn1d'),\n subtest(((2, 6, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn2d'),\n subtest(((2, 6, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn3d'),\n # Transposed convolution is broken for mkldnn. 
See https://github.com/pytorch/pytorch/issues/68775.\n subtest(((2, 6, 7), True, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),\n decorators=[onlyCPU, skipCPUIfNoMkldnn, unittest.expectedFailure], name='mkldnn1d_transposed'),\n subtest(((2, 6, 7, 8), True, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),\n decorators=[onlyCPU, skipCPUIfNoMkldnn, unittest.expectedFailure], name='mkldnn2d_transposed'),\n subtest(((2, 6, 7, 8, 9), True, False, 3, torch._mkldnn, torch._C._ConvBackend.Mkldnn),\n decorators=[onlyCPU, skipCPUIfNoMkldnn, unittest.expectedFailure], name='mkldnn3d_transposed'),\n subtest(((2, 6, 7), False, True, 3, torch.strided, torch._C._ConvBackend.Mkldnn),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn1d_cpu_input'),\n subtest(((2, 6, 7, 8), False, True, 3, torch.strided, torch._C._ConvBackend.Mkldnn),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn2d_cpu_input'),\n subtest(((2, 6, 7, 8, 9), False, True, 3, torch.strided, torch._C._ConvBackend.Mkldnn),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn3d_cpu_input'),\n subtest(((0, 6, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch1d'),\n subtest(((2, 0, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_channel1d'),\n subtest(((0, 0, 7), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch_channel1d'),\n subtest(((0, 6, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch2d'),\n subtest(((2, 0, 7, 8), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_channel2d'),\n subtest(((0, 0, 7, 8), False, False, 3, torch._mkldnn, 
torch._C._ConvBackend.MkldnnEmpty),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch_channel2d'),\n subtest(((0, 6, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch3d'),\n subtest(((2, 0, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_channel3d'),\n subtest(((0, 0, 7, 8, 9), False, False, 3, torch._mkldnn, torch._C._ConvBackend.MkldnnEmpty),\n decorators=[onlyCPU, skipCPUIfNoMkldnn], name='mkldnn_empty_batch_channel3d'),\n # Note: Tests for mobile backends are not currently supported. This comprises\n # NnpackSpatial, Winograd3x3Depthwise, and Xnnpack2d backends. Testing these\n # requires the ability to gate tests by whether PyTorch is built with USE_MOBILE=1.\n ])\n # Test with both bias and no bias.\n @parametrize_test(\"has_bias\", [False, True])\n # Test with both stride=1 and stride>1 cases.\n @parametrize_test(\"strided\", [False, True])\n # Test with both contiguous and non-contiguous inputs.\n @parametrize_test(\"contiguous\", [False, True])\n def test_conv_backend(\n self, device, input_shape, has_bias, strided, contiguous, transposed, dilated, groups,\n layout, backend_expected):\n # Build up inputs.\n dtype = torch.float32\n C_in, C_out, dim, kernel_size = input_shape[1], 12, len(input_shape) - 2, 3\n x = torch.randn(*input_shape, device=device, dtype=dtype, requires_grad=True)\n weight = torch.randn(C_in if transposed else C_out,\n C_out // groups if transposed else C_in // groups,\n *[kernel_size for _ in range(dim)],\n device=device, dtype=dtype, requires_grad=True)\n bias = torch.randn(C_out, device=device, dtype=dtype, requires_grad=True) if has_bias else None\n\n def _make_noncontiguous(inp):\n if inp is None:\n return None\n old_requires_grad = inp.requires_grad\n inp = torch.repeat_interleave(inp, 2, dim=-1)\n inp = inp[..., 
::2].detach().requires_grad_(old_requires_grad)\n return inp\n\n if not contiguous:\n x = _make_noncontiguous(x)\n weight = _make_noncontiguous(weight)\n bias = _make_noncontiguous(bias)\n\n if layout is torch._mkldnn:\n x = x.to_mkldnn()\n # Note that weight and bias are not supported as mkldnn tensors during training.\n\n stride = (2,) * dim if strided else (1,) * dim\n padding = (0,) * dim\n dilation = (2,) * dim if dilated else (1,) * dim\n output_padding = (0,) * dim\n inputs = [x, weight, bias, stride, padding, dilation, transposed, output_padding, groups]\n\n # Ensure correct backend is selected.\n backend_actual = torch._C._select_conv_backend(*inputs)\n self.assertEqual(backend_actual, backend_expected)\n\n # Ensure backward call succeeds.\n convolution = torch.ops.aten.convolution\n output = convolution(*inputs)\n grad_output = torch.randn(output.shape, device=device, dtype=dtype)\n if not contiguous:\n grad_output = _make_noncontiguous(grad_output)\n if layout is torch._mkldnn:\n grad_output = grad_output.to_mkldnn()\n output.backward(grad_output)\n\n # mkldnn doesn't support gradcheck :(\n if layout is torch._mkldnn:\n return\n\n if backend_actual != torch._C._ConvBackend.Empty: # FIXME: forward AD fails\n # Forward AD and forward-over-reverse AD smoke test in float32\n # TODO: remove this if we introduce per-op gradient tests for float32\n with fwAD.dual_level():\n dual_inputs = [(fwAD.make_dual(i, torch.rand_like(i)) if isinstance(i, torch.Tensor) else i) for i in inputs]\n # Forward AD\n output = convolution(*dual_inputs)\n # Forward over reverse AD\n grad_output_d = fwAD.make_dual(torch.rand_like(output), torch.rand_like(output))\n if has_bias:\n torch.autograd.grad(output, [x, weight, bias], grad_output_d)\n else:\n torch.autograd.grad(output, [x, weight], grad_output_d)\n\n # Convert to float64 for gradcheck.\n x = x.to(torch.float64).detach().requires_grad_(True)\n weight = weight.to(torch.float64).detach().requires_grad_(True)\n if bias is not 
None:\n bias = bias.to(torch.float64).detach().requires_grad_(True)\n inputs = [x, weight, bias, stride, padding, dilation, transposed, output_padding, groups]\n\n # Set some backend-specific validation settings.\n gradcheck_nondet_tol = 0.0\n if torch.backends.cudnn.is_available():\n # cuDNN introduces non-determinism\n gradcheck_nondet_tol = GRADCHECK_NONDET_TOL\n\n self.assertTrue(gradcheck(convolution, inputs, nondet_tol=gradcheck_nondet_tol))\n\n # double backward doesn't support bias gradients\n if bias is not None:\n bias.requires_grad_(False)\n self.assertTrue(gradgradcheck(convolution, inputs, nondet_tol=gradcheck_nondet_tol))\n\n def test_Dropout(self, device):\n input = torch.empty(1000)\n self._test_dropout(nn.Dropout, device, input)\n\n self._test_dropout_discontiguous(nn.Dropout, device)\n self._test_dropout_discontiguous(nn.Dropout, device, memory_format=torch.channels_last)\n\n self._test_dropout_stride_mean_preserve(nn.Dropout, device)\n\n if self.device_type == 'cuda' or self.device_type == 'cpu':\n input = input.bfloat16()\n self._test_dropout(nn.Dropout, device, input)\n\n def _test_dropoutNd_no_batch(self, dropout, input):\n input_clone = input.clone()\n with freeze_rng_state():\n res_no_batch = dropout(input)\n\n with freeze_rng_state():\n res_batched = dropout(input_clone.unsqueeze(0)).squeeze(0)\n\n self.assertEqual(res_no_batch, res_batched)\n\n def _test_dropoutNd_channel_zero(self, dropout, input):\n # Verify the number of zeros in a channel is 0 or the number of elements in the channel\n # for a fully positive input tensor\n shape = input.shape\n B = shape[0]\n C = shape[1]\n channel_numel = torch.tensor(shape[2:]).prod()\n result = dropout(input)\n\n for b, c in product(range(B), range(C)):\n self.assertTrue(result[b, c].count_nonzero() in (0, channel_numel))\n\n @expectedFailureXLA # seems like freeze_rng_state is not honoured by XLA\n def test_Dropout2d(self, device):\n b = random.randint(1, 5)\n w = random.randint(1, 5)\n h = 
random.randint(1, 5)\n num_features = 1000\n input = torch.empty(num_features, b, w, h)\n self._test_dropout(nn.Dropout2d, device, input)\n self._test_dropout(nn.Dropout2d, device, input, memory_format=torch.channels_last)\n\n self._test_dropout_discontiguous(nn.Dropout2d, device)\n self._test_dropout_discontiguous(nn.Dropout2d, device, memory_format=torch.channels_last)\n\n with self.assertWarnsRegex(UserWarning, \"Received a 5-D input to dropout2d\"):\n nn.Dropout2d(p=0.5)(torch.rand(1, 2, 2, 2, 2, device=device))\n\n with self.assertWarnsRegex(UserWarning, \"Received a 2-D input to dropout2d\"):\n nn.Dropout2d(p=0.5)(torch.rand(1, 2, device=device))\n\n # no batch dims\n input = torch.rand(50, 2, 2, device=device)\n self._test_dropoutNd_no_batch(nn.Dropout2d(p=0.5), input)\n self._test_dropoutNd_no_batch(nn.Dropout2d(p=0.5, inplace=True), input)\n\n # check that complete channels are dropped\n input = torch.ones(10, 4, 2, 2, device=device)\n self._test_dropoutNd_channel_zero(nn.Dropout2d(p=0.5), input)\n self._test_dropoutNd_channel_zero(nn.Dropout2d(p=0.5, inplace=True), input)\n\n @expectedFailureXLA # seems like freeze_rng_state is not honoured by XLA\n def test_Dropout3d(self, device):\n b = random.randint(1, 5)\n w = random.randint(1, 5)\n h = random.randint(1, 5)\n d = random.randint(1, 2)\n num_features = 1000\n input = torch.empty(num_features, b, d, w, h)\n self._test_dropout(nn.Dropout3d, device, input)\n\n self._test_dropout_discontiguous(nn.Dropout3d, device)\n self._test_dropout_discontiguous(nn.Dropout3d, device, memory_format=torch.channels_last)\n\n with self.assertWarnsRegex(UserWarning, \"Received a 6-D input to dropout3d\"):\n nn.Dropout3d(p=0.5)(torch.rand(1, 2, 2, 2, 2, 2, device=device))\n\n with self.assertWarnsRegex(UserWarning, \"Received a 3-D input to dropout3d\"):\n nn.Dropout3d(p=0.5)(torch.rand(1, 2, 2, device=device))\n\n # no batch dims\n input = torch.rand(50, 2, 2, 2, device=device)\n 
        self._test_dropoutNd_no_batch(nn.Dropout3d(p=0.5), input)
        self._test_dropoutNd_no_batch(nn.Dropout3d(p=0.5, inplace=True), input)

        # check that complete channels are dropped
        input = torch.ones(10, 4, 2, 2, 2, device=device)
        self._test_dropoutNd_channel_zero(nn.Dropout3d(p=0.5), input)
        self._test_dropoutNd_channel_zero(nn.Dropout3d(p=0.5, inplace=True), input)

    def test_InstanceNorm1d_general(self, device):
        """InstanceNorm1d on a random (N, C, L) input; also exercises the CUDA half path."""
        b = random.randint(3, 5)
        c = random.randint(3, 5)
        d = random.randint(8, 10)

        input = torch.rand(b, c, d)
        self._test_InstanceNorm_general(nn.InstanceNorm1d, input, device)

        if self.device_type == 'cuda':
            self._test_InstanceNorm_cuda_half(nn.InstanceNorm1d, input, device)

    def test_InstanceNorm2d_general(self, device):
        """InstanceNorm2d on a random (N, C, H, W) input; also exercises the CUDA half path."""
        b = random.randint(3, 5)
        c = random.randint(3, 5)
        w = random.randint(3, 6)
        h = random.randint(6, 8)

        input = torch.rand(b, c, h, w)
        self._test_InstanceNorm_general(nn.InstanceNorm2d, input, device)

        if self.device_type == 'cuda':
            self._test_InstanceNorm_cuda_half(nn.InstanceNorm2d, input, device)

    def test_InstanceNorm3d_general(self, device):
        """InstanceNorm3d on a random 5-D input; also exercises the CUDA half path."""
        b = random.randint(3, 5)
        c = random.randint(3, 5)
        w = random.randint(2, 5)
        h = random.randint(2, 5)
        d = random.randint(2, 5)

        input = torch.rand(b, c, h, w, d)
        self._test_InstanceNorm_general(nn.InstanceNorm3d, input, device)

        if self.device_type == 'cuda':
            self._test_InstanceNorm_cuda_half(nn.InstanceNorm3d, input, device)

    def test_instancenorm_raises_error_if_less_than_one_value_per_channel(self, device):
        """InstanceNorm1d must reject an input with a single value per channel."""
        # Shape (1, 10, 1): one spatial element per channel.
        x = torch.rand(10)[None, :, None]
        with self.assertRaises(ValueError):
            torch.nn.InstanceNorm1d(10)(x).to(device)

    def test_instancenorm_raises_error_for_single_spatial_element_during_training(self, device):
        """With track_running_stats, a single spatial element must error in training mode."""
        BATCH_SIZE = 10
        NUM_CHANNELS = 3
        norms = [torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d]
        for i, norm in enumerate(norms):
            m = norm(NUM_CHANNELS, track_running_stats=True)
            m.to(device)

            # Create an appropriately-sized input with a single spatial element.
            input = torch.randn(BATCH_SIZE, NUM_CHANNELS, *[1 for _ in range(i + 1)],
                                device=device)
            with self.assertRaises(ValueError):
                m(input)

            # Single spatial element should be fine in eval.
            m.eval()
            m(input)

    def test_LayerNorm_general(self, device):
        """LayerNorm smoke test; bfloat16 on cpu/cuda and half on cuda."""
        self._test_LayerNorm_general(device)

        if self.device_type == 'cuda' or self.device_type == 'cpu':
            self._test_LayerNorm_general(device, dtype=torch.bfloat16)

        if self.device_type == 'cuda':
            self._test_LayerNorm_cuda_half(device)

    @onlyNativeDeviceTypes
    def test_LayerNorm_numeric(self, device):
        """Compare nn.LayerNorm against a straightforward reference implementation."""
        def layer_norm_ref(X, gamma, beta, normalized_shape, eps):
            # Flatten the normalized dims, normalize per row, then re-apply affine params.
            feature_size = np.prod(normalized_shape)
            X_view = X.view(-1, feature_size)
            mean = X_view.mean(dim=-1, keepdim=True)
            var = X_view.var(dim=-1, unbiased=False, keepdim=True)
            Y = (X_view - mean) / torch.sqrt(var + eps)
            Y = Y * gamma.view(-1) + beta.view(-1)
            return Y.view(*X.size())

        normalized_shape = [256, 256, 144]
        layer_norm = nn.LayerNorm(normalized_shape).float().to(device)
        X = torch.rand(2, *normalized_shape, dtype=torch.float32,
                       device=device)

        Y = layer_norm(X)
        Y_ref = layer_norm_ref(X, layer_norm.weight.data, layer_norm.bias.data,
                               normalized_shape, layer_norm.eps)
        self.assertEqual(Y, Y_ref, rtol=0, atol=1e-5)

        if self.device_type == 'cuda':
            # Same module on CPU must agree with the CUDA result.
            layer_norm.cpu()
            Y_cpu = layer_norm(X.cpu())
            self.assertEqual(Y_cpu, Y, rtol=0, atol=1e-5)

    @onlyNativeDeviceTypes
    def test_GroupNorm_general(self, device):
        """GroupNorm smoke test; half precision on cuda."""
        self._test_GroupNorm_general(device)

        if self.device_type == 'cuda':
            self._test_GroupNorm_cuda_half()

    def test_GroupNorm_raises_error_if_one_value_per_group(self, device):
        """GroupNorm must reject an input with a single value per group."""
        x = torch.rand(10)[None, :, None]
        with self.assertRaises(ValueError):
            torch.nn.GroupNorm(10, 10)(x).to(device)

    def test_GroupNorm_empty(self, device):
        """GroupNorm on a zero-batch input, with and without cuDNN."""
        mod = torch.nn.GroupNorm(2, 4).to(device)
        inp = torch.randn(0, 4, 2, 2, device=device)
        self._test_module_empty_input(mod, inp)
        if self.device_type == 'cuda' and self.has_cudnn():
            # Re-run with cuDNN disabled to cover the fallback kernel.
            with torch.backends.cudnn.flags(enabled=False):
                self._test_module_empty_input(mod, inp)

    @onlyCPU
    @dtypes(torch.float, torch.double)
    def test_groupnorm_nhwc(self, device, dtype):
        """channels_last GroupNorm must match the contiguous path (forward and backward)."""
        def helper(self, size, groups):
            channels = size[1]
            input = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
            input = input.contiguous(memory_format=torch.channels_last)
            input.retain_grad()
            grad = torch.randn(size, dtype=dtype, device=device)
            grad = grad.contiguous(memory_format=torch.channels_last)
            gn = nn.GroupNorm(groups, channels).to(device).to(dtype)
            gn.weight.data.uniform_()
            gn.bias.data.uniform_()

            # Reference copies share the same parameters but use contiguous layout.
            ref_input = input.detach().clone().contiguous().requires_grad_(True)
            ref_grad = grad.detach().clone().contiguous()
            ref_gn = nn.GroupNorm(groups, channels).to(device).to(dtype)
            ref_gn.load_state_dict(gn.state_dict())

            out = gn(input)
            out.backward(grad)
            ref_out = ref_gn(ref_input)
            ref_out.backward(ref_grad)

            # Output keeps channels_last; reference stays contiguous; values agree.
            self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
            self.assertTrue(ref_out.is_contiguous())
            self.assertEqual(out, ref_out)
            self.assertEqual(gn.weight.grad, ref_gn.weight.grad)
            self.assertEqual(gn.bias.grad, ref_gn.bias.grad)
            self.assertEqual(input.grad, ref_input.grad)

        helper(self, (4, 8, 10, 10), 4)
        helper(self, (2, 30, 9, 9), 3)

    @onlyNativeDeviceTypes
    def test_GroupNorm_numeric(self, device):
        """Compare nn.GroupNorm against a straightforward reference implementation."""
        def group_norm_ref(X, gamma, beta, groups, channels, eps):
            # Normalize per (batch, group), then re-apply per-channel affine params.
            batch_size = X.size()[0]
            X_view = X.view(batch_size, groups, -1)
            mean = X_view.mean(dim=-1, keepdim=True)
            var = X_view.var(dim=-1, unbiased=False, keepdim=True)
            Y = ((X_view - mean) / torch.sqrt(var + eps)).view(
                batch_size, channels, -1)
            Y = Y * gamma.view(channels, 1) + beta.view(channels, 1)
            return Y.view(*X.size())

        batch_size = 1
        groups = 2
        channels = 8
        group_norm = nn.GroupNorm(groups, channels).float().to(device)
        X = torch.rand(batch_size, channels, 256, 256, 72,
                       dtype=torch.float32, device=device)

        Y = group_norm(X)
        Y_ref = group_norm_ref(
            X, group_norm.weight.data, group_norm.bias.data, groups,
            channels, group_norm.eps)
        self.assertEqual(Y, Y_ref, rtol=0, atol=1e-5)

        if self.device_type == 'cuda':
            # Same module on CPU must agree with the CUDA result.
            group_norm.cpu()
            Y_cpu = group_norm(X.cpu())
            self.assertEqual(Y_cpu, Y, rtol=0, atol=1e-5)

    @onlyNativeDeviceTypes
    @dtypes(torch.float64, torch.complex128)
    def test_pad(self, device, dtype):
        """Error cases for circular/reflect padding, and that F.pad never aliases its input."""
        # Assert assertion errors are raised for invalid circular padding values
        inputs = torch.randn(1, 1, 4, device=device, dtype=dtype, requires_grad=True)
        # Should raise error when trying to wrap around more than once
        self.assertRaises(RuntimeError, lambda: F.pad(inputs, (5, 4), mode='circular'))
        self.assertRaises(RuntimeError, lambda: F.pad(inputs, (3, 6), mode='circular'))
        # Should raise error when negative padding results in negative output shape
        self.assertRaises(RuntimeError, lambda: F.pad(inputs, (-3, -2), mode='circular'))

        # assert that reflection padding errors when pad >= input size
        expected_err_msg = r"Padding size should be less than the corresponding input dimension"
        inputs = torch.randn(1, 1, 2, 3, device=device, dtype=dtype)
        self.assertRaisesRegex(RuntimeError, expected_err_msg,
                               lambda: F.pad(inputs, (1, 1, 3, 0), mode='reflect'))
        inputs = torch.randn(1, 1, 2, device=device, dtype=dtype)
        self.assertRaisesRegex(RuntimeError, expected_err_msg,
                               lambda: F.pad(inputs, (2, 1), mode='reflect'))

        inputs = torch.rand(1, 3, 4, 4, device=device, dtype=dtype)
        # assert that pad doesn't return a view into the input tensor
        for mode in 'constant', 'reflect', 'replicate', 'circular':
            out = F.pad(inputs, (0, 0, 0, 0), mode=mode)
            out.fill_(4)

            out = F.pad(inputs, (0, 0, -1, -1), mode=mode)
            out.fill_(4)
            self.assertTrue(torch.all(torch.abs(inputs) < 2))

    @onlyNativeDeviceTypes
    @dtypes(torch.float64, torch.complex128)
    def test_ReplicationPad_empty(self, device, dtype):
        """ReplicationPad{1,2,3}d accept zero-batch inputs; zero-channel inputs raise."""
        for mod, inp in [
                (torch.nn.ReplicationPad1d(3), torch.randn(0, 3, 10, device=device, dtype=dtype)),
                (torch.nn.ReplicationPad2d(3), torch.randn(0, 3, 10, 10, device=device, dtype=dtype)),
                (torch.nn.ReplicationPad3d(3), torch.randn(0, 3, 10, 10, 10, device=device, dtype=dtype))]:
            self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, 'Expected 2D or 3D'):
            mod = torch.nn.ReplicationPad1d(2)
            inp = torch.randn(3, 0, 10, device=device, dtype=dtype)
            mod(inp)

        with self.assertRaisesRegex(RuntimeError, 'Expected 3D or 4D'):
            mod = torch.nn.ReplicationPad2d((2, 2, 2, 2))
            inp = torch.randn(43, 0, 10, 10, device=device, dtype=dtype)
            mod(inp)

        with self.assertRaisesRegex(RuntimeError, 'Expected 4D or 5D'):
            mod = torch.nn.ReplicationPad3d((2, 2, 2, 2, 2, 2))
            inp = torch.randn(3, 0, 10, 10, 10, device=device, dtype=dtype)
            mod(inp)

    def test_ReplicationPad1d_large(self, device):
        """ReplicationPad1d forward/backward on shapes with a >2**16 dimension."""
        shapes = ([2, 65736, 4], [65736, 2, 4])
        pl, pr = 3, 4  # left / right pad widths
        for shape in shapes:
            x = torch.randn(shape, device=device, requires_grad=True)
            model = torch.nn.ReplicationPad1d((pl, pr))

            # forward
            out = model(x)
            self.assertEqual(out[:, :, pl : -pr], x)

            # Padded regions replicate the boundary elements.
            left_padding = out[:, :, : pl]
            self.assertEqual(left_padding, x[:, :, :1].expand_as(left_padding))
            right_padding = out[:, :, -pr :]
            self.assertEqual(right_padding, x[:, :, -1:].expand_as(right_padding))

            # backward
            g = torch.randn_like(out)
            out.backward(g)
            self.assertEqual(x.grad[:, :, 1 : -1], g[:, :, pl + 1 : -pr - 1])

            # Boundary grads accumulate all grads from the replicated region.
            self.assertEqual(x.grad[:, :, 0], g[:, :, : pl + 1].sum(-1))
            self.assertEqual(x.grad[:, :, -1], g[:, :, -pr - 1:].sum(-1))

    def test_ReplicationPad2d_large(self, device):
        """ReplicationPad2d forward/backward on shapes with a >2**16 dimension."""
        shapes = ([2, 65736, 4, 4], [65736, 2, 4, 4])
        pl, pr, pt, pb = 3, 4, 5, 6  # left / right / top / bottom pad widths
        for shape in shapes:
            x = torch.randn(shape, device=device, requires_grad=True)
            model = torch.nn.ReplicationPad2d((pl, pr, pt, pb))

            # forward center, edge
            out = model(x)
            self.assertEqual(out[:, :, pt : -pb, pl : -pr], x)

            # Each edge replicates the nearest input row/column.
            left_padding = out[:, :, pt : -pb, : pl]
            self.assertEqual(left_padding, x[:, :, :, :1].expand_as(left_padding))
            right_padding = out[:, :, pt : -pb, -pr :]
            self.assertEqual(right_padding, x[:, :, :, -1:].expand_as(right_padding))
            top_padding = out[:, :, : pt, pl : -pr]
            self.assertEqual(top_padding, x[:, :, :1, :].expand_as(top_padding))
            bottom_padding = out[:, :, -pb : , pl : -pr]
            self.assertEqual(bottom_padding, x[:, :, -1:, :].expand_as(bottom_padding))

            # forward corner
            # Corner regions replicate the corresponding corner element.
            tl_padding = out[:, :, : pt + 1, : pl + 1]
            self.assertEqual(tl_padding, x[:, :, :1, :1].expand_as(tl_padding))
            tr_padding = out[:, :, : pt + 1, -pr - 1:]
            self.assertEqual(tr_padding, x[:, :, :1, -1:].expand_as(tr_padding))
            bl_padding = out[:, :, -pb - 1:, : pl + 1]
            self.assertEqual(bl_padding, x[:, :, -1:, :1].expand_as(bl_padding))
            br_padding = out[:, :, -pb - 1:, -pr - 1:]
            self.assertEqual(br_padding, x[:, :, -1:, -1:].expand_as(br_padding))

            # backward center, edge
            g = torch.randn_like(out)
            out.backward(g)
            self.assertEqual(x.grad[:, :, 1:-1, 1:-1], g[:, :, pt + 1 : -pb - 1, pl + 1 : -pr - 1])

            # Edge grads accumulate the replicated strip's grads along one axis.
            self.assertEqual(x.grad[:, :, 1:-1, 0], g[:, :, pt + 1 : -pb - 1, : pl + 1].sum(-1))
            self.assertEqual(x.grad[:, :, 1:-1, -1], g[:, :, pt + 1 : -pb - 1, -pr - 1 :].sum(-1))
            self.assertEqual(x.grad[:, :, 0, 1:-1], g[:, :, : pt + 1, pl + 1 : -pr - 1].sum(-2))
            self.assertEqual(x.grad[:, :, -1, 1:-1], g[:, :, -pb - 1 :, pl + 1 : -pr - 1].sum(-2))

            # backward corner
            # Corner grads accumulate the whole replicated corner block.
            self.assertEqual(x.grad[:, :, 0, 0], g[:, :, : pt + 1, : pl + 1].sum((-2, -1)))
            self.assertEqual(x.grad[:, :, 0, -1], g[:, :, : pt + 1, -pr - 1 :].sum((-2, -1)))
            self.assertEqual(x.grad[:, :, -1, 0], g[:, :, -pb - 1 :, : pl + 1].sum((-2, -1)))
self.assertEqual(x.grad[:, :, -1, -1], g[:, :, -pb - 1 :, -pr - 1 :].sum((-2, -1)))\n\n @largeTensorTest(\"6GB\")\n def test_ReplicationPad3d_large(self, device):\n shapes = ([1, 65736, 2, 2, 2], [65736, 1, 2, 2, 2])\n pl, pr, pt, pbt, pf, pbk = 3, 4, 5, 6, 7, 8\n\n for shape in shapes:\n x = torch.randn(shape, device=device, requires_grad=True)\n model = torch.nn.ReplicationPad3d((pl, pr, pt, pbt, pf, pbk))\n\n # forward center\n out = model(x)\n self.assertEqual(out[:, :, pf : -pbk, pt : -pbt, pl : -pr], x)\n\n # backward center\n g = torch.randn_like(out)\n out.backward(g)\n self.assertEqual(x.grad[:, :, 1:-1, 1:-1, 1:-1], g[:, :, pf + 1 : -pbk - 1, pt + 1 : -pbt - 1, pl + 1 : -pr - 1])\n\n @onlyNativeDeviceTypes\n def test_Bilinear_empty(self, device):\n mod = torch.nn.Bilinear(20, 30, 40).to(device)\n inp1 = torch.randn(0, 10, 20, requires_grad=True, device=device)\n inp2 = torch.randn(0, 10, 30, requires_grad=True, device=device)\n\n output = mod(inp1, inp2)\n output.sum().backward()\n\n self.assertEqual(inp1, torch.zeros_like(inp1))\n self.assertEqual(inp2, torch.zeros_like(inp2))\n\n self.assertEqual(inp1.grad, torch.zeros_like(inp1))\n self.assertEqual(inp2.grad, torch.zeros_like(inp2))\n\n @expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]\n @onlyNativeDeviceTypes\n def test_TransformerEncoderLayer_empty(self, device):\n for training in (True, False):\n for batch_first, input_shape in [(True, (0, 10, 512)),\n (False, (10, 0, 512))]:\n input = torch.rand(*input_shape, device=device)\n encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)\n if not training:\n encoder_layer = encoder_layer.eval()\n with torch.no_grad():\n self._test_module_empty_input(encoder_layer, input, check_size=False, inference=True)\n if batch_first and not TEST_WITH_CROSSREF:\n with torch.no_grad():\n # A NestedTensor with no tensors inside it doesn't have dim 3 (or dim\n # 2, for that matter) 
so it can't hit the fast path, nor can we give a\n # result.\n with self.assertRaisesRegex(\n AssertionError, 'MultiheadAttention does not support NestedTensor outside'):\n nt = torch.nested_tensor([], device=device)\n self._test_module_empty_input(encoder_layer, nt, check_size=False, inference=True)\n\n nt = torch.nested_tensor([torch.rand(0, 512, device=device)], device=device)\n self._test_module_empty_input(encoder_layer, nt, check_size=False, inference=True)\n else:\n self._test_module_empty_input(encoder_layer, input, check_size=False)\n\n @expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]\n @onlyNativeDeviceTypes\n def test_TransformerEncoder_empty(self, device):\n for batch_first, input_shape in [(True, (0, 10, 512)),\n (False, (10, 0, 512))]:\n input = torch.rand(*input_shape, device=device)\n encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)\n transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6).to(device)\n self._test_module_empty_input(transformer_encoder, input, check_size=False)\n\n @expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]\n @onlyNativeDeviceTypes\n def test_TransformerDecoderLayer_empty(self, device):\n for batch_first, memory_shape, tgt_shape in [(True, (0, 10, 512), (0, 20, 512)),\n (False, (10, 0, 512), (20, 0, 512))]:\n memory = torch.rand(*memory_shape, device=device)\n tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)\n decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)\n self._test_module_empty_inputs(decoder_layer, [tgt, memory])\n\n @expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]\n @onlyNativeDeviceTypes\n def test_TransformerDecoder_empty(self, device):\n for batch_first, memory_shape, tgt_shape in [(True, (0, 10, 512), (0, 20, 512)),\n (False, (10, 0, 512), (20, 0, 
512))]:\n memory = torch.rand(*memory_shape, device=device)\n tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)\n decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8, batch_first=batch_first).to(device)\n transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6).to(device)\n self._test_module_empty_inputs(transformer_decoder, [tgt, memory])\n\n @expectedFailureMeta # RuntimeError: cannot reshape tensor of 0 elements into shape [1, 0, -1]\n @onlyNativeDeviceTypes\n def test_Transformer_empty(self, device):\n for batch_first, src_shape, tgt_shape in [(True, (10, 0, 512), (20, 0, 512))]:\n transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12).to(device)\n src = torch.rand(*src_shape, requires_grad=True, device=device)\n tgt = torch.rand(*tgt_shape, requires_grad=True, device=device)\n self._test_module_empty_inputs(transformer_model, [src, tgt])\n\n @onlyNativeDeviceTypes\n @dtypes(torch.float32, torch.complex64)\n def test_ReflectionPad_empty(self, device, dtype):\n for mod, inp in [\n (torch.nn.ReflectionPad1d(2), torch.randn(0, 3, 10, device=device, dtype=dtype)),\n (torch.nn.ReflectionPad2d(2), torch.randn(0, 3, 10, 10, device=device, dtype=dtype)),\n (torch.nn.ReflectionPad3d(3), torch.randn(0, 3, 10, 10, 10, device=device, dtype=dtype))]:\n self._test_module_empty_input(mod, inp, check_size=False)\n\n with self.assertRaisesRegex(RuntimeError, '2D or 3D'):\n mod = torch.nn.ReflectionPad1d(2)\n inp = torch.randn(3, 0, 10, device=device, dtype=dtype)\n mod(inp)\n\n with self.assertRaisesRegex(RuntimeError, '3D or 4D'):\n mod = torch.nn.ReflectionPad2d(2)\n inp = torch.randn(3, 0, 10, 10, device=device, dtype=dtype)\n mod(inp)\n\n with self.assertRaisesRegex(RuntimeError, '4D or 5D'):\n mod = torch.nn.ReflectionPad3d(3)\n inp = torch.randn(3, 0, 10, 10, 10, device=device, dtype=dtype)\n mod(inp)\n\n @onlyCUDA # Test if CPU and GPU results match\n def test_ReflectionPad2d_large(self, device):\n shapes = ([2, 
    @onlyNativeDeviceTypes
    def test_LocalResponseNorm_empty(self, device):
        """LocalResponseNorm should accept a zero-batch input."""
        mod = torch.nn.LocalResponseNorm(2).to(device)
        inp = torch.ones(0, 5, 24, 24, device=device)
        self._test_module_empty_input(mod, inp, check_size=False)

    @onlyCUDA  # Test if CPU and GPU results match
    def test_ReflectionPad3d_large(self, device):
        """Forward and backward of ReflectionPad3d on large shapes must match
        the CPU reference."""
        shapes = ([2, 1000, 7, 7, 7], [1000, 2, 7, 7, 7])
        pad = (1, 2, 3, 4, 5, 6)
        for shape in shapes:
            x = torch.randn(shape, device=device, requires_grad=True)
            ref_x = x.detach().cpu().requires_grad_()

            out = F.pad(x, pad, mode='reflect')
            ref_out = F.pad(ref_x, pad, mode='reflect')

            self.assertEqual(out, ref_out)

            g = torch.randn_like(out)
            ref_g = g.cpu()

            out.backward(g)
            ref_out.backward(ref_g)

            self.assertEqual(x.grad, ref_x.grad)

    @onlyNativeDeviceTypes
    @dtypes(torch.float, torch.double)
    def test_MarginLoss_empty(self, device, dtype):
        """Multi(-label) margin losses should run forward/backward on zero-batch
        inputs and produce empty gradients, while mismatched/empty non-batch
        shapes must raise."""
        for mod, x, y in [
                (torch.nn.MultiMarginLoss().to(device),
                 torch.randn(0, 10, requires_grad=True, device=device, dtype=dtype),
                 torch.ones(0, device=device).type(torch.long)),
                (torch.nn.MultiLabelMarginLoss().to(device),
                 torch.randn(0, 10, requires_grad=True, device=device, dtype=dtype),
                 torch.ones(0, 10, device=device).type(torch.long))]:

            out = mod(x, y)
            out.sum().backward()

            # NOTE(review): x is an empty tensor, so this first assert is
            # trivially true (both sides have 0 elements) — the meaningful
            # check is the x.grad one below; confirm the first was intended.
            self.assertEqual(x, torch.zeros_like(x))
            self.assertEqual(x.grad, torch.zeros_like(x))

        with self.assertRaisesRegex(RuntimeError, 'Expected'):
            x = torch.randn(0, requires_grad=True, device=device, dtype=dtype)
            y = torch.ones(10, device=device).type(torch.long)
            mod(x, y)

        with self.assertRaisesRegex(RuntimeError, 'Expected'):
            x = torch.randn(10, 0, requires_grad=True, device=device, dtype=dtype)
            y = torch.ones(10, 0, device=device).type(torch.long)
            mod(x, y)
    @onlyNativeDeviceTypes
    @dtypes(torch.float, torch.double)
    def test_adaptive_pooling_zero_batch(self, dtype, device):
        """AdaptiveAvgPool{1,2,3}d should accept zero-batch inputs."""
        inp = torch.ones(0, 10, dtype=dtype, device=device)
        mod = torch.nn.AdaptiveAvgPool1d(5).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)

        inp = torch.ones(0, 10, 10, dtype=dtype, device=device)
        mod = torch.nn.AdaptiveAvgPool2d((5, 5)).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)

        inp = torch.ones(0, 10, 10, 10, dtype=dtype, device=device)
        mod = torch.nn.AdaptiveAvgPool3d((5, 5, 5)).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)

    @onlyNativeDeviceTypes
    def test_FractionalMaxPool2d_zero_batch(self, device):
        """FractionalMaxPool2d accepts a zero-batch input but rejects an empty
        channel dimension."""
        mod = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
        inp = torch.ones(0, 16, 50, 32, device=device)
        self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, "Expected input"):
            inp = torch.randn(1, 0, 50, 32, device=device)
            mod(inp)

    @onlyNativeDeviceTypes
    def test_FractionalMaxPool3d_zero_batch(self, device):
        """FractionalMaxPool3d accepts a zero-batch input but rejects an empty
        channel dimension."""
        mod = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5)).to(device)
        inp = torch.ones(0, 16, 50, 32, 32, device=device)
        self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, "Expected input"):
            inp = torch.randn(1, 0, 50, 32, 32, device=device)
            mod(inp)

    @onlyNativeDeviceTypes
    def test_FractionalMaxPool2d_zero_out_size(self, device):
        """A requested output_size containing 0 yields an empty output tensor."""
        mod = nn.FractionalMaxPool2d([2, 2], output_size=[0, 1])
        inp = torch.rand([16, 50, 32, 32], device=device)
        out = mod(inp)
        self.assertEqual(out, torch.empty((16, 50, 0, 1), device=device))
    @onlyNativeDeviceTypes
    def test_FractionalMaxPool3d_zero_out_size(self, device):
        """A requested 3D output_size containing 0 yields an empty output.

        NOTE(review): the input here is 4D, which (given the expected
        (16, 0, 1, 1) shape) is presumably treated as a non-batched
        (C, T, H, W) input — confirm against FractionalMaxPool3d docs.
        """
        mod = nn.FractionalMaxPool3d([3, 2, 2], output_size=[0, 1, 1])
        inp = torch.rand([16, 50, 32, 32], device=device)
        out = mod(inp)
        self.assertEqual(out, torch.empty((16, 0, 1, 1), device=device))

    @onlyNativeDeviceTypes
    def test_Unfold_empty(self, device):
        """nn.Unfold accepts a zero-batch input but rejects an empty channel dim."""
        inp = torch.randn(0, 3, 3, 4, device=device)
        unfold = torch.nn.Unfold(kernel_size=(2, 3)).to(device)
        self._test_module_empty_input(unfold, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, 'Expected 3D or 4D'):
            inp = torch.randn(3, 0, 3, 4, device=device)
            unfold = torch.nn.Unfold(kernel_size=(2, 3)).to(device)
            unfold(inp)

    @onlyNativeDeviceTypes
    def test_MaxPool_zero_batch_dim(self, device):
        """MaxPool{1,2,3}d accept a zero-batch input; 2D/3D reject an empty
        non-batch dimension."""
        inp = torch.randn(0, 16, 50, device=device)
        mod = torch.nn.MaxPool1d(3, stride=2).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)

        # 1D is supposed to be okay with 0 numel() inputs so dont test
        # error raising for that case.

        inp = torch.randn(0, 16, 50, 32, device=device)
        mod = torch.nn.MaxPool2d(3, stride=2).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.randn(1, 0, 50, 32, device=device)
            mod(inp)

        inp = torch.ones(0, 16, 50, 44, 31, device=device)
        mod = torch.nn.MaxPool3d(3, stride=2).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.ones(1, 0, 50, 44, 31, device=device)
            mod(inp)

    @onlyNativeDeviceTypes
    def test_MaxUnpool_zero_batch_dim(self, device):
        """MaxUnpool{1,2,3}d should run forward/backward on zero-batch inputs
        and yield empty (all-zero-shaped) outputs and gradients."""
        pool = torch.nn.MaxPool1d(2, stride=2, return_indices=True).to(device)
        unpool = torch.nn.MaxUnpool1d(2, stride=2).to(device)
        inp = torch.randn(0, 10, 10, requires_grad=True, device=device)
        output, indices = pool(inp)
        output.requires_grad_(True)
        unpool_out = unpool(output, indices)
        unpool_out.sum().backward()

        self.assertEqual(inp.grad, torch.zeros_like(inp))
        self.assertEqual(unpool_out, torch.zeros_like(unpool_out))

        pool = torch.nn.MaxPool2d(2, stride=2, return_indices=True).to(device)
        unpool = torch.nn.MaxUnpool2d(2, stride=2).to(device)
        inp = torch.randn(0, 10, 10, 10, requires_grad=True, device=device)
        output, indices = pool(inp)
        # NOTE(review): unlike the 1D and 3D cases there is no explicit
        # output.requires_grad_(True) here — output already requires grad via
        # the pool graph, so behavior is the same; verify the asymmetry is benign.
        unpool_out = unpool(output, indices)
        unpool_out.sum().backward()

        self.assertEqual(inp.grad, torch.zeros_like(inp))
        self.assertEqual(unpool_out, torch.zeros_like(unpool_out))

        pool = torch.nn.MaxPool3d(2, stride=2, return_indices=True).to(device)
        unpool = torch.nn.MaxUnpool3d(2, stride=2).to(device)
        inp = torch.randn(0, 10, 10, 10, 10, requires_grad=True, device=device)
        output, indices = pool(inp)
        output.requires_grad_(True)
        unpool_out = unpool(output, indices)
        unpool_out.sum().backward()

        self.assertEqual(inp.grad, torch.zeros_like(inp))
        self.assertEqual(unpool_out, torch.zeros_like(unpool_out))
    @onlyNativeDeviceTypes
    def test_AdaptiveMaxPool_zero_batch_dim(self, device):
        """AdaptiveMaxPool{1,2,3}d accept a zero-batch input but reject an
        empty non-batch dimension."""
        inp = torch.randn(0, 16, 50, device=device)
        mod = torch.nn.AdaptiveMaxPool1d(3).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.randn(1, 0, 50, device=device)
            mod(inp)

        inp = torch.randn(0, 16, 50, 32, device=device)
        mod = torch.nn.AdaptiveMaxPool2d(3).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.randn(1, 0, 50, 32, device=device)
            mod(inp)

        inp = torch.ones(0, 16, 50, 44, 31, device=device)
        mod = torch.nn.AdaptiveMaxPool3d(3).to(device)
        self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, "Expected"):
            inp = torch.ones(1, 0, 50, 44, 31, device=device)
            mod(inp)
    @onlyCUDA
    @dtypes(torch.float, torch.double)
    @tf32_on_and_off(0.005)
    def test_rnn_fused(self, device, dtype):
        """Compare GRU/LSTM outputs and gradients between a CPU module and a
        device copy with identical weights, with cuDNN disabled (so the fused
        native kernels are exercised)."""

        def copy_rnn(rnn1, rnn2):
            # Copy weights layer-by-layer so both modules start identical.
            for x_layer, y_layer in zip(rnn1.all_weights, rnn2.all_weights):
                for x, y in zip(x_layer, y_layer):
                    x.data.copy_(y.data)

        def check_rnn_grads(rnn1, rnn2):
            # Weight gradients must agree to within a small absolute tolerance.
            for x_layer, y_layer in zip(rnn1.all_weights, rnn2.all_weights):
                for x, y in zip(x_layer, y_layer):
                    self.assertEqual(x.grad, y.grad, atol=5e-5, rtol=0)

        input_size = 10
        hidden_size = 6
        num_layers = 2
        seq_length = 7
        batch = 6
        input_val = torch.randn(seq_length, batch, input_size, dtype=dtype)
        grad_output = torch.randn(seq_length, batch, hidden_size, dtype=dtype)
        hx_val = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
        grad_hy = torch.randn(num_layers, batch, hidden_size, dtype=dtype)
        # Disable cuDNN so the fused native RNN path is the one under test.
        with torch.backends.cudnn.flags(enabled=False, allow_tf32=None):
            for module in (nn.GRU, nn.LSTM):
                for bias in (True, False):
                    rnn = module(input_size, hidden_size, num_layers, bias=bias).to(dtype)
                    rnn_device = module(input_size, hidden_size, num_layers, bias=bias).to(device, dtype)
                    copy_rnn(rnn, rnn_device)

                    is_lstm = isinstance(rnn, nn.LSTM)
                    if is_lstm:
                        # LSTM hidden state is an (h, c) pair; make c distinct (+1).
                        hx = (hx_val.clone().requires_grad_(True),
                              hx_val.clone().add(1).requires_grad_(True))
                        hx_device = (hx_val.clone().to(device).requires_grad_(True),
                                     hx_val.clone().to(device).add(1).requires_grad_(True))
                    else:
                        hx = hx_val.clone().requires_grad_(True)
                        hx_device = hx_val.clone().to(device).requires_grad_(True)

                    inp = input_val.clone().requires_grad_(True)
                    inp_cu = input_val.clone().to(device).requires_grad_(True)
                    output1, hy1 = rnn(inp, hx)
                    output2, hy2 = rnn_device(inp_cu, hx_device)
                    if is_lstm:
                        torch.autograd.backward(
                            [output1, hy1[0], hy1[1]], [grad_output, grad_hy, grad_hy + 1]
                        )
                        torch.autograd.backward(
                            [output2, hy2[0], hy2[1]],
                            [grad_output.to(device), grad_hy.to(device), (grad_hy + 1).to(device)]
                        )
                    else:
                        torch.autograd.backward([output1, hy1], [grad_output, grad_hy])
                        torch.autograd.backward([output2, hy2], [grad_output.to(device), grad_hy.to(device)])

                    self.assertEqual(output1, output2)
                    self.assertEqual(hy1, hy2)

                    check_rnn_grads(rnn, rnn_device)
                    self.assertEqual(inp.grad, inp_cu.grad)
                    if is_lstm:
                        self.assertEqual(hx[0].grad, hx_device[0].grad)
                        self.assertEqual(hx[1].grad, hx_device[1].grad)
                    else:
                        self.assertEqual(hx.grad, hx_device.grad)

    def test_BatchNorm_empty(self, device):
        """BatchNorm2d on a zero-batch input: running stats must stay at their
        defaults (mean 0, var 1) and parameter grads must be zero."""
        mod = torch.nn.BatchNorm2d(3).to(device)
        inp = torch.randn(0, 3, 2, 2, device=device)
        self._test_module_empty_input(mod, inp)
        if self.device_type == 'cuda' and self.has_cudnn():
            with torch.backends.cudnn.flags(enabled=False):
                self._test_module_empty_input(mod, inp)

        self.assertEqual(mod.running_mean, torch.tensor([0., 0, 0], device=device))
        self.assertEqual(mod.running_var, torch.tensor([1., 1, 1], device=device))
        self.assertEqual(mod.weight.grad, torch.tensor([0., 0, 0], device=device))
        self.assertEqual(mod.bias.grad, torch.tensor([0., 0, 0], device=device))

    @dtypes(torch.float, torch.cfloat)
    def test_conv_empty_channel(self, device, dtype):
        """Conv{1,2,3}d with in_channels=0 accept zero-channel inputs; a
        nonzero-channel input must fail the groups/weight check."""
        in_channels = 0
        mod = torch.nn.Conv1d(in_channels, 8, 2, stride=2, dtype=dtype).to(device)
        inp = torch.randn(2, 0, 15, device=device, dtype=dtype)
        self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
            inp = torch.randn(2, 1, 0, device=device)
            mod(inp)

        mod = torch.nn.Conv2d(in_channels, 33, 3, stride=2, dtype=dtype).to(device)
        inp = torch.randn(2, 0, 50, 100, device=device, dtype=dtype)
        self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
            inp = torch.randn(2, 1, 40, 0, device=device)
            mod(inp)

        mod = torch.nn.Conv3d(in_channels, 33, 3, stride=2, dtype=dtype).to(device)
        inp = torch.randn(2, 0, 50, 20, 40, device=device, dtype=dtype)
        self._test_module_empty_input(mod, inp, check_size=False)

        with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
            inp = torch.randn(2, 1, 50, 0, 40, device=device)
            mod(inp)
self.assertRaisesRegex(RuntimeError, \"Given groups=1, weight\"):\n inp = torch.randn(2, 1, 50, 0, 40, device=device)\n mod(inp)\n\n def test_group_conv_empty(self, device):\n mod = torch.nn.Conv2d(4, 4, stride=2, kernel_size=3, padding=1, groups=4).to(device)\n inp = torch.randn(0, 4, 4, 4, device=device)\n self._test_module_empty_input(mod, inp, check_size=False)\n if self.device_type == 'cuda' and self.has_cudnn():\n with torch.backends.cudnn.flags(enabled=False):\n self._test_module_empty_input(mod, inp, check_size=False)\n\n def test_group_convTranspose_empty(self, device):\n mod = torch.nn.ConvTranspose2d(4, 4, stride=2, kernel_size=3, padding=1, groups=4).to(device)\n inp = torch.randn(0, 4, 4, 4, device=device)\n self._test_module_empty_input(mod, inp, check_size=False)\n if self.device_type == 'cuda' and self.has_cudnn():\n with torch.backends.cudnn.flags(enabled=False):\n self._test_module_empty_input(mod, inp, check_size=False)\n\n def test_convTranspose_empty(self, device):\n mod = torch.nn.ConvTranspose2d(4, 4, stride=2, kernel_size=3, padding=1).to(device)\n inp = torch.randn(0, 4, 4, 4, device=device)\n self._test_module_empty_input(mod, inp, check_size=False)\n if self.device_type == 'cuda' and self.has_cudnn():\n with torch.backends.cudnn.flags(enabled=False):\n self._test_module_empty_input(mod, inp, check_size=False)\n\n @onlyNativeDeviceTypes\n def test_AvgPool2d_empty(self, device):\n avgpool = torch.nn.AvgPool2d(3, stride=2).to(device)\n inp = torch.randn(0, 16, 20, 32, device=device)\n self._test_module_empty_input(avgpool, inp, check_size=False)\n\n clast_inp = torch.randn(0, 16, 20, 32, device=device).contiguous(memory_format=torch.channels_last)\n self._test_module_empty_input(avgpool, clast_inp, check_size=False)\n\n # test with empty non-batch input\n with self.assertRaisesRegex(RuntimeError, '3D or 4D'):\n inp = torch.randn(16, 0, 20, 32, device=device)\n avgpool(inp)\n\n @onlyCUDA\n @largeTensorTest('16GB')\n def 
    @onlyCUDA
    @largeTensorTest('16GB')
    def test_prelu_backward_32bit_indexing(self, device):
        """PReLU backward on a tensor with >2**31 elements must not crash
        (exercises 64-bit indexing in the half-precision CUDA kernel)."""
        m = torch.nn.PReLU().cuda().half()
        input_ = torch.ones((1024, 1024, 1024, 2), dtype=torch.half, device=device)
        output = m(input_)
        output.backward(input_)

    def test_linear_empty(self, device):
        """nn.Linear should accept a zero-batch input."""
        mod = torch.nn.Linear(7, 7).to(device)
        inp = torch.randn(0, 7, device=device)
        self._test_module_empty_input(mod, inp)

    def test_one_hot(self, device):
        """F.one_hot: negative values and too-small num_classes raise;
        num_classes=-1 infers the class count; 0-d, 2-d and empty inputs
        produce the documented output shapes."""
        if self.device_type != 'cuda':  # cuda throws device assert for invalid data
            with self.assertRaises(RuntimeError):
                torch.nn.functional.one_hot(torch.tensor([3, 4, -1, 0], device=device), -1)

            with self.assertRaises(RuntimeError):
                torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), 3)

        t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device))
        expected = torch.tensor([[0, 0, 0, 1, 0],
                                 [0, 0, 0, 0, 1],
                                 [0, 1, 0, 0, 0],
                                 [1, 0, 0, 0, 0]], device=device)
        self.assertEqual(t, expected)

        # num_classes=-1 means "infer from the data" — same result as above.
        t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), -1)
        expected = torch.tensor([[0, 0, 0, 1, 0],
                                 [0, 0, 0, 0, 1],
                                 [0, 1, 0, 0, 0],
                                 [1, 0, 0, 0, 0]], device=device)
        self.assertEqual(t, expected)

        t = torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), 6)
        expected = torch.tensor([[0, 0, 0, 1, 0, 0],
                                 [0, 0, 0, 0, 1, 0],
                                 [0, 1, 0, 0, 0, 0],
                                 [1, 0, 0, 0, 0, 0]], device=device)
        self.assertEqual(t, expected)

        t = torch.nn.functional.one_hot(torch.tensor([[3, 4], [1, 0]], device=device))
        expected = torch.tensor([[[0, 0, 0, 1, 0],
                                  [0, 0, 0, 0, 1]],
                                 [[0, 1, 0, 0, 0],
                                  [1, 0, 0, 0, 0]]], device=device)
        self.assertEqual(t, expected)

        t = torch.nn.functional.one_hot(torch.tensor(4, device=device))
        expected = torch.tensor([0, 0, 0, 0, 1], device=device)
        self.assertEqual(t, expected)

        t = torch.nn.functional.one_hot(torch.empty([4, 0], dtype=torch.long, device=device), 100)
        expected = torch.empty([4, 0, 100], dtype=torch.long)
        self.assertEqual(t, expected)

        # Empty input with no explicit num_classes cannot infer a class count.
        with self.assertRaises(RuntimeError):
            torch.nn.functional.one_hot(torch.empty([4, 0], dtype=torch.long, device=device))

        # num_classes < -1 is invalid.
        with self.assertRaises(RuntimeError):
            torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), -2)
expected)\n\n with self.assertRaises(RuntimeError):\n torch.nn.functional.one_hot(torch.empty([4, 0], dtype=torch.long, device=device))\n\n with self.assertRaises(RuntimeError):\n torch.nn.functional.one_hot(torch.tensor([3, 4, 1, 0], device=device), -2)\n\n def test_nn_empty(self, device):\n # One off tests to ensure scalars from nn.yaml are properly applied\n def verify_scalars(input, output):\n self.assertEqual(input.shape, output.shape)\n self.assertEqual(0, output.numel())\n\n for input_shape in [(0), (0, 2)]:\n for module in [torch.nn.ELU, torch.nn.Hardtanh, torch.nn.LeakyReLU, torch.nn.LogSigmoid,\n torch.nn.RReLU, torch.nn.Softshrink, torch.nn.Softplus, torch.nn.Sigmoid,\n torch.nn.Tanh]:\n input = torch.randn(input_shape, device=device, requires_grad=True)\n m = module()\n output = m(input)\n verify_scalars(input, output)\n\n def test_nn_scalars(self, device):\n # One off tests to ensure scalars from nn.yaml are properly applied\n def verify_scalars(input, output):\n if input.dim() == 0:\n self.assertEqual((), output.shape)\n else:\n self.assertNotEqual((), output.shape)\n output.sum().backward()\n self.assertEqual(input.shape, input.grad.shape)\n\n for input_shape in [(5, 6), ()]:\n for module in [torch.nn.ELU, torch.nn.Hardtanh, torch.nn.LeakyReLU, torch.nn.LogSigmoid,\n torch.nn.RReLU, torch.nn.Softshrink, torch.nn.Softplus, torch.nn.Sigmoid,\n torch.nn.Tanh]:\n input = torch.randn(input_shape, device=device, requires_grad=True)\n m = module()\n output = m(input)\n verify_scalars(input, output)\n\n def test_nn_scalars_reductions(self, device):\n # One off tests to ensure scalars from nn.yaml are properly applied\n def verify_reduction_scalars(input, reduction, output):\n if reduction != 'none' or input.dim() == 0:\n self.assertEqual((), output.shape)\n else:\n self.assertNotEqual((), output.shape)\n output.sum().backward()\n self.assertEqual(input.shape, input.grad.shape)\n\n for input_shape in [(5, 6), ()]:\n for reduction in ['none', 'mean', 'sum']:\n 
    # verify that bogus reduction strings are errors
    @onlyNativeDeviceTypes
    def test_invalid_reduction_strings(self, device):
        """Every functional loss must raise ValueError for an unknown
        `reduction` string and accept 'none'."""
        input = torch.randn(3, 5, requires_grad=True, device=device)
        cinput = torch.randn(3, 5, requires_grad=True, device=device, dtype=torch.cfloat)
        target = torch.tensor([1, 0, 4], device=device)
        var = torch.ones(size=input.size(), requires_grad=True, device=device)

        for reduction in ['none', 'invalid']:
            def v(fn):
                # 'invalid' must raise ValueError; 'none' must succeed.
                if reduction == 'invalid':
                    self.assertRaises(ValueError, lambda: fn())
                else:
                    fn()

            v(lambda: F.nll_loss(input, target, reduction=reduction))
            v(lambda: F.cross_entropy(input, target, reduction=reduction))
            v(lambda: F.multi_margin_loss(input, target, reduction=reduction))

            v(lambda: F.kl_div(input, input, reduction=reduction))
            v(lambda: F.huber_loss(input, input, reduction=reduction))
            v(lambda: F.smooth_l1_loss(input, input, reduction=reduction))
            v(lambda: F.l1_loss(input, input, reduction=reduction))
            v(lambda: F.l1_loss(cinput, cinput, reduction=reduction))
            v(lambda: F.mse_loss(input, input, reduction=reduction))
            v(lambda: F.hinge_embedding_loss(input, input, reduction=reduction))
            v(lambda: F.poisson_nll_loss(input, input, reduction=reduction))
            v(lambda: F.gaussian_nll_loss(input, input, var, reduction=reduction))
            v(lambda: F.binary_cross_entropy(torch.sigmoid(input), input, reduction=reduction))
            v(lambda: F.binary_cross_entropy_with_logits(input, input, reduction=reduction))

            zeros = torch.zeros_like(input).to(torch.int64)
            v(lambda: F.multilabel_soft_margin_loss(input, zeros, reduction=reduction))
            v(lambda: F.multilabel_margin_loss(input, zeros, reduction=reduction))

            v(lambda: F.triplet_margin_loss(input, input, input, reduction=reduction))
            v(lambda: F.triplet_margin_with_distance_loss(input, input, input, reduction=reduction))
            v(lambda: F.margin_ranking_loss(input, input, input.sign(), reduction=reduction))
            v(lambda: F.cosine_embedding_loss(input, input, input[:, 0].sign(), reduction=reduction))

            log_probs = torch.randn(50, 16, 20, requires_grad=True, device=device).log_softmax(2)
            targets = torch.randint(1, 20, (16, 30), dtype=torch.long, device=device)
            input_lengths = torch.full((16,), 50, dtype=torch.long, device=device)
            target_lengths = torch.randint(10, 30, (16,), dtype=torch.long, device=device)
            v(lambda: F.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction=reduction))

            # FIXME: should we allow derivatives on these?
            v(lambda: F.soft_margin_loss(input, input.sign().detach(), reduction=reduction))
    @onlyNativeDeviceTypes
    def test_smooth_l1_loss_vs_huber_loss(self, device):
        """SmoothL1Loss(beta) and HuberLoss(delta=beta) are related by
        huber = beta * smooth_l1; they coincide exactly when beta == 1."""
        def _make_test_tensor(shape, contiguous=True):
            if contiguous:
                test_tensor = torch.randn(shape, device=device)
            else:
                # Select every other element in the innermost dimension to
                # make it non-contiguous.
                doubled_shape = list(shape)
                doubled_shape[-1] *= 2
                test_tensor = torch.randn(doubled_shape, device=device)
                test_tensor = test_tensor[..., ::2]
            return test_tensor

        def _test_smooth_l1_loss_vs_huber_loss_helper(input, target, beta, require_equal):
            for reduction in ['mean', 'sum', 'none']:
                smooth_l1 = torch.nn.SmoothL1Loss(beta=beta, reduction=reduction)
                # beta hyper-parameter is called delta for Huber
                huber = torch.nn.HuberLoss(delta=beta, reduction=reduction)
                smooth_l1_loss = smooth_l1(input, target)
                huber_loss = huber(input, target)

                if require_equal:
                    self.assertEqual(smooth_l1_loss, huber_loss)
                else:
                    # When beta != 1: huber_loss == beta * smooth_l1_loss,
                    # i.e. Huber is larger only for beta > 1 (smaller for beta < 1).
                    self.assertEqual(smooth_l1_loss * beta, huber_loss)

        def _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta, require_equal):
            # Test the non-vectorized case.
            shape = (2, 2)
            _test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape),
                                                      target=_make_test_tensor(shape),
                                                      beta=beta,
                                                      require_equal=require_equal)

            # Test the vectorized case (innermost dim > 32).
            shape = (64, 64)
            _test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape),
                                                      target=_make_test_tensor(shape),
                                                      beta=beta,
                                                      require_equal=require_equal)

            # Test the non-contiguous case.
            _test_smooth_l1_loss_vs_huber_loss_helper(input=_make_test_tensor(shape, contiguous=False),
                                                      target=_make_test_tensor(shape, contiguous=False),
                                                      beta=beta,
                                                      require_equal=require_equal)

        def test_equal_when_beta_is_one():
            _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=1.0, require_equal=True)

        def test_unequal_when_beta_is_less_than_one():
            _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=0.5, require_equal=False)

        def test_unequal_when_beta_is_greater_than_one():
            _test_smooth_l1_loss_vs_huber_loss_multi_input_helper(beta=1.5, require_equal=False)

        test_equal_when_beta_is_one()
        test_unequal_when_beta_is_less_than_one()
        test_unequal_when_beta_is_greater_than_one()

    @onlyCPU
    def test_smooth_l1_loss_bfloat16(self, device):
        """SmoothL1Loss in bfloat16 must match the float32 result (values and
        gradients, compared without exact dtype) and keep bfloat16 dtype."""
        def test_dtype(fn, input, target, dtype):
            input = input.detach().clone().to(dtype=dtype).requires_grad_(True)
            input2 = input.detach().clone().float().requires_grad_(True)
            target = target.detach().clone().to(dtype=dtype)
            target2 = target.detach().clone().float()
            out = fn(input, target)
            out.sum().backward()
            out2 = fn(input2, target2)
            out2.sum().backward()
            self.assertEqual(out.dtype, dtype)
            self.assertEqual(input.grad.dtype, dtype)
            self.assertEqual(out, out2, exact_dtype=False)
            self.assertEqual(input.grad, input2.grad, exact_dtype=False)

        def func(device):
            return nn.SmoothL1Loss().to(device=device)

        shapes = [[1, 3, 1, 6], [1, 3, 1, 128], [1, 3, 128, 128]]
        for shape in shapes:
            x = torch.randn(shape, device=device, requires_grad=True)
            t = torch.randn(shape, device=device)
            test_dtype(func(device), x, t, torch.bfloat16)
    # We don't want to make propagating NaN a hard requirement on ops, but for
    # these easy ones, we should make them do so.
    def test_nonlinearity_propagate_nan(self, device):
        """Each listed activation must map NaN to NaN; ops that raise a
        'not implemented' error are tolerated, any other error propagates."""
        def test(nonlinearity, *args, **kwargs):
            x = torch.tensor([nan], device=device)
            fn = getattr(F, nonlinearity)
            try:
                self.assertTrue(math.isnan(fn(x, *args, **kwargs).item()))
            except Exception as e:
                if 'not implemented' not in str(e):
                    raise

        test('relu')
        test('relu', inplace=True)
        test('relu6')
        test('elu')
        test('selu')
        test('celu')
        test('rrelu')
        test('rrelu', inplace=True)
        test('hardtanh')
        test('tanh')
        test('sigmoid')
        test('logsigmoid')
        test('hardshrink')
        test('tanhshrink')
        test('softsign')
        test('softmin', 0)
        test('softmax', 0)
        test('log_softmax', 0)
        test('leaky_relu', 0.2)
        test('threshold', 3, 2)
        test('threshold', 3, 2, inplace=True)

    def test_pooling_shape(self, device):
        ''' Test the output shape calculation for pooling functions '''

        # Checks output shape against expected for 1D, 2D and 3D
        def check(expected_out_shape, sizes, *args, **kwargs):
            for kernel in ['max', 'avg']:
                for i in [1, 2, 3]:
                    if hasattr(torch.nn.functional, f'{kernel}_pool{i}d'):
                        op = getattr(torch.nn.functional, f'{kernel}_pool{i}d')
                        # Truncate the 5-d size spec to the rank this op needs.
                        t = torch.randn(sizes[:i + 2], device=device)
                        self.assertEqual(op(t, *args, **kwargs).shape, expected_out_shape[:i + 2])

        check((1, 1, 3, 3, 4), (1, 1, 5, 6, 7), kernel_size=1, stride=2, padding=0, ceil_mode=True)
        check((1, 1, 2, 3, 3), (1, 1, 3, 4, 5), kernel_size=2, stride=2, padding=1, ceil_mode=False)
        check((1, 1, 2, 3, 3), (1, 1, 3, 4, 5), kernel_size=2, stride=2, padding=1, ceil_mode=True)

        # Test case from issue https://github.com/pytorch/pytorch/issues/45357
        x = torch.randn(1, 1, 6, 7, device=device)
        y = torch.nn.functional.max_pool2d(x, 1, stride=(2, 2), padding=0, ceil_mode=True)
        self.assertEqual(y.size(), (1, 1, 3, 4))
    @onlyNativeDeviceTypes  # TODO: fix on XLA
    def test_adaptive_avg_pool2d_output_size_one(self, device):
        """AdaptiveAvgPool2d((1, 1)) must equal a plain mean over H,W for
        contiguous, channels-last and non-contiguous inputs, and report the
        expected strides for each memory format."""
        def helper(size, memory_format):
            x = torch.randint(1, 10, size, dtype=torch.float, device=device, requires_grad=True)
            if memory_format == 'non_contiguous':
                # Strided view → non-contiguous input.
                x = x[::2, ::2, ::2, ::2]
            else:
                x = x.to(memory_format=memory_format)

            net = torch.nn.AdaptiveAvgPool2d((1, 1))
            out = net(x)
            ref_out = x.contiguous().mean((-1, -2)).view((x.size(0), x.size(1), 1, 1))

            out.sum().backward()    # make sure it doesn't crash

            self.assertEqual(out, ref_out)
            if memory_format == torch.channels_last:
                self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
                c = out.size(1)
                self.assertEqual(out.stride(), [c, 1, c, c])
            else:
                self.assertTrue(out.is_contiguous())
                c = out.size(1)
                self.assertEqual(out.stride(), [c, 1, 1, 1])

        for mf in (torch.contiguous_format, torch.channels_last, 'non_contiguous'):
            helper((2, 3, 6, 6), mf)

    @onlyNativeDeviceTypes
    def test_adaptive_avg_pool3d_output_size_one(self, device):
        """AdaptiveAvgPool3d(1) must equal a plain mean over T,H,W and produce
        a contiguous output with the expected strides."""
        x = torch.randn((2, 3, 6, 6, 6), dtype=torch.float, device=device, requires_grad=True)

        net = torch.nn.AdaptiveAvgPool3d(1)
        out = net(x)
        ref_out = x.contiguous().mean((-1, -2, -3)).view(out.shape)

        out.sum().backward()    # make sure it doesn't crash

        self.assertEqual(out, ref_out)
        self.assertTrue(out.is_contiguous())
        c = out.size(1)
        self.assertEqual(out.stride(), [c, 1, 1, 1, 1])
device, dtype):\n for numel in (2, 3):\n for pool_type in ('Max', 'Avg'):\n cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)\n module_cls = getattr(nn, cls_name)\n output_size = (2,) * numel\n module = module_cls(output_size)\n input = torch.randn((4,) * (numel + 1), device=device).to(dtype)\n with self.assertRaisesRegex(RuntimeError, \"not implemented\"):\n output = module(input)\n\n @onlyNativeDeviceTypes\n @dtypes(torch.float, torch.double)\n @dtypesIfCUDA(torch.half, torch.float, torch.double)\n def test_avg_pool2d_nhwc(self, device, dtype):\n def helper(n, c, h, w, kernel_size, stride=None,\n count_include_pad=True, divisor_override=None, padding=0):\n if stride is None:\n stride = kernel_size\n input = torch.randn(n, c, h, w, dtype=dtype, device=device)\n input = input.contiguous(memory_format=torch.channels_last).requires_grad_()\n grad = torch.randn(n, c, (h - kernel_size) // stride + 1, (w - kernel_size) // stride + 1,\n dtype=dtype, device=device)\n pool = torch.nn.AvgPool2d(kernel_size, stride=stride, count_include_pad=count_include_pad,\n divisor_override=divisor_override).to(device)\n\n ref_input = input.detach().clone().contiguous().requires_grad_(True)\n ref_grad = grad.detach().clone().contiguous()\n ref_pool = torch.nn.AvgPool2d(kernel_size, stride=stride, count_include_pad=count_include_pad,\n divisor_override=divisor_override).to(device)\n\n out = pool(input)\n out.backward(grad)\n ref_out = ref_pool(ref_input)\n ref_out.backward(ref_grad)\n\n self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(ref_out.is_contiguous())\n self.assertEqual(out, ref_out)\n self.assertEqual(input.grad, ref_input.grad)\n\n helper(4, 8, 8, 8, 3)\n helper(4, 8, 8, 8, 3, count_include_pad=False, padding=1)\n helper(4, 8, 8, 8, 3, count_include_pad=False, padding=2, stride=2)\n helper(4, 8, 8, 8, 3, divisor_override=42)\n helper(4, 8, 8, 8, 7)\n # ROCm 16GB MI25 hits OOM error. 
Clear caching allocator prior to running large subtest.\n if TEST_WITH_ROCM and 'cuda' in device:\n torch.cuda.empty_cache()\n helper(200, 512, 28, 28, 2)\n helper(4, 8, 7, 7, 3, stride=1)\n helper(4, 8, 7, 7, 3, padding=2, stride=1)\n helper(10, 512, 31, 31, 3, stride=2)\n helper(1, 129, 8, 8, 3, stride=2)\n\n @onlyCPU\n @dtypes(torch.float)\n def test_max_pool1d_errors(self, device, dtype):\n def check(x, args, message):\n model = torch.nn.MaxPool1d(*args)\n with self.assertRaisesRegex(RuntimeError, r'max_pool1d\\(\\) ' + message):\n model(torch.tensor(x, device=device, dtype=dtype))\n\n # Pooling args: (kernel_size, stride, padding, dilation, return_indices, ceil_mode)\n check(0, (1,), \"Expected 2D or 3D input tensor, but got\")\n check([], (1,), \"Expected 2D or 3D input tensor, but got\")\n check([[]], (1, 0), \"stride must be greater than zero, but got 0\")\n check([[]], (1, 1, -1), \"padding must be non-negative, but got -1\")\n check([[]], (1, 1, 2), \"padding should be at most half of kernel size, but got padding=2 and kernel_size=1\")\n check([[]], (1, 1, 0, 0), \"dilation must be greater than zero, but got 0\")\n check([[]], (5, 1, 0, 1), \"Invalid computed output size: -4\")\n\n @onlyCPU\n @dtypes(torch.float, torch.double)\n def test_max_pool1d_corner_cases(self, device, dtype):\n def check(x, args, expected):\n model = torch.nn.MaxPool1d(*args)\n if isinstance(x, list):\n x = torch.tensor(x, device=device, dtype=dtype)\n expected = torch.tensor(expected, device=device, dtype=dtype)\n self.assertEqual(model(x), expected)\n\n # Pooling args: (kernel_size, stride, padding, dilation, return_indices, ceil_mode)\n check([[]], (1, None, 0, 1, False, False), [[]])\n check([[[]]], (1, None, 0, 1, False, False), [[[]]])\n check([[[]]], (2, 1, 1, 2, False, True), [[[]]])\n check([[1]], (1, None, 0, 1, False, False), [[1]])\n check([[1]], (2, None, 1, 2, False, False), [[float('-inf')]])\n check([[1], [1]], (2, None, 1, 2, False, False), [[float('-inf')], 
    @onlyCPU
    @dtypes(torch.float, torch.double)
    def test_max_pool1d(self, device, dtype):
        """Randomized check that max_pool1d without indices matches the
        return_indices=True variant's first output, including non-contiguous
        and transposed inputs."""
        # FIXME For now compare against max_pool1d with indices
        def check(x, *args, **kwargs):
            model = torch.nn.MaxPool1d(*args, **kwargs)
            ref_model = torch.nn.MaxPool1d(*args, **kwargs, return_indices=True)
            self.assertEqual(model(x), ref_model(x)[0])

        sizes = [random.sample(range(8, 128), 3) for _ in range(3)]
        kernel_sizes = random.sample(range(1, 5), 3)
        strides = random.sample(range(1, 5), 3)
        dilations = random.sample(range(1, 5), 3)
        ceil_modes = [True, False]

        for size, kernel_size, stride, dilation, ceil_mode in \
                itertools.product(sizes, kernel_sizes, strides, dilations, ceil_modes):
            # Padding is constrained to at most half the kernel size.
            padding = random.sample(range(0, math.floor(kernel_size / 2) + 1), 1)
            check(torch.randn(size, device=device, dtype=dtype),
                  kernel_size, stride, padding, dilation, ceil_mode=ceil_mode)

        # Non-contiguous test
        tensor = torch.randn(5, 151, 33, device=device, dtype=dtype)[::2, ::3, ::2]
        check(tensor, 3, 2, 1, 2, ceil_mode=True)
        check(tensor.transpose(1, 2), 3, 2, 1, 2, ceil_mode=True)

    @onlyCUDA
    def test_max_pool2d(self, device):
        """MaxPool2d forward/backward on CUDA must match the CPU reference,
        including very large channel counts."""
        def helper(n, c, h, w, ks):
            x = torch.randn(n, c, h, w, device='cuda', dtype=torch.float, requires_grad=True)
            ref_x = x.detach().clone().cpu().requires_grad_()

            pool = torch.nn.MaxPool2d(kernel_size=ks)

            y = pool(x)
            ref_y = pool(ref_x)

            y.sum().backward()
            ref_y.sum().backward()

            self.assertEqual(y, ref_y)
            self.assertEqual(x.grad, ref_x.grad)

        helper(2, 8, 4, 4, ks=2)
        helper(1, 100000, 32, 32, ks=4)
        helper(1, 100000, 1, 4, ks=(1, 4))  # test for max_pool1d
@dtypesIfCUDA(torch.half, torch.float, torch.double)\n def test_max_pool2d_nhwc(self, device, dtype):\n def helper(n, c, h, w, kernel_size, stride=None):\n if stride is None:\n stride = kernel_size\n input = torch.randn(n, c, h, w, dtype=dtype, device=device)\n input = input.contiguous(memory_format=torch.channels_last).requires_grad_()\n grad = torch.randn(n, c, (h - kernel_size) // stride + 1, (w - kernel_size) // stride + 1,\n dtype=dtype, device=device)\n pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)\n\n ref_input = input.detach().clone().contiguous().requires_grad_(True)\n ref_grad = grad.detach().clone().contiguous()\n ref_pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)\n\n out, ind = pool(input)\n out.backward(grad)\n ref_out, ref_ind = ref_pool(ref_input)\n ref_out.backward(ref_grad)\n\n self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(ref_out.is_contiguous())\n self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(ref_ind.is_contiguous())\n self.assertEqual(out, ref_out)\n self.assertEqual(ind, ref_ind)\n self.assertEqual(input.grad, ref_input.grad)\n\n helper(4, 8, 8, 8, 7)\n helper(200, 512, 28, 28, 2)\n helper(4, 8, 7, 7, 3, stride=1)\n helper(10, 512, 31, 31, 3, stride=2)\n helper(1, 129, 8, 8, 3, stride=2)\n\n @onlyCPU\n def test_max_pool2d_bfloat16(self, device):\n def helper(n, c, h, w, kernel_size, stride, memory_format):\n input = torch.randn(n, c, h, w, dtype=torch.float32, device=device).bfloat16()\n input = input.to(memory_format=memory_format).requires_grad_()\n pool = torch.nn.MaxPool2d(kernel_size, stride, return_indices=True).to(device)\n\n input2 = input.detach().clone().float().requires_grad_(True)\n\n out, ind = pool(input)\n out.sum().backward()\n out2, ind2 = pool(input2)\n out2.sum().backward()\n\n self.assertTrue(out.is_contiguous(memory_format=memory_format))\n self.assertEqual(out.dtype, 
torch.bfloat16)\n self.assertEqual(input.grad.dtype, torch.bfloat16)\n self.assertEqual(out, out2.bfloat16())\n self.assertEqual(ind, ind2)\n self.assertEqual(input.grad, input2.grad.bfloat16())\n\n helper(4, 30, 8, 8, 7, 1, torch.contiguous_format)\n helper(4, 65, 8, 8, 7, 1, torch.channels_last)\n helper(1, 19, 20, 10, 8, 2, torch.contiguous_format)\n helper(1, 19, 20, 10, 8, 2, torch.channels_last)\n\n @onlyCUDA\n def test_max_pool2d_indices(self, device):\n def helper(n, c, h, w, ks):\n if n is None:\n x = torch.randn(c, h, w, device='cuda', dtype=torch.float, requires_grad=True)\n else:\n x = torch.randn(n, c, h, w, device='cuda', dtype=torch.float, requires_grad=True)\n\n ref_x = x.detach().clone().cpu().requires_grad_()\n\n pool = torch.nn.MaxPool2d(kernel_size=ks, return_indices=True)\n\n y, idx = pool(x)\n ref_y, ref_idx = pool(ref_x)\n\n y.sum().backward()\n ref_y.sum().backward()\n\n self.assertEqual(y, ref_y)\n self.assertEqual(idx, ref_idx) # assertEqual implicitly compares shape for tensors\n self.assertEqual(x.grad, ref_x.grad)\n\n helper(2, 8, 4, 4, ks=2)\n helper(None, 3, 50, 50, ks=5)\n\n @onlyCPU\n def test_avg_pool2d_bfloat16(self, device):\n def helper(n, c, h, w, kernel_size, stride, memory_format):\n input = torch.randn(n, c, h, w, dtype=torch.float32, device=device).bfloat16()\n input = input.to(memory_format=memory_format).requires_grad_()\n pool = torch.nn.AvgPool2d(kernel_size, stride).to(device)\n\n input2 = input.detach().clone().float().requires_grad_(True)\n\n out = pool(input)\n out.sum().backward()\n out2 = pool(input2)\n out2.sum().backward()\n\n self.assertTrue(out.is_contiguous(memory_format=memory_format))\n self.assertEqual(out.dtype, torch.bfloat16)\n self.assertEqual(input.grad.dtype, torch.bfloat16)\n self.assertEqual(out, out2.bfloat16())\n self.assertEqual(input.grad, input2.grad.bfloat16())\n\n helper(4, 30, 8, 8, 7, 1, torch.contiguous_format)\n helper(4, 65, 8, 8, 7, 1, torch.channels_last)\n helper(1, 19, 20, 10, 8, 2, 
torch.contiguous_format)\n helper(1, 19, 20, 10, 8, 2, torch.channels_last)\n\n def test_upsamplingNearest1d(self, device):\n # Forward AD does not support XLA because XLA tensors don't have storage\n check_forward_ad = torch.device(device).type != 'xla'\n\n def helper(mode):\n m = nn.Upsample(size=4, mode=mode)\n in_t = torch.ones(1, 1, 2, device=device)\n in_uint8_t = torch.ones(1, 1, 2, dtype=torch.uint8, device=device)\n with warnings.catch_warnings(record=True) as w:\n out_t = m(in_t)\n out_uint8_t = m(in_uint8_t)\n self.assertEqual(torch.ones(1, 1, 4, device=device), out_t.data)\n self.assertEqual(torch.ones(1, 1, 4, dtype=torch.uint8, device=device), out_uint8_t.data)\n\n # Checks upsampling\n input = torch.randn(1, 1, 2, requires_grad=True, device=device)\n gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_forward_ad=check_forward_ad)\n gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)\n\n # Checks downsampling\n input = torch.randn(1, 1, 20, requires_grad=True, device=device)\n gradcheck(lambda x: F.interpolate(x, 11, mode=mode), [input], check_forward_ad=check_forward_ad)\n gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)\n\n # consistency CUDA/CPU check\n if torch.device(device).type == 'cuda':\n input_cuda = torch.randn(1, 1, 20, device=device)\n input_cpu = input_cuda.cpu()\n output_cuda = F.interpolate(input_cuda, 4, mode=mode)\n output_cpu = F.interpolate(input_cpu, 4, mode=mode)\n self.assertEqual(output_cuda.cpu(), output_cpu)\n\n output_cuda = F.interpolate(input_cuda, 24, mode=mode)\n output_cpu = F.interpolate(input_cpu, 24, mode=mode)\n self.assertEqual(output_cuda.cpu(), output_cpu)\n\n helper(\"nearest\")\n helper(\"nearest-exact\")\n\n def test_upsamplingNearest1d_correctness(self, device):\n # Here we check if output matches OpenCV's INTER_NEAREST-like result\n def helper(isize, osize):\n in_t = torch.arange(isize, 
dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
            out_t = F.interpolate(
                in_t, size=(osize, ), recompute_scale_factor=False, mode="nearest"
            )
            # compute expected output as OpenCV
            expected_out = torch.zeros(osize, dtype=torch.float).unsqueeze(0).unsqueeze(0)
            scale = 1.0 * isize / osize
            for o in range(osize):
                # OpenCV-style nearest: source index = floor(o * scale).
                i_f32 = o * scale
                i = int(i_f32)
                expected_out[0, 0, o] = in_t[0, 0, i]
            expected_out = expected_out.to(device=device)
            self.assertEqual(out_t, expected_out)

        helper(20, 11)
        helper(10, 15)

    # "nearest-exact" interpolation at scale factors infinitesimally above
    # 1x and 2x must behave like exact 1x (identity) and exact 2x
    # (element duplication) respectively.
    def test_upsamplingNearestExact1d_rescale(self, device):
        # Checks https://github.com/pytorch/pytorch/issues/62237
        isize = 20
        in_t = torch.arange(isize, dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
        # for s in [1.00001, 0.99999]: # 0.9999 case is broken
        # See issue: https://github.com/pytorch/pytorch/issues/62396
        for s in [1.00001, ]:
            out_t = F.interpolate(
                in_t, scale_factor=s, recompute_scale_factor=False, mode="nearest-exact"
            )
            # Output size rounds down to isize, so this must be the identity.
            expected_out = in_t
            self.assertEqual(out_t, expected_out, msg=f"scale: {s}")

        # checks data duplication if output_size == 2 * input_size
        # for s in [2.00001, 1.99999]: # 1.99999 case is broken
        # See issue: https://github.com/pytorch/pytorch/issues/62396
        for s in [2.00001, ]:
            out_t = F.interpolate(
                in_t, scale_factor=s, recompute_scale_factor=False, mode="nearest-exact"
            )
            # input is [[[0, 1, 2, 3, ..., 9]]]
            # expected out is [[[0, 0, 1, 1, 2, 2, ..., 9, 9]]]
            expected_out = in_t.repeat_interleave(2, dim=-1)
            self.assertEqual(out_t, expected_out)

    # 1D "nearest-exact" output must match the Scikit-Image/Scipy
    # convention: source index = floor((o + 0.5) * scale).
    def test_upsamplingNearestExact1d_correctness(self, device):
        # Here we check if output matches Scikit-Image/Scipy-like result
        # Checks https://github.com/pytorch/pytorch/issues/34808
        def helper(isize, osize):
            in_t = torch.arange(isize, dtype=torch.float, device=device).unsqueeze(0).unsqueeze(0)
            out_t = F.interpolate(
                in_t, size=(osize, ), recompute_scale_factor=False, mode="nearest-exact"
)\n # compute expected output as scikit-image/scipy\n expected_out = torch.zeros(osize, dtype=torch.float).unsqueeze(0).unsqueeze(0)\n scale = 1.0 * isize / osize\n for o in range(osize):\n i_f32 = (o + 0.5) * scale\n i = int(i_f32)\n expected_out[0, 0, o] = in_t[0, 0, i]\n expected_out = expected_out.to(device=device)\n self.assertEqual(out_t, expected_out)\n\n helper(20, 11)\n helper(10, 15)\n\n def test_upsamplingNearest2d(self, device):\n # Forward AD does not support XLA because XLA tensors don't have storage\n check_forward_ad = torch.device(device).type != 'xla'\n\n def helper(memory_format, mode):\n in_t = torch.ones(1, 2, 2, 2, device=device).contiguous(memory_format=memory_format)\n in_uint8_t = torch.ones(1, 2, 2, 2, dtype=torch.uint8, device=device).contiguous(memory_format=memory_format)\n with warnings.catch_warnings(record=True) as w:\n out_t = F.interpolate(in_t, size=4, mode=mode)\n out_uint8_t = F.interpolate(in_uint8_t, size=4, mode=mode)\n self.assertEqual(len(w), 0)\n self.assertEqual(torch.ones(1, 2, 4, 4, device=device), out_t)\n self.assertEqual(torch.ones(1, 2, 4, 4, dtype=torch.uint8, device=device), out_uint8_t)\n # Assert that memory format is carried through to the output\n self.assertTrue(out_t.is_contiguous(memory_format=memory_format))\n\n # test forward when input's height is not same as width\n in_t = torch.ones(1, 2, 2, 1, device=device).contiguous(memory_format=memory_format).requires_grad_()\n out_t = F.interpolate(in_t, size=(4, 2), mode=mode)\n self.assertEqual(torch.ones(1, 2, 4, 2, device=device), out_t)\n self.assertTrue(out_t.is_contiguous(memory_format=memory_format))\n\n out_t.backward(torch.randn_like(out_t))\n self.assertTrue(in_t.grad.is_contiguous(memory_format=memory_format))\n\n # test backward when input's height is not same as width\n input = torch.ones(1, 2, 2, 1, requires_grad=True, device=device).contiguous(memory_format=memory_format)\n gradcheck(lambda x: F.interpolate(x, size=(4, 2), mode=mode), [input], 
check_forward_ad=check_forward_ad)\n gradgradcheck(lambda x: F.interpolate(x, size=(4, 2), mode=mode), [input], check_fwd_over_rev=check_forward_ad)\n\n input = torch.randn(1, 2, 2, 2, requires_grad=True, device=device).contiguous(memory_format=memory_format)\n self.assertEqual(\n F.interpolate(input, 4, mode=mode),\n F.interpolate(input, scale_factor=2, mode=mode))\n gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_forward_ad=check_forward_ad)\n gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)\n\n # Assert that cpu and cuda handle channels_last memory format in the same way\n # https://github.com/pytorch/pytorch/issues/54590\n if torch.device(device).type == 'cuda':\n for shapes, scale_factor in product([\n (2, 2, 3, 4), (2, 3, 4, 5), (3, 1, 2, 2), (1, 5, 3, 2)\n ], [0.5, 1.5, 2]):\n a_cuda = torch.randn(*shapes, device=device).contiguous(memory_format=memory_format).requires_grad_()\n a_cpu = a_cuda.detach().cpu().requires_grad_()\n\n out_cuda = F.interpolate(a_cuda, scale_factor=scale_factor, mode=mode)\n out_cpu = F.interpolate(a_cpu, scale_factor=scale_factor, mode=mode)\n\n self.assertEqual(out_cpu.cuda(), out_cuda)\n\n g_cuda = torch.randn_like(out_cuda)\n g_cpu = g_cuda.cpu()\n\n out_cuda.backward(g_cuda)\n out_cpu.backward(g_cpu)\n\n self.assertEqual(a_cuda.grad, a_cpu.grad)\n\n helper(torch.contiguous_format, \"nearest\")\n helper(torch.channels_last, \"nearest\")\n # Uncomment below once F.interpolate is updated\n helper(torch.contiguous_format, \"nearest-exact\")\n helper(torch.channels_last, \"nearest-exact\")\n\n def test_upsamplingNearest2d_correctness(self, device):\n # Here we check if output matches OpenCV's INTER_NEAREST-like result\n def helper(memory_format, isize, osize):\n in_t = torch.arange(isize * isize, dtype=torch.float, device=device).reshape(1, 1, isize, isize)\n in_t = in_t.contiguous(memory_format=memory_format)\n out_t = F.interpolate(\n in_t, size=(osize, 
osize), recompute_scale_factor=False, mode=\"nearest\"\n )\n # compute expected output as OpenCV\n expected_out = torch.zeros(1, 1, osize, osize, dtype=torch.float)\n scale = 1.0 * isize / osize\n for o1 in range(osize):\n i1_f32 = o1 * scale\n i1 = int(i1_f32)\n for o2 in range(osize):\n i2_f32 = o2 * scale\n i2 = int(i2_f32)\n expected_out[0, 0, o1, o2] = in_t[0, 0, i1, i2]\n expected_out = expected_out.to(device=device)\n self.assertEqual(out_t, expected_out)\n\n helper(torch.contiguous_format, 20, 11)\n helper(torch.channels_last, 20, 11)\n helper(torch.contiguous_format, 10, 15)\n helper(torch.channels_last, 10, 15)\n\n def test_upsamplingNearestExact2d_correctness(self, device):\n # Here we check if output matches Scikit-Image/Scipy-like result\n # Checks https://github.com/pytorch/pytorch/issues/34808\n def helper(memory_format, isize, osize):\n in_t = torch.arange(isize * isize, dtype=torch.float, device=device).reshape(1, 1, isize, isize)\n in_t = in_t.contiguous(memory_format=memory_format)\n out_t = F.interpolate(\n in_t, size=(osize, osize), recompute_scale_factor=False, mode=\"nearest-exact\"\n )\n # compute expected output as Scikit-Image/Scipy\n expected_out = torch.zeros(1, 1, osize, osize, dtype=torch.float)\n scale = 1.0 * isize / osize\n for o1 in range(osize):\n i1_f32 = (o1 + 0.5) * scale\n i1 = int(i1_f32)\n for o2 in range(osize):\n i2_f32 = (o2 + 0.5) * scale\n i2 = int(i2_f32)\n expected_out[0, 0, o1, o2] = in_t[0, 0, i1, i2]\n expected_out = expected_out.to(device=device)\n self.assertEqual(out_t, expected_out)\n\n helper(torch.contiguous_format, 20, 11)\n helper(torch.channels_last, 20, 11)\n helper(torch.contiguous_format, 10, 15)\n helper(torch.channels_last, 10, 15)\n\n def test_upsamplingNearest3d(self, device):\n # Forward AD does not support XLA because XLA tensors don't have storage\n check_forward_ad = torch.device(device).type != 'xla'\n\n def helper(memory_format, mode):\n m = nn.Upsample(size=4, mode=mode)\n in_t = 
torch.ones(1, 2, 2, 2, 2, device=device).contiguous(memory_format=memory_format)\n in_uint8_t = torch.ones(\n 1, 2, 2, 2, 2, dtype=torch.uint8, device=device\n ).contiguous(memory_format=memory_format)\n with warnings.catch_warnings(record=True) as w:\n out_t = m(in_t)\n out_uint8_t = m(in_uint8_t)\n expected_output = torch.ones(1, 2, 4, 4, 4, device=device)\n self.assertEqual(expected_output, out_t)\n self.assertEqual(expected_output.to(torch.uint8), out_uint8_t)\n # Assert that memory format is carried through to the output\n self.assertTrue(out_t.is_contiguous(memory_format=memory_format))\n\n input = torch.randn(\n 1, 2, 2, 2, 2, requires_grad=True, device=device\n ).contiguous(memory_format=memory_format)\n gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_forward_ad=check_forward_ad)\n gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [input], check_fwd_over_rev=check_forward_ad)\n\n # Assert that cpu and cuda handle channels_last memory format in the same way\n # https://github.com/pytorch/pytorch/issues/54590\n if torch.device(device).type == 'cuda':\n a = torch.ones(\n 2, 2, 2, 3, 4, device=device, requires_grad=True\n ).contiguous(memory_format=torch.channels_last_3d)\n # make the data asymmetric; ensure that cuda/cpu handle channels_last appropriately.\n a[1][1][1][2][2] = a[1][1][1][2][3] = 0\n\n out_cuda = torch.nn.functional.interpolate(a, scale_factor=2, mode=mode)\n out_cpu = torch.nn.functional.interpolate(a.to('cpu'), scale_factor=2, mode=mode)\n self.assertEqual(out_cpu, out_cuda.to('cpu'))\n\n gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a], check_forward_ad=check_forward_ad)\n gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a], check_fwd_over_rev=check_forward_ad)\n\n gradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a.to('cuda')], check_forward_ad=check_forward_ad)\n gradgradcheck(lambda x: F.interpolate(x, 4, mode=mode), [a.to('cuda')], check_fwd_over_rev=check_forward_ad)\n\n 
helper(torch.contiguous_format, \"nearest\")\n helper(torch.channels_last_3d, \"nearest\")\n helper(torch.contiguous_format, \"nearest-exact\")\n helper(torch.channels_last_3d, \"nearest-exact\")\n\n def test_upsamplingNearest3d_correctness(self, device):\n # Here we check if output matches OpenCV's INTER_NEAREST-like result\n def helper(memory_format, isize, osize):\n in_t = torch.arange(isize * isize * isize, dtype=torch.float, device=device)\n in_t = in_t.reshape(1, 1, isize, isize, isize)\n in_t = in_t.contiguous(memory_format=memory_format)\n out_t = F.interpolate(\n in_t, size=(osize, osize, osize), recompute_scale_factor=False, mode=\"nearest\"\n )\n # compute expected output as OpenCV\n expected_out = torch.zeros(1, 1, osize, osize, osize, dtype=torch.float)\n scale = 1.0 * isize / osize\n for o1 in range(osize):\n i1_f32 = o1 * scale\n i1 = int(i1_f32)\n for o2 in range(osize):\n i2_f32 = o2 * scale\n i2 = int(i2_f32)\n for o3 in range(osize):\n i3_f32 = o3 * scale\n i3 = int(i3_f32)\n expected_out[0, 0, o1, o2, o3] = in_t[0, 0, i1, i2, i3]\n expected_out = expected_out.to(device=device)\n self.assertEqual(out_t, expected_out)\n\n helper(torch.contiguous_format, 20, 11)\n helper(torch.channels_last_3d, 20, 11)\n helper(torch.contiguous_format, 10, 15)\n helper(torch.channels_last_3d, 10, 15)\n\n def test_upsamplingNearestExact3d_correctness(self, device):\n # Here we check if output matches Scikit-Image/Scipy-like result\n # Checks https://github.com/pytorch/pytorch/issues/34808\n def helper(memory_format, isize, osize):\n in_t = torch.arange(isize * isize * isize, dtype=torch.float, device=device)\n in_t = in_t.reshape(1, 1, isize, isize, isize)\n in_t = in_t.contiguous(memory_format=memory_format)\n out_t = F.interpolate(\n in_t, size=(osize, osize, osize), recompute_scale_factor=False, mode=\"nearest-exact\"\n )\n # compute expected output as Scikit-Image/Scipy\n expected_out = torch.zeros(1, 1, osize, osize, osize, dtype=torch.float)\n scale = 1.0 * 
isize / osize\n for o1 in range(osize):\n i1_f32 = (o1 + 0.5) * scale\n i1 = int(i1_f32)\n for o2 in range(osize):\n i2_f32 = (o2 + 0.5) * scale\n i2 = int(i2_f32)\n for o3 in range(osize):\n i3_f32 = (o3 + 0.5) * scale\n i3 = int(i3_f32)\n expected_out[0, 0, o1, o2, o3] = in_t[0, 0, i1, i2, i3]\n expected_out = expected_out.to(device=device)\n self.assertEqual(out_t, expected_out)\n\n helper(torch.contiguous_format, 20, 11)\n helper(torch.channels_last_3d, 20, 11)\n helper(torch.contiguous_format, 10, 15)\n helper(torch.channels_last_3d, 10, 15)\n\n @parametrize_test(\"antialias\", [True, False])\n @parametrize_test(\"align_corners\", [True, False])\n def test_upsamplingBilinear2d(self, device, antialias, align_corners):\n # Forward AD does not support XLA because XLA tensors don't have storage\n check_forward_ad = torch.device(device).type != 'xla'\n\n kwargs = dict(mode='bilinear', align_corners=align_corners, antialias=antialias)\n for memory_format in [torch.contiguous_format, torch.channels_last]:\n # test float scale factor up & downsampling\n for scale_factor in [0.5, 1.5, 2]:\n in_t = torch.ones(2, 3, 8, 8, device=device).contiguous(memory_format=memory_format).requires_grad_()\n out_size = int(math.floor(in_t.shape[-1] * scale_factor))\n with warnings.catch_warnings(record=True) as w:\n out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)\n self.assertEqual(torch.ones(2, 3, out_size, out_size, device=device), out_t.data)\n # Assert that memory format is carried through to the output\n self.assertTrue(out_t.is_contiguous(memory_format=memory_format))\n out_t.backward(torch.randn_like(out_t))\n self.assertTrue(in_t.grad.is_contiguous(memory_format=memory_format))\n\n if torch.device(device).type == 'cuda':\n # Bilinear backward is nondeterministic because of atomicAdd usage\n nondet_tol = 1e-5\n else:\n nondet_tol = 0.0\n\n input = torch.randn(2, 3, 8, 8, device=device).contiguous(memory_format=memory_format).requires_grad_()\n gradcheck(\n 
lambda x: F.interpolate(x, out_size, **kwargs),\n [input],\n check_forward_ad=check_forward_ad, nondet_tol=nondet_tol\n )\n gradgradcheck(\n lambda x: F.interpolate(x, out_size, **kwargs),\n [input],\n check_fwd_over_rev=check_forward_ad, nondet_tol=nondet_tol\n )\n\n # Assert that cpu and cuda give same results\n if torch.device(device).type == 'cuda':\n for shapes in [\n (2, 2, 3, 4), (2, 3, 4, 5), (3, 1, 2, 2), (1, 5, 3, 2)\n ]:\n a_cuda = torch.randn(\n *shapes, device=device\n ).contiguous(memory_format=memory_format).requires_grad_()\n a_cpu = a_cuda.detach().cpu().requires_grad_()\n\n with warnings.catch_warnings(record=True):\n out_cuda = F.interpolate(a_cuda, scale_factor=scale_factor, **kwargs)\n out_cpu = F.interpolate(a_cpu, scale_factor=scale_factor, **kwargs)\n\n self.assertEqual(out_cpu, out_cuda.cpu())\n\n g_cuda = torch.randn_like(out_cuda)\n g_cpu = g_cuda.cpu()\n\n out_cuda.backward(g_cuda)\n out_cpu.backward(g_cpu)\n\n self.assertEqual(a_cuda.grad, a_cpu.grad)\n\n @parametrize_test(\"memory_format\", [torch.contiguous_format, torch.channels_last])\n def test_upsamplingBilinear2d_aa_correctness(self, device, memory_format):\n t_in = torch.arange(3 * 8 * 8, dtype=torch.float, device=device).reshape(1, 3, 8, 8)\n t_in = t_in.contiguous(memory_format=memory_format)\n # This expected result is obtain using PIL.Image.resize\n # for c in range(3):\n # a_in = t_in.numpy()[0, c, ...]\n # pil_in = Image.fromarray(a_in)\n # pil_out = pil_in.resize((2, 2), resample=Image.LINEAR)\n expected_out = torch.tensor([\n 17.035713, 20.25, 42.75, 45.964287, 81.03572, 84.25,\n 106.75, 109.96428, 145.0357, 148.25, 170.75, 173.9643\n ], device=device, dtype=t_in.dtype).reshape(1, 3, 2, 2)\n t_out = F.interpolate(t_in, size=(2, 2), mode=\"bilinear\", align_corners=False, antialias=True)\n self.assertEqual(expected_out, t_out)\n\n @parametrize_test(\"antialias\", [True, False])\n @parametrize_test(\"align_corners\", [True, False])\n def test_upsamplingBicubic2d(self, 
device, antialias, align_corners):\n kwargs = dict(mode='bicubic', align_corners=align_corners, antialias=antialias)\n # test float scale factor up & downsampling\n # for scale_factor in [0.5, 1, 1.5, 2]:\n for scale_factor in [2, ]:\n in_t = torch.ones(2, 3, 8, 8, device=device)\n out_t = F.interpolate(in_t, scale_factor=scale_factor, **kwargs)\n out_size = int(math.floor(in_t.shape[-1] * scale_factor))\n expected_out = torch.ones(2, 3, out_size, out_size, device=device)\n self.assertEqual(expected_out, out_t, atol=1e-5, rtol=0)\n\n if torch.device(device).type == 'cuda':\n # Bicubic backward is nondeterministic because of atomicAdd usage\n nondet_tol = 1e-5\n else:\n nondet_tol = 0.0\n inpt = torch.ones(2, 3, 8, 8, requires_grad=True, device=device)\n gradcheck(lambda x: F.interpolate(x, out_size, **kwargs), [inpt], nondet_tol=nondet_tol)\n\n def test_upsamplingBicubic2d_correctness(self, device):\n # test output against known input: align_corners=False result must match opencv\n in_t = torch.arange(8., device=device).view(1, 2, 2, 2)\n expected_out_t = torch.tensor(\n [[[[-0.31641, 0.01562, 0.56250, 0.89453],\n [0.34766, 0.67969, 1.22656, 1.55859],\n [1.44141, 1.77344, 2.32031, 2.65234],\n [2.10547, 2.43750, 2.98438, 3.31641]],\n\n [[3.68359, 4.01562, 4.56250, 4.89453],\n [4.34766, 4.67969, 5.22656, 5.55859],\n [5.44141, 5.77344, 6.32031, 6.65234],\n [6.10547, 6.43750, 6.98438, 7.31641]]]], device=device)\n out_t = F.interpolate(in_t, scale_factor=2, mode='bicubic', align_corners=False)\n torch.set_printoptions(precision=5)\n self.assertEqual(out_t, expected_out_t, atol=1e-5, rtol=0)\n\n @parametrize_test(\"memory_format\", [torch.contiguous_format, torch.channels_last])\n def test_upsamplingBicubic2d_aa_correctness(self, device, memory_format):\n t_in = torch.arange(3 * 8 * 8, dtype=torch.float, device=device).reshape(1, 3, 8, 8)\n t_in = t_in.contiguous(memory_format=memory_format)\n # This expected result is obtain using PIL.Image.resize\n # for c in 
range(3):\n # a_in = t_in.numpy()[0, c, ...]\n # pil_in = Image.fromarray(a_in)\n # pil_out = pil_in.resize((2, 2), resample=Image.BICUBIC)\n expected_out = torch.tensor([\n 15.1205635, 18.760439, 44.23956, 47.879436, 79.12056, 82.76044,\n 108.23956, 111.87944, 143.12057, 146.76044, 172.23956, 175.87943\n ], device=device, dtype=t_in.dtype).reshape(1, 3, 2, 2)\n t_out = F.interpolate(t_in, size=(2, 2), mode=\"bicubic\", align_corners=False, antialias=True)\n self.assertEqual(expected_out, t_out)\n\n @dtypes(torch.float, torch.double)\n def test_adaptive_pooling_max_nhwc(self, device, dtype):\n def helper(n, c, h, w, output_height, output_width, contig):\n input = torch.randint(1, 10, (n, c, h, w), device=device, dtype=dtype)\n input = input.contiguous(memory_format=torch.channels_last)\n grad = torch.randint(1, 10, (4, 8, output_height, output_width), device=device, dtype=dtype)\n grad = grad.contiguous(memory_format=torch.channels_last)\n if not contig:\n input = input[:, ::2, :, :]\n grad = grad[:, ::2, :, :]\n input.requires_grad_(True)\n pool = torch.nn.AdaptiveMaxPool2d((output_height, output_width), return_indices=True).to(device)\n\n ref_input = input.detach().clone().contiguous().requires_grad_(True)\n ref_grad = grad.detach().clone().contiguous()\n ref_pool = torch.nn.AdaptiveMaxPool2d((output_height, output_width), return_indices=True).to(device)\n\n out, ind = pool(input)\n out.backward(grad)\n ref_out, ref_ind = ref_pool(ref_input)\n ref_out.backward(ref_grad)\n\n self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(ref_out.is_contiguous())\n self.assertTrue(ind.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(ref_ind.is_contiguous())\n self.assertEqual(out, ref_out)\n self.assertEqual(ind, ref_ind)\n self.assertEqual(input.grad, ref_input.grad)\n\n for contig in [True, False]:\n helper(4, 8, 10, 10, 7, 7, contig)\n helper(4, 8, 9, 14, 5, 8, contig)\n helper(4, 8, 11, 11, 1, 1, contig)\n\n def 
test_embedding_dense_grad(self, device):
        # Gradcheck/gradgradcheck of dense nn.Embedding weights through
        # torch.nn.functional.embedding.
        embd = nn.Embedding(20, 20).to(device)
        weight = embd.weight

        def fn_wrapper(device):
            # Close over `device` so the lookup indices land on the
            # device under test.
            def fn(weight):
                inp = torch.tensor([[0, 1, 1, 2], [3, 5, 7, 11]], dtype=torch.long).to(device)
                return torch.nn.functional.embedding(inp, weight)
            return fn

        fn = fn_wrapper(device)
        _assertGradAndGradgradChecks(self, fn, (weight, ))

    # F.embedding must reject 0-D and 3-D weight tensors with a clear error.
    def test_embedding_scalar_weight_error(self, device):
        indices = torch.rand(2, 2, device=device).long()
        weights = [
            torch.tensor(1.0, device=device),
            torch.tensor(1.0, device=device).reshape(1, 1, 1),
        ]

        for weight in weights:
            with self.assertRaisesRegex(RuntimeError, "'weight' must be 2-D"):
                torch.nn.functional.embedding(indices, weight)

    # Sparse-embedding backward: check the COO indices/values of
    # weight.grad after single and repeated backward passes, including
    # accumulation and an in-place index change between passes.
    @dtypesIfCUDA(torch.float16, torch.float64)
    @dtypes(torch.float64)
    def test_embedding_backward(self, device, dtype):
        embedding = nn.Embedding(10, 3, sparse=True)
        tensor = torch.tensor([[7, 1, 3]])
        ones = torch.tensor(1., dtype=dtype).expand(3, 3)
        # Doubled fixtures model two accumulated backward passes.
        tensorTwice = tensor.repeat(1, 2)
        onesTwice = torch.cat((ones, ones))

        embedding = embedding.to(dtype=dtype).to(device)
        tensor = tensor.to(device)
        ones = ones.to(device)
        tensorTwice = tensorTwice.to(device)
        onesTwice = onesTwice.to(device)

        # Single backward: sparse grad holds the looked-up rows with value 1.
        embedding.zero_grad()
        embedding(tensor[0]).sum().backward()
        self.assertEqual(embedding.weight.grad._indices(), tensor)
        self.assertEqual(embedding.weight.grad._values(), ones)

        # Two backwards without zero_grad: sparse grads concatenate.
        embedding.zero_grad()
        embedding(tensor[0]).sum().backward()
        embedding(tensor[0]).sum().backward()
        self.assertEqual(embedding.weight.grad._indices(), tensorTwice)
        self.assertEqual(embedding.weight.grad._values(), onesTwice)

        # Mutating the index tensor between passes must be reflected in
        # the second pass's recorded indices.
        embedding.zero_grad()
        embedding(tensor[0]).sum().backward()
        tensor[0, 0] = 8
        embedding(tensor[0]).sum().backward()
        tensorTwice[0, 3] = 8
        self.assertEqual(embedding.weight.grad._indices(), tensorTwice)
        self.assertEqual(embedding.weight.grad._values(), onesTwice)

    @dtypesIfCUDA(*((torch.float,
torch.double, torch.bfloat16, torch.half)\n if TEST_WITH_ROCM else (torch.float, torch.double, torch.half)))\n @dtypes(torch.float32)\n def test_embedding_padding_idx(self, device, dtype):\n embedding = nn.Embedding(10, 20, padding_idx=0).to(device, dtype)\n input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long).to(device)\n output = embedding(input)\n self.assertEqual(output[0][0].sum(), 0)\n self.assertEqual(output[1][2].sum(), 0)\n\n embedding = nn.Embedding(10, 20, padding_idx=0, sparse=True).to(device, dtype)\n input = torch.tensor([[0, 2, 4, 5], [4, 3, 0, 9]], dtype=torch.long).to(device)\n output = embedding(input)\n self.assertEqual(output[0][0].sum(), 0)\n self.assertEqual(output[1][2].sum(), 0)\n\n # negative indexing check for padding_idx\n # padding_idx=-2, num_embeddings=10 ==> index 8 padded\n embedding = nn.Embedding(10, 20, padding_idx=-2).to(device, dtype)\n input = torch.tensor([[0, 2, 8, 5], [4, 8, 0, 9]], dtype=torch.long).to(device)\n output = embedding(input)\n self.assertEqual(output[0][2].sum(), 0)\n self.assertEqual(output[1][1].sum(), 0)\n\n embedding = nn.Embedding(10, 20, padding_idx=-2, sparse=True).to(device, dtype)\n input = torch.tensor([[0, 2, 8, 5], [4, 8, 0, 9]], dtype=torch.long).to(device)\n output = embedding(input)\n self.assertEqual(output[0][2].sum(), 0)\n self.assertEqual(output[1][1].sum(), 0)\n\n # change padding vector\n padding_vector = torch.ones(20, dtype=dtype, device=device)\n embedding = nn.Embedding(10, 20, padding_idx=2, sparse=True).to(device, dtype)\n with torch.no_grad():\n embedding.weight[2] = padding_vector\n input = torch.tensor([0, 2], dtype=torch.long).to(device)\n output = embedding(input)\n self.assertEqual(output[1], padding_vector)\n\n # out of bounds check for padding_idx\n self.assertRaises(AssertionError, nn.Embedding, num_embeddings=10, embedding_dim=20, padding_idx=25)\n self.assertRaises(AssertionError, nn.Embedding, num_embeddings=10, embedding_dim=20, padding_idx=-25)\n\n 
padding_idx = 0\n embedding = nn.Embedding(5, 2, padding_idx=padding_idx).to(device, dtype)\n for n in (1, 2, 1000): # Need large N to trigger all the methods we have implemented\n for other_indices in ([], [1, 3], [2]):\n indices = torch.tensor(other_indices + [padding_idx] * n, dtype=torch.long).to(device)\n pre = embedding.weight[padding_idx].clone()\n embedding(indices).sum().backward()\n after = (embedding.weight + embedding.weight.grad)[padding_idx]\n embedding.zero_grad()\n self.assertEqual(after, pre)\n\n # test double backward\n emb_sum = embedding(indices).sum()\n emb_grad = torch.autograd.grad(outputs=emb_sum, inputs=list(embedding.parameters()), retain_graph=True)\n scalar = emb_grad[0].sum() + emb_sum\n scalar.backward()\n after = (embedding.weight + embedding.weight.grad)[padding_idx]\n embedding.zero_grad()\n self.assertEqual(after, pre)\n\n # Check correctness of torch.nn.functional.embedding_bag forward and\n # backward functions with padding_idx, given a 1D input separated into bags\n # with an offset array. 
Compare against an equivalent 2D input that uses\n # padding indices to fill in the gaps indicated by the offset array\n\n @onlyNativeDeviceTypes\n @dtypes(torch.float32, torch.float64)\n @dtypesIfCUDA(torch.half, torch.bfloat16)\n def test_embedding_bag_1D_padding_idx(self, device, dtype):\n num_features = 3\n max_indices_per_bag = 10\n num_bags = 10\n num_words = 100\n\n def gen_1D_indices_offsets(include_last_offset, allpad):\n indices = []\n offsets = []\n cur_offset = 0\n\n # Make one bag full and one bag empty, for extra coverage\n empty_bag = random.randint(0, num_bags - 1)\n full_bag = empty_bag\n while full_bag == empty_bag:\n full_bag = random.randint(0, num_bags - 1)\n\n for bag in range(num_bags):\n offsets.append(cur_offset)\n if bag == full_bag:\n bag_size = max_indices_per_bag\n elif bag == empty_bag:\n bag_size = 0\n else:\n bag_size = random.randint(1, max_indices_per_bag - 1)\n indices += [1 if allpad else random.randint(0, num_words - 1) for _ in range(bag_size)]\n cur_offset += bag_size\n\n # embedding_bag requires first entry of offsets to be 0\n assert offsets[0] == 0\n\n indices = torch.tensor(indices, device=device)\n\n if include_last_offset:\n offsets.append(indices.size(0))\n\n offsets = torch.tensor(offsets, device=device)\n\n return indices, offsets\n\n # Convert a 1-D indices-offsets representation into 2-D. 
Fill any empty\n # indices with padding_idx\n def gen_2D_indices_from_1D(indices_1D, offsets, include_last_offset, padding_idx):\n assert offsets[0] == 0\n if include_last_offset:\n offsets = offsets[:-1]\n indices_2D = torch.empty(num_bags, max_indices_per_bag, device=device, dtype=torch.long)\n for bag in range(num_bags):\n # Determine the start and end position of the bag within indices_1D\n start = offsets[bag]\n end = len(indices_1D) if bag + 1 == num_bags else offsets[bag + 1]\n end = min(len(indices_1D), end)\n\n # Pull out the bag's indices from indices_1D, and fill any\n # remaining space with padding indices\n indices_in_bag = []\n for item_pos in range(0, max_indices_per_bag):\n if (start + item_pos) < end:\n indices_in_bag.append(indices_1D[start + item_pos])\n else:\n indices_in_bag.append(padding_idx)\n indices_2D[bag] = torch.tensor(indices_in_bag, device=device)\n\n return indices_2D\n\n test_cases = product(['max', 'mean', 'sum'], [False, True], [False, True], [False, True])\n\n for mode, sparse, include_last_offset, allpad in test_cases:\n # Max sparse and bfloat16 are not supported\n if mode == 'max':\n if sparse or (dtype == torch.bfloat16):\n continue\n indices_1D, offsets = gen_1D_indices_offsets(include_last_offset, allpad)\n for padding_idx_1D in list(set(indices_1D.tolist())) + [None]:\n msg = (\n f\"mode: '{mode}', sparse: {sparse}, include_last_offset: {include_last_offset}, \"\n f\"padding_idx_1D: {padding_idx_1D}\")\n\n # If 1D input does not use a padding index, we still need one for the 2D input,\n # so we can add one dummy word to the weights to act as the padded word\n padding_idx_2D = padding_idx_1D if padding_idx_1D is not None else num_words\n num_words_with_padding = num_words if padding_idx_1D is not None else num_words + 1\n\n indices_2D = gen_2D_indices_from_1D(\n indices_1D,\n offsets,\n include_last_offset,\n padding_idx_2D)\n\n weights = torch.randn(\n num_words_with_padding,\n num_features,\n dtype=dtype,\n 
device=device,\n requires_grad=True)\n weights_check = weights.clone().detach().requires_grad_(True)\n\n bag = torch.nn.functional.embedding_bag(\n indices_1D,\n weights,\n offsets,\n padding_idx=padding_idx_1D,\n mode=mode,\n sparse=sparse,\n include_last_offset=include_last_offset)\n\n bag_check = torch.nn.functional.embedding_bag(\n indices_2D,\n weights_check,\n padding_idx=padding_idx_2D,\n mode=mode,\n sparse=sparse)\n self.assertEqual(bag, bag_check, msg=msg)\n\n bag.sum().backward()\n bag_check.sum().backward()\n\n # Sometimes, half dtype gradients mismatch by a greater amount\n # than other dtypes\n if dtype in [torch.half, torch.bfloat16]:\n atol = 0.01\n rtol = 0.01\n else:\n atol = None\n rtol = None\n self.assertEqual(weights.grad, weights_check.grad, msg=msg, atol=atol, rtol=rtol)\n\n # Check correctness of torch.nn.functional.embedding_bag forward and\n # backward functions with padding_idx, given a 2D indices input. Compare\n # against torch.nn.functional.embedding followed by a reduction.\n @onlyNativeDeviceTypes\n @dtypes(torch.float32, torch.float64)\n @dtypesIfCUDA(torch.half, torch.bfloat16)\n def test_embedding_bag_2D_padding_idx(self, device, dtype):\n # Use a Python implementation of embedding_bag with padding_idx support\n # to check torch.nn.functional.embedding_bag correctness\n def embedding_bag_check(indices, weights, mode, sparse, padding_idx):\n assert padding_idx is not None\n embedding = torch.nn.functional.embedding(\n indices,\n weights,\n padding_idx=padding_idx,\n sparse=sparse)\n\n reduction_dim = indices.dim() - 1\n\n if mode == 'sum' or mode == 'mean':\n # We must avoid including elements at padding_idx in the\n # sum/mean, so multiply those elements by 0, and multiply\n # all other elements by 1\n per_sample_weights = indices.ne(padding_idx).to(dtype).unsqueeze(-1)\n res = embedding.mul(per_sample_weights).sum(dim=reduction_dim)\n\n if mode == 'mean':\n weights_sum = per_sample_weights.sum(dim=reduction_dim)\n res = 
res.div(weights_sum)\n\n elif mode == 'max':\n # We must avoid allowing elements at padding_idx to be chosen\n # as the max, so set those elements to negative infinity\n res = embedding.masked_fill(\n indices.unsqueeze(-1) == padding_idx, -float('inf')\n ).amax(dim=reduction_dim)\n\n else:\n raise RuntimeError(f\"mode '{mode}' is not available\")\n\n # If a row is all padding, set its corresponding result row to 0.\n # This is needed because the above mean and max mode\n # implementations set these elements to nan and -inf, respectively\n if mode in ['mean', 'max']:\n res = res.masked_fill(\n indices.eq(padding_idx).all(dim=-1).unsqueeze(-1),\n 0)\n\n return res\n\n num_features = 3\n num_words = 10\n indices_dim1 = 10\n\n for mode, sparse, allpad, indices_dim0 in product(['max', 'mean', 'sum'], [False, True], [False, True], [1, 10]):\n # Max sparse and bfloat16 are not supported\n if mode == 'max':\n if sparse or (dtype == torch.bfloat16):\n continue\n\n if allpad:\n indices = torch.empty(indices_dim0, indices_dim1, dtype=torch.long, device=device).fill_(1)\n else:\n indices = torch.randint(0, num_words, (indices_dim0, indices_dim1), device=device)\n\n if indices_dim0 > 1:\n # Fill one row with duplicate index so we can test with a fully\n # padded row\n duplicate_row = random.randint(0, indices_dim0 - 1)\n indices[duplicate_row] = indices[duplicate_row][0]\n\n for padding_idx in list(set(indices.flatten(0, -1).tolist())):\n weights = torch.randn(num_words, num_features, dtype=dtype, device=device, requires_grad=True)\n weights_check = weights.clone().detach().requires_grad_(True)\n\n msg = (\n f\"mode: '{mode}', sparse: {sparse}, padding_idx: {padding_idx}, \"\n f\"allpad: {allpad}, indices.size(): {indices.size()}\")\n\n # Check forward with a Python implementation of padding_idx embedding_bag\n bag_check = embedding_bag_check(\n indices,\n weights_check,\n mode,\n sparse,\n padding_idx)\n bag = torch.nn.functional.embedding_bag(\n indices,\n weights,\n 
padding_idx=padding_idx,\n mode=mode,\n sparse=sparse)\n\n self.assertEqual(bag, bag_check, msg=msg)\n\n bag_check.sum().backward()\n grad_check = weights_check.grad\n\n bag.sum().backward()\n grad = weights.grad\n\n # Sometimes, half dtype gradients mismatch by a greater amount\n # than other dtypes\n if dtype in [torch.half, torch.bfloat16]:\n atol = 0.01\n rtol = 0.01\n else:\n atol = None\n rtol = None\n self.assertEqual(grad, grad_check, msg=msg, atol=atol, rtol=rtol)\n\n def _slow_masked_softmax(self, input, mask):\n exp = torch.exp(input)\n exp = exp * mask\n s = exp.sum(dim=3, keepdim=True).expand(exp.size())\n return exp / s\n\n def test_masked_softmax(self, device):\n sizes = [(1, 1, 32), (3, 16, 310), (12, 4, 1024), (4, 2, 1200)]\n for (B, num_heads, L) in sizes:\n for dim in [0, 3]:\n input = torch.randn((B, num_heads, L, L))\n mask = torch.randint(0, 2, (B, L))\n mask = mask.reshape(B, 1, 1, L).expand(B, num_heads, L, L).bool()\n if (self.device_type == \"cuda\"):\n input = input.cuda()\n mask = mask.cuda()\n native_res = torch._masked_softmax(input, mask, dim)\n mask = ~mask\n\n def slow_masked_softmax(input, mask):\n exp = torch.exp(input)\n exp = exp * mask\n s = exp.sum(dim=dim, keepdim=True).expand(exp.size())\n return exp / s\n\n pt_res = slow_masked_softmax(input, mask)\n pt_res = torch.nan_to_num(pt_res)\n\n mask_not = mask.logical_not()\n # In result, should only fill the entirely masked out rows since those are non-deterministic (*may* be 0)\n # Converts rows with all True's to False\n mask_out = mask_not.all(dim, keepdim=True).expand(mask_not.shape)\n self.assertEqual(\n pt_res.masked_fill(mask_out, 0),\n native_res.masked_fill(mask_out, 0),\n exact_dtype=True\n )\n\n def _test_masked_softmax_helper(self, input, dim, mask):\n input_ref = input.detach().clone().requires_grad_()\n result = torch._masked_softmax(input, mask, dim)\n\n expected = torch._softmax(input_ref.masked_fill(mask, float('-inf')), dim, False)\n grad = 
torch.randn_like(expected).to(dtype=expected.dtype)\n\n result.backward(grad)\n expected.backward(grad)\n\n # Make sure the optional argument works as well\n if dim == input.dim() - 1:\n input_ref_default = input.detach().clone().requires_grad_()\n result_default = torch._masked_softmax(input_ref_default, mask)\n result_default.backward(grad)\n self.assertEqual(result, result_default)\n self.assertEqual(input.grad, input_ref_default.grad)\n\n # In result, should only fill the entirely masked out rows since those are non-deterministic (*may* be 0)\n # Converts rows with all True's to False\n mask_out = mask.all(dim, keepdim=True).expand(mask.shape)\n self.assertEqual(result.masked_fill(mask_out, 0), expected.masked_fill(mask_out, 0))\n\n self.assertEqual(input.grad, torch.nan_to_num(input_ref.grad))\n self.assertEqual(input.grad, input.grad.masked_fill(mask, 0.0))\n\n def test_masked_softmax_grad(self, device):\n shapes = [(1, 1, 32), (3, 16, 310), (12, 4, 1024), (4, 2, 1200)]\n for shape in shapes:\n dims = [0, len(shape) - 1] if len(shape) > 0 else [0]\n for dim in dims:\n input = torch.randn(shape, requires_grad=True)\n mask = torch.randint(0, 2, shape).bool()\n if (self.device_type == \"cuda\"):\n input = input.cuda().detach().requires_grad_()\n mask = mask.cuda()\n self._test_masked_softmax_helper(input, dim, mask)\n\n # In this test, the forward pass is expected to produce nan's because when dim=0, we only have unspecified values\n def test_masked_softmax_forward_with_nans(self, device):\n dim = 0\n shapes = [(4, 5), (50, 100), (1500, 1200)]\n for (x, y) in shapes:\n input = torch.randn((x, y), requires_grad=True)\n mask = torch.tensor([i % 2 for i in range(y)]).expand((x, y)).bool()\n if (self.device_type == \"cuda\"):\n input = input.cuda().detach().requires_grad_()\n mask = mask.cuda()\n self._test_masked_softmax_helper(input, dim, mask)\n\n @onlyCUDA\n def test_masked_softmax_transformer_layout(self, device):\n B = 211\n num_heads = 16\n L = 42\n input = 
torch.randn((B, num_heads, L, L))\n dim = input.dim() - 1\n mask = torch.randint(0, 2, (B, L))\n if (self.device_type == \"cuda\"):\n input = input.cuda()\n mask = mask.cuda()\n mask = mask.bool()\n native_res = torch._masked_softmax(input, mask, dim)\n mask = mask.reshape(B, 1, 1, L).expand(B, num_heads, L, L)\n mask = ~mask\n mask = mask.float()\n\n pt_res = self._slow_masked_softmax(input, mask)\n self.assertEqual(pt_res, native_res, exact_dtype=True)\n\n @dtypesIfCUDA(torch.half, torch.float)\n @dtypes(torch.float)\n def test_softmax_results(self, device, dtype):\n # Non-even sizes and non-zero shifts test fallback paths in vectorized kernel\n # Note: dim1 > 1024 is needed to exercise the vectorized (non-persistent) path, (16, 30576) is BERT-esque\n sizes = [(0, 10), (32, 20), (10, 0), (31, 20), (32, 21), (31, 23), (32, 1536), (31, 2048), (33, 2049), (16, 30576)]\n shifts = [(0, 0), (1, 0), (0, 1), (1, 1)]\n for fn in [F.softmax, F.log_softmax]:\n for size in sizes:\n for shift in shifts:\n input = torch.rand(size, device=device, dtype=dtype)\n # Note: With the largest tests we can hit upper limit of fp16 when we\n # sum, so scale the input down to stay in a nicer range.\n if dtype == torch.float16:\n input = input / 100.\n input = input[shift[0]:, shift[1]:]\n # Note; Don't want to bprop back through slice op\n input = input.detach().requires_grad_(True)\n ref_input = input.clone().cpu().detach().requires_grad_(True)\n for dim in [0, 1]:\n ref_output = fn(ref_input, dtype=torch.float, dim=dim)\n output = fn(input, dtype=torch.float, dim=dim)\n grad_output = torch.rand(size, device=device, dtype=dtype)\n grad_output = grad_output[shift[0]:, shift[1]:]\n ref_grad_output = grad_output.clone().cpu().detach()\n grad_input, = torch.autograd.grad(output, input, grad_outputs=(grad_output), create_graph=True)\n ref_grad_input, = torch.autograd.grad(ref_output, ref_input,\n grad_outputs=(ref_grad_output), create_graph=True)\n grad_input.sum().backward()\n 
ref_grad_input.sum().backward()\n\n self.assertEqual(output, ref_output)\n self.assertEqual(grad_input, ref_grad_input)\n self.assertEqual(input.grad, ref_input.grad)\n\n @onlyCUDA\n @dtypes(torch.float, torch.half)\n @largeTensorTest(\"20GB\")\n @largeTensorTest(\"90GB\", \"cpu\")\n @precisionOverride({torch.half: 0.001})\n def test_softmax_64bit_indexing(self, device, dtype):\n def run_test(*shape):\n x = torch.randn(shape, device=\"cuda\", dtype=torch.float16, requires_grad=True)\n y = F.log_softmax(x, dim=-1, dtype=dtype)\n y.backward(y)\n with torch.no_grad():\n xx = x.cpu().requires_grad_()\n yy = F.log_softmax(xx.float(), dim=-1).to(dtype)\n yy.backward(yy)\n self.assertEqual(y, yy)\n self.assertEqual(x.grad, xx.grad)\n\n run_test(1100000000, 2) # Illegal memory access https://github.com/pytorch/pytorch/issues/52715\n run_test(2200000000, 1) # invalid configuration argument https://github.com/pytorch/pytorch/issues/52716\n\n @dtypes(torch.float)\n @dtypesIfCUDA(torch.float, torch.half)\n def test_log_softmax_big(self, device, dtype):\n def _test_helper(shape):\n # generate a tensor with big numbers that are exactly representable in dtype\n # and are at a constant offset from tensor with small numbers\n # the logsoftmax of a small and big tensors should be equal\n x_small = torch.randint(100, shape, dtype=dtype, device=device)\n offset = 1.5e3 if dtype == torch.half else 1e7\n x_big = x_small + offset\n self.assertEqual(F.log_softmax(x_small, -1), F.log_softmax(x_big, -1))\n _test_helper((16, 4))\n if self.device_type == 'cuda':\n # test non-persistent softmax kernel\n _test_helper((4, 1536))\n\n @onlyCUDA\n @largeTensorTest('12GB')\n def test_conv_large_nosplit(self, device):\n # Here we just test the convolution correctly route to the fallback implementation\n # that is, it does not crash. 
The correctness of fallback implementation should be\n # covered in other tests\n dtype = torch.half if self.device_type == 'cuda' else torch.float\n conv1 = nn.Conv2d(2, 2, 8, 8).to(device).to(dtype)\n input_large = torch.randn(1, 2, 1024, 1024 * 1024, dtype=dtype, device=device)\n conv1(input_large)\n conv2 = torch.nn.Conv2d(1, 1024, 1, 1).to(device).to(dtype)\n input_large = torch.randn(1, 1, 2048, 1024 , dtype=dtype, device=device)\n conv2(input_large)\n\n def test_conv_noncontig_weights(self, device):\n for dim in (1, 2, 3):\n for grouped in (False, True):\n nc = 3\n groups = 3 if grouped else 1\n w = torch.randn([3] * dim, device=device)\n w = w.expand([nc, int(nc / groups)] + list(w.shape))\n w = w.detach().requires_grad_()\n x = torch.randn([1, nc] + ([5] * dim), device=device, requires_grad=True)\n y = getattr(F, 'conv{}d'.format(dim))(x, w, groups=groups)\n y.sum().backward()\n y = getattr(F, 'conv_transpose{}d'.format(dim))(x, w, groups=groups)\n y.sum().backward()\n\n def test_conv_noncontig_weights_and_bias(self, device):\n # need floats to exercise https://github.com/pytorch/pytorch/issues/16018\n for bias in [True, False]:\n conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=bias).to(device, torch.float)\n\n input_nc = torch.randn((1, 3, 224, 224, 2), device=device, dtype=torch.float)[:, :, :, :, 1]\n input_c = input_nc.contiguous()\n\n weight_nc = torch.randn((64, 3, 7, 7, 2), device=device, dtype=torch.float)[:, :, :, :, 1]\n conv1.weight = nn.Parameter(weight_nc)\n weight_c = conv1.weight.contiguous()\n\n if bias:\n bias_nc = torch.randn((64, 2), device=device, dtype=torch.float)[:, 1]\n conv1.bias = nn.Parameter(bias_nc)\n bias_c = conv1.bias.contiguous()\n\n out1 = conv1(input_nc)\n conv1.weight = nn.Parameter(weight_c)\n if bias:\n conv1.bias = nn.Parameter(bias_c)\n out2 = conv1(input_c)\n self.assertEqual(out1, out2)\n\n def test_save_lstm_compatibility(self, device):\n # Test that saving an LSTM in PyTorch 1.7 and older can 
still be\n # loaded in newer versions of PyTorch.\n model = nn.LSTM(2, 3)\n x = torch.randn(32, 5, 2)\n expected = model(x)\n\n # Get a state dict for PyTorch 1.7 LSTM. Before PyTorch 1.8, proj_size\n # didn't exist.\n assert model.proj_size == 0\n state_dict = model.__dict__\n del state_dict['proj_size']\n\n # load a model\n loaded_model = nn.LSTM(2, 3)\n loaded_model.__setstate__(state_dict)\n result = loaded_model(x)\n self.assertEqual(result, expected)\n\n @onlyCUDA\n @tf32_on_and_off(0.005)\n def test_grid_sample_large(self, device):\n def issue_35202():\n input_tensor = torch.rand(1, 1, 480, 640, dtype=torch.float, device=device, requires_grad=True)\n coords = torch.tensor([[-10059144, 67680944], [67680944, 67680944]], dtype=torch.float, device=device)\n coords = coords.unsqueeze(0).unsqueeze(0).repeat(1, 1, 1, 1)\n result = torch.nn.functional.grid_sample(input_tensor, coords)\n self.assertEqual(result, torch.tensor([[[[0., 0.]]]], dtype=torch.float, device=device))\n result.backward(torch.ones_like(result))\n torch.cuda.synchronize()\n issue_35202()\n\n def issue_24823_1(dtype):\n image = torch.arange(27, 0, -1, dtype=dtype, device=device).view(1, 1, 3, 3, 3)\n image.requires_grad_()\n grid = torch.nn.functional.affine_grid(\n torch.tensor([[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]], dtype=dtype, device=device),\n (1, 1, 3, 3, 3))\n grid[:, 1, 1, 1, 0] = float('inf')\n result = torch.nn.functional.grid_sample(image, grid, padding_mode='zeros')\n self.assertEqual(result, torch.tensor([[[[[27., 26., 25.], [24., 23., 22.], [21., 20., 19.]],\n [[18., 17., 16.], [15., 0., 13.], [12., 11., 10.]],\n [[9., 8., 7.], [6., 5., 4.], [3., 2., 1.]]]]],\n device=device, dtype=dtype))\n result.backward(torch.ones_like(result))\n expected_grad = torch.ones_like(image)\n expected_grad[0, 0, 1, 1, 1] = 0\n self.assertEqual(image.grad, expected_grad, atol=0.005, rtol=0)\n issue_24823_1(torch.half)\n issue_24823_1(torch.float)\n issue_24823_1(torch.double)\n\n def 
issue_24823_2():\n param = torch.tensor([[[-1.0e+20, 0.0, 0.0], [0.0, -1.0e+20, 0.0]]], dtype=torch.float, device=device)\n img = torch.zeros((1, 1, 4, 4), dtype=torch.float, device=device, requires_grad=True)\n grid = torch.nn.functional.affine_grid(param, img.size())\n result = torch.nn.functional.grid_sample(img, grid)\n self.assertEqual(result, torch.zeros(1, 1, 4, 4, device=device, dtype=torch.float))\n result.backward(torch.ones_like(result))\n torch.cuda.synchronize()\n issue_24823_2()\n\n @dtypes(torch.float, torch.double)\n @largeTensorTest(lambda self, device, dtype:\n # Compute sum of the large tensor sizes:\n # (im.numel() + small_image.numel() + small_image.grad.numel() +\n # large_view.grad.numel()) * sizeof(dtype)\n 32769 * (65536 + 3 * 65536 / 128) *\n torch.tensor([], dtype=dtype).element_size())\n def test_grid_sample_large_index_2d(self, device, dtype):\n # Test 64-bit indexing with grid_sample (gh-41656)\n # Try accessing the corners, there should be no segfault\n coords = torch.tensor([[[-1., -1.],\n [+1., -1.]],\n\n [[-1., +1.],\n [+1., +1.]]], device=device, dtype=dtype)\n coords = coords.expand(1, 2, 2, 2)\n im = torch.zeros([1, 1, 32769, 65536], device=device, dtype=dtype)\n\n # Compare sampling with large strides to the same op on a contiguous tensor\n coords = torch.rand(1, 4, 4, 2, device=device, dtype=dtype)\n large_view = im[..., 127::128]\n small_image = torch.rand_like(large_view)\n large_view[...] 
= small_image\n large_view.requires_grad, small_image.requires_grad = True, True\n self.assertTrue(\n sum(i * s for i, s in zip(large_view.size(), large_view.stride())) >= 2 ** 31,\n msg=\"View must use 64-bit indexing\")\n for mode, padding_mode, align_corners in itertools.product(\n ('nearest', 'bilinear', 'bicubic'), ('zeros', 'border', 'reflection'), (True, False)):\n a = F.grid_sample(\n small_image, coords, mode=mode,\n padding_mode=padding_mode, align_corners=align_corners)\n a.sum().backward()\n\n b = F.grid_sample(\n large_view, coords, mode=mode,\n padding_mode=padding_mode, align_corners=align_corners)\n b.sum().backward()\n\n self.assertEqual(a, b)\n self.assertEqual(small_image.grad, large_view.grad)\n\n small_image.grad.zero_()\n large_view.grad.zero_()\n\n @dtypes(torch.float, torch.double)\n @largeTensorTest(lambda self, device, dtype:\n # Compute sum of the large tensor sizes:\n # (im.numel() + small_image.numel() + small_image.grad.numel() +\n # large_view.grad.numel()) * sizeof(dtype)\n 2 * 32769 * (32768 + 3 * 32768 / 128) *\n torch.tensor([], dtype=dtype).element_size())\n def test_grid_sample_large_index_3d(self, device, dtype):\n # Test 64-bit indexing with grid_sample (gh-41656)\n # Try accessing the corners, there should be no segfault\n coords = torch.full((1, 2, 2, 2, 3), 1., device=device, dtype=dtype)\n im = torch.zeros([1, 1, 2, 32769, 32768], device=device, dtype=dtype)\n\n result = F.grid_sample(im, coords, align_corners=False)\n self.assertEqual(result, torch.zeros((1, 1, 2, 2, 2), device=device, dtype=dtype))\n\n # Compare sampling with large strides to the same op on a contiguous tensor\n coords = torch.rand(1, 1, 4, 4, 3, device=device, dtype=dtype)\n large_view = im[..., 127::128]\n small_image = torch.rand_like(large_view)\n large_view[...] 
= small_image\n small_image.requires_grad, large_view.requires_grad = True, True\n self.assertTrue(\n sum(i * s for i, s in zip(large_view.size(), large_view.stride())) >= 2 ** 31,\n msg=\"View must use 64-bit indexing\")\n for mode, padding_mode, align_corners in itertools.product(\n ('nearest', 'bilinear'), ('zeros', 'border', 'reflection'), (True, False)):\n a = F.grid_sample(\n small_image, coords, mode=mode,\n padding_mode=padding_mode, align_corners=align_corners)\n a.sum().backward()\n\n b = F.grid_sample(\n large_view, coords, mode=mode,\n padding_mode=padding_mode, align_corners=align_corners)\n b.sum().backward()\n\n self.assertEqual(a, b)\n self.assertEqual(small_image.grad, large_view.grad)\n\n small_image.grad.zero_()\n large_view.grad.zero_()\n\n @onlyCUDA\n @largeTensorTest('12GB')\n def test_conv_transposed_large(self, device):\n dtype = torch.half if self.device_type == 'cuda' else torch.float\n conv = nn.ConvTranspose2d(1, 1, 1, 1, bias=False).to(device).to(dtype)\n input_large = torch.randn(4096, 1, 512, 1024, dtype=dtype, device=device)\n # forward\n ret = conv(input_large)\n maxdiff0 = (ret.narrow(0, 0, 1024) - conv(input_large.narrow(0, 0, 1024))).abs_().max().item()\n maxdiff1 = (ret.narrow(0, 1024, 1024) - conv(input_large.narrow(0, 1024, 1024))).abs_().max().item()\n maxdiff2 = (ret.narrow(0, 2048, 1024) - conv(input_large.narrow(0, 2048, 1024))).abs_().max().item()\n maxdiff3 = (ret.narrow(0, 3072, 1024) - conv(input_large.narrow(0, 3072, 1024))).abs_().max().item()\n self.assertEqual(maxdiff0, 0)\n self.assertEqual(maxdiff1, 0)\n self.assertEqual(maxdiff2, 0)\n self.assertEqual(maxdiff3, 0)\n\n @onlyCUDA\n @skipCUDAIfRocm\n @largeTensorTest('12GB')\n def test_conv_large(self, device):\n dtype = torch.half if self.device_type == 'cuda' else torch.float\n conv = nn.Conv2d(2, 2, 8, 8, bias=False).to(device).to(dtype)\n input_large = torch.randn(4097, 2, 512, 512, dtype=dtype, device=device)\n # forward\n ret = conv(input_large)\n 
self.assertEqual(ret[:2048], conv(input_large[:2048]))\n self.assertEqual(ret[2048:4096], conv(input_large[2048:4096]))\n self.assertEqual(ret[4096:], conv(input_large[4096:]))\n\n # backward\n conv.zero_grad()\n # When computing the backward, we are using the `max(dim=1)`` to create\n # some sparsity. Without this sparsity, the rounding error would be\n # too large (as large as 1e-5) to satisfy the creterion (1e-6) of `assertEqual`\n ret.view(4097, -1).max(dim=1).values.sum().backward()\n del ret\n grad1 = conv.weight.grad.detach().clone()\n conv.zero_grad()\n conv(input_large[:2048]).view(2048, -1).max(dim=1).values.sum().backward()\n conv(input_large[2048:4096]).view(2048, -1).max(dim=1).values.sum().backward()\n conv(input_large[4096:]).view(1, -1).max(dim=1).values.sum().backward()\n grad2 = conv.weight.grad.detach().clone()\n # gradients are at the order of hundreds, we need to scale it to\n # the order of one so that we can compare\n scale = 1 / grad2.abs().mean()\n grad1 = grad1 * scale\n grad2 = grad2 * scale\n self.assertEqual(grad1, grad2, atol=5e-2, rtol=5e-3)\n\n def _test_gumbel_softmax_st_shapes(self, device, dtype, shape, dim, count_expected):\n logits = torch.randn(shape, dtype=torch.float, device=device)\n logits = logits.to(dtype)\n\n y_draw = F.gumbel_softmax(logits, hard=True, dim=dim)\n\n # All values positive\n self.assertGreaterEqual(y_draw.min(), 0)\n # Shape unchanged\n self.assertTrue(y_draw.shape == logits.shape)\n # One choice per draw\n self.assertEqual(y_draw.sum(), count_expected, atol=torch.finfo(y_draw.dtype).eps, rtol=0)\n\n def _test_gumbel_softmax_straight_through(self, device, dtype):\n num_draws = 100\n\n logits = torch.tensor([[0.2, 0.8, 0.1]], device=device)\n logits = logits.reshape([1, 3])\n logits = logits.to(dtype).requires_grad_()\n probs = logits.softmax(dim=-1)\n\n counts = torch.zeros_like(logits)\n for _ in range(num_draws):\n y_draw = F.gumbel_softmax(logits, hard=True)\n counts = counts + y_draw\n\n # All values 
positive\n self.assertGreaterEqual(y_draw.min(), 0)\n # Each experiment should result in 1 draw.\n self.assertEqual(counts.sum(), num_draws, atol=torch.finfo(counts.dtype).eps, rtol=0)\n\n # check results is asymptotically as expected.\n expected = probs * num_draws\n # ~z is approximately N(0,1) for unbiased count\n z = (counts - expected) / (expected * (1 - probs)).sqrt()\n # A (lazy) approximate 99% two-sided test:\n # occurs with prob alpha~>=0.01 if unbiased\n self.assertLess(z.abs().max().item(), 2.58)\n\n def _test_gumbel_softmax_grad(self, device, dtype):\n # \"hard\" and \"not hard\" should propagate same gradient.\n logits_soft = torch.zeros(10, 10, dtype=dtype, device=device, requires_grad=True)\n logits_hard = torch.zeros(10, 10, dtype=dtype, device=device, requires_grad=True)\n\n seed = torch.random.get_rng_state()\n y_soft = F.gumbel_softmax(logits_soft, hard=False)\n torch.random.set_rng_state(seed)\n y_hard = F.gumbel_softmax(logits_hard, hard=True)\n\n y_soft.sum().backward()\n y_hard.sum().backward()\n\n # 2eps = 1x addition + 1x subtraction.\n tol = 2 * torch.finfo(dtype).eps\n self.assertEqual(logits_soft.grad, logits_hard.grad, atol=tol, rtol=0)\n\n @dtypesIfCUDA(torch.half, torch.float, torch.double)\n @dtypes(torch.float, torch.double)\n def test_gumbel_softmax(self, device, dtype):\n self._test_gumbel_softmax_st_shapes(device, dtype, shape=[5], dim=0, count_expected=1)\n self._test_gumbel_softmax_st_shapes(device, dtype, shape=[5], dim=-1, count_expected=1)\n self._test_gumbel_softmax_st_shapes(device, dtype, shape=[5, 4], dim=1, count_expected=5)\n self._test_gumbel_softmax_st_shapes(device, dtype, shape=[5, 4, 3], dim=1, count_expected=5 * 3)\n self._test_gumbel_softmax_st_shapes(device, dtype, shape=[5, 4, 3], dim=-1, count_expected=5 * 4)\n self._test_gumbel_softmax_straight_through(device, dtype)\n self._test_gumbel_softmax_grad(device, dtype)\n\n def _test_rnn_retain_variables(self, device, dtype):\n rnns = [nn.LSTM(10, 20, 
num_layers=2).to(device, dtype),\n nn.GRU(10, 20, num_layers=2).to(device, dtype),\n nn.RNN(10, 20, num_layers=2).to(device, dtype)]\n for rnn in rnns:\n input = torch.randn(5, 6, 10, device=device, dtype=dtype, requires_grad=True)\n output = rnn(input)\n output[0].sum().backward(retain_graph=True)\n grads = [input.grad.data.clone()] + [p.grad.data.clone() for p in rnn.parameters()]\n for _ in range(4):\n rnn.zero_grad()\n input.grad.data.zero_()\n output[0].sum().backward(retain_graph=True)\n grads2 = [input.grad.data] + [p.grad.data for p in rnn.parameters()]\n self.assertEqual(grads, grads2)\n\n @dtypesIfCUDA(torch.half, torch.float, torch.double)\n @dtypes(torch.double)\n def test_rnn_retain_variables(self, device, dtype):\n self._test_rnn_retain_variables(device, dtype)\n\n if self.device_type == 'cuda' and self.has_cudnn():\n with torch.backends.cudnn.flags(enabled=False):\n self._test_rnn_retain_variables(device, dtype)\n\n @onlyCUDA\n @dtypes(torch.double)\n def test_lstmcell_backward_only_one_output_grad(self, device, dtype):\n # checks that undefined gradients doen't hamper the backward\n # see #11872\n l = torch.nn.LSTMCell(2, 3).to(device).to(dtype=dtype)\n s = torch.randn(1, 2, device=device, dtype=dtype, requires_grad=True)\n for i in range(2):\n out = l(s)[i]\n out.sum().backward()\n self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)\n\n def _test_rnn_mod(self, mod, inp):\n def flatten_out(mod, inp):\n out = mod(inp)\n return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])\n gradcheckfunc = partial(flatten_out, mod)\n with torch.backends.cudnn.flags(enabled=False):\n gradcheck(gradcheckfunc, inp, check_batched_grad=False)\n gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)\n\n if inp.is_cuda and not TEST_WITH_ROCM:\n # Assert that we have good error message around unsupported CuDNN double backward\n # NB: we trigger double backward using .backward() instead of autograd.grad due to\n # 
https://github.com/pytorch/pytorch/issues/37874\n with torch.backends.cudnn.flags(enabled=True):\n result = gradcheckfunc(inp)\n result[0].sum().backward(create_graph=True)\n grad0 = next(mod.parameters()).grad\n with self.assertRaisesRegex(RuntimeError,\n \"please disable the CuDNN backend temporarily\"):\n grad0.sum().backward()\n\n # Here we avoid the backward(create_graph=True) memory leak\n # described in https://github.com/pytorch/pytorch/issues/7343\n for param in mod.parameters():\n param.grad = None\n inp.grad = None\n\n # Merge into OpInfo?\n @skipMeta # LSTM cell reuses output which was resized\n @dtypes(torch.double)\n def test_LSTM_grad_and_gradgrad(self, device, dtype):\n hsize = 4\n inp = torch.rand(1, 3, hsize, device=device, dtype=dtype, requires_grad=True)\n for bias in [True, False]:\n mod = torch.nn.LSTM(hsize, hsize, bias=bias).to(device).to(dtype)\n self._test_rnn_mod(mod, inp)\n\n @skipMeta # GRU cell reuses output which was resized\n @dtypes(torch.double)\n def test_GRU_grad_and_gradgrad(self, device, dtype):\n hsize = 4\n inp = torch.rand(1, 3, hsize, device=device, dtype=dtype, requires_grad=True)\n for bias in [True, False]:\n mod = torch.nn.GRU(hsize, hsize, bias=bias).to(device).to(dtype)\n self._test_rnn_mod(mod, inp)\n\n @onlyCUDA\n def test_upsamplingNearest1d_launch_config(self, device):\n m = nn.Upsample(scale_factor=2)\n inp = torch.rand(2**25, 1, 1, device=device)\n out = m(inp)\n inp_ref = inp.cpu()\n out_ref = m(inp_ref)\n self.assertEqual(out_ref, out)\n\n @onlyCUDA\n def test_upsamplingNearest2d_launch_config(self, device):\n m = nn.Upsample(scale_factor=2)\n inp = torch.rand(2**25, 1, 1, 1, device=device)\n out = m(inp)\n inp_ref = inp.cpu()\n out_ref = m(inp_ref)\n self.assertEqual(out_ref, out)\n\n @onlyCUDA\n def test_upsamplingNearest3d_launch_config(self, device):\n m = nn.Upsample(scale_factor=2)\n inp = torch.rand(2**25, 1, 1, 1, 1, device=device)\n out = m(inp)\n inp_ref = inp.cpu()\n out_ref = m(inp_ref)\n 
self.assertEqual(out_ref, out)\n\n @unittest.expectedFailure\n @skipIfRocm\n @onlyCUDA\n def test_upsamplingNearest2d_launch_fail(self, device):\n m = nn.Upsample(scale_factor=2)\n # launch grid_y == 2**16 (larger than maximum y-dimension limit 65535)\n inp = torch.rand(1, 1, 2**15, 2**8, device=device)\n out = m(inp)\n\n @onlyCUDA\n @skipCUDAIfNotRocm\n def test_upsamplingNearest2d_launch_rocm(self, device):\n # test_upsamplingNearest2d_launch_fail should run OK on ROCm\n m = nn.Upsample(scale_factor=2)\n inp = torch.rand(1, 1, 2**15, 2**8, device=device)\n out = m(inp)\n\n @onlyCUDA\n @skipCUDAIfCudnnVersionLessThan(7600)\n def test_CTCLoss_cudnn(self, device):\n def _helper(zero_infinity):\n target_lengths = [30, 25, 20]\n input_lengths = [50, 50, 50]\n targets = torch.randint(1, 15, (sum(target_lengths),), dtype=torch.int)\n log_probs = torch.randn(50, 3, 15, dtype=torch.float, device=device).log_softmax(2).requires_grad_()\n\n log_probs_ref = log_probs.detach().clone().requires_grad_()\n\n with torch.backends.cudnn.flags(enabled=True):\n res = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, zero_infinity=zero_infinity)\n res.backward()\n\n expected = ctcloss_reference(log_probs, targets.cuda(), input_lengths, target_lengths).float()\n\n with torch.backends.cudnn.flags(enabled=False):\n res2 = torch.nn.functional.ctc_loss(log_probs_ref, targets.cuda().long(), input_lengths, target_lengths,\n zero_infinity=zero_infinity)\n res2.backward()\n\n self.assertEqual(res, expected)\n self.assertEqual(res2, res)\n self.assertEqual(log_probs.grad, log_probs_ref.grad)\n\n _helper(zero_infinity=True)\n _helper(zero_infinity=False)\n\n def _CTCLoss_gen_losses(self, device, input_length, vocab_size, target_length, reduction, use_module_form):\n batch_size = 1\n log_probs = torch.randn(input_length, batch_size, vocab_size, dtype=torch.float, device=device) \\\n .log_softmax(2).requires_grad_()\n targets = torch.randint(low=1, high=vocab_size - 
1, size=(batch_size, target_length),
                                dtype=torch.int, device=device)
        input_lengths = batch_size * [input_length]
        target_lengths = batch_size * [target_length]

        # Unbatched variants: squeeze out the batch dimension and use scalar
        # length tensors.
        log_probs_no_bd = log_probs.squeeze(1).detach().clone().requires_grad_()
        targets_no_bd = targets.squeeze(0).detach().clone()
        input_lengths_no_bd = torch.tensor(input_length)
        target_lengths_no_bd = torch.tensor(target_length)

        # currently only length 2 and 1 right now, but left flexible for additional potential cases
        log_probs_refs = [log_probs.detach().clone().requires_grad_() for _ in range(2)]
        log_probs_no_bd_refs = [log_probs_no_bd.detach().clone().requires_grad_() for _ in range(1)]

        losses = []
        losses_no_bd = []

        has_cuda = torch.cuda.is_available()
        has_cudnn = has_cuda and 'cuda' in device and self.has_cudnn()
        # cudnn requires a cpu target
        if has_cuda and has_cudnn:
            targets = targets.cpu()
            targets_no_bd = targets_no_bd.cpu()

        # Either the module form (nn.CTCLoss) or the functional form, per the
        # use_module_form flag — both must agree.
        ctc_loss = (
            nn.CTCLoss(reduction=reduction, zero_infinity=True)
            if use_module_form
            else partial(torch.nn.functional.ctc_loss, reduction=reduction, zero_infinity=True)
        )

        with torch.backends.cudnn.flags(enabled=has_cudnn):
            # batched case. log_probs.shape = (T, N, C), targets = (N, S), input_lengths/target_lengths = (N,)
            losses.append(ctc_loss(log_probs_refs[0], targets, input_lengths, target_lengths))
            # batched case. input.shape = (T, N, C), targets = (S,), input_lengths/target_lengths = (N,)
            losses.append(ctc_loss(log_probs_refs[1], targets_no_bd, input_lengths, target_lengths))
            # unbatched case. input.shape = (T, C), targets = (S,), input_lengths/target_lengths = (N,)
            losses_no_bd.append(ctc_loss(log_probs_no_bd_refs[0], targets_no_bd,
                                         input_lengths_no_bd, target_lengths_no_bd))

        for loss in losses + losses_no_bd:
            loss.backward()

        return losses, losses_no_bd, log_probs_refs, log_probs_no_bd_refs

    def _assertEqual_list(self, expected, list_to_compare, atol=None, rtol=None):
        # Assert that every element of list_to_compare equals `expected`.
        for ele in list_to_compare:
            self.assertEqual(expected, ele, atol=atol, rtol=rtol)

    @parametrize_test("reduction", ['none', 'mean', 'sum'])
    @parametrize_test("use_module_form", [True, False])
    def test_CTCLoss_no_batch_dim(self, device, reduction, use_module_form):
        # CTC loss with and without a batch dimension must agree in values,
        # gradients, and shapes (batched results carry the extra dim).
        input_length = 40
        vocab_size = 3
        target_length = 12

        args = self._CTCLoss_gen_losses(device, input_length, vocab_size, target_length, reduction, use_module_form)
        losses, losses_no_bd, log_probs_refs, log_probs_no_bd_refs = args

        # test output values
        self._assertEqual_list(losses[0], losses[1:], atol=1e-4, rtol=0)
        self._assertEqual_list(losses[0].squeeze(0), losses_no_bd, atol=1e-4, rtol=0)

        # test gradient values
        self._assertEqual_list(log_probs_refs[0].grad, [t.grad for t in log_probs_refs[1:]], atol=1e-4, rtol=0)
        self._assertEqual_list(
            log_probs_refs[0].grad.squeeze(1),
            [t.grad for t in log_probs_no_bd_refs],
            atol=1e-4,
            rtol=0,
        )

        # checking the output's shape
        # batch dim case should be (N,). no batch dim case should be ()
        self._assertEqual_list((1,) if reduction == 'none' else (), [loss.shape for loss in losses])
        self._assertEqual_list((), [loss.shape for loss in losses_no_bd])

        # checking the gradient's shape
        # batch dim case should have shape (T, N, C). 
no batch dim case should have shape (T, C)\n self._assertEqual_list((input_length, 1, vocab_size), [t.grad.shape for t in log_probs_refs])\n self._assertEqual_list((input_length, vocab_size), [t.grad.shape for t in log_probs_no_bd_refs])\n\n @onlyCUDA\n @skipCUDAIfNoCudnn\n def test_contig_wrong_stride_cudnn(self, device):\n # x has to have batch_size 1 to test contiguous checks\n x = torch.randn(1, 16, 5, 5, device=device)\n stride = list(x.stride())\n stride[0] = 20\n # change the stride in dimension 0. the tensor is still contiguous because size[0] is 1\n x.set_(x.storage(), 0, x.size(), stride)\n self.assertTrue(x.is_contiguous())\n F.conv_transpose2d(x, torch.randn(16, 1, 1, 1, device=device))\n F.conv2d(x, torch.randn(1, 16, 1, 1, device=device))\n\n @onlyCUDA\n def test_Conv2d_size_1_kernel(self, device):\n x_cpu = torch.randn(2, 3, 5, 5)\n conv_cpu = torch.nn.Conv2d(3, 3, kernel_size=1)\n y_cpu = conv_cpu(x_cpu)\n y = torch.rand_like(y_cpu)\n y_cpu.backward(y)\n\n with cudnn.flags(enabled=False):\n conv_cuda = torch.nn.Conv2d(3, 3, kernel_size=1).to(device)\n conv_cuda.bias.data.copy_(conv_cpu.bias.data)\n conv_cuda.weight.data.copy_(conv_cpu.weight.data)\n y_cuda = conv_cuda(x_cpu.to(device))\n y_cuda.backward(y.to(device))\n\n self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False)\n self.assertEqual(conv_cpu.bias.grad.data, conv_cuda.bias.grad.data, atol=1e-5, rtol=0, exact_device=False)\n self.assertEqual(conv_cpu.weight.grad.data, conv_cuda.weight.grad.data, atol=1e-5, rtol=0, exact_device=False)\n\n @onlyCUDA\n def test_ConvTranspose2d_size_1_kernel(self, device):\n x_cpu = torch.randn(2, 3, 5, 5)\n conv_cpu = torch.nn.ConvTranspose2d(3, 3, kernel_size=1)\n y_cpu = conv_cpu(x_cpu)\n y = torch.rand_like(y_cpu)\n y_cpu.backward(y)\n\n with cudnn.flags(enabled=False):\n conv_cuda = torch.nn.ConvTranspose2d(3, 3, kernel_size=1).to(device)\n conv_cuda.bias.data.copy_(conv_cpu.bias.data)\n conv_cuda.weight.data.copy_(conv_cpu.weight.data)\n 
y_cuda = conv_cuda(x_cpu.to(device))\n y_cuda.backward(y.to(device))\n\n self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False)\n self.assertEqual(conv_cpu.bias.grad.data, conv_cuda.bias.grad.data, atol=1e-5, rtol=0, exact_device=False)\n self.assertEqual(conv_cpu.weight.grad.data, conv_cuda.weight.grad.data, atol=1e-5, rtol=0, exact_device=False)\n\n @onlyCUDA\n def test_ConvTranspose3d_size_1_kernel(self, device):\n x_cpu = torch.randn(2, 3, 3, 5, 5)\n conv_cpu = torch.nn.ConvTranspose3d(3, 3, kernel_size=1)\n y_cpu = conv_cpu(x_cpu)\n y = torch.rand_like(y_cpu)\n y_cpu.backward(y)\n\n with cudnn.flags(enabled=False):\n conv_cuda = torch.nn.ConvTranspose3d(3, 3, kernel_size=1).to(device)\n conv_cuda.bias.data.copy_(conv_cpu.bias.data)\n conv_cuda.weight.data.copy_(conv_cpu.weight.data)\n y_cuda = conv_cuda(x_cpu.to(device))\n y_cuda.backward(y.to(device))\n\n self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False)\n self.assertEqual(conv_cpu.bias.grad.data, conv_cuda.bias.grad.data, atol=1e-5, rtol=0, exact_device=False)\n self.assertEqual(conv_cpu.weight.grad.data, conv_cuda.weight.grad.data, atol=1e-5, rtol=0, exact_device=False)\n\n def _ordered_sequence(self, device, dtype):\n \"\"\"Create ordered list of random sequences\"\"\"\n seqs = [torch.empty(random.randint(1, 6), device=device, dtype=dtype)\n for _ in range(5)]\n seqs = [s.random_(-128, 128) for s in seqs]\n ordered = sorted(seqs, key=len, reverse=True)\n return ordered\n\n def _padded_sequence(self, device, dtype):\n \"\"\"Create Tensor of random padded sequences\"\"\"\n ordered = self._ordered_sequence(device, dtype)\n lengths = [len(i) for i in ordered]\n padded_tensor = rnn_utils.pad_sequence(ordered)\n return padded_tensor, lengths\n\n @onlyCUDA\n def test_device_mask(self, device):\n for enforce_sorted in [True, False]:\n padded, lengths = self._padded_sequence('cpu', torch.float)\n packed = rnn_utils.pack_padded_sequence(\n padded, lengths, 
enforce_sorted=enforce_sorted)\n self.assertFalse(packed.is_cuda)\n packed = packed.to(device)\n self.assertTrue(packed.is_cuda)\n unpacked, _ = rnn_utils.pad_packed_sequence(packed)\n self.assertTrue(unpacked.is_cuda)\n self.assertEqual(unpacked.dtype, torch.float)\n\n @onlyCUDA\n def test_overwrite_module_params_on_conversion_cpu_device(self, device):\n # Test that under the current default settings\n # (`torch.__future__.get_overwrite_module_params_on_conversion() == False`),\n # a view to a module's parameters is not pointing to the same storage as\n # its base variable after converting the module to a different device.\n m = nn.Linear(20, 10)\n mw = m.weight[:]\n m.to(device)\n with torch.no_grad():\n # Without using `torch.no_grad()`, this will leak CUDA memory.\n # (Issue is filed at https://github.com/pytorch/pytorch/issues/21875)\n mw[0][0] = 5\n self.assertTrue(mw[0][0].device.type == \"cpu\")\n self.assertTrue(mw._base[0][0].device.type == \"cuda\")\n\n try:\n torch.__future__.set_overwrite_module_params_on_conversion(True)\n\n # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,\n # a view to a module's parameters is still pointing to the same storage as\n # its base variable after converting the module to a different device.\n m = nn.Linear(20, 10)\n mw = m.weight[:]\n m.to(device)\n with torch.no_grad():\n mw[0][0] = 5\n self.assertTrue(mw[0][0] == mw._base[0][0])\n\n # Test that if `torch.__future__.get_overwrite_module_params_on_conversion() == True`,\n # `cpu_module.to(\"cuda\")` doesn't preserve previous references to\n # `cpu_module`'s parameters or gradients.\n m = nn.Linear(20, 10)\n m.weight.grad = torch.randn(10, 20)\n weight_ref = m.weight\n weight_grad_ref = m.weight.grad\n m.to(device)\n self.assertNotEqual(weight_ref.device, m.weight.device)\n self.assertNotEqual(weight_grad_ref.device, m.weight.grad.device)\n finally:\n torch.__future__.set_overwrite_module_params_on_conversion(False)\n\n @onlyCUDA\n 
@dtypes(*((torch.float, torch.double, torch.bfloat16, torch.half)
              if TEST_WITH_ROCM else (torch.float, torch.double, torch.half)))
    def test_embedding_max_norm_device(self, device, dtype):
        # max_norm renormalization: repeated indices give identical rows and
        # every embedding row has L2 norm <= 1.
        embedding = nn.Embedding(22, 5, max_norm=1.0).to(device, dtype=dtype)
        # nn.Embedding only takes LongTensor as input
        input = torch.tensor([2, 8, 8, 6], device=device, dtype=torch.long)
        output = embedding(input)
        self.assertEqual(output[1], output[2])
        self.assertTrue(output.data.norm(p=2, dim=1).le(1).all())

    @onlyCUDA
    @dtypes(torch.half, torch.float)
    def test_softmax(self, device, dtype):
        # softmax with dtype=torch.float upcast must match a pure-float run
        # bitwise, forward and backward.
        input = torch.rand(32, 100, device=device, dtype=dtype, requires_grad=True)
        inputf = input.to(torch.float).detach().requires_grad_(True)
        out = F.softmax(input, dim=-1, dtype=torch.float)
        outf = F.softmax(inputf, dim=-1)
        # should be bitwise equal
        self.assertEqual(out, outf, atol=0, rtol=0)
        gO = torch.empty_like(outf).uniform_()
        out.backward(gO)
        outf.backward(gO)
        # should be bitwise equal
        self.assertEqual(input.grad, inputf.grad.to(dtype), atol=0, rtol=0)

    @onlyCUDA
    def test_pool3d_size_one_feature_dim(self, device):
        # Tests crazy strides for feature dim of size 1
        x = torch.randn(7, 1, 5, 3, 2, device=device)
        strange_strides = [30, 1234, 6, 2, 1]
        y = x.as_strided(x.size(), strange_strides)
        x = x.cpu().as_strided(x.size(), strange_strides)

        to_test = {
            'max_pool3d': lambda t: F.max_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
            'avg_pool3d': lambda t: F.avg_pool3d(t, (5, 1, 1), stride=(5, 1, 1)),
        }

        for test, fn in to_test.items():
            # Should not crash
            out_y = fn(y)
            out_x = fn(x)
            self.assertEqual(out_y, out_x.to(device), msg=test)

    @onlyCUDA
    @largeTensorTest('6GB')
    def test_pool3d_large_size_int64(self, device):
        # See https://github.com/pytorch/pytorch/issues/52822
        x = torch.randn(70, 32, 100, 100, 100, dtype=torch.half, device=device)
        y = torch.nn.functional.max_pool3d(x, 5)
        torch.cuda.synchronize()

        ref_x = 
x.cpu().float() # max_pool3d_cpu is not implemented for half\n ref_y = torch.nn.functional.max_pool3d(ref_x, 5)\n\n self.assertEqual(y, ref_y, exact_dtype=False)\n\n @onlyCUDA\n def test_AvgPool3d_backward_after_cat_dim1_device(self, device):\n # x has to have batch_size 1 to test contiguous checks\n x = torch.randn(1, 3, 4, 4, 4, device=device, requires_grad=True)\n y = F.avg_pool3d(x, kernel_size=3, padding=1, stride=2)\n\n grad = torch.randn(y.size(), device=device)\n # increase the stride in dimension 0. the tensor is still contiguous because size[0] is 1\n stride = list(grad.stride())\n stride[0] = stride[0] * 2\n grad.set_(grad.storage(), 0, grad.size(), stride)\n assert grad.is_contiguous()\n\n y.backward(grad)\n\n def test_pooling_size_empty(self, device):\n t = torch.rand([1, 2, 3, 4], device=device)\n self.assertRaises(RuntimeError, lambda: F.adaptive_avg_pool1d(t, []))\n self.assertRaises(RuntimeError, lambda: F.adaptive_avg_pool2d(t, []))\n self.assertRaises(RuntimeError, lambda: F.adaptive_avg_pool3d(t, []))\n self.assertRaises(RuntimeError, lambda: F.adaptive_max_pool1d(t, []))\n self.assertRaises(RuntimeError, lambda: F.adaptive_max_pool2d(t, []))\n self.assertRaises(RuntimeError, lambda: F.adaptive_max_pool3d(t, []))\n\n @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))\n def test_embedding_bag_empty_input(self, device, dtypes):\n m = 4\n n = 3\n x = torch.tensor([], device=device, dtype=dtypes[0])\n for sparse in [True, False]:\n Embed = torch.nn.EmbeddingBag(m, n, sparse=sparse)\n Embed.to(device)\n\n output = Embed(input=x, offsets=torch.tensor([0], device=device, dtype=dtypes[1]))\n self.assertEqual(output, torch.zeros_like(output))\n\n output = Embed(input=x, offsets=torch.tensor([0, 0], device=device, dtype=dtypes[1]))\n self.assertEqual(output, torch.zeros_like(output))\n\n @skipCUDAIf(True, \"no out-of-bounds check on CUDA for perf.\")\n @dtypes(*itertools.product((torch.float, torch.double), (torch.int, 
torch.long)))\n @parametrize_test(\"padding_idx\", [None, 0])\n @parametrize_test(\"mode\", [\"sum\", \"mean\", \"max\"])\n def test_embedding_bag_out_of_bounds_idx(self, device, dtypes, padding_idx, mode):\n padding_idx = 0\n w_dtype, idx_dtype = dtypes\n # negative out-of-bound\n idx1 = torch.tensor([[-1, 1]], device=device, dtype=idx_dtype)\n # positive out-of-bound\n idx2 = torch.tensor([[11, 8]], device=device, dtype=idx_dtype)\n weight = torch.randn(10, 2, device=device, dtype=w_dtype)\n if mode == 'sum':\n # Only `sum` supports per_sample_weight\n per_sample_weights = (None, torch.randn_like(idx1, device=device, dtype=w_dtype))\n else:\n per_sample_weights = (None,)\n\n for p_s_weights, idx in itertools.product(per_sample_weights, (idx1, idx2)):\n msg = \"Expected idx >= 0 && idx < num_embeddings\"\n with self.assertRaisesRegex(RuntimeError, msg):\n torch.nn.functional.embedding_bag(idx, weight,\n per_sample_weights=p_s_weights, padding_idx=padding_idx,\n mode=mode)\n\n @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))\n def test_EmbeddingBag_per_sample_weights_failures(self, device, dtypes):\n # Failure 1: mismatched embeddings / per_sample_weights dtype\n es = nn.EmbeddingBag(5, 2, mode='sum').to(dtype=torch.float, device=device)\n input = torch.tensor([3, 1, 1, 1, 4, 0], dtype=dtypes[0], device=device)\n offsets = torch.tensor([0, 0, 3, 3, 6], dtype=dtypes[1], device=device)\n per_sample_weights = torch.randn_like(input, dtype=torch.double, device=device)\n if device == 'cpu':\n with self.assertRaisesRegex(RuntimeError, 'have the same type as'):\n es(input, offsets, per_sample_weights)\n else:\n with self.assertRaisesRegex(RuntimeError, 'expected scalar type'):\n es(input, offsets, per_sample_weights)\n\n # Failure 2.1: input/per_sample_weights have different sizes (1d input)\n input = torch.tensor([3, 1, 1, 1, 4, 0], dtype=dtypes[0], device=device)\n offsets = torch.tensor([0, 0, 3, 3, 6], dtype=dtypes[1], device=device)\n 
per_sample_weights = torch.randn(5, dtype=torch.float, device=device)
        with self.assertRaisesRegex(ValueError, 'same shape as the input'):
            es(input, offsets, per_sample_weights)

        # Failure 2.2: input/per_sample_weights have different sizes (2d input)
        input = torch.randint(5, (7, 3), dtype=dtypes[0], device=device)
        offsets = None
        per_sample_weights = torch.randn(7 * 3, dtype=torch.float, device=device)
        with self.assertRaisesRegex(ValueError, 'same shape as the input'):
            es(input, offsets, per_sample_weights)

        # Failure 3: Unsupported per_sample_weights and mode=('max', 'mean')
        for unsupported_mode in ('max', 'mean'):
            es = nn.EmbeddingBag(5, 2, mode=unsupported_mode).to(
                dtype=torch.float, device=device)
            input = torch.randint(5, (7, 3), dtype=dtypes[0], device=device)
            offsets = None
            per_sample_weights = torch.randn(7, 3, dtype=torch.float, device=device)
            with self.assertRaisesRegex(NotImplementedError,
                                        "only supported for mode='sum'"):
                es(input, offsets, per_sample_weights)

    def _embedding_bag_reference_impl(self, input, weight, offsets=None, mode='sum',
                                      per_sample_weights=None, include_last_offset=False):
        # Pure-Python reference for EmbeddingBag: gathers rows of `weight` by
        # `input`, scales by per_sample_weights, and reduces each bag
        # (delimited by `offsets`) with sum/mean/max. Used as ground truth by
        # the EmbeddingBag tests.
        assert mode == 'sum' or per_sample_weights is None
        assert offsets is not None
        if per_sample_weights is None:
            # default: uniform weight of 1 per element
            per_sample_weights = torch.ones(input.size()).to(
                dtype=weight.dtype, device=weight.device
            )
        assert input.numel() == per_sample_weights.numel()

        bags = []
        long_input = input.to(torch.long)
        embeddings = weight.index_select(0, long_input) * per_sample_weights.unsqueeze(1)
        if include_last_offset:
            # offsets[-1] is the end of the last bag, so iterate pairs.
            for index in range(len(offsets) - 1):
                offset = offsets[index]
                next_offset = offsets[index + 1]
                length = next_offset - offset
                if length == 0:
                    # empty bag reduces to a zero row
                    bags.append(
                        torch.tensor([0] * weight.size(1)).to(
                            dtype=embeddings.dtype, device=embeddings.device
                        )
                    )
                else:
                    if mode == 'sum':
                        bags.append(embeddings.narrow(0, offset, length).sum(0))
                    elif mode == 'mean':
                        bags.append(embeddings.narrow(0, offset, length).sum(0).div(length))
                    else:
                        assert mode == 'max'
                        bags.append(embeddings.narrow(0, offset, length).max(0)[0])
        else:
            # last bag extends to the end of the input
            for index, offset in enumerate(offsets):
                if index + 1 < len(offsets):
                    next_offset = offsets[index + 1]
                else:
                    next_offset = len(long_input)
                length = next_offset - offset
                if length == 0:
                    bags.append(
                        torch.tensor([0] * weight.size(1)).to(
                            dtype=embeddings.dtype, device=embeddings.device
                        )
                    )
                else:
                    if mode == 'sum':
                        bags.append(embeddings.narrow(0, offset, length).sum(0))
                    elif mode == 'mean':
                        bags.append(embeddings.narrow(0, offset, length).sum(0).div(length))
                    else:
                        assert mode == 'max'
                        bags.append(embeddings.narrow(0, offset, length).max(0)[0])
        return torch.stack(bags)

    @skipMeta
    @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.half, torch.float, torch.double)))
    def test_EmbeddingBag_empty_per_sample_weights_and_offsets(self, device, dtypes):
        # Test empty input and per sample weight, and backward pass. 
There was a CUDA\n # invalid configuration bug (more context in #46572)\n def test_per_sample_weights(mode, trainable_scale):\n es = nn.EmbeddingBag(5, 2, mode=mode).to(dtype=dtypes[2], device=device)\n es.weight.data.copy_(\n torch.arange(1, 11, device=device).view_as(es.weight).to(dtypes[2]))\n input = torch.tensor([], device=device, dtype=dtypes[0])\n offsets = torch.tensor([0, 0, 0, 0, 0], device=device, dtype=dtypes[1])\n per_sample_weights = torch.randn_like(input, dtype=dtypes[2]) \\\n .requires_grad_(trainable_scale)\n ref_per_sample_weights = \\\n per_sample_weights.detach().requires_grad_(trainable_scale)\n reference_weights = es.weight.detach().requires_grad_()\n\n expected = self._embedding_bag_reference_impl(\n input, reference_weights, offsets, mode, ref_per_sample_weights)\n result = es(input, offsets, per_sample_weights)\n self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)\n\n grad = torch.randn_like(expected)\n result.backward(grad)\n # the reference impl doesn't have grad fn for empty input; but the grad should\n # simply be a zero tensor\n ref_weights_grad = torch.zeros_like(es.weight)\n self.assertEqual(es.weight.grad, ref_weights_grad,\n atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)\n if trainable_scale:\n ref_per_sample_weights_grad = torch.empty_like(per_sample_weights)\n self.assertEqual(per_sample_weights.grad, ref_per_sample_weights_grad,\n atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)\n\n modes = ('sum',)\n trainable_scale = (True, False)\n for mode, trainable in itertools.product(modes, trainable_scale):\n test_per_sample_weights(mode, trainable)\n\n @skipMeta\n @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))\n def test_EmbeddingBag_per_sample_weights_and_offsets(self, device, dtypes):\n def test_per_sample_weights(mode, trainable_scale):\n es = nn.EmbeddingBag(5, 2, mode=mode).to(dtype=dtypes[2], device=device)\n es.weight.data.copy_(\n 
torch.arange(1, 11, device=device).view_as(es.weight).to(dtypes[2]))\n input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtypes[0])\n offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=dtypes[1])\n per_sample_weights = torch.randn_like(input, dtype=dtypes[2]) \\\n .requires_grad_(trainable_scale)\n ref_per_sample_weights = \\\n per_sample_weights.detach().requires_grad_(trainable_scale)\n reference_weights = es.weight.detach().requires_grad_()\n\n expected = self._embedding_bag_reference_impl(\n input, reference_weights, offsets, mode, ref_per_sample_weights)\n result = es(input, offsets, per_sample_weights)\n self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)\n\n grad = torch.randn_like(expected).to(dtype=dtypes[2], device=device)\n result.backward(grad)\n expected.backward(grad)\n self.assertEqual(es.weight.grad, reference_weights.grad,\n atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)\n if trainable_scale:\n self.assertEqual(per_sample_weights.grad, ref_per_sample_weights.grad,\n atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)\n\n modes = ('sum',)\n trainable_scale = (True, False)\n for mode, trainable in itertools.product(modes, trainable_scale):\n test_per_sample_weights(mode, trainable)\n\n @skipMeta\n @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))\n def test_EmbeddingBag_per_sample_weights_and_new_offsets(self, device, dtypes):\n def test_per_sample_weights_new_offsets(mode, trainable_scale, include_last_offset, has_weight=True):\n es = nn.EmbeddingBag(5, 2, mode=mode, include_last_offset=include_last_offset).to(dtype=dtypes[2], device=device)\n es.weight.data.copy_(\n torch.arange(1, 11, device=device).view_as(es.weight).to(dtypes[2]))\n input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtypes[0])\n offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=dtypes[1])\n\n if include_last_offset:\n offsets = torch.cat((offsets, 
torch.tensor([input.size(0)], device=device, dtype=dtypes[1])), 0)\n\n if has_weight:\n per_sample_weights = torch.randn_like(input, device=device, dtype=dtypes[2]) \\\n .requires_grad_(trainable_scale)\n ref_per_sample_weights = \\\n per_sample_weights.detach().requires_grad_(trainable_scale)\n else:\n per_sample_weights = None\n ref_per_sample_weights = None\n\n reference_weights = es.weight.detach().requires_grad_()\n\n expected = self._embedding_bag_reference_impl(\n input, reference_weights, offsets, mode, ref_per_sample_weights, include_last_offset)\n result = es(input, offsets, per_sample_weights)\n self.assertEqual(result, expected, atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)\n\n grad = torch.randn_like(expected)\n result.backward(grad)\n expected.backward(grad)\n self.assertEqual(es.weight.grad, reference_weights.grad,\n atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)\n if has_weight and trainable_scale:\n self.assertEqual(per_sample_weights.grad, ref_per_sample_weights.grad,\n atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)\n\n trainable_scale = (True, False)\n include_last_offset = (True, False)\n modes = (('sum', False), ('sum', True), ('max', False), ('mean', False))\n for (mode, has_weight), trainable, include_last_offset in itertools.product(\n modes, trainable_scale, include_last_offset\n ):\n test_per_sample_weights_new_offsets(\n mode, trainable, include_last_offset, has_weight\n )\n\n def _test_EmbeddingBag_vs_Embedding(self, N, D, B, L, max_norm=None,\n mode='mean',\n device='cpu',\n wdtype=torch.float,\n dtype=torch.long,\n test_per_sample_weights=False,\n trainable_per_sample_weights=False,\n sparse=False,\n test_backward=True,\n backward_prec=None):\n es = nn.EmbeddingBag(N, D, mode=mode, sparse=sparse, max_norm=max_norm).to(device, wdtype)\n e = nn.Embedding(N, D, max_norm=max_norm).to(device, wdtype)\n e.weight.data.copy_(es.weight)\n input = torch.randint(N, (B, L), device=device, dtype=dtype)\n offsets = torch.arange(0, B, device=device, 
dtype=dtype).mul_(L)\n grad_output = torch.rand(B, D, device=device, dtype=wdtype)\n\n if test_per_sample_weights:\n # To prevent large gradients, weights should sum to 1 for each bag\n per_sample_weights = \\\n torch.randn(B, L, device=device, dtype=wdtype).softmax(dim=-1)\n per_sample_weights_reference = \\\n per_sample_weights.clone().requires_grad_(trainable_per_sample_weights)\n per_sample_weights.requires_grad_(trainable_per_sample_weights)\n output = es(input.view(-1), offsets, per_sample_weights.view(-1))\n else:\n output = es(input.view(-1), offsets)\n per_sample_weights = None\n per_sample_weights_reference = None\n\n if mode == 'sum':\n if test_per_sample_weights:\n ref_output = (e(input) * per_sample_weights_reference.unsqueeze(-1)).sum(1)\n else:\n ref_output = e(input).sum(1)\n elif mode == 'mean':\n assert not test_per_sample_weights\n ref_output = e(input).mean(1)\n elif mode == 'max':\n assert not test_per_sample_weights\n ref_output = e(input).max(1)[0]\n\n self.assertEqual(output, ref_output, atol=dtype2prec_DONTUSE[wdtype], rtol=0)\n\n if not test_backward:\n return\n\n output.backward(grad_output)\n ref_output.backward(grad_output)\n es_weight_grad = es.weight.grad.data\n if sparse:\n es_weight_grad = es.weight.grad.data.to_dense()\n\n # We have more floating point error here because we are dealing with larger numbers\n if backward_prec is None:\n needed_prec = dtype2prec_DONTUSE[wdtype] * 5\n else:\n needed_prec = backward_prec\n\n self.assertEqual(es_weight_grad, e.weight.grad, atol=needed_prec, rtol=0)\n\n if test_per_sample_weights and trainable_per_sample_weights:\n self.assertEqual(per_sample_weights.grad, per_sample_weights_reference.grad,\n atol=dtype2prec_DONTUSE[wdtype], rtol=0)\n\n @skipCUDAIf(True, \"Temporarily disabled. 
See t54369166\")\n @dtypesIfCUDA(*itertools.product((torch.int, torch.long), (torch.half, torch.float, torch.double)))\n @dtypes(*itertools.product((torch.int, torch.long), (torch.float, torch.double)))\n def test_EmbeddingBag_per_sample_weights_and_no_offsets(self, device, dtypes):\n def run_tests(mode, sparse, trainable_per_sample_weights):\n kwargs = dict(test_per_sample_weights=True, device=device,\n mode=mode, wdtype=dtypes[1], dtype=dtypes[0], sparse=sparse,\n trainable_per_sample_weights=trainable_per_sample_weights)\n\n # Simple case\n self._test_EmbeddingBag_vs_Embedding(2, 3, 5, 7, **kwargs)\n\n # B * L > 1000\n self._test_EmbeddingBag_vs_Embedding(2, 5, 53, 23, **kwargs)\n\n # Large num_embedding\n self._test_EmbeddingBag_vs_Embedding(101, 5, 3, 7, **kwargs)\n\n # Large embedding_dim\n self._test_EmbeddingBag_vs_Embedding(2, 101, 3, 7, **kwargs)\n\n modes = ('sum',)\n sparsity = (True, False)\n trainable_scale = (True, False)\n for mode, sparse, trainable_per_sample_weights in \\\n itertools.product(modes, sparsity, trainable_scale):\n run_tests(mode, sparse, trainable_per_sample_weights)\n\n # Test CUDA Dense on half precision\n if device == 'cuda':\n modes = ('sum',)\n sparsity = (False,)\n trainable_scale = (True, False)\n for mode, sparse, trainable_per_sample_weights in \\\n itertools.product(modes, sparsity, trainable_scale):\n run_tests(mode, sparse, trainable_per_sample_weights)\n\n def _test_EmbeddingBag(\n self,\n device,\n mode,\n sparse,\n wdtype=torch.double,\n dtype=torch.long,\n odtype=torch.long,\n test_backward=True,\n ):\n # check a known test example\n es = nn.EmbeddingBag(5, 2, mode=mode, sparse=sparse).to(device, wdtype)\n es.weight.data.copy_(torch.arange(1, 11, device=device).view_as(es.weight).to(wdtype))\n input = torch.tensor([3, 1, 1, 1, 4, 0], device=device, dtype=dtype)\n offsets = torch.tensor([0, 0, 3, 3, 6], device=device, dtype=odtype)\n\n grad_output = torch.tensor(\n [1, 2,\n 3, 4], device=device, dtype=wdtype).view(2, 
2)\n grad_output_with_empty = torch.tensor(\n [99, 99,\n 1, 2,\n 99, 99,\n 3, 4,\n 99, 99], device=device, dtype=wdtype).view(5, 2)\n\n if mode == \"sum\" or mode == \"mean\":\n denominator = 1 if mode == \"sum\" else 3\n expected_output = torch.tensor(\n [[13, 16],\n [13, 16]], device=device, dtype=wdtype) / denominator\n\n expected_output_with_empty = torch.tensor(\n [[0, 0],\n [13, 16],\n [0, 0],\n [13, 16],\n [0, 0]], device=device, dtype=wdtype) / denominator\n\n expected_grad_weight = torch.tensor(\n [[3, 4],\n [5, 8],\n [0, 0],\n [1, 2],\n [3, 4]], device=device, dtype=wdtype) / denominator\n elif mode == \"max\":\n expected_output = torch.tensor(\n [[7, 8],\n [9, 10]], device=device, dtype=wdtype)\n\n expected_output_with_empty = torch.tensor(\n [[0, 0],\n [7, 8],\n [0, 0],\n [9, 10],\n [0, 0]], device=device, dtype=wdtype)\n\n expected_grad_weight = torch.tensor(\n [[0, 0],\n [0, 0],\n [0, 0],\n [1, 2],\n [3, 4]], device=device, dtype=wdtype)\n output = es(input, offsets)\n output.backward(grad_output_with_empty)\n\n es_weight_grad = es.weight.grad.data\n if sparse:\n es_weight_grad = es.weight.grad.to_dense()\n self.assertEqual(output, expected_output_with_empty)\n self.assertEqual(es_weight_grad, expected_grad_weight, atol=dtype2prec_DONTUSE[wdtype], rtol=0)\n\n # check same example except as 2D (2 x 3)\n input = input.view(2, -1)\n es.zero_grad()\n output = es(input)\n output.backward(grad_output)\n\n es_weight_grad = es.weight.grad\n if sparse:\n es_weight_grad = es.weight.grad.to_dense()\n self.assertEqual(output, expected_output)\n self.assertEqual(es_weight_grad, expected_grad_weight, atol=dtype2prec_DONTUSE[wdtype], rtol=0)\n\n # test all empty bags\n es.zero_grad()\n inputs = torch.tensor([], dtype=dtype, device=device)\n offsets = torch.tensor([0, 0, 0, 0], dtype=odtype, device=device)\n es(inputs, offsets).sum().backward()\n dense_grad = es.weight.grad\n if dense_grad.is_sparse:\n dense_grad = dense_grad.to_dense()\n self.assertEqual(dense_grad, 
torch.zeros_like(es.weight))\n\n # now compare EmbeddingBag vs Embedding + Sum/Mean, for constant bag length\n N, D, B, L = random.randint(1, 100), random.randint(1, 100), random.randint(1, 50), random.randint(1, 50)\n kwargs = dict(mode=mode, sparse=sparse, device=device, wdtype=wdtype, dtype=dtype, test_backward=test_backward)\n self._test_EmbeddingBag_vs_Embedding(N, D, B, L, **kwargs)\n for max_norm in (None, 3):\n for p in itertools.product([1, 2], repeat=4):\n self._test_EmbeddingBag_vs_Embedding(*p, max_norm=max_norm, **kwargs)\n\n # check that giving illegal input combos raises error\n es = nn.EmbeddingBag(10, 20, mode=mode, sparse=sparse)\n input = torch.ones(3, 4, dtype=dtype)\n offset = torch.arange(0, 3, dtype=odtype)\n self.assertRaises(ValueError, lambda: es(input, offset))\n self.assertRaises(ValueError, lambda: es(input.view(-1)))\n offset[0] = 1\n if self.device_type == \"cpu\":\n self.assertRaises(RuntimeError, lambda: es(input.view(-1), offset))\n offset[0] = 0\n offset[-1] = 100\n self.assertRaises(RuntimeError, lambda: es(input.view(-1), offset))\n\n @skipMeta\n @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))\n def test_embedding_bag_device(self, device, dtypes):\n self._test_EmbeddingBag(device, 'sum', False, wdtype=dtypes[2], dtype=dtypes[0], odtype=dtypes[1])\n self._test_EmbeddingBag(device, 'mean', False, wdtype=dtypes[2], dtype=dtypes[0], odtype=dtypes[1])\n self._test_EmbeddingBag(device, 'max', False, wdtype=dtypes[2], dtype=dtypes[0], odtype=dtypes[1])\n\n test_backward = False\n if self.device_type == 'cuda':\n # see 'todo' in test_embedding_bag.\n test_backward = dtypes[2] is not torch.float16\n elif self.device_type == 'cpu':\n # TODO: figure out why precision on sparse embeddings isn't the\n # same as for dense.\n test_backward = dtypes[2] is not torch.float and dtypes[2] is not torch.float16\n\n self._test_EmbeddingBag(\n device,\n 'sum',\n True,\n 
wdtype=dtypes[2],\n dtype=dtypes[0],\n odtype=dtypes[1],\n test_backward=test_backward,\n )\n self._test_EmbeddingBag(\n device,\n 'mean',\n True,\n wdtype=dtypes[2],\n dtype=dtypes[0],\n odtype=dtypes[1],\n test_backward=test_backward,\n )\n\n @skipMeta\n @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long), (torch.float, torch.double, torch.half)))\n def test_embedding_bag_non_contiguous_weight(self, device, dtypes):\n weight_tensor = torch.randn(3, 4, dtype=dtypes[2], device=device)\n\n weight_tensor_non_contig = weight_tensor[:, :3] # This is non-contiguous strided.\n weight_tensor_contig = weight_tensor_non_contig.clone().contiguous() # Contig-strided.\n\n index = torch.tensor([0, 1, 2], dtype=dtypes[0], device=device)\n offsets = torch.tensor([0, 2], dtype=dtypes[1], device=device)\n for mode in ['sum', 'mean', 'max']:\n output_non_contig = F.embedding_bag(\n input=index,\n weight=weight_tensor_non_contig,\n offsets=offsets,\n mode=mode,\n )\n output_contig = F.embedding_bag(\n input=index,\n weight=weight_tensor_contig,\n offsets=offsets,\n mode=mode,\n )\n self.assertEqual(output_non_contig, output_contig)\n\n @onlyCUDA\n @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))\n def test_embedding_bag_bfloat16(self, device, dtypes):\n self._test_EmbeddingBag(device, 'sum', True, wdtype=torch.bfloat16, dtype=dtypes[0], odtype=dtypes[1], test_backward=True)\n self._test_EmbeddingBag(device, 'mean', True, wdtype=torch.bfloat16, dtype=dtypes[0], odtype=dtypes[1], test_backward=True)\n\n @onlyNativeDeviceTypes # currently fails on XLA\n @dtypes(*itertools.product((torch.int, torch.long), (torch.int, torch.long)))\n def test_embedding_bag_half(self, device, dtypes):\n self._test_EmbeddingBag(device, 'sum', True, wdtype=torch.float16, dtype=dtypes[0], odtype=dtypes[1], test_backward=True)\n\n @onlyCUDA\n @dtypes(torch.half, torch.float, torch.double)\n def test_multihead_attention_dtype(self, device, dtype):\n embed_dim 
= 128\n num_heads = 8\n sl = 10\n bs = 8\n model = nn.MultiheadAttention(embed_dim, num_heads).cuda().to(dtype)\n q = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)\n k = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)\n v = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)\n out = model(q, k, v)\n self.assertEqual(q.size(), out[0].size())\n self.assertEqual(dtype, out[0].dtype)\n\n @onlyCUDA\n @dtypes(torch.half, torch.float, torch.double)\n def test_multihead_attention_dtype_batch_first(self, device, dtype):\n embed_dim = 128\n num_heads = 8\n sl = 10\n bs = 8\n # With batch_first=True, we have the possibility of hitting\n # the native fast path if we call .eval() and enable inference\n # mode. Test both paths.\n for training in (True, False):\n model = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True).cuda().to(dtype)\n if not training:\n model = model.eval()\n cm = torch.no_grad()\n else:\n cm = contextlib.nullcontext()\n with cm:\n q = torch.randn(bs, sl, embed_dim, device=device, dtype=dtype)\n k = torch.randn(bs, sl, embed_dim, device=device, dtype=dtype)\n v = torch.randn(bs, sl, embed_dim, device=device, dtype=dtype)\n # fast path currently doesn't support weights\n out = model(q, k, v, need_weights=False)\n self.assertEqual(q.size(), out[0].size())\n self.assertEqual(dtype, out[0].dtype)\n\n @dtypesIfCUDA(*floating_types_and(torch.half, *[torch.bfloat16] if AMPERE_OR_ROCM else []))\n @dtypes(torch.float)\n def test_Conv2d_naive_groups(self, device, dtype):\n # Check that grouped convolutions matches two half convolutions\n m = nn.Conv2d(4, 4, kernel_size=3, groups=2).to(device, dtype)\n i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)\n output = m(i)\n grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)\n output.backward(grad_output)\n\n m1 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)\n m1.weight.data.copy_(m.weight.data[:2])\n m1.bias.data.copy_(m.bias.data[:2])\n 
i1 = i.data[:, :2].contiguous().requires_grad_(True)\n output1 = m1(i1)\n output1.backward(grad_output[:, :2].contiguous())\n\n m2 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)\n m2.weight.data.copy_(m.weight.data[2:])\n m2.bias.data.copy_(m.bias.data[2:])\n i2 = i.data[:, 2:].contiguous().requires_grad_(True)\n output2 = m2(i2)\n output2.backward(grad_output[:, 2:].contiguous())\n\n self.assertEqual(output, torch.cat([output1, output2], 1))\n self.assertEqual(i.grad.data,\n torch.cat([i1.grad.data, i2.grad.data], 1),\n atol=dtype2prec_DONTUSE[dtype], rtol=0)\n self.assertEqual(m.bias.grad.data,\n torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),\n atol=dtype2prec_DONTUSE[dtype], rtol=0)\n self.assertEqual(m.weight.grad.data,\n torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),\n atol=dtype2prec_DONTUSE[dtype], rtol=0)\n\n @dtypes(torch.double, torch.cdouble)\n def test_Conv2d_backward_depthwise(self, device, dtype):\n x = torch.randn(2, 2, 4, 20, device=device, dtype=dtype, requires_grad=True)\n weight = torch.randn(2, 1, 3, 5, device=device, dtype=dtype, requires_grad=True)\n\n def conv2d_depthwise(x, weight):\n return torch.nn.functional.conv2d(\n x, weight, bias=None, stride=(1, 10), groups=2)\n\n for cudnn_enabled in [False, True]:\n with torch.backends.cudnn.flags(enabled=cudnn_enabled):\n torch.autograd.gradcheck(conv2d_depthwise, (x, weight))\n\n def _test_batchnorm_grad(self, device, dtype=torch.double):\n bs, n_feat, size_feat = 4, 5, 6\n input = torch.arange(bs * n_feat * size_feat, device=device,\n requires_grad=True, dtype=dtype).view(bs, n_feat, size_feat)\n weight = torch.arange(1, n_feat + 1, device=device, requires_grad=True, dtype=dtype)\n bias = torch.arange(n_feat, device=device, requires_grad=True, dtype=dtype)\n running_mean = 1 - torch.arange(n_feat, device=device, dtype=dtype)\n running_var = 2 * torch.arange(n_feat, device=device, dtype=dtype)\n for training in [False, True]:\n _assertGradAndGradgradChecks(self, 
F.batch_norm, (input, running_mean, running_var, weight, bias,\n training, 0.1, 0.0001))\n\n def test_batchnorm_grad(self, device):\n self._test_batchnorm_grad(device)\n\n if self.device_type == 'cuda' and self.has_cudnn():\n with torch.backends.cudnn.flags(enabled=False):\n self._test_batchnorm_grad(device)\n\n @onlyCUDA\n def test_layernorm_half_precision(self):\n width = 128\n input = torch.rand(1, 5, width, device=\"cuda\", dtype=torch.half) * 0.1\n normalized_shape = (width,)\n weight = torch.ones(width, device=\"cuda\", dtype=torch.half)\n bias = torch.zeros(width, device=\"cuda\", dtype=torch.half)\n eps = 1e-5\n\n output_fp16 = torch.layer_norm(input, normalized_shape, weight, bias, eps)\n output_fp32 = torch.layer_norm(input.float(), normalized_shape, weight.float(), bias.float(), eps).half()\n self.assertEqual(output_fp16, output_fp32, atol=0, rtol=0)\n\n @onlyCUDA\n def test_layernorm_weight_bias(self):\n width = 128\n input = torch.rand(1, 5, width, device=\"cuda\", dtype=torch.float32) * 0.1\n normalized_shape = (width,)\n data = torch.randn(width, device=\"cuda\", dtype=torch.float32)\n weight = torch.ones(width, device=\"cuda\", dtype=torch.float32)\n bias = torch.zeros(width, device=\"cuda\", dtype=torch.float32)\n eps = 1e-5\n\n out_none_weight = torch.layer_norm(input, normalized_shape, None, data, eps)\n out_one_weight = torch.layer_norm(input, normalized_shape, weight, data, eps)\n self.assertEqual(out_none_weight, out_one_weight)\n\n out_none_bias = torch.layer_norm(input, normalized_shape, data, None, eps)\n out_zero_bias = torch.layer_norm(input, normalized_shape, data, bias, eps)\n self.assertEqual(out_none_bias, out_zero_bias)\n\n def test_hardsigmoid_grad(self, device):\n inputs = (torch.randn(4, 16, 16, device=device) - 0.5) * 10\n inputs.requires_grad = True\n self.assertTrue(gradcheck(F.hardsigmoid, (inputs,)))\n\n # currently fails on XLA\n @onlyNativeDeviceTypes\n def test_hardswish_grad(self, device):\n inputs = (torch.randn(4, 16, 
16, device=device) - 0.5) * 10\n inputs.requires_grad = True\n self.assertTrue(gradcheck(F.hardswish, (inputs,)))\n\n\n def _test_batchnorm_eval(self, ndim, device, dtype, module_dtype=None):\n module_dtype = module_dtype or dtype\n module = nn.BatchNorm1d(3).to(device, module_dtype)\n module.eval()\n\n data = torch.rand([3] * ndim, device=device, dtype=dtype, requires_grad=True)\n grad = torch.rand([3] * ndim, device=device, dtype=dtype)\n\n # 1st pass\n res1 = module(data)\n res1.backward(grad)\n grad1 = data.grad.clone()\n\n # 2nd pass\n if data.grad is not None:\n data.grad.data.zero_()\n\n res2 = module(data)\n res2.backward(grad)\n grad2 = data.grad.clone()\n self.assertEqual(res1, res2)\n self.assertEqual(grad1, grad2)\n\n # track_running_stats=False\n module = nn.BatchNorm1d(3, track_running_stats=False).to(device, module_dtype)\n\n data = torch.rand(4, 3, device=device, dtype=dtype, requires_grad=True)\n grad = torch.rand(4, 3, device=device, dtype=dtype)\n\n # 1st pass\n res1 = module(data)\n res1.backward(grad)\n grad1 = data.grad.clone()\n\n # set eval\n module.eval()\n\n # 2nd pass\n if data.grad is not None:\n data.grad.data.zero_()\n\n res2 = module(data)\n res2.backward(grad)\n grad2 = data.grad.clone()\n self.assertEqual(res1, res2)\n self.assertEqual(grad1, grad2)\n\n @dtypes(torch.float)\n @dtypesIfCUDA(torch.float, torch.bfloat16)\n def test_batchnorm_eval(self, device, dtype):\n self._test_batchnorm_eval(2, device, dtype)\n self._test_batchnorm_eval(3, device, dtype)\n\n if self.device_type == 'cuda' and self.has_cudnn():\n with torch.backends.cudnn.flags(enabled=False):\n self._test_batchnorm_eval(2, device, dtype)\n self._test_batchnorm_eval(3, device, dtype)\n\n @onlyCUDA\n @dtypes(torch.bfloat16, torch.half)\n def test_batchnorm_eval_mixed(self, device, dtype):\n # Test bfloat16 input with float module\n self._test_batchnorm_eval(2, device, dtype, torch.float)\n self._test_batchnorm_eval(3, device, dtype, torch.float)\n\n if 
self.device_type == 'cuda' and self.has_cudnn():\n with torch.backends.cudnn.flags(enabled=False):\n self._test_batchnorm_eval(2, device, dtype, torch.float)\n self._test_batchnorm_eval(3, device, dtype, torch.float)\n\n def _test_batchnorm_affine(self, ndim, device, dtype, module_dtype=None):\n # Compare affine against no-op weights and bias\n module_dtype = module_dtype or dtype\n module = nn.BatchNorm1d(3, affine=False).to(device, module_dtype)\n module_affine = nn.BatchNorm1d(3, affine=True).to(device, module_dtype)\n with torch.no_grad():\n module_affine.weight.fill_(1.0)\n module_affine.bias.zero_()\n\n data = torch.rand([3] * ndim, device=device, dtype=dtype, requires_grad=True)\n grad = torch.ones_like(data, requires_grad=False)\n\n # With weights all ones and bias all zeros\n res1 = module_affine(data)\n res1.backward(grad)\n grad1 = data.grad.clone()\n data.grad.zero_()\n\n # Without any weights or bias\n res2 = module(data)\n res2.backward(grad)\n grad2 = data.grad\n\n self.assertEqual(res1, res2)\n self.assertEqual(grad1, grad2)\n\n @dtypes(torch.float)\n @dtypesIfCUDA(torch.float, torch.bfloat16)\n def test_batchnorm_affine(self, device, dtype):\n self._test_batchnorm_affine(2, device, dtype)\n self._test_batchnorm_affine(3, device, dtype)\n\n if self.device_type == 'cuda' and self.has_cudnn():\n with torch.backends.cudnn.flags(enabled=False):\n self._test_batchnorm_affine(2, device, dtype)\n self._test_batchnorm_affine(3, device, dtype)\n\n @onlyCUDA\n @dtypes(torch.bfloat16, torch.half)\n def test_batchnorm_affine_mixed(self, device, dtype):\n cudnn_enabled = [False]\n if self.device_type == 'cuda' and self.has_cudnn():\n # TODO: Test fails with cudnn, see gh-62034\n # cudnn_enabled = [False, True]\n pass\n\n # Test bfloat16 input with float module\n for enabled in cudnn_enabled:\n with torch.backends.cudnn.flags(enabled=enabled):\n self._test_batchnorm_affine(2, device, dtype, torch.float)\n self._test_batchnorm_affine(3, device, dtype, 
torch.float)\n\n def _test_batchnorm_simple_average(self, device, dtype, module_dtype=None):\n module_dtype = module_dtype or dtype\n module = nn.BatchNorm1d(3, momentum=None).to(dtype=module_dtype, device=device)\n zeros = torch.zeros(3, dtype=module_dtype, device=device)\n ones = torch.ones(3, dtype=module_dtype, device=device)\n self.assertEqual(module.running_mean, zeros)\n self.assertEqual(module.running_var, ones)\n\n data1 = torch.rand(4, 3, dtype=dtype, device=device)\n data2 = torch.rand(4, 3, dtype=dtype, device=device)\n\n # 1st pass\n res1 = module(data1)\n running_mean1 = module.running_mean.clone()\n running_var1 = module.running_var.clone()\n self.assertNotEqual(running_mean1, zeros)\n self.assertNotEqual(running_var1, ones)\n\n # reset stats\n module.reset_running_stats()\n self.assertEqual(module.running_mean, zeros)\n self.assertEqual(module.running_var, ones)\n\n # 2nd pass\n res2 = module(data2)\n running_mean2 = module.running_mean.clone()\n running_var2 = module.running_var.clone()\n self.assertNotEqual(running_mean2, zeros)\n self.assertNotEqual(running_var2, ones)\n\n # reset stats\n module.reset_running_stats()\n self.assertEqual(module.running_mean, zeros)\n self.assertEqual(module.running_var, ones)\n\n # 3rd (combined) pass\n res3 = module(data1)\n res4 = module(data2)\n self.assertEqual(res3, res1)\n self.assertEqual(res4, res2)\n self.assertEqual(module.running_mean, (running_mean1 + running_mean2) / 2)\n self.assertEqual(module.running_var, (running_var1 + running_var2) / 2)\n\n @dtypes(torch.float)\n @dtypesIfCUDA(torch.float, torch.bfloat16)\n def test_batchnorm_simple_average(self, device, dtype):\n self._test_batchnorm_simple_average(device, dtype)\n\n if self.device_type == 'cuda' and self.has_cudnn():\n with torch.backends.cudnn.flags(enabled=False):\n self._test_batchnorm_simple_average(device, dtype)\n\n @onlyCUDA\n @dtypes(torch.bfloat16, torch.half)\n def test_batchnorm_simple_average_mixed(self, device, dtype):\n 
self._test_batchnorm_simple_average(device, dtype, torch.float)\n\n if self.device_type == 'cuda' and self.has_cudnn():\n with torch.backends.cudnn.flags(enabled=False):\n self._test_batchnorm_simple_average(device, dtype, torch.float)\n\n def _test_maxpool_indices(self, num_dim, adaptive=False, device=\"cpu\", dtype=torch.float):\n def expected_indices(dim):\n if dim == 1:\n return torch.tensor([1, 3], dtype=torch.double).repeat(2, 2, 1)\n if dim == 2:\n return torch.tensor([[5, 7], [13, 15]], dtype=torch.double).repeat(2, 2, 1, 1)\n\n def expected_grad(dim):\n if dim == 1:\n return torch.tensor([0, 1, 0, 1], dtype=torch.double).repeat(2, 2, 1)\n grad = expected_grad(dim - 1)\n zero = torch.zeros(grad.size())\n return torch.stack((zero, grad, zero, grad), 2)\n\n def expected_output(dim):\n if dim == 1:\n return torch.arange(2, 17, 2).view(2, 2, 2)\n if dim == 2:\n col = torch.arange(6, 63, 8)\n return torch.stack([col, col + 2], 1).view(2, 2, 2, 2)\n\n if adaptive:\n cls_name = 'AdaptiveMaxPool{}d'.format(num_dim)\n else:\n cls_name = 'MaxPool{}d'.format(num_dim)\n module_cls = getattr(nn, cls_name)\n module = module_cls(2, return_indices=True).to(device, dtype=dtype)\n numel = 4 ** (num_dim + 1)\n input = torch.arange(1, numel + 1).view(2, 2, *repeat(4, num_dim)).to(device, dtype=dtype)\n input_var = input.clone().detach().requires_grad_()\n\n # Check forward\n output, indices = module(input_var)\n if num_dim != 3:\n expected_indices = expected_indices(num_dim)\n expected_output = expected_output(num_dim)\n self.assertEqual(indices.dim(), input.dim())\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(indices.data.squeeze(), expected_indices)\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(output.data.squeeze(), expected_output)\n self.assertTrue(output.requires_grad)\n self.assertFalse(indices.requires_grad)\n\n # Make sure backward works\n grad_output = torch.ones(output.size(), device=device, dtype=dtype)\n output.backward(grad_output, retain_graph=True)\n expected_grad = expected_grad(num_dim)\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(input_var.grad.data, expected_grad.view_as(input))\n\n # Make sure backward after changing indices will result in an error\n indices.add_(1)\n self.assertRaises(RuntimeError, lambda: output.backward(grad_output))\n\n # Make sure -Infinity is handled correctly\n t = torch.tensor([[[float(\"-inf\")]]])\n m = nn.MaxPool1d(kernel_size=1, return_indices=True)\n output, indices = m(t)\n self.assertEqual(output[0, 0, 0], float(\"-inf\"))\n self.assertEqual(indices[0, 0, 0], 0)\n\n t = torch.tensor([[[float(\"-inf\")]]])\n m = nn.MaxPool2d(kernel_size=1, return_indices=True)\n output, indices = m(t)\n self.assertEqual(output[0, 0, 0], float(\"-inf\"))\n self.assertEqual(indices[0, 0, 0], 0)\n\n t = torch.tensor([[[[float(\"-inf\")]]]])\n m = nn.MaxPool3d(kernel_size=1, return_indices=True)\n output, indices = m(t)\n self.assertEqual(output[0, 0, 0, 0], float(\"-inf\"))\n self.assertEqual(indices[0, 0, 0, 0], 0)\n\n @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))\n @dtypes(torch.float)\n def test_MaxPool1d_indices(self, device, dtype):\n self._test_maxpool_indices(1, device=device, dtype=dtype)\n\n @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))\n @dtypes(torch.float)\n def test_MaxPool2d_indices(self, device, dtype):\n self._test_maxpool_indices(2, device=device, dtype=dtype)\n\n @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))\n @dtypes(torch.float)\n def test_MaxPool3d_indices(self, device, dtype):\n self._test_maxpool_indices(3, device=device, dtype=dtype)\n\n 
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))\n @dtypes(torch.float)\n def test_AdaptiveMaxPool1d_indices(self, device, dtype):\n self._test_maxpool_indices(1, adaptive=True, device=device, dtype=dtype)\n\n @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))\n @dtypes(torch.float)\n def test_AdaptiveMaxPool2d_indices(self, device, dtype):\n self._test_maxpool_indices(2, adaptive=True, device=device, dtype=dtype)\n\n @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))\n @dtypes(torch.float)\n def test_AdaptiveMaxPool3d_indices(self, device, dtype):\n self._test_maxpool_indices(3, adaptive=True, device=device, dtype=dtype)\n\n @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))\n @dtypes(torch.float)\n def test_maxpool_indices_no_batch_dim(self, device, dtype):\n \"\"\"Check that indices with no batch dim is consistent with a single batch.\"\"\"\n max_pool_cases = [\n (nn.MaxPool1d(3, return_indices=True),\n torch.randn(3, 5, device=device, dtype=dtype)),\n (nn.MaxPool2d(3, return_indices=True),\n torch.randn(3, 5, 6, device=device, dtype=dtype)),\n (nn.MaxPool3d(3, return_indices=True),\n torch.randn(3, 5, 6, 7, device=device, dtype=dtype)),\n (nn.AdaptiveMaxPool1d(3, return_indices=True),\n torch.randn(3, 5, device=device, dtype=dtype)),\n (nn.AdaptiveMaxPool2d(3, return_indices=True),\n torch.randn(3, 5, 6, device=device, dtype=dtype)),\n (nn.AdaptiveMaxPool3d(3, return_indices=True),\n torch.randn(3, 5, 6, 7, device=device, dtype=dtype))]\n\n for module, input in max_pool_cases:\n _, indices_no_batch = module(input)\n _, indicies_single_batch = module(input.unsqueeze(0))\n self.assertEqual(indices_no_batch, indicies_single_batch.squeeze(0))\n\n\n @dtypesIfCUDA(torch.half, torch.float, torch.double)\n @dtypes(torch.float)\n @onlyNativeDeviceTypes # TODO: Fails on XLA\n def test_max_pool_nan_inf(self, device, dtype):\n for adaptive in ['', 'adaptive_']:\n for num_dim in [1, 2, 3]:\n fn_name = 
'{}max_pool{}d'.format(adaptive, num_dim)\n fn = getattr(F, fn_name)\n\n x = torch.full([1, 1] + num_dim * [3], nan, device=device, dtype=dtype, requires_grad=True)\n res = fn(x, 1 if adaptive else 3)\n res.backward(torch.randn_like(res))\n self.assertTrue(math.isnan(res.item()))\n x.requires_grad_(False)\n res = fn(x, 1 if adaptive else 3)\n self.assertTrue(math.isnan(res.item()))\n\n x2 = torch.full([1, 1] + num_dim * [3], -inf, device=device, dtype=dtype, requires_grad=True)\n res2 = fn(x2, 1 if adaptive else 3)\n res2.backward(torch.randn_like(res2))\n self.assertTrue(math.isinf(res2.item()))\n x2.requires_grad_(False)\n res2 = fn(x2, 1 if adaptive else 3)\n self.assertTrue(math.isinf(res2.item()))\n\n @onlyNativeDeviceTypes\n @dtypes(torch.float, torch.double)\n def test_grid_sample_nan_inf(self, device, dtype):\n input = torch.zeros([1, 1, 3, 3], device=device, dtype=dtype)\n grid = torch.tensor([[[[nan, 0], [0, inf]]]], device=device, dtype=dtype)\n for padding_mode in ('reflection', 'border', 'zeros'):\n sample = torch.nn.functional.grid_sample(input=input, grid=grid, mode='nearest',\n padding_mode=padding_mode, align_corners=False)\n self.assertEqual(sample, torch.zeros([1, 1, 1, 2], device=device, dtype=dtype))\n\n @expectedFailureMeta # RuntimeError: Unrecognized tensor type ID: Meta\n @onlyNativeDeviceTypes\n def test_fractional_max_pool2d(self, device):\n x = torch.randn(1, 2, 7, 7, requires_grad=True, device=device)\n samples = x.new(1, 2, 2).uniform_()\n\n def func(x):\n return F.fractional_max_pool2d(\n x, (2, 2), output_size=(3, 3), _random_samples=samples)\n\n self.assertEqual(func(x).shape, (1, 2, 3, 3))\n gradcheck(func, [x])\n gradgradcheck(func, [x])\n\n x = torch.randn(2, 7, 7, requires_grad=True, device=device)\n self.assertEqual(func(x).shape, (2, 3, 3))\n if self.device_type != 'cuda':\n # Reference: https://github.com/pytorch/pytorch/issues/52427\n # Raises -> RuntimeError: TensorAccessor expected 4 dims but tensor has 3\n # on CUDA in 
gradcheck\n gradcheck(func, [x])\n gradgradcheck(func, [x])\n\n for kernel_size in [(), (1,)]:\n with self.assertRaisesRegex(RuntimeError, \"kernel_size must either\"):\n # Incorrect kernel_size\n F.fractional_max_pool2d(x, kernel_size=kernel_size, output_size=(3, 3), _random_samples=samples)\n\n err_large_msg = \"too large relative to input \"\n err_out_size_msg = \"output_size must either\"\n for output_size, msg in [((9, 3), err_large_msg + \"height\"),\n ((3, 9), err_large_msg + \"width\"),\n ((3,), err_out_size_msg),\n ((), err_out_size_msg)]:\n with self.assertRaisesRegex(RuntimeError, msg):\n # Incorrect output_size\n F.fractional_max_pool2d(x, (2, 2), output_size=output_size, _random_samples=samples)\n\n @expectedFailureMeta # RuntimeError: Unrecognized tensor type ID: Meta\n @onlyNativeDeviceTypes\n def test_fractional_max_pool3d(self, device):\n x = torch.randn(1, 2, 7, 7, 7, requires_grad=True, device=device)\n samples = x.new(1, 2, 3).uniform_()\n\n def func(x):\n return F.fractional_max_pool3d(\n x, (2, 2, 2), output_size=(3, 3, 3), _random_samples=samples)\n\n self.assertEqual(func(x).shape, (1, 2, 3, 3, 3))\n gradcheck(func, [x])\n gradgradcheck(func, [x])\n\n x = torch.randn(2, 7, 7, 7, requires_grad=True, device=device)\n self.assertEqual(func(x).shape, (2, 3, 3, 3))\n gradcheck(func, [x])\n gradgradcheck(func, [x])\n\n for kernel_size in [(), (1,), (1, 1)]:\n with self.assertRaisesRegex(RuntimeError, \"kernel_size must either\"):\n # Incorrect kernel_size\n F.fractional_max_pool3d(x, kernel_size=kernel_size, output_size=(3, 3, 3), _random_samples=samples)\n\n err_large_msg = \"too large relative to input \"\n err_out_size_msg = \"output_size must either\"\n for output_size, msg in [((9, 3, 3), err_large_msg + \"time\"),\n ((3, 9, 3), err_large_msg + \"height\"),\n ((3, 3, 9), err_large_msg + \"width\"),\n ((3, 3), err_out_size_msg),\n ((3,), err_out_size_msg),\n ((), err_out_size_msg)]:\n with self.assertRaisesRegex(RuntimeError, msg):\n # 
Incorrect output_size\n F.fractional_max_pool3d(x, (2, 2, 2), output_size=output_size, _random_samples=samples)\n\n @dtypesIfCUDA(torch.half, torch.float, torch.double)\n @dtypes(torch.float)\n @onlyNativeDeviceTypes # TODO: Fails on XLA\n def test_fractional_max_pool_nan_inf(self, device, dtype):\n for num_dim in [2, 3]:\n fn_name = 'FractionalMaxPool{}d'.format(num_dim)\n fn = getattr(nn, fn_name)(kernel_size=2, output_size=1)\n x = torch.full([1, 1] + num_dim * [3], nan, device=device, dtype=dtype, requires_grad=True)\n res = fn(x)\n res.backward(torch.randn_like(res))\n self.assertTrue(math.isnan(res.item()))\n\n x2 = torch.full([1, 1] + num_dim * [3], -inf, device=device, dtype=dtype, requires_grad=True)\n res2 = fn(x2)\n res2.backward(torch.randn_like(res2))\n self.assertTrue(math.isinf(res2.item()))\n\n @onlyNativeDeviceTypes # TODO: RuntimeError message different on XLA\n def test_pooling_zero_stride(self, device):\n for op in ('max', 'avg'):\n for num_dim in [1, 2, 3]:\n fn_name = '{}_pool{}d'.format(op, num_dim)\n fn = getattr(F, fn_name)\n x = torch.ones([1, 2] + num_dim * [4], device=device, dtype=torch.float)\n self.assertRaisesRegex(RuntimeError, r\"stride should not be zero|stride must be greater than zero\",\n lambda: fn(x, kernel_size=2, stride=0))\n\n fn_module_name = '{}Pool{}d'.format(op.title(), num_dim)\n fn_module = getattr(nn, fn_module_name)(kernel_size=2, stride=0)\n self.assertRaisesRegex(RuntimeError, r\"stride should not be zero|stride must be greater than zero\",\n lambda: fn_module(x))\n\n @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))\n @dtypes(torch.float)\n def test_pool_large_size(self, device, dtype):\n for op in ('max', 'avg'):\n for num_dim in [1, 2, 3]:\n fn_name = '{}_pool{}d'.format(op, num_dim)\n fn = getattr(F, fn_name)\n # 16777217 is the smallest integer not expressible in float32\n x = torch.ones([1, 1, 16777217] + (num_dim - 1) * [1],\n device=device, dtype=dtype)\n res = fn(x, 1, stride=1, padding=0)\n 
# check if the output shape was still computed correctly\n self.assertEqual(x.shape[2], res.shape[2])\n\n @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))\n @dtypes(torch.float)\n def test_pool_invalid_size(self, device, dtype):\n for op in ('max', 'avg'):\n for num_dim in [1, 2, 3]:\n fn_name = '{}_pool{}d'.format(op, num_dim)\n if op == 'max':\n # New implementation without indices supports empty tensors\n # TODO(Heitor) change once with_indices code is updated\n fn_name += '_with_indices'\n fn = getattr(F, fn_name)\n # use a configuration that gives zero outputs only\n # when doing a correct floor division by the stride\n x = torch.ones([1, 1] + num_dim * [4],\n device=device, dtype=dtype)\n with self.assertRaisesRegex(RuntimeError, r\"too small|smaller than\"):\n try:\n res = fn(x, 3, stride=2, padding=0, dilation=2)\n except TypeError:\n # some implementations do not support dilation\n res = fn(x, 6, stride=2, padding=0)\n\n def test_CTCLoss_empty_target(self, device):\n target_lengths = [0, 0, 0]\n input_lengths = [50, 50, 50]\n targets = torch.randint(1, 15, (0,), dtype=torch.long, device=device)\n log_probs = torch.randn(50, 3, 15, dtype=torch.double, device=device).log_softmax(2)\n loss = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')\n self.assertTrue((loss >= 0).all().item())\n self.assertEqual(-log_probs.sum(0)[:, 0], loss)\n\n target_lengths = [0, 9, 0]\n input_lengths = [50, 50, 50]\n targets = torch.randint(1, 15, (9,), dtype=torch.long, device=device)\n log_probs = torch.randn(50, 3, 15, dtype=torch.double, device=device).log_softmax(2)\n loss = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')\n self.assertTrue((loss >= 0).all().item())\n self.assertEqual(-log_probs.sum(0)[[0, 2], 0], loss[[0, 2]])\n\n # Merge into OpInfo?\n @skipCUDAIf(True, \"\"\"Test is flaky on Linux and Windows, typical error message:\n 
https://github.com/pytorch/pytorch/issues/34870\"\"\")\n def test_ctc_loss(self, device):\n batch_size = 64\n num_labels = 101\n target_length = 15\n gradcheck_input_size = 10\n\n ZERO_NONE = 0\n ZERO_SOME = 1\n ZERO_ALL = 2\n\n # input_length, vary_lengths, zero_lengths\n tests = [(150, False, ZERO_NONE),\n (150, True, ZERO_NONE),\n (50, True, ZERO_SOME),\n (50, True, ZERO_ALL)]\n\n if 'cuda' in device:\n tests += [(50, False, ZERO_NONE),\n (50, True, ZERO_NONE),\n (150, True, ZERO_SOME),\n (150, True, ZERO_ALL)]\n\n for input_length, vary_lengths, zero_mode in tests:\n targets = torch.randint(1, num_labels, (batch_size, target_length),\n device=device, dtype=torch.long)\n x = torch.randn(gradcheck_input_size, dtype=torch.double, device=device, requires_grad=True)\n tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,\n device=device)\n input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()\n if vary_lengths or i == 0 else input_length) for i in range(batch_size)]\n if zero_mode == ZERO_ALL:\n target_lengths = [0 for _ in range(batch_size)]\n else:\n target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()\n if vary_lengths else target_length) for _ in range(batch_size)]\n if zero_mode == ZERO_SOME:\n idxes = torch.randint(0, batch_size, (10,))\n for i in idxes:\n target_lengths[i] = 0\n\n def ctc_after_softmax(x):\n x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]\n .view(input_length, batch_size, num_labels))\n log_probs = torch.log_softmax(x_full, 2)\n return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)\n\n gradcheck(ctc_after_softmax, [x])\n\n @onlyCUDA\n @skipCUDAIfRocm\n @skipCUDAIfCudnnVersionLessThan(7600)\n def test_ctc_loss_cudnn(self, device):\n batch_size = 16\n input_length = 30\n num_labels = 101\n target_length = 15\n targets = torch.randint(1, num_labels, (batch_size * 
target_length,),\n device='cuda', dtype=torch.long)\n log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels, device='cuda', dtype=torch.float), 2)\n log_probs.requires_grad_()\n\n input_lengths = batch_size * [input_length]\n target_lengths = batch_size * [target_length]\n grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)\n with torch.backends.cudnn.flags(enabled=False):\n loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')\n grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)\n loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),\n input_lengths, target_lengths, reduction='none')\n self.assertTrue(\"Cudnn\" in str(loss_cudnn.grad_fn))\n grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)\n self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)\n\n def test_empty_dropout(self, device):\n x = torch.tensor([]).to(device)\n out = torch.nn.functional.dropout(x)\n self.assertEqual(out.size(), x.size())\n\n @dtypesIfCUDA(torch.half, torch.float, torch.double)\n @dtypes(torch.float)\n @tf32_on_and_off(0.005)\n def test_variable_sequence(self, device, dtype):\n def pad(var, length):\n if var.size(0) == length:\n return var\n return torch.cat([var, var.new_zeros(length - var.size(0), *var.size()[1:])])\n\n def maybe_index_tuple(maybe_tuple_of_tensors, index):\n if maybe_tuple_of_tensors is None:\n return None\n return tuple(maybe_tuple_of_tensors[j][:, index:index + 1, :].contiguous()\n for j in range(2))\n\n def check_lengths(lengths, enforce_sorted, use_default_hiddens, proj_size):\n input_size = 3\n hidden_size = 4\n num_layers = 2\n bidirectional = True\n\n max_length = max(lengths)\n x_leaf = torch.randn(max_length, len(lengths), input_size, device=device,\n dtype=dtype, requires_grad=True)\n num_directions = 2 if bidirectional else 1\n lstm = nn.LSTM(input_size, hidden_size, 
bidirectional=bidirectional,\n num_layers=num_layers, proj_size=proj_size).to(device, dtype)\n lstm2 = deepcopy(lstm).to(device, dtype)\n x = x_leaf\n\n hidden0 = None\n if not use_default_hiddens:\n real_hidden_size = hidden_size if proj_size == 0 else proj_size\n hidden0 = (torch.randn(num_directions * num_layers, len(lengths), real_hidden_size,\n device=device, dtype=dtype),\n torch.randn(num_directions * num_layers, len(lengths), hidden_size,\n device=device, dtype=dtype))\n\n # Compute sequences separately\n seq_outs = []\n seq_hiddens = []\n for i, l in enumerate(lengths):\n hidden_i = maybe_index_tuple(hidden0, i)\n out, hid = lstm2(x[:l, i:i + 1], hidden_i)\n out_pad = pad(out, max_length)\n seq_outs.append(out_pad)\n seq_hiddens.append(hid)\n seq_out = torch.cat(seq_outs, 1)\n seq_hidden = tuple(torch.cat(hids, 1) for hids in zip(*seq_hiddens))\n\n # Use packed format\n packed = rnn_utils.pack_padded_sequence(x, lengths, enforce_sorted=enforce_sorted)\n packed_out, packed_hidden = lstm(packed, hidden0)\n unpacked, unpacked_len = rnn_utils.pad_packed_sequence(packed_out)\n\n # Check forward\n prec = dtype2prec_DONTUSE[dtype]\n self.assertEqual(packed_hidden, seq_hidden, atol=prec, rtol=0)\n self.assertEqual(unpacked, seq_out, atol=prec, rtol=0)\n self.assertEqual(unpacked_len, lengths, atol=prec, rtol=0)\n\n # Check backward\n seq_out.sum().backward()\n grad_x = x_leaf.grad.data.clone()\n x_leaf.grad.data.zero_()\n unpacked.sum().backward()\n\n self.assertEqual(x_leaf.grad, grad_x, atol=dtype2prec_DONTUSE[dtype], rtol=0)\n for p1, p2 in zip(lstm.parameters(), lstm2.parameters()):\n prec = dtype2prec_DONTUSE[dtype]\n if dtype == torch.float16:\n prec = 4e-2\n self.assertEqual(p1.grad, p2.grad, atol=prec, rtol=0)\n\n tests = [\n # enforce_sorted, lengths\n [True, [5]],\n [False, [5]],\n [True, [10, 10, 6, 2, 2, 1, 1]],\n [False, [10, 10, 6, 2, 2, 1, 1]],\n [False, [2, 1, 3, 2, 10, 5, 3]],\n ]\n\n for enforce_sorted, seq_lens, in tests:\n for 
use_default_hiddens in (True, False):\n for proj_size in [0, 2]:\n check_lengths(seq_lens, enforce_sorted, use_default_hiddens, proj_size)\n\n def _test_batchnorm_update_stats(self, device, dtype=torch.float):\n module = nn.BatchNorm1d(3).to(device, dtype)\n\n data = torch.rand(4, 3, device=device, dtype=dtype)\n\n # training pass\n old_running_mean = module.running_mean.clone()\n old_running_var = module.running_var.clone()\n old_num_batches_tracked = module.num_batches_tracked.clone()\n module(data)\n self.assertNotEqual(old_running_mean, module.running_mean)\n self.assertNotEqual(old_running_var, module.running_var)\n self.assertEqual(old_num_batches_tracked + 1, module.num_batches_tracked)\n\n # eval pass\n module.eval()\n old_running_mean = module.running_mean.clone()\n old_running_var = module.running_var.clone()\n old_num_batches_tracked = module.num_batches_tracked.clone()\n module(data)\n self.assertEqual(old_running_mean, module.running_mean)\n self.assertEqual(old_running_var, module.running_var)\n self.assertEqual(old_num_batches_tracked, module.num_batches_tracked)\n\n def test_batchnorm_update_stats(self, device):\n self._test_batchnorm_update_stats(device)\n\n if self.device_type == 'cuda' and self.has_cudnn():\n with torch.backends.cudnn.flags(enabled=False):\n self._test_batchnorm_update_stats(device)\n\n def test_multi_margin_loss_errors(self, device):\n self.assertRaises(RuntimeError,\n lambda: nn.functional.multi_margin_loss(torch.randn(5, device=device),\n torch.zeros(3, device=device)))\n\n @onlyCPU\n def test_activations_bfloat16_cpu(self, device):\n def test_bfloat16(fn, device, inp_dims, prec):\n # bfloat16 compute\n input = torch.randn(inp_dims, dtype=torch.bfloat16, device=device, requires_grad=True)\n out = fn(input)\n grad_input = torch.randn_like(out, dtype=torch.bfloat16, device=device)\n out.backward(grad_input)\n\n # fp32 compute\n input2 = input.detach().clone().float().requires_grad_(True)\n out2 = fn(input2)\n grad_input2 = 
grad_input.detach().clone().float()\n out2.backward(grad_input2)\n\n self.assertEqual(out.dtype, torch.bfloat16)\n self.assertEqual(input.grad.dtype, torch.bfloat16)\n self.assertEqual(out, out2, atol=prec, rtol=0, exact_dtype=False)\n self.assertEqual(input.grad.data, input2.grad.data, atol=prec, rtol=0, exact_dtype=False)\n\n shapes = [[1, 3, 1, 6], [1, 3, 1, 128], [1, 3, 256, 256]]\n for shape in shapes:\n test_bfloat16(torch.nn.LogSigmoid(), device, shape, prec=2e-2)\n test_bfloat16(torch.nn.Hardsigmoid(), device, shape, prec=1e-2)\n test_bfloat16(torch.nn.Hardshrink(), device, shape, prec=1e-2)\n test_bfloat16(torch.nn.Softshrink(), device, shape, prec=1e-2)\n test_bfloat16(torch.nn.Hardswish(), device, shape, prec=2e-2)\n test_bfloat16(torch.nn.Softplus(), device, shape, prec=1e-2)\n\n def _test_bfloat16_ops(self, op, device, inp_dims=(), prec=1e-2, scale_factor=None):\n # fp32 compute\n input1 = torch.randn(inp_dims, dtype=torch.float32, device=device, requires_grad=True)\n if scale_factor is not None:\n input1 = (torch.rand(inp_dims, dtype=torch.bfloat16, device=device) * scale_factor).float().requires_grad_()\n out1 = op(input1)\n grad_input1 = torch.randn_like(out1, device=device)\n out1.backward(grad_input1)\n\n # bfloat16 compute\n op_bfp16 = op.bfloat16()\n input2 = input1.detach().bfloat16().requires_grad_()\n grad_input2 = grad_input1.bfloat16()\n out2 = op_bfp16(input2)\n out2.backward(grad_input2)\n\n self.assertEqual(out1, out2, atol=prec, rtol=prec, exact_dtype=False)\n self.assertEqual(input1.grad.data, input2.grad.data, atol=prec, rtol=prec, exact_dtype=False)\n\n @onlyCUDA\n def test_activations_bfloat16(self, device):\n self._test_bfloat16_ops(torch.nn.ReLU(), device, inp_dims=(5), prec=1e-2)\n self._test_bfloat16_ops(torch.nn.Threshold(0.1, 20), device, inp_dims=(5), prec=1e-2)\n self._test_bfloat16_ops(torch.nn.ELU(), device, inp_dims=(5), prec=1e-2)\n self._test_bfloat16_ops(torch.nn.Softplus(), device, inp_dims=(5), prec=1e-2)\n 
self._test_bfloat16_ops(torch.nn.Hardshrink(), device, inp_dims=(5), prec=1e-2)\n self._test_bfloat16_ops(torch.nn.Softshrink(), device, inp_dims=(5), prec=1e-2)\n self._test_bfloat16_ops(torch.nn.LeakyReLU(), device, inp_dims=(5), prec=1e-2)\n\n @onlyCUDA\n def test_pooling_bfloat16(self, device):\n self._test_bfloat16_ops(torch.nn.AvgPool1d(3, stride=2), device, inp_dims=(8, 4, 16), prec=0.05)\n self._test_bfloat16_ops(torch.nn.AvgPool2d(3, stride=2), device, inp_dims=(8, 4, 16, 16), prec=0.05)\n self._test_bfloat16_ops(torch.nn.AvgPool3d(3, stride=2), device, inp_dims=(8, 4, 16, 16, 16), prec=0.05)\n self._test_bfloat16_ops(torch.nn.AdaptiveAvgPool1d(3), device, inp_dims=(8, 4, 16), prec=0.05)\n self._test_bfloat16_ops(torch.nn.AdaptiveAvgPool2d((3, 5)), device, inp_dims=(8, 4, 16, 16), prec=0.05)\n self._test_bfloat16_ops(torch.nn.AdaptiveAvgPool3d((3, 5, 7)), device, inp_dims=(8, 4, 16, 16, 16), prec=0.05)\n\n @onlyNativeDeviceTypes\n def test_softmax_bfloat16(self, device):\n for dim in [0, 1, 2, 3]:\n self._test_bfloat16_ops(torch.nn.Softmax(dim=dim), device, inp_dims=(16, 33, 15, 16), prec=1e-2)\n # test softmax with large input value which casues exp() to overflow\n self._test_bfloat16_ops(torch.nn.Softmax(dim=dim), device, inp_dims=(16, 33, 15, 16), prec=0.05, scale_factor=1000.0)\n\n @onlyCUDA\n @skipCUDAIfRocmVersionLessThan((4, 3))\n @skipCUDAIfNotMiopenSuggestNHWC\n @skipCUDAIfCudnnVersionLessThan(7603)\n @dtypes(torch.half, torch.float, torch.cfloat)\n def test_conv_cudnn_nhwc(self, device, dtype):\n def helper(n, c, h, w, out_channels, kernel_size, groups):\n input = torch.randint(-3, 3, (n, c, h, w), dtype=dtype, device=device)\\\n .to(memory_format=torch.channels_last)\n input.requires_grad_()\n conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups)\\\n .to(device='cuda', dtype=dtype, memory_format=torch.channels_last)\n for p in conv.parameters():\n p.data = torch.randint_like(p, -3, 3)\n\n # use FP64 channels-first conv as reference\n 
ref_input = input.detach().clone().contiguous().double().requires_grad_()\n ref_conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups)\n # load_state_dict will restore the stride & memory_layout on ref_conv.weight.\n ref_conv.load_state_dict(conv.state_dict())\n ref_conv = ref_conv.to(device='cuda', dtype=torch.double, memory_format=torch.contiguous_format)\n\n out = conv(input)\n ref_out = ref_conv(ref_input)\n\n grad = torch.randint_like(out, -3, 3)\n ref_grad = grad.detach().clone().double().contiguous()\n\n out.backward(grad)\n ref_out.backward(ref_grad)\n\n self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last))\n self.assertTrue(conv.weight.grad.is_contiguous(memory_format=torch.channels_last))\n\n self.assertTrue(ref_out.is_contiguous())\n self.assertTrue(ref_input.grad.is_contiguous())\n self.assertTrue(ref_conv.weight.grad.is_contiguous())\n\n self.assertEqual(out, ref_out, exact_dtype=False)\n self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False)\n self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False)\n self.assertEqual(input.grad, ref_input.grad, exact_dtype=False)\n\n helper(2, 8, 4, 4, out_channels=4, kernel_size=3, groups=1)\n helper(2, 8, 4, 4, out_channels=8, kernel_size=3, groups=8)\n helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=1)\n helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=16)\n\n @onlyCUDA\n @skipCUDAIfRocm\n @skipCUDAIfCudnnVersionLessThan(8005)\n @dtypes(torch.half, torch.float)\n def test_conv_cudnn_ndhwc(self, device, dtype):\n def helper(n, c, d, h, w, out_channels, kernel_size, groups):\n input = torch.randint(-2, 2, (n, c, d, h, w), dtype=dtype, device=device)\\\n .to(memory_format=torch.channels_last_3d)\n input.requires_grad_()\n conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups)\\\n .to(device='cuda', dtype=dtype, memory_format=torch.channels_last_3d)\n 
for p in conv.parameters():\n p.data = torch.randint_like(p, -2, 2)\n\n # use FP64 channels-first conv as reference\n ref_input = input.detach().clone().contiguous().double().requires_grad_()\n ref_conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups)\n # load_state_dict will restore the stride & memory_layout on ref_conv.weight.\n ref_conv.load_state_dict(conv.state_dict())\n ref_conv = ref_conv.to(device='cuda', dtype=torch.double, memory_format=torch.contiguous_format)\n\n out = conv(input)\n ref_out = ref_conv(ref_input)\n\n grad = torch.randint_like(out, -2, 2)\n ref_grad = grad.detach().clone().double().contiguous()\n\n out.backward(grad)\n ref_out.backward(ref_grad)\n\n self.assertTrue(out.is_contiguous(memory_format=torch.channels_last_3d))\n self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last_3d))\n self.assertTrue(conv.weight.grad.is_contiguous(memory_format=torch.channels_last_3d))\n\n self.assertTrue(ref_out.is_contiguous())\n self.assertTrue(ref_input.grad.is_contiguous())\n self.assertTrue(ref_conv.weight.grad.is_contiguous())\n\n self.assertEqual(out, ref_out, exact_dtype=False)\n self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False)\n self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False)\n self.assertEqual(input.grad, ref_input.grad, exact_dtype=False)\n\n helper(2, 8, 4, 4, 4, out_channels=4, kernel_size=3, groups=1)\n helper(2, 8, 4, 4, 4, out_channels=8, kernel_size=3, groups=8)\n helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=1)\n helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=16)\n\n def _run_conv(self, layer, device, inp, grad, ref_conv, ref_input, ref_out,\n input_format, weight_format, grad_format, output_format):\n conv = layer(inp.size(1), grad.size(1),\n ref_conv.weight.size(2)).float().to(device)\n # load_state_dict will restore the stride & memory_layout on ref_conv.weight.\n conv.load_state_dict(ref_conv.state_dict())\n 
weight_data = conv.weight.detach().clone().contiguous(memory_format=weight_format)\n conv.weight.data = weight_data.resize_(weight_data.size(), memory_format=weight_format)\n input = inp.clone().contiguous(memory_format=input_format)\n input.resize_(input.size(), memory_format=input_format)\n input = input.requires_grad_()\n grad = grad.contiguous(memory_format=grad_format)\n grad.resize_(grad.size(), memory_format=grad_format)\n out = conv(input)\n out.backward(grad)\n self.assertTrue(out.is_contiguous(memory_format=output_format))\n self.assertEqual(out, ref_out)\n self.assertEqual(conv.weight.grad, ref_conv.weight.grad)\n self.assertEqual(conv.bias.grad, ref_conv.bias.grad)\n self.assertEqual(input.grad, ref_input.grad)\n\n def _test_conv_cudnn_nhwc_nchw(self, layer, n, c, h, w, k, filter_size, device):\n data = torch.randint(1, 10, (n, c, h, w), dtype=torch.float32, device=device)\n ref_input = data.clone().contiguous().requires_grad_(True)\n ref_conv = layer(c, k, filter_size).float().to(device)\n ref_out = ref_conv(ref_input)\n grad = torch.randint(1, 10, ref_out.size(), dtype=torch.float32, device=\"cuda\")\n ref_out.backward(grad)\n\n for w_f in [torch.contiguous_format, torch.channels_last]:\n for g_f in [torch.contiguous_format, torch.channels_last]:\n for input_format in [torch.contiguous_format, torch.channels_last]:\n output_format = torch.contiguous_format\n # Older versions of CudNN have Channels Last support disabled\n if torch.backends.cudnn.version() >= 7603:\n if input_format == torch.channels_last:\n output_format = torch.channels_last\n # This is because we have N111 weight that cannot handle\n # the ambiguous memory_format\n if w_f == torch.channels_last:\n if layer == nn.Conv2d and filter_size * c != 1:\n output_format = torch.channels_last\n if layer == nn.ConvTranspose2d and filter_size * k != 1:\n output_format = torch.channels_last\n self._run_conv(layer, device, data, grad, ref_conv, ref_input,\n ref_out, input_format, w_f, g_f, 
output_format)\n\n @onlyCUDA\n @skipCUDAIfRocmVersionLessThan((4, 3))\n @skipCUDAIfNotMiopenSuggestNHWC\n @skipCUDAIfCudnnVersionLessThan(7603)\n @tf32_on_and_off(0.05)\n def test_conv_cudnn_mismatch_memory_format(self, device):\n configs = [\n [4, 2, 8, 8, 4, 2],\n [4, 1, 8, 8, 4, 2],\n [1, 1, 8, 8, 4, 2],\n [4, 2, 2, 8, 4, 1],\n [4, 2, 1, 8, 4, 1],\n [4, 2, 8, 8, 4, 1],\n [4, 1, 8, 8, 4, 1],\n ]\n for n, c, h, w, k, filter_size in configs:\n self._test_conv_cudnn_nhwc_nchw(nn.Conv2d, n, c, h, w, k, filter_size, device)\n self._test_conv_cudnn_nhwc_nchw(nn.ConvTranspose2d, n, c, h, w, k, filter_size, device)\n\n # torch.half is erroring out on Windows with CUDA 10.1 + cuDNN 7.6.4\n # returning CUDNN_STATUS_BAD_PARAM\n # Disabling that specific test for now [see issue # 33918]\n @onlyCUDA\n @skipCUDAIfNoCudnn\n @dtypes(torch.float, torch.double)\n def test_conv_cudnn_nhwc_support(self, device, dtype):\n input = torch.randn((1, 16, 1, 1), dtype=dtype, device=\"cuda\", requires_grad=True)\n weight = torch.randn((8, 16, 3, 3), dtype=dtype, device=\"cuda\", requires_grad=True)\n weight = weight.to(memory_format=torch.channels_last)\n o = torch.conv2d(input, weight, None, (2, 1), (1, 1), (1, 1), 1)\n self.assertTrue(o.is_contiguous(memory_format=torch.channels_last))\n o.sum().backward()\n\n # Test that faster algorithms used for inference produce the same results\n # Validates depthwise3x3 bug reported in https://github.com/pytorch/pytorch/issues/60176\n @onlyCPU\n @dtypes(torch.float)\n def test_conv2d_no_grad(self, device, dtype):\n for batch in [1, 2, 3]:\n for groups in [1, 2, 4]:\n input = torch.rand(batch, groups, 8, 8, dtype=dtype, device=device)\n m = nn.Conv2d(groups, 8, kernel_size=(3, 3), groups=groups, dtype=dtype, device=device)\n with torch.no_grad():\n output_ng = m(input)\n output = m(input)\n self.assertEqual(output, output_ng, rtol=1e-2, atol=1e-5)\n\n @onlyCUDA\n @skipCUDAIfRocm\n @skipCUDAIfNoCudnn\n @dtypes(torch.float, torch.float16)\n 
@precisionOverride({torch.half: 0.002, torch.float: 1e-4})\n def test_cudnn_convolution_relu(self, device, dtype):\n for batch, groups, image_size, kernel_size, memory_format in \\\n product((1, 2, 3),\n (1, 2, 4),\n ((1, 1), (8, 8)),\n ((1, 1), (3, 3)),\n (torch.channels_last, torch.contiguous_format)):\n if image_size[0] < kernel_size[0]:\n continue\n inp = torch.rand(batch, groups, *image_size, dtype=dtype, device=device)\n w = torch.randn(8, groups, *kernel_size, dtype=dtype, device=device)\n conv2d_out = torch.conv2d(inp, w, None, (1, 1), (0, 0), (1, 1), 1)\n inp = inp.to(memory_format=memory_format)\n w = w.to(memory_format=memory_format)\n cudnn_out = torch.cudnn_convolution_relu(inp, w, None, (1, 1), (0, 0), (1, 1), 1)\n self.assertTrue(cudnn_out.is_contiguous(memory_format=memory_format))\n self.assertEqual(conv2d_out.relu(), cudnn_out)\n\n @onlyCUDA\n @skipCUDAIfRocm\n @skipCUDAIfNoCudnn\n @dtypes(torch.float, torch.float16)\n @precisionOverride({torch.half: 0.002, torch.float: 1e-4})\n def test_cudnn_convolution_add_relu(self, device, dtype):\n for batch, groups, image_size, kernel_size, memory_format in \\\n product((1, 2, 3),\n (1, 2, 4),\n ((1, 1), (8, 8)),\n ((1, 1), (3, 3)),\n (torch.channels_last, torch.contiguous_format)):\n if image_size[0] < kernel_size[0]:\n continue\n inp = torch.rand(batch, groups, *image_size, dtype=dtype, device=device)\n w = torch.randn(8, groups, *kernel_size, dtype=dtype, device=device)\n conv2d_out = torch.conv2d(inp, w, None, (1, 1), (0, 0), (1, 1), 1)\n alpha = 2.0\n z = torch.randn_like(conv2d_out)\n\n inp = inp.to(memory_format=memory_format)\n w = w.to(memory_format=memory_format)\n z = z.to(memory_format=memory_format)\n cudnn_out = torch.cudnn_convolution_add_relu(inp, w, z, alpha, None, (1, 1), (0, 0), (1, 1), 1)\n\n self.assertTrue(cudnn_out.is_contiguous(memory_format=memory_format))\n self.assertEqual(F.relu(conv2d_out + alpha * z), cudnn_out)\n\n @onlyCUDA\n @skipCUDAIfRocm\n 
@skipCUDAIfCudnnVersionLessThan(7603)\n def test_convert_conv2d_weight_memory_format(self, device):\n input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float32, device=device)\n model = nn.Sequential(\n nn.Conv2d(8, 4, 3),\n nn.BatchNorm2d(4)).to(device).float()\n for memory_format in [torch.channels_last, torch.contiguous_format]:\n model = nn.utils.convert_conv2d_weight_memory_format(model, memory_format)\n out = model(input)\n self.assertTrue(out.is_contiguous(memory_format=memory_format))\n\n model = nn.Sequential(\n nn.ConvTranspose2d(8, 4, 3),\n nn.BatchNorm2d(4)).to(device).float()\n for memory_format in [torch.channels_last, torch.contiguous_format]:\n model = nn.utils.convert_conv2d_weight_memory_format(model, memory_format)\n out = model(input)\n self.assertTrue(out.is_contiguous(memory_format=memory_format))\n\n def test_conv_double_backward_strided_with_3D_input_and_weight(self, device):\n # Test that _convolution_double_backward() outputs the correct grad shapes\n # for 3D input / weight when stride > 1. 
This is an ad-hoc regression test for a\n # specific case that was uncovered during the convolution consolidation effort.\n # The test can be safely deleted if _convolution_double_backward() is removed.\n\n input = torch.randn(2, 3, 6, device=device)\n weight = torch.randn(3, 3, 3, device=device)\n bias = torch.randn(3, device=device)\n stride = (2,)\n padding = (1,)\n dilation = (1,)\n transposed = False\n output_padding = (0,)\n groups = 1\n output = torch.ops.aten.convolution(input, weight, bias, stride, padding, dilation, transposed,\n output_padding, groups)\n\n ggI = torch.randn(input.shape, device=device)\n ggW = torch.randn(weight.shape, device=device)\n ggB = torch.randn(bias.shape, device=device)\n gO = torch.randn(output.shape, device=device)\n output_mask = [True, True, True]\n grad_grad_output, grad_input, grad_weight = torch.ops.aten._convolution_double_backward(\n ggI, ggW, ggB, gO, weight, input, stride, padding, dilation, transposed,\n output_padding, groups, output_mask)\n\n # Make sure the correct shapes are computed.\n self.assertEqual(grad_grad_output.shape, gO.shape)\n self.assertEqual(grad_input.shape, input.shape)\n self.assertEqual(grad_weight.shape, weight.shape)\n\n def test_nll_loss_mismatched_batch(self, device):\n x = torch.randn((10, 3), requires_grad=True, device=device)\n # t should have size (10,)\n t = torch.zeros((3,), dtype=torch.int64, device=device)\n with self.assertRaisesRegex(ValueError, 'Expected.*batch_size'):\n F.nll_loss(x, t)\n\n def test_nll_loss_out_of_bounds_ignore_index(self, device):\n x = torch.randn(6, 3, requires_grad=True, device=device)\n t = torch.tensor([0, 1, 255, 0, 1, 2], dtype=torch.int64, device=device)\n for reduction in ['mean', 'none']:\n F.nll_loss(x, t, ignore_index=255, reduction=reduction).sum().backward()\n\n def test_nll_loss_invalid_target_dim(self, device):\n x = torch.randn((10, 3), device=device)\n t = torch.zeros((10, 2), dtype=torch.int64, device=device)\n with 
self.assertRaisesRegex(RuntimeError, \"1D target tensor expected\"):\n F.nll_loss(x, t)\n\n def test_nll_loss_invalid_weights(self, device):\n x = torch.randn((10, 3), device=device)\n t = torch.empty(10, dtype=torch.int64, device=device).random_(0, 3)\n invalid_weights = [\n torch.randn(4, device=device),\n torch.randn(1, 3, device=device),\n ]\n msg = \"weight tensor should be defined either for all 3 classes or no classes\"\n for weight in invalid_weights:\n with self.assertRaisesRegex(RuntimeError, msg):\n F.nll_loss(x, t, weight=weight)\n\n def _nll_loss_helper(self, input_size, reduction, expected, device):\n input = torch.rand(input_size, requires_grad=True, device=device)\n num_channels = input_size[1]\n target_size = (input_size[0], ) + tuple(input_size[2:])\n target = torch.randint(num_channels, target_size, device=device)\n\n output = F.nll_loss(input, target, reduction=reduction)\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(output, expected)\n\n output.sum().backward()\n self.assertEqual(input.grad.size(), input.size())\n\n def test_nll_loss_empty_tensor_reduction_none(self, device):\n self._nll_loss_helper([0, 3], \"none\", torch.empty([0], device=device), device)\n self._nll_loss_helper([0, 3, 5, 7], \"none\", torch.empty([0, 5, 7], device=device), device)\n self._nll_loss_helper([2, 3, 0, 7], \"none\", torch.empty([2, 0, 7], device=device), device)\n self._nll_loss_helper([2, 3, 5, 0], \"none\", torch.empty([2, 5, 0], device=device), device)\n self._nll_loss_helper([2, 3, 5, 7, 0], \"none\", torch.empty([2, 5, 7, 0], device=device), device)\n\n @unittest.skipIf(TEST_WITH_UBSAN, \"division-by-zero error with UBSAN\")\n def test_nll_loss_empty_tensor_reduction_mean(self, device):\n nan = torch.tensor(float('nan'), device=device)\n self._nll_loss_helper([0, 3], \"mean\", nan, device)\n self._nll_loss_helper([0, 3, 5, 7], \"mean\", nan, device)\n self._nll_loss_helper([2, 3, 0, 7], \"mean\", nan, 
device)\n self._nll_loss_helper([2, 3, 5, 0], \"mean\", nan, device)\n self._nll_loss_helper([2, 3, 5, 7, 0], \"mean\", nan, device)\n\n def test_nll_loss_empty_tensor_reduction_sum(self, device):\n zero = torch.tensor(0, device=device)\n self._nll_loss_helper([0, 3], \"sum\", zero, device)\n self._nll_loss_helper([0, 3, 5, 7], \"sum\", zero, device)\n self._nll_loss_helper([2, 3, 0, 7], \"sum\", zero, device)\n self._nll_loss_helper([2, 3, 5, 0], \"sum\", zero, device)\n self._nll_loss_helper([2, 3, 5, 7, 0], \"sum\", zero, device)\n\n @unittest.skipIf(TEST_WITH_UBSAN, \"division-by-zero error with UBSAN\")\n def test_nll_loss_total_weight_is_zero(self, device):\n\n def helper(input_size):\n input = torch.ones(input_size, requires_grad=True, device=device)\n num_channels = input_size[1]\n target_size = (input_size[0], ) + tuple(input_size[2:])\n target = torch.zeros(target_size, dtype=torch.long, device=device)\n weight = torch.zeros([num_channels], device=device)\n self.assertEqual(F.nll_loss(input, target, weight, reduction=\"sum\").item(), 0.)\n self.assertEqual(F.nll_loss(input, target, weight, reduction=\"mean\").item(), float(\"nan\"))\n self.assertEqual(F.nll_loss(input, target, weight, reduction=\"none\"), torch.zeros(target.shape, device=device))\n\n helper([2, 3])\n helper([2, 3, 5, 7])\n helper([2, 3, 5, 7, 9])\n\n @unittest.skipIf(TEST_WITH_UBSAN, \"division-by-zero error with UBSAN\")\n def test_nll_loss_all_ignored(self, device):\n\n def helper(input_size):\n input = torch.ones(input_size, device=device)\n num_channels = input_size[1]\n target_size = (input_size[0], ) + tuple(input_size[2:])\n target = torch.zeros(target_size, dtype=torch.long, device=device)\n self.assertEqual(F.nll_loss(input, target, ignore_index=0, reduction=\"sum\").item(), 0)\n self.assertEqual(F.nll_loss(input, target, ignore_index=0, reduction=\"mean\").item(), float(\"nan\"))\n self.assertEqual(F.nll_loss(input, target, ignore_index=0, reduction=\"none\"), 
torch.zeros(target.shape, device=device))\n\n helper([2, 3])\n helper([2, 3, 5, 7])\n helper([2, 3, 5, 7, 9])\n\n def test_nll_loss_byte_target_matches_long(self, device):\n N, C = 10, 4\n input = torch.randn(N, C, device=device, requires_grad=True)\n target = torch.empty(N, dtype=torch.long, device=device).random_(0, C)\n\n def compute_result_and_gradient(reduction, target_dtype):\n input_ = input.detach()\n input_.requires_grad_()\n\n prob = F.log_softmax(input_, dim=-1)\n loss = nn.NLLLoss(reduction=reduction)\n result = loss(prob, target.to(target_dtype))\n result.sum().backward()\n\n return result, input_.grad\n\n for reduction in [\"none\", \"mean\", \"sum\"]:\n result_long, grad_long = compute_result_and_gradient(reduction, torch.long)\n result_byte, grad_byte = compute_result_and_gradient(reduction, torch.uint8)\n self.assertEqual(result_long, result_byte)\n self.assertEqual(grad_long, grad_byte)\n\n def test_cross_entropy_loss_prob_target_all_reductions(self, device):\n # Test with k-dimensional loss.\n for k in range(5):\n N, C = 5, 4\n other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]\n input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)\n target = torch.randn(N, C, *other_dims, device=device, requires_grad=True)\n weight = torch.randn(C, device=device).abs()\n\n for reduction, w in product(['none', 'mean', 'sum'], [None, weight]):\n m = torch.nn.CrossEntropyLoss(weight=w, reduction=reduction)\n output = m(input, target)\n output_ref = loss_reference_fns['CrossEntropyLoss'](\n input, target, reduction=reduction, weight=w)\n self.assertEqual(output, output_ref)\n\n def test_cross_entropy_loss_prob_target_unit_weights(self, device):\n # Test with k-dimensional loss.\n for k in range(5):\n N, C = 5, 4\n other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]\n input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)\n target = torch.randn(N, C, *other_dims, device=device, 
requires_grad=True)\n\n for reduction in ['none', 'mean', 'sum']:\n # Ensure result with unit weights is equivalent to result without weights.\n m = torch.nn.CrossEntropyLoss(reduction=reduction)\n unit_weight = torch.ones(C, device=device, dtype=target.dtype)\n m_unit = torch.nn.CrossEntropyLoss(weight=unit_weight, reduction=reduction)\n output = m(input, target)\n output_unit = m_unit(input, target)\n self.assertEqual(output, output_unit)\n\n def test_cross_entropy_loss_index_target_unit_weights(self, device):\n # Test with k-dimensional loss.\n for k in range(5):\n N, C = 5, 4\n other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]\n input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)\n target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)\n\n for reduction in ['none', 'mean', 'sum']:\n # Ensure result with unit weights is equivalent to result without weights.\n m = torch.nn.CrossEntropyLoss(reduction=reduction)\n unit_weight = torch.ones(C, device=device, dtype=input.dtype)\n m_unit = torch.nn.CrossEntropyLoss(weight=unit_weight, reduction=reduction)\n output = m(input, target)\n output_unit = m_unit(input, target)\n self.assertEqual(output, output_unit)\n\n def test_cross_entropy_loss_one_hot_target(self, device):\n # Test with k-dimensional loss.\n for k in range(5):\n N, C = 5, 4\n other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]\n input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)\n target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)\n weight = torch.randn(C, device=device).abs()\n\n # Get one-hot representation of the target.\n target_one_hot = F.one_hot(target, num_classes=C).to(input.dtype)\n # Need to put the C dim at index 1.\n target_one_hot = target_one_hot.permute(0, -1, *range(1, target_one_hot.dim() - 1))\n\n for reduction, w in product(['none', 'mean', 'sum'], [None, weight]):\n # Skip this case for now 
because soft and hard label CE are not consistent\n # in the way they apply class weights (see issue #61309).\n if reduction == 'mean' and weight is not None:\n continue\n\n # Ensure loss computed with class indices matches loss\n # computed with one-hot class probs.\n m = torch.nn.CrossEntropyLoss(weight=w, reduction=reduction)\n output = m(input, target)\n output_one_hot = m(input, target_one_hot)\n self.assertEqual(output, output_one_hot)\n\n def test_cross_entropy_label_smoothing_errors(self, device):\n N, C = 3, 4\n input_args = [\n (torch.randn((N, C), device=device), torch.arange(0, C, device=device)),\n (torch.randn((N, C), device=device), torch.randn(N, C, device=device))\n ]\n for input_arg in input_args:\n loss = nn.CrossEntropyLoss(label_smoothing=1.2)\n with self.assertRaisesRegex(RuntimeError,\n r\"label_smoothing must be between 0\\.0\"):\n loss(*input_arg)\n\n def test_cross_entropy_label_smoothing_consistent_index_target_and_probs(self, device):\n N, C = 10, 4\n ks = range(5)\n reductions = ['none', 'mean', 'sum']\n label_smoothings = [0.05, 0.15]\n\n for k, reduction, label_smoothing in product(ks, reductions, label_smoothings):\n other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]\n input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)\n target = torch.empty(N, *other_dims, dtype=torch.long, device=device).random_(0, C)\n\n # construct target probablity that should have the same result as label_smoothing\n target_proba = F.one_hot(target, num_classes=C)\n # Need to put the C dim at index 1.\n target_proba = target_proba.permute(0, -1, *range(1, target_proba.dim() - 1))\n target_mask = (target_proba == 1)\n target_proba = target_proba.to(dtype=input.dtype)\n\n # y_k^ls = y_k * (1 - label_smoothing) + label_smoothing / n_classes\n # Get one-hot representation of the target.\n target_proba.masked_fill_(target_mask, 1 - label_smoothing + label_smoothing / C)\n target_proba.masked_fill_(~target_mask, 
label_smoothing / C)\n\n loss = nn.CrossEntropyLoss(reduction=reduction)\n output_with_prob = loss(input, target_proba)\n\n loss = nn.CrossEntropyLoss(\n reduction=reduction, label_smoothing=label_smoothing)\n output_with_index = loss(input, target)\n\n self.assertEqual(output_with_prob, output_with_index,\n rtol=1e-07, atol=1e-05)\n\n def test_cross_entropy_label_smoothing_with_probs(self, device):\n N, C = 10, 4\n ks = range(5)\n reductions = ['none', 'mean', 'sum']\n label_smoothings = [0.05, 0.15]\n\n # Test with k-dimensional loss.\n for k, label_smoothing in product(ks, label_smoothings):\n other_dims = [torch.randint(2, 5, size=(1,)).item() for _ in range(k)]\n input = torch.randn(N, C, *other_dims, device=device, requires_grad=True)\n target = F.log_softmax(torch.randn(N, C, *other_dims, device=device), dim=1)\n\n for reduction in reductions:\n # use with label_smoothing\n loss = nn.CrossEntropyLoss(reduction=reduction, label_smoothing=label_smoothing)\n output_with_smoothing = loss(input, target)\n\n # manually smoothing target\n # class_proba^ls = class_proba * (1 - label_smoothing) +\n # label_smoothing / n_classes\n target_with_smoothing = target * (1 - label_smoothing) + label_smoothing / C\n loss = nn.CrossEntropyLoss(reduction=reduction)\n output_with_manual_smoothing = loss(input, target_with_smoothing)\n\n self.assertEqual(output_with_smoothing, output_with_manual_smoothing)\n\n\n def test_cross_entropy_label_smoothing_weight_ignore_indices(self, device):\n reductions = ['none', 'sum', 'mean']\n label_smoothings = [0.05, 0.15]\n\n weight = torch.tensor([0.3, 0.6], device=device)\n inp1 = torch.tensor([[0.3, 0.4], [1, 2]], device=device)\n inp2 = torch.tensor([[0.3, 0.6], [1, 2]], device=device)\n\n targ_default_ignore_index = torch.tensor([-100, 1], device=device)\n targ_negative_ignore_index = torch.tensor([-2, 1], device=device)\n targ_positive_ignore_index = torch.tensor([2, 1], device=device)\n\n for reduction, label_smoothing, weight in 
product(reductions, label_smoothings, (None, weight)):\n def check_equal(loss, inp_targ_1, inp_targ_2):\n inp1, targ1 = inp_targ_1\n inp2, targ2 = inp_targ_2\n l1 = loss(inp1, targ1)\n l2 = loss(inp2, targ2)\n self.assertEqual(l1, l2)\n\n # Default ignore_index\n loss = nn.CrossEntropyLoss(reduction=reduction,\n label_smoothing=label_smoothing,\n weight=weight)\n check_equal(loss, (inp1, targ_default_ignore_index), (inp2, targ_default_ignore_index))\n if reduction != 'none':\n # Check that we correctly tally the denominator for `mean`\n # i.e. we don't count the ignored_idx at all.\n check_equal(loss, (inp1, targ_default_ignore_index), (inp2[1:], targ_default_ignore_index[1:]))\n\n # negative ignore_index\n loss = nn.CrossEntropyLoss(reduction=reduction,\n label_smoothing=label_smoothing,\n ignore_index=-2,\n weight=weight)\n check_equal(loss, (inp1, targ_negative_ignore_index), (inp2, targ_negative_ignore_index))\n if reduction != 'none':\n # Check that we correctly tally the denominator for `mean`\n # i.e. we don't count the ignored_idx at all.\n check_equal(loss, (inp1, targ_negative_ignore_index), (inp2[1:], targ_negative_ignore_index[1:]))\n\n # positive ignore_index\n loss = nn.CrossEntropyLoss(reduction=reduction,\n label_smoothing=label_smoothing,\n ignore_index=2,\n weight=weight)\n check_equal(loss, (inp1, targ_positive_ignore_index), (inp2, targ_positive_ignore_index))\n if reduction != 'none':\n # Check that we correctly tally the denominator for `mean`\n # i.e. 
we don't count the ignored_idx at all.
                check_equal(loss, (inp1, targ_positive_ignore_index), (inp2[1:], targ_positive_ignore_index[1:]))


    def test_softshrink_negative(self, device):
        # Softshrink's lambda must be non-negative; a negative value must be
        # rejected with a clear error message when the module is applied.
        input = torch.randn(5, device=device, requires_grad=True)
        m = torch.nn.Softshrink(-1)
        with self.assertRaisesRegex(RuntimeError,
                                    r'lambda must be greater or equal to 0, but found to be -1\.'):
            m(input)

    def test_fold(self, device):
        # Checks F.fold gradients (forward-mode, reverse-mode, and second
        # order) over several RNG seeds, plus bfloat16 parity on CPU.
        def test_dtype(fn, input, dtype):
            # Run `fn` on `input` both in `dtype` and in float32 and compare
            # outputs and input gradients within a loose tolerance.
            input = input.detach().clone().to(dtype=dtype).requires_grad_(True)
            input2 = input.detach().clone().float().requires_grad_(True)
            out = fn(input)
            out.sum().backward()
            out2 = fn(input2)
            out2.sum().backward()
            self.assertEqual(out.dtype, dtype)
            self.assertEqual(input.grad.dtype, dtype)
            self.assertEqual(out, out2.to(dtype=dtype), atol=0.05, rtol=0)
            self.assertEqual(input.grad, input2.grad.to(dtype=dtype))

        def func(x):
            return F.fold(x, output_size=(4, 5), kernel_size=(2, 2))

        seeds = (44, 83, 71, 25, 999)
        for sd in seeds:
            torch.manual_seed(sd)
            x = torch.randn(1, 12, 12, device=device, requires_grad=True)
            gradcheck(func, [x], check_forward_ad=True)
            gradgradcheck(func, [x], check_fwd_over_rev=True)
            if device == 'cpu':
                # bfloat16 path is only exercised on CPU here.
                test_dtype(func, x, torch.bfloat16)


    def test_logsigmoid_out(self, device):
        # this isn't actually documented, but was broken previously:
        # https://github.com/pytorch/pytorch/issues/36499
        x = torch.randn(2, 3, device=device).t()
        empty_out = torch.randn(0, device=device)
        self.assertEqual(F.logsigmoid(x), F.logsigmoid(x, out=empty_out))

        noncontig_out = torch.randn(2, 3, device=device).t()
        self.assertEqual(F.logsigmoid(x), F.logsigmoid(x, out=noncontig_out))

    def test_maxpool3d_non_square_backward(self, device):
        # previous CUDA routine of this backward calculates kernel launch grid size
        # with last two dimensions interchanged, so the tailing along the longer dim
        # get ignored. 
Here we test whether every position gets gradient.\n for dim in (2, 3, 4):\n shape = tuple(32 if i != dim else 256 for i in range(4))\n x = torch.randn(shape, device=device, requires_grad=True)\n F.max_pool3d(x, kernel_size=(1, 1, 1)).sum().backward()\n self.assertEqual(x.grad, torch.ones_like(x.grad))\n\n # Check that clip_grad_norm_ raises an error if the total norm of the\n # parameters' gradients is non-finite\n def test_clip_grad_norm_error_if_nonfinite(self, device):\n norms_pos = [0.1, 1, 2, 3.5, inf]\n norms_neg = [-0.1, -1, -2, -3.5]\n norms_except_0 = norms_pos + norms_neg\n norms_all = norms_except_0 + [0]\n\n # Each entry in test_cases has the following values, in this order:\n #\n # grad_only_one_elem If True, only one element of the parameter's\n # gradient is set to the scalar grad, and the\n # rest of the elements are 0. If False, all grad\n # elements are equal to the scalar.\n #\n # prefix_finite_grad_param If True, prefix a parameter that has a grad\n # of 1.\n #\n # scalars Scalars to use as the parameter's grad, through\n # multiplication\n #\n # norms_nonfinite Norm types that should produce nonfinite total norm\n #\n # norms_finite Norm types that should produce finite total norm\n test_cases = [\n # Test errors from an infinite grad\n (False, False, [inf, -inf], norms_except_0, [0]),\n (False, True, [inf, -inf], norms_pos, norms_neg + [0]),\n (True, False, [inf, -inf], norms_pos, norms_neg + [0]),\n (True, True, [inf, -inf], norms_pos, norms_neg + [0]),\n\n # Test errors from a NaN grad\n (False, False, [nan], norms_except_0, [0]),\n (False, True, [nan], norms_except_0, [0]),\n (True, False, [nan], norms_except_0, [0]),\n (True, True, [nan], norms_except_0, [0]),\n\n # Test a grad that should never error\n (False, False, [2e22, -2e22], [], norms_all),\n (False, True, [2e22, -2e22], [], norms_all),\n (True, False, [2e22, -2e22], [], norms_all),\n (True, True, [2e22, -2e22], [], norms_all),\n\n # Test a grad that will overflow to inf for only 
some norm orders\n (False, False, [2e200, -2e200], [3.5, 2, -2, -3.5], [inf, 1, 0.1, 0, -1, -0.1]),\n (False, True, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),\n (True, False, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),\n (True, True, [2e200, -2e200], [3.5, 2], norms_neg + [inf, 1, 0.1, 0]),\n ]\n\n def gen_parameters(scalar, grad_only_one_elem, prefix_finite_grad_param):\n param = torch.ones(10, dtype=torch.float64, device=device, requires_grad=True)\n\n if grad_only_one_elem:\n param[1].mul(scalar).sum().backward()\n else:\n param.mul(scalar).sum().backward()\n\n if prefix_finite_grad_param:\n prefix_param = torch.ones(1, dtype=torch.float64, device=device, requires_grad=True)\n prefix_param.mul(1).sum().backward()\n parameters = [prefix_param, param]\n else:\n parameters = [param]\n\n return parameters\n\n def run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, is_norm_nonfinite):\n msg = (\n f'norm_type: {norm_type}, ',\n f'error_if_nonfinite: {error_if_nonfinite}, '\n f'scalar: {scalar}, '\n f'grad_only_one_elem: {grad_only_one_elem}, '\n f'prefix_finite_grad_param: {prefix_finite_grad_param}, '\n f'is_norm_nonfinite: {is_norm_nonfinite}')\n\n parameters = gen_parameters(scalar, grad_only_one_elem, prefix_finite_grad_param)\n\n # Should only throw an error if the total norm is expected to be\n # nonfinite and `error_if_nonfinite=True`\n if is_norm_nonfinite and error_if_nonfinite:\n error_msg = f'The total norm of order {float(norm_type)} for gradients'\n\n grads_before = [p.grad.clone() for p in parameters]\n\n with self.assertRaisesRegex(RuntimeError, error_msg, msg=msg):\n clip_grad_norm_(parameters, 1, norm_type=norm_type, error_if_nonfinite=True)\n\n # Grad should not change if error is thrown\n grads_after = [p.grad for p in parameters]\n self.assertEqual(grads_before, grads_after, msg=msg)\n else:\n clip_grad_norm_(parameters, 1, norm_type=norm_type, 
error_if_nonfinite=error_if_nonfinite)\n\n for grad_only_one_elem, prefix_finite_grad_param, scalars, norms_nonfinite, norms_finite in test_cases:\n for error_if_nonfinite in [False, True]:\n for norm_type, scalar in product(norms_nonfinite, scalars):\n run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, True)\n\n for norm_type, scalar in product(norms_finite, scalars):\n run_test_case(norm_type, error_if_nonfinite, scalar, grad_only_one_elem, prefix_finite_grad_param, False)\n\n @onlyCUDA\n @deviceCountAtLeast(2)\n def test_clip_grad_norm_multi_device(self, devices):\n class TestModel(nn.Module):\n def __init__(self):\n super(TestModel, self).__init__()\n self.layer1 = nn.Linear(10, 10)\n self.layer2 = nn.Linear(10, 10)\n\n test_model = TestModel()\n test_model.layer1.to(devices[0])\n test_model.layer2.to(devices[1])\n ref_model = TestModel().to(devices[0])\n for norm_type in [2., math.inf]:\n for p in test_model.parameters():\n p.grad = torch.ones_like(p)\n for p in ref_model.parameters():\n p.grad = torch.ones_like(p)\n norm = clip_grad_norm_(test_model.parameters(), 0.5, norm_type=norm_type)\n expected = clip_grad_norm_(ref_model.parameters(), 0.5, norm_type=norm_type)\n self.assertEqual(norm, expected)\n for p, pe in zip(test_model.parameters(), ref_model.parameters()):\n self.assertEqual(p.grad.to(devices[0]), pe.grad)\n\n def test_elu_inplace_overlap(self, device):\n x = torch.randn((1, 6), device=device).expand((6, 6))\n with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):\n F.elu(x, inplace=True)\n with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):\n F.elu_(x)\n\n # Merge into OpInfo?\n @onlyNativeDeviceTypes\n def test_elu_inplace_with_neg_alpha(self, device):\n a = torch.tensor([-1., 1.], device=device, requires_grad=True)\n b = torch.nn.functional.elu_(a.clone(), alpha=-2)\n with self.assertRaisesRegex(RuntimeError, \"call out-of-place version\"):\n b.backward(torch.ones(2, 
device=device))

        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = torch.nn.functional.celu_(a.clone(), alpha=-2)
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))

    @expectedFailureMeta # https://github.com/pytorch/pytorch/issues/54897
    def test_hardswish_inplace_overlap(self, device):
        # In-place ops must reject inputs with internal memory overlap:
        # the expanded view below maps several logical elements onto the
        # same storage location.
        x = torch.randn((1, 6), device=device).expand((6, 6))
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            F.hardswish(x, inplace=True)

    def test_silu_inplace_overlap(self, device):
        # Same internal-overlap rejection check for SiLU.
        x = torch.randn((1, 6), device=device).expand((6, 6))
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            F.silu(x, inplace=True)

    @onlyNativeDeviceTypes
    def test_mish_inplace_overlap(self, device):
        # Same internal-overlap rejection check for Mish.
        x = torch.randn((1, 6), device=device).expand((6, 6))
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            F.mish(x, inplace=True)

    def test_softplus_inplace_overlap(self, device):
        # Same overlap check, here writing through `out=` into the input.
        x = torch.randn((1, 6), device=device).expand((6, 6))
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            F.softplus(x, out=x)

    def test_softplus_low_threshold(self, device):
        # Ensure gradients are computed correctly with a low threshold.
        model = torch.nn.Softplus(threshold=1).double()
        input = torch.tensor(0.9, device=device, dtype=torch.double,
                             requires_grad=True)
        output = model(input)
        torch.autograd.gradcheck(model, input)

    def test_softshrink_inplace_overlap(self, device):
        # Same overlap check for softshrink written through `out=`.
        x = torch.randn((1, 6), device=device).expand((6, 6))
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            F.softshrink(x, out=x)

    def test_leaky_relu_inplace_overlap(self, device):
        # Same overlap check for leaky_relu, via both the `inplace=True`
        # flag and the trailing-underscore in-place variant.
        x = torch.randn((1, 6), device=device).expand((6, 6))
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            F.leaky_relu(x, inplace=True)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            F.leaky_relu_(x)

    # 
Merge into OpInfo?\n def test_leaky_relu_inplace_with_neg_slope(self, device):\n a = torch.tensor([-1., 1.], device=device, requires_grad=True)\n b = torch.nn.functional.leaky_relu_(a.clone(), -2)\n with self.assertRaisesRegex(RuntimeError, \"call out-of-place version\"):\n b.backward(torch.ones(2, device=device))\n\n a = torch.tensor([-1., 1.], device=device, requires_grad=True)\n b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0)\n with self.assertRaisesRegex(RuntimeError, \"call out-of-place version\"):\n b.backward(torch.ones(2, device=device))\n\n # Merge into OpInfo?\n def test_leaky_relu_inplace_with_zero_slope(self, device):\n a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)\n b = torch.nn.functional.leaky_relu_(a.clone(), 0.0)\n b.backward(torch.ones(3, device=device))\n expected = torch.tensor([0., 0., 1.], device=device)\n self.assertEqual(a.grad, expected)\n\n a_bf16 = torch.tensor([-2., 0., 2.], device=device, dtype=torch.bfloat16, requires_grad=True)\n b_bf16 = torch.nn.functional.leaky_relu_(a_bf16.clone(), 0.0)\n b_bf16.backward(torch.ones(3, device=device))\n expected_bf16 = torch.tensor([0., 0., 1.], device=device, dtype=torch.bfloat16)\n self.assertEqual(a_bf16.grad, expected_bf16)\n\n @onlyCPU\n def test_softshrink(self, device):\n x = torch.tensor([[1.21, 0.56, 0.5001, 0.4999, 1.2357, -0.4999, -0.5001, -1.154,\n 0.254, -0.24, -0.225, 0.104, 0.002, -0.001, 0.0574, 1.2344,\n 0.1748, -0.1797, -0.8125, 0.2051, -1.1328, 1.2344, -0.1562, 2.3554,\n -0.1953, 0.0304, -0.3613, -1.3047, 1.0312, 0.1436, -0.6953, 0.5664,\n -0.5820, -0.3301, 0.8203, 0.6133, 0.5938],\n [-0.8203, -1.2344, -0.5234, 2.5312, -0.4551, -0.6875, -1.5547, -0.2217,\n -0.3027, 2.6406, 1.3047, 0.2344, -1.6719, 0.2773, -1.3516, 3.4575,\n 0.4414, 0.2656, 2.1094, -1.5156, 1.2344, -0.4336, 0.6797, -3.5486,\n 0.9766, -0.4062, 1.4844, 0.7500, -1.7578, 0.7461, 1.6094, 8.5458,\n 0.3730, -0.3477, -1.0625, 0.3848, 0.0557]], device=device)\n expected = torch.tensor([[0.71, 
0.06, 0.0001, 0., 0.7357, 0., -0.0001, -0.654,\n 0., 0., 0., 0., 0., 0., 0., 0.7344,\n 0., 0., -0.3125, 0., -0.6328, 0.7344, 0., 1.8554,\n 0., 0., 0., -0.8047, 0.5312, 0., -0.1953, 0.0664,\n -0.0820, 0.0, 0.3203, 0.1133, 0.0938],\n [-0.3203, -0.7344, -0.0234, 2.0312, 0.0, -0.1875, -1.0547, 0.,\n 0.0, 2.1406, 0.8047, 0., -1.1719, 0., -0.8516, 2.9575,\n 0., 0., 1.6094, -1.0156, 0.7344, 0., 0.1797, -3.0486,\n 0.4766, 0., 0.9844, 0.2500, -1.2578, 0.2461, 1.1094, 8.0458,\n 0., 0., -0.5625, 0., 0.]])\n softshrink = torch.nn.Softshrink()\n out = softshrink(x)\n self.assertEqual(out, expected, atol=1e-2, rtol=0)\n\n def test_threshold_inplace_overlap(self, device):\n # Inplace threshold is okay, because it is idempotent\n x = torch.randn((1, 6), device=device).expand((6, 6))\n F.threshold(x, 0.5, 0.5, inplace=True)\n F.threshold_(x, 0.5, 0.5)\n\n @onlyNativeDeviceTypes\n def test_triplet_margin_with_distance_loss_default_parity(self, device):\n # Test for `nn.TripletMarginWithDistanceLoss` and\n # `F.triplet_margin_with_distance_loss`. 
Checks\n # for parity against the respective non-distance-agnostic\n # implementations of triplet margin loss (``nn.TripletMarginLoss`\n # and `F.triplet_margin_loss`) under *default args*.\n\n for extra_args in \\\n itertools.product((0.5, 1, 1.5), (True, False), ('none', 'mean', 'sum')):\n kwargs = {'margin': extra_args[0], 'swap': extra_args[1], 'reduction': extra_args[2]}\n\n anchor = torch.randn(5, 10, device=device, requires_grad=True)\n positive = torch.randn(5, 10, device=device, requires_grad=True)\n negative = torch.randn(5, 10, device=device, requires_grad=True)\n\n # Test forward, functional\n expected = F.triplet_margin_loss(anchor, positive, negative, **kwargs)\n actual = F.triplet_margin_with_distance_loss(anchor, positive, negative, **kwargs)\n self.assertEqual(actual, expected, rtol=1e-6, atol=1e-6)\n\n # Test forward, module\n loss_ref = nn.TripletMarginLoss(**kwargs)\n loss_op = nn.TripletMarginWithDistanceLoss(**kwargs)\n self.assertEqual(loss_op(anchor, positive, negative),\n loss_ref(anchor, positive, negative),\n rtol=1e-6, atol=1e-6)\n\n # Test backward\n self.assertTrue(gradcheck(lambda a, p, n: F.triplet_margin_with_distance_loss(\n a, p, n, **kwargs), (anchor, positive, negative)))\n self.assertTrue(gradcheck(lambda a, p, n: loss_op(a, p, n),\n (anchor, positive, negative)))\n\n @onlyNativeDeviceTypes\n def test_triplet_margin_with_distance_loss(self, device):\n # Test for parity between `nn.TripletMarginWithDistanceLoss` and\n # `F.triplet_margin_with_distance_loss`.\n\n pairwise_distance = nn.PairwiseDistance()\n\n def cosine_distance(x, y):\n return 1.0 - F.cosine_similarity(x, y)\n\n distance_functions = (pairwise_distance, cosine_distance,\n lambda x, y: 1.0 - F.cosine_similarity(x, y))\n\n reductions = ('mean', 'none', 'sum')\n margins = (1.0, 1.5, 0.5)\n swaps = (True, False)\n\n for distance_fn, reduction, margin, swap \\\n in itertools.product(distance_functions, reductions, margins, swaps):\n anchor = torch.randn(5, 10, 
device=device, requires_grad=True)
            positive = torch.randn(5, 10, device=device, requires_grad=True)
            negative = torch.randn(5, 10, device=device, requires_grad=True)

            # Test backward
            self.assertTrue(gradcheck(lambda a, p, n: F.triplet_margin_with_distance_loss(
                a, p, n, distance_function=distance_fn, reduction=reduction, margin=margin, swap=swap),
                (anchor, positive, negative)))
            loss_op = nn.TripletMarginWithDistanceLoss(distance_function=distance_fn,
                                                       reduction=reduction, margin=margin, swap=swap)
            self.assertTrue(gradcheck(lambda a, p, n: loss_op(
                a, p, n), (anchor, positive, negative)))
            # The traced module must also be differentiable and agree with
            # the eager module below.
            traced_loss_op = torch.jit.trace(loss_op, (anchor, positive, negative))
            self.assertTrue(gradcheck(lambda a, p, n: traced_loss_op(
                a, p, n), (anchor, positive, negative)))

            # Test forward parity
            functional = F.triplet_margin_with_distance_loss(anchor, positive, negative,
                                                             distance_function=distance_fn,
                                                             reduction=reduction, margin=margin, swap=swap)
            modular = loss_op(anchor, positive, negative)
            traced = traced_loss_op(anchor, positive, negative)
            self.assertEqual(functional, modular, atol=1e-6, rtol=1e-6)
            self.assertEqual(traced, modular, atol=1e-6, rtol=1e-6)

    def test_to_complex(self, device):
        # `.to` onto the module's current device must return the module
        # itself (identity); dtype conversions must update parameter dtypes,
        # and converting to a complex dtype must emit exactly one warning.
        m = nn.Linear(3, 5).to(device)
        self.assertIs(m, m.to(device))
        m.to(torch.cfloat)
        self.assertIs(m.weight.dtype, torch.cfloat)
        m.to(torch.cdouble)
        self.assertIs(m.weight.dtype, torch.cdouble)
        m.to(torch.float)
        self.assertIs(m.weight.dtype, torch.float)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            m.to(torch.cfloat)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("Complex modules are a new feature" in str(w[-1].message))

    @skipMeta
    @dtypes(torch.float32, torch.float64)
    def test_module_to_empty(self, device, dtype):
        class MyModule(nn.Module):
            def __init__(self, in_features, out_features, device=None, dtype=None):
                super().__init__()
                factory_kwargs = {"device": device, 
\"dtype\": dtype}\n self.weight = nn.Parameter(torch.randn(in_features, out_features, **factory_kwargs))\n\n def forward(self, x):\n return x @ self.weight\n\n # Test meta module instantiation.\n input = torch.randn(5, 10, device=device, dtype=dtype)\n m = MyModule(10, 1, device='meta', dtype=dtype)\n m(input)\n\n # Test materializing meta module on a real device.\n m.to_empty(device=device)\n m(input)\n with torch.no_grad():\n torch.nn.init.kaiming_uniform_(m.weight)\n m(input)\n\n # Test creating meta module from materialized module.\n m.to_empty(device='meta')\n m(input)\n\n @skipMeta\n def test_skip_init(self, device):\n torch.manual_seed(1)\n m_initialized = torch.nn.Linear(5, 1)\n m_initialized.to(device)\n\n torch.manual_seed(1)\n m_uninitialized = torch.nn.utils.skip_init(torch.nn.Linear, 5, 1, device=device)\n\n self.assertEqual(m_initialized.weight.device, m_uninitialized.weight.device)\n self.assertFalse(torch.allclose(m_initialized.weight, m_uninitialized.weight))\n\n def test_adaptive_pool_invalid(self, device):\n inp_1d = (torch.randn(1, 1, 1, device=device), (-1,))\n inp_2d = (torch.randn(1, 1, 1, 1, device=device), (-1, 0))\n inp_3d = (torch.randn(1, 1, 1, 1, 1, device=device), (-1, 0, 2))\n module_input_dict = {torch.nn.AdaptiveAvgPool1d : inp_1d,\n torch.nn.AdaptiveAvgPool2d : inp_2d,\n torch.nn.AdaptiveAvgPool3d : inp_3d}\n\n for m, inp in module_input_dict.items():\n with self.assertRaisesRegex(RuntimeError,\n r\"elements of output_size must be greater than or equal to 0\"):\n t, output_size = inp\n m(output_size)(t)\n\n @dtypes(torch.float)\n @dtypesIfCUDA(torch.double, torch.float, torch.half)\n def test_transformerencoderlayer(self, device, dtype):\n # this is a deterministic test for TransformerEncoderLayer\n d_model = 4\n nhead = 2\n dim_feedforward = 16\n dropout = 0.0\n bsz = 2\n\n atol = 1e-5\n rtol = 1e-7\n if \"cuda\" in device:\n atol = 1e-3\n rtol = 1e-2\n\n def _test(training, batch_first, atol, rtol):\n def perm_fn(x):\n return 
x.transpose(1, 0) if batch_first else x\n\n model = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout,\n batch_first=batch_first, device=device, dtype=dtype)\n\n if not training:\n assert dropout == 0\n model = model.eval()\n\n # set constant weights of the model\n for idx, p in enumerate(model.parameters()):\n x = p.data\n sz = x.view(-1).size(0)\n shape = x.shape\n x = torch.cos(torch.arange(0, sz).float().view(shape))\n p.data.copy_(x)\n\n # deterministic input\n encoder_input = torch.tensor([[[20., 30., 40., 50.]]], device=device, dtype=dtype)\n result = model(encoder_input)\n ref_output = torch.tensor([[[2.258703, 0.127985, -0.697881, 0.170862]]], device=device, dtype=dtype)\n self.assertEqual(result.shape, ref_output.shape)\n torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)\n # 0 values are NOT masked. This shouldn't mask anything.\n mask = torch.tensor([[0]], device=device) == 1\n # TODO: enable fast path for calls with a mask!\n result = model(encoder_input, src_key_padding_mask=mask)\n self.assertEqual(result.shape, ref_output.shape)\n torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)\n # 1 values are masked. 
Since there is only 1 input embedding this\n # will result in nan.\n mask = torch.tensor([[1]], device=device) == 1\n result = model(encoder_input, src_key_padding_mask=mask)\n result = result.cpu().detach().numpy()\n self.assertTrue(np.isnan(result).all())\n\n # deterministic input\n encoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],\n [[5., 6., 7., 8.]]], device=device, dtype=dtype))\n result = model(encoder_input)\n ref_output = perm_fn(torch.tensor([[[2.272644, 0.119035, -0.691669, 0.153486]],\n [[2.272644, 0.119035, -0.691669, 0.153486]]], device=device, dtype=dtype))\n self.assertEqual(result.shape, ref_output.shape)\n torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)\n # all 0 which is no masking\n mask = torch.tensor([[0, 0]], device=device) == 1\n result = model(encoder_input, src_key_padding_mask=mask)\n self.assertEqual(result.shape, ref_output.shape)\n torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)\n mask = torch.tensor([[1, 0]], device=device) == 1\n result = model(encoder_input, src_key_padding_mask=mask)\n ref_output = perm_fn(torch.tensor([[[2.301516, 0.092249, -0.679101, 0.103088]],\n [[2.301516, 0.092249, -0.679101, 0.103088]]], device=device, dtype=dtype))\n self.assertEqual(result.shape, ref_output.shape)\n torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)\n\n # deterministic input\n encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],\n [0.5387, 0.1655, 0.3565, 0.0471]],\n [[0.8335, 0.2799, 0.5031, 0.2947],\n [0.1402, 0.0318, 0.7636, 0.1346]],\n [[0.6333, 0.9344, 0.1376, 0.9938],\n [0.8924, 0.2872, 0.6692, 0.2944]],\n [[0.9897, 0.6915, 0.3154, 0.1733],\n [0.8645, 0.3513, 0.3064, 0.0767]],\n [[0.8117, 0.2366, 0.4838, 0.7881],\n [0.3718, 0.4945, 0.9511, 0.0864]]], device=device, dtype=dtype))\n result = model(encoder_input)\n ref_output = perm_fn(torch.tensor([[[2.428589, 0.020835, -0.602055, -0.085249],\n [2.427987, 0.021213, -0.602496, -0.084103]],\n 
[[2.424689, 0.019155, -0.604793, -0.085672],\n [2.413863, 0.022211, -0.612486, -0.072490]],\n [[2.433774, 0.021598, -0.598343, -0.087548],\n [2.425104, 0.019748, -0.604515, -0.084839]],\n [[2.436185, 0.022682, -0.596625, -0.087261],\n [2.433556, 0.021891, -0.598509, -0.086832]],\n [[2.416246, 0.017512, -0.610712, -0.082961],\n [2.422901, 0.024187, -0.606178, -0.074929]]], device=device, dtype=dtype))\n self.assertEqual(result.shape, ref_output.shape)\n torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)\n\n # all 0\n mask = torch.zeros([2, 5], device=device) == 1\n result = model(encoder_input, src_key_padding_mask=mask)\n self.assertEqual(result.shape, ref_output.shape)\n torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)\n mask[0, 1] = 1\n mask[1, 3] = 1\n mask[1, 4] = 1\n result = model(encoder_input, src_key_padding_mask=mask)\n ref_output = perm_fn(torch.tensor([[[2.429026, 0.020793, -0.601741, -0.085642],\n [2.428811, 0.021445, -0.601912, -0.084252]],\n [[2.425009, 0.019155, -0.604566, -0.085899],\n [2.415408, 0.02249 , -0.611415, -0.073]],\n [[2.434199, 0.021682, -0.598039, -0.087699],\n [2.42598, 0.019941, -0.603896, -0.085091]],\n [[2.436457, 0.022736, -0.59643 , -0.08736],\n [2.434021, 0.022093, -0.598179, -0.08679]],\n [[2.416531, 0.017498, -0.610513, -0.083181],\n [2.4242, 0.024653, -0.605266, -0.074959]]], device=device, dtype=dtype))\n self.assertEqual(result.shape, ref_output.shape)\n torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)\n\n # NestedTensor is only supported for the fast path\n # currently, which won't be used if training.\n if (batch_first and not training and\n ('cuda' in str(device) or 'cpu' in str(device)) and not TEST_WITH_CROSSREF):\n encoder_input[0][-1] = torch.zeros_like(encoder_input[0][1])\n mask = torch.zeros(encoder_input.shape[:-1], device=device, dtype=torch.bool)\n mask[0][-1] = True\n\n nt = torch.nested_tensor([encoder_input[0][:-1], encoder_input[1]], 
device=device)\n result = model(nt)\n ref_output = torch.tensor(\n [\n [\n [2.4268184, 0.02042419, -0.603311, -0.08476824],\n [2.423306, 0.01889652, -0.6057701, -0.08519465],\n [2.431538, 0.02078694, -0.5999354, -0.08746159],\n [2.4348664, 0.02212971, -0.5975677, -0.08733892],\n [2.423133, 0.02097577, -0.60594773, -0.08113337],\n ],\n [\n [2.4279876, 0.02121329, -0.60249615, -0.08410317],\n [2.4138637, 0.02221113, -0.6124869, -0.07249016],\n [2.4251041, 0.01974815, -0.6045152, -0.08483928],\n [2.4335563, 0.0218913, -0.59850943, -0.08683228],\n [2.4229012, 0.02418739, -0.6061784, -0.07492948],\n ],\n ],\n device=device, dtype=dtype\n )\n result = result.to_padded_tensor(0)\n ref_output[0][-1] = torch.zeros_like(\n ref_output[0][-1], device=device, dtype=dtype\n )\n result[0][-1] = torch.zeros_like(\n result[0][-1], device=device, dtype=dtype\n )\n self.assertEqual(tuple(result.shape), tuple(ref_output.shape))\n if 'cuda' in device:\n if dtype == torch.float:\n atol = 2e-4\n rtol = 4e-3\n else:\n atol = 7e-4\n rtol = 2e-2\n torch.testing.assert_close(result, ref_output, atol=atol, rtol=rtol)\n else:\n torch.testing.assert_close(result, ref_output)\n\n\n for batch_first in (True, False):\n for training in (True, False):\n if training:\n cm = contextlib.nullcontext()\n else:\n # Fast path requires inference mode.\n cm = torch.no_grad()\n with cm:\n _test(batch_first=batch_first, training=training, atol=atol, rtol=rtol)\n\n @dtypes(torch.float)\n @dtypesIfCUDA(torch.half, torch.float)\n def test_transformerencoderlayer_gelu(self, device, dtype):\n # this is a deterministic test for TransformerEncoderLayer with gelu activation\n d_model = 4\n nhead = 2\n dim_feedforward = 16\n dropout = 0.0\n bsz = 2\n\n atol = 0\n rtol = 1e-5\n if \"cuda\" in device:\n atol = 1e-3\n rtol = 1e-2\n\n def _test(activation, batch_first, training):\n def perm_fn(x):\n return x.transpose(1, 0) if batch_first else x\n\n model = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, 
dropout,\n activation, batch_first=batch_first, device=device, dtype=dtype)\n if not training:\n assert dropout == 0\n model = model.eval()\n\n # set constant weights of the model\n for idx, p in enumerate(model.parameters()):\n x = p.data\n sz = x.view(-1).size(0)\n shape = x.shape\n x = torch.cos(torch.arange(0, sz).float().view(shape))\n p.data.copy_(x)\n\n # deterministic input\n encoder_input = torch.tensor([[[20., 30., 40., 50.]]], device=device, dtype=dtype)\n result = model(encoder_input)\n ref_output = torch.tensor([[[2.249815, 0.131006, -0.702199, 0.177868]]], device=device, dtype=dtype)\n torch.testing.assert_close(result, ref_output, rtol=rtol, atol=atol)\n\n # deterministic input\n encoder_input = perm_fn(torch.tensor([[[1., 2., 3., 4.]],\n [[5., 6., 7., 8.]]], device=device, dtype=dtype))\n result = model(encoder_input)\n ref_output = perm_fn(torch.tensor([[[2.264103, 0.121417, -0.696012, 0.159724]],\n [[2.264103, 0.121417, -0.696012, 0.159724]]], device=device, dtype=dtype))\n torch.testing.assert_close(result, ref_output, rtol=rtol, atol=atol)\n\n # deterministic input\n encoder_input = perm_fn(torch.tensor([[[0.7462, 0.6653, 0.5679, 0.4891],\n [0.5387, 0.1655, 0.3565, 0.0471]],\n [[0.8335, 0.2799, 0.5031, 0.2947],\n [0.1402, 0.0318, 0.7636, 0.1346]],\n [[0.6333, 0.9344, 0.1376, 0.9938],\n [0.8924, 0.2872, 0.6692, 0.2944]],\n [[0.9897, 0.6915, 0.3154, 0.1733],\n [0.8645, 0.3513, 0.3064, 0.0767]],\n [[0.8117, 0.2366, 0.4838, 0.7881],\n [0.3718, 0.4945, 0.9511, 0.0864]]], device=device, dtype=dtype))\n result = model(encoder_input)\n ref_output = perm_fn(torch.tensor([[[2.42163188, 0.03227153, -0.60714219, -0.05908082],\n [2.42151276, 0.03302179, -0.60722523, -0.05762651]],\n [[2.41926761, 0.02974034, -0.60879519, -0.0621269],\n [2.41626395, 0.03539356, -0.61087842, -0.04978623]],\n [[2.42382808, 0.03218872, -0.6055963, -0.06073591],\n [2.41983477, 0.03085259, -0.60840145, -0.06046414]],\n [[2.42500749, 0.03328855, -0.60476388, -0.0595334],\n 
[2.4237977, 0.03290575, -0.60561789, -0.05940082]],\n [[2.41383916, 0.02686345, -0.61256377, -0.06380707],\n [2.42000277, 0.03800944, -0.60824798, -0.04754947]]], device=device, dtype=dtype))\n torch.testing.assert_close(result, ref_output, rtol=rtol, atol=atol)\n for activation, batch_first, training in product(('gelu', F.gelu, nn.GELU()), (True, False), (True, False)):\n # Fast path requires inference mode.\n if training:\n cm = contextlib.nullcontext()\n else:\n cm = torch.no_grad()\n with cm:\n _test(activation=activation, batch_first=batch_first, training=training)\n\n\nclass TestModuleGlobalHooks(TestCase):\n\n def tearDown(self):\n nn.modules.module._global_backward_hooks = OrderedDict()\n nn.modules.module._global_forward_hooks = OrderedDict()\n nn.modules.module._global_forward_pre_hooks = OrderedDict()\n\n def test_module_global_hooks(self):\n module = nn.Sigmoid\n\n module_1 = module()\n module_2 = module()\n module_3 = module()\n\n input = torch.ones(5, 5, requires_grad=True)\n\n counter = {\n 'forwards': 0,\n 'backwards': 0\n }\n\n def fw_hook(inc, h_module, input, output):\n self.assertIsInstance(input, tuple)\n self.assertTrue(isinstance(output, torch.Tensor))\n self.assertTrue(isinstance(h_module, module))\n self.assertEqual(input[0], torch.ones(5, 5))\n self.assertEqual(output, torch.empty(5, 5).fill_(1 / (1 + 1 / math.e)))\n counter['forwards'] += inc\n\n def bw_hook(inc, h_module, grad_input, grad_output):\n self.assertIsInstance(grad_input, tuple)\n self.assertIsInstance(grad_output, tuple)\n self.assertTrue(isinstance(h_module, module))\n self.assertEqual(grad_output[0], torch.ones(5, 5) * 2)\n counter['backwards'] += inc\n\n test_fwd = nn.modules.module.register_module_forward_hook(lambda *args: fw_hook(1, *args))\n\n module_1(input)\n module_2(input)\n module_3(input)\n self.assertEqual(counter['forwards'], 3)\n self.assertEqual(counter['backwards'], 0)\n\n test_bwd = nn.modules.module.register_module_backward_hook(\n lambda *args: bw_hook(1, 
*args))\n\n output_1 = module_1(input)\n output_2 = module_2(input)\n output_3 = module_3(input)\n self.assertEqual(counter['forwards'], 6)\n self.assertEqual(counter['backwards'], 0)\n\n output_1.backward(torch.ones(5, 5) * 2, retain_graph=True)\n output_2.backward(torch.ones(5, 5) * 2, retain_graph=False)\n output_3.backward(torch.ones(5, 5) * 2, retain_graph=False)\n self.assertEqual(counter['forwards'], 6)\n self.assertEqual(counter['backwards'], 3)\n\n output_1.backward(torch.ones(5, 5) * 2, retain_graph=True)\n self.assertEqual(counter['forwards'], 6)\n self.assertEqual(counter['backwards'], 4)\n\n test2_fwd = nn.modules.module.register_module_forward_hook(lambda *args: fw_hook(2, *args))\n\n output = module_1(input)\n output = module_2(input)\n output = module_3(input)\n self.assertEqual(counter['forwards'], 15)\n self.assertEqual(counter['backwards'], 4)\n\n test2_bwd = nn.modules.module.register_module_backward_hook(lambda *args: bw_hook(2, *args))\n\n module_1(input).backward(torch.ones(5, 5) * 2)\n self.assertEqual(counter['forwards'], 18)\n self.assertEqual(counter['backwards'], 7)\n\n test2_bwd.remove()\n\n module_2(input).backward(torch.ones(5, 5) * 2)\n self.assertEqual(counter['forwards'], 21)\n self.assertEqual(counter['backwards'], 8)\n\n test2_fwd.remove()\n\n module_3(input).backward(torch.ones(5, 5) * 2)\n self.assertEqual(counter['forwards'], 22)\n self.assertEqual(counter['backwards'], 9)\n\n test_fwd.remove()\n test_bwd.remove()\n\n def test_module_global_hook_invalid_outputs(self):\n module = nn.Sigmoid()\n input = torch.randn(5, 5, requires_grad=True)\n\n def bw_fail1(self, grad_input, grad_output):\n return grad_input[:-1]\n\n def bw_fail2(self, grad_input, grad_output):\n return grad_input + (torch.randn(2, 2),)\n\n with nn.modules.module.register_module_backward_hook(bw_fail1):\n with self.assertRaisesRegex(RuntimeError, 'got 0, but expected 1'):\n module(input).sum().backward()\n\n with 
nn.modules.module.register_module_backward_hook(bw_fail2):\n with self.assertRaisesRegex(RuntimeError, 'got 2, but expected 1'):\n module(input).sum().backward()\n\n def test_module_backward_global_hook_writeable(self):\n module = nn.Sigmoid()\n input = torch.randn(5, 5, requires_grad=True)\n sig_x = torch.sigmoid(input)\n\n def bw_hook(module, grad_input, grad_output):\n for grad in grad_input:\n self.assertTrue(isinstance(grad, torch.Tensor))\n for grad in grad_output:\n self.assertTrue(isinstance(grad, torch.Tensor))\n return tuple(gi * 2 for gi in grad_input)\n\n nn.modules.module.register_module_backward_hook(bw_hook)\n module(input).backward(torch.ones(5, 5))\n expected_grad = sig_x * (1 - sig_x) * 2\n self.assertEqual(input.grad, expected_grad)\n\n def test_module_global_forward_preforward_hook_writeable(self):\n module = nn.Sigmoid()\n input = torch.randn(5, 5, requires_grad=True)\n sig_x = torch.sigmoid(input)\n\n def forward_pre_hook(m, input):\n return torch.nn.functional.relu(input[0])\n\n def forward_hook(m, input, output):\n return -output\n\n nn.modules.module.register_module_forward_pre_hook(forward_pre_hook)\n nn.modules.module.register_module_forward_hook(forward_hook)\n output = module(input)\n expected_res = -torch.sigmoid(torch.nn.functional.relu(input))\n self.assertEqual(output, expected_res)\n output.backward(torch.ones(5, 5) * 2, retain_graph=True)\n mask = (input > 0).double()\n expected_grad = -sig_x * (1 - sig_x) * 2 * mask\n self.assertEqual(input.grad, expected_grad)\n\n def test_module_forward_preforward_hook_removable(self):\n \"\"\"\n This test is to test when multiple pre-forward hook functions can be\n registered successfully and used correctly, if the handle can be removable\n during the pre-forward hook function call.\n \"\"\"\n module = nn.Sigmoid()\n\n def removable_hook(m, input):\n nonlocal handle\n handle.remove()\n return input\n\n def removable_hook_2(m, input):\n nonlocal handle_2\n handle_2.remove()\n return input\n\n 
handle = module.register_forward_pre_hook(removable_hook)\n handle_2 = module.register_forward_pre_hook(removable_hook_2)\n\n # make sure hook register is successful\n self.assertEqual(len(handle.hooks_dict_ref()), 2)\n self.assertEqual(len(handle_2.hooks_dict_ref()), 2)\n\n input = torch.randn(2, 2)\n output = module(input)\n self.assertEqual(torch.sigmoid(input), output)\n\n # make sure hook removal is successful\n self.assertFalse(handle.id in handle.hooks_dict_ref())\n self.assertFalse(handle_2.id in handle.hooks_dict_ref())\n self.assertEqual(len(handle.hooks_dict_ref()), 0)\n self.assertEqual(len(handle_2.hooks_dict_ref()), 0)\n\n def test_module_forward_forward_hook_removable(self):\n \"\"\"\n This test is to test when multiple forward hook functions can be registered\n successfully and used correctly, if the handle can be removable during the\n forward hook function call.\n \"\"\"\n module = nn.Sigmoid()\n\n def removable_hook(m, input, output):\n nonlocal handle\n handle.remove()\n return output\n\n def removable_hook_2(m, input, output):\n nonlocal handle_2\n handle_2.remove()\n return output\n\n handle = module.register_forward_hook(removable_hook)\n handle_2 = module.register_forward_hook(removable_hook_2)\n\n # make sure hook register is successful\n self.assertEqual(len(handle.hooks_dict_ref()), 2)\n self.assertEqual(len(handle_2.hooks_dict_ref()), 2)\n\n input = torch.randn(2, 2)\n output = module(input)\n self.assertEqual(torch.sigmoid(input), output)\n\n # make sure hook removal is successful\n self.assertFalse(handle.id in handle.hooks_dict_ref())\n self.assertFalse(handle_2.id in handle.hooks_dict_ref())\n self.assertEqual(len(handle.hooks_dict_ref()), 0)\n self.assertEqual(len(handle_2.hooks_dict_ref()), 0)\n\n def test_global_and_local_hooks_order(self):\n module = nn.Sigmoid()\n\n global_forward_pre_called = False\n local_forward_pre_called = False\n global_forward_called = False\n local_forward_called = False\n global_backward_called = 
False\n local_backward_called = False\n\n def global_forward_pre_hook(m, input):\n nonlocal global_forward_pre_called\n self.assertTrue(not local_forward_pre_called)\n global_forward_pre_called = True\n return input\n\n def local_forward_pre_hook(m, input):\n nonlocal local_forward_pre_called\n self.assertTrue(global_forward_pre_called)\n local_forward_pre_called = True\n return input\n\n def global_forward_hook(m, input, output):\n nonlocal global_forward_called\n self.assertTrue(not local_forward_called)\n global_forward_called = True\n return output\n\n def local_forward_hook(m, input, output):\n nonlocal local_forward_called\n self.assertTrue(global_forward_called)\n local_forward_called = True\n return output\n\n def global_backward_hook(m, input, output):\n nonlocal global_backward_called\n self.assertTrue(not local_backward_called)\n global_backward_called = True\n return input\n\n def local_backward_hook(m, input, output):\n nonlocal local_backward_called\n self.assertTrue(global_backward_called)\n local_backward_called = True\n return input\n\n input = torch.randn(5, 5, requires_grad=True)\n nn.modules.module.register_module_forward_pre_hook(global_forward_pre_hook)\n module.register_forward_pre_hook(local_forward_pre_hook)\n nn.modules.module.register_module_forward_hook(global_forward_hook)\n module.register_forward_hook(local_forward_hook)\n nn.modules.module.register_module_backward_hook(global_backward_hook)\n module.register_backward_hook(local_backward_hook)\n\n output = module(input)\n self.assertTrue(local_forward_called and local_forward_pre_called and global_forward_called and global_forward_pre_called)\n\n output.backward(torch.ones(5, 5), retain_graph=True)\n self.assertTrue(local_backward_called and global_backward_called)\n\n\nclass LazyModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):\n pass\n\n\nclass TestLazyModules(TestCase):\n\n @suppress_warnings\n def test_lazy_module_parameter(self):\n module = LazyModule()\n 
module.register_parameter('test_param', UninitializedParameter())\n self.assertTrue(module.has_uninitialized_params())\n state_dict = module.state_dict()\n self.assertIsInstance(state_dict['test_param'], UninitializedParameter)\n new_module = LazyModule()\n # An error is raised when there is an attempt to replace an existing parameter\n # with an uninitialized one\n new_module.register_parameter('test_param', nn.Parameter(torch.ones(5, 5)))\n with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):\n new_module.load_state_dict(state_dict)\n # Uninitialized parameters are overriden when the state dict to be loaded contains a valid one\n new_module = LazyModule()\n new_module.register_parameter('test_param', nn.Parameter(torch.ones(5, 5)))\n module.load_state_dict(new_module.state_dict())\n self.assertEqual(module.test_param, torch.ones((5, 5)))\n\n # Uninitialized parameters are left unchanged\n module = LazyModule()\n module.register_parameter('test_param', UninitializedParameter())\n self.assertTrue(module.has_uninitialized_params())\n\n new_module = LazyModule()\n new_module.register_parameter('test_param', UninitializedParameter())\n module.load_state_dict(new_module.state_dict())\n self.assertTrue(module.has_uninitialized_params())\n\n @suppress_warnings\n def test_lazy_module_buffer(self):\n module = LazyModule()\n module.register_buffer('test_buffer', UninitializedBuffer())\n self.assertTrue(module.has_uninitialized_params())\n state_dict = module.state_dict()\n self.assertIsInstance(state_dict['test_buffer'], UninitializedBuffer)\n new_module = LazyModule()\n # An error is raised when there is an attempt to replace an existing parameter\n # with an uninitialized one\n new_module.register_buffer('test_buffer', torch.ones(5, 5))\n with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):\n new_module.load_state_dict(state_dict)\n # Uninitialized parameters are overriden when the state dict to be loaded contains a valid one\n 
new_module = LazyModule()\n new_module.register_buffer('test_buffer', torch.ones(5, 5))\n module.load_state_dict(new_module.state_dict())\n self.assertEqual(module.test_buffer, torch.ones((5, 5)))\n\n # Uninitialized parameters are left unchanged\n module = LazyModule()\n module.register_buffer('test_buffer', UninitializedBuffer())\n self.assertTrue(module.has_uninitialized_params())\n\n new_module = LazyModule()\n new_module.register_buffer('test_buffer', UninitializedBuffer())\n module.load_state_dict(new_module.state_dict())\n module.load_state_dict(new_module.state_dict())\n self.assertTrue(module.has_uninitialized_params())\n\n @suppress_warnings\n def test_lazy_module_jit_param(self):\n module = LazyModule()\n module.register_parameter('test_param', UninitializedParameter())\n self.assertTrue(module.has_uninitialized_params())\n with self.assertRaisesRegex(RuntimeError, 'run a forward pass'):\n torch.jit.script(module)\n\n @suppress_warnings\n def test_lazy_module_jit_buffer(self):\n module = LazyModule()\n module.register_buffer('test_buffer', UninitializedBuffer())\n self.assertTrue(module.has_uninitialized_params())\n with self.assertRaisesRegex(RuntimeError, 'run a forward pass'):\n torch.jit.script(module)\n\n @suppress_warnings\n def test_lazy_share_memory_param(self):\n module = LazyModule()\n module.register_parameter('test_param', UninitializedParameter())\n self.assertTrue(module.has_uninitialized_params())\n with self.assertRaisesRegex(RuntimeError, 'share memory on an uninitialized'):\n module.share_memory()\n\n @suppress_warnings\n def test_lazy_share_memory_buffer(self):\n module = LazyModule()\n module.register_buffer('test_buffer', UninitializedBuffer())\n self.assertTrue(module.has_uninitialized_params())\n with self.assertRaisesRegex(RuntimeError, 'share memory on an uninitialized'):\n module.share_memory()\n\n @suppress_warnings\n def test_linear(self):\n module = nn.LazyLinear(10)\n self.assertIsInstance(module.weight, 
UninitializedParameter)\n self.assertIsInstance(module.bias, UninitializedParameter)\n input = torch.ones(5, 5)\n module(input)\n self.assertIsInstance(module, nn.Linear)\n self.assertNotIsInstance(module, nn.LazyLinear)\n self.assertTrue(module.weight.shape == (10, 5))\n self.assertTrue(module.bias.shape == (10,))\n y = module(input)\n self.assertTrue(torch.equal(torch.nn.functional.linear(input, module.weight, module.bias), y))\n\n @suppress_warnings\n def test_lazy_linear_pickle(self):\n module = nn.LazyLinear(10)\n self.assertIsInstance(module.weight, UninitializedParameter)\n self.assertIsInstance(module.bias, UninitializedParameter)\n module = pickle.loads(pickle.dumps(module))\n self.assertIsInstance(module, nn.LazyLinear)\n self.assertIsInstance(module.weight, UninitializedParameter)\n self.assertIsInstance(module.bias, UninitializedParameter)\n input = torch.ones(5, 5)\n module(input) # fully materialized\n new_module = pickle.loads(pickle.dumps(module))\n self.assertIsInstance(new_module, nn.Linear)\n self.assertNotIsInstance(new_module, nn.LazyLinear)\n self.assertTrue(new_module.weight.shape == (10, 5))\n self.assertNotIsInstance(new_module.weight, UninitializedParameter)\n self.assertTrue(new_module.bias.shape == (10,))\n self.assertNotIsInstance(new_module.bias, UninitializedParameter)\n\n @suppress_warnings\n def test_linear_state(self):\n module = nn.Linear(5, 10)\n lazy_module = nn.LazyLinear(10)\n lazy_module.load_state_dict(module.state_dict())\n # Parameters have been initialized but the module won't become a full\n # Linear one until the first iteration. 
This is due to\n # limitations on the state_dict loading logic\n self.assertFalse(lazy_module.has_uninitialized_params())\n self.assertTrue(lazy_module.weight.shape == (10, 5))\n self.assertTrue(lazy_module.bias.shape == (10,))\n\n module = nn.Linear(5, 10)\n lazy_module = nn.LazyLinear(10)\n with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):\n module.load_state_dict(lazy_module.state_dict())\n\n def _check_lazy_conv(self, cls, lazy_cls, func, init_args, input_shape,\n expected_weight_shape, expected_bias_shape):\n module = lazy_cls(*init_args)\n self.assertIsInstance(module.weight, UninitializedParameter)\n if module.bias is not None:\n self.assertIsInstance(module.bias, UninitializedParameter)\n input = torch.ones(*input_shape)\n module(input)\n self.assertIsInstance(module, cls)\n self.assertNotIsInstance(module, lazy_cls)\n self.assertEqual(module.weight.shape, expected_weight_shape)\n if module.bias is not None:\n self.assertEqual(module.bias.shape, expected_bias_shape)\n y = module(input)\n self.assertTrue(torch.equal(func(input, module.weight, module.bias), y))\n\n def _check_lazy_conv_pickle(self, cls, lazy_cls, init_args, input_shape,\n expected_weight_shape, expected_bias_shape):\n module = lazy_cls(*init_args)\n self.assertIsInstance(module.weight, UninitializedParameter)\n if module.bias is not None:\n self.assertIsInstance(module.bias, UninitializedParameter)\n module = pickle.loads(pickle.dumps(module))\n self.assertIsInstance(module, lazy_cls)\n self.assertIsInstance(module.weight, UninitializedParameter)\n if module.bias is not None:\n self.assertIsInstance(module.bias, UninitializedParameter)\n input = torch.ones(*input_shape)\n module(input) # fully materialized\n new_module = pickle.loads(pickle.dumps(module))\n self.assertIsInstance(new_module, cls)\n self.assertNotIsInstance(new_module, lazy_cls)\n self.assertEqual(new_module.weight.shape, expected_weight_shape)\n self.assertNotIsInstance(new_module.weight, 
UninitializedParameter)\n if new_module.bias is not None:\n self.assertEqual(new_module.bias.shape, expected_bias_shape)\n self.assertNotIsInstance(new_module.bias, UninitializedParameter)\n\n def _check_lazy_conv_state(self, gen_module, gen_lazy_module,\n expected_weight_shape, expected_bias_shape):\n module = gen_module()\n lazy_module = gen_lazy_module()\n lazy_module.load_state_dict(module.state_dict())\n # Parameters have been initialized but the module won't become a full\n # Conv one until the first iteration. This is due to\n # limitations on the state_dict loading logic\n self.assertFalse(lazy_module.has_uninitialized_params())\n self.assertEqual(lazy_module.weight.shape, expected_weight_shape)\n if lazy_module.bias is not None:\n self.assertEqual(lazy_module.bias.shape, expected_bias_shape)\n\n module = gen_module()\n lazy_module = gen_lazy_module()\n with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):\n module.load_state_dict(lazy_module.state_dict())\n\n\n def test_lazy_pre_forward_hook(self):\n \"\"\"\n This test is to test whether lazymodule can register other pre-forward hook\n functions successfully.\n \"\"\"\n class TestModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def initialize_parameters(self, input):\n return None\n\n def forward(self, input):\n return input\n\n def hook_function(module, input):\n return input[0] + 1\n\n module = TestModule()\n module.register_forward_pre_hook(hook_function)\n output = module(torch.zeros(2, 2))\n self.assertEqual(output, torch.ones(2, 2))\n\n def test_lazy_forward_hook(self):\n \"\"\"\n This test is to test whether lazymodule can register other forward hook\n functions successfully.\n \"\"\"\n class TestModule(torch.nn.modules.lazy.LazyModuleMixin, torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n def initialize_parameters(self, input):\n return None\n\n def forward(self, input):\n return input\n\n def 
hook_function(module, input, output):\n return input[0] + 1\n\n module = TestModule()\n module.register_forward_hook(hook_function)\n output = module(torch.zeros(2, 2))\n self.assertEqual(output, torch.ones(2, 2))\n\n @suppress_warnings\n def test_lazy_conv1d(self):\n self._check_lazy_conv(nn.Conv1d, nn.LazyConv1d, torch.nn.functional.conv1d,\n (32, 2), (192, 16, 50), (32, 16, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv1d_pickle(self):\n self._check_lazy_conv_pickle(nn.Conv1d, nn.LazyConv1d, (32, 2), (192, 16, 50),\n (32, 16, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv1d_state(self):\n self._check_lazy_conv_state(lambda: nn.Conv1d(16, 32, 2),\n lambda: nn.LazyConv1d(32, 2),\n (32, 16, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv2d(self):\n self._check_lazy_conv(nn.Conv2d, nn.LazyConv2d, torch.nn.functional.conv2d,\n (32, 2), (192, 16, 8, 6), (32, 16, 2, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv2d_pickle(self):\n self._check_lazy_conv_pickle(nn.Conv2d, nn.LazyConv2d, (32, 2), (192, 16, 8, 6),\n (32, 16, 2, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv2d_state(self):\n self._check_lazy_conv_state(lambda: nn.Conv2d(16, 32, 2),\n lambda: nn.LazyConv2d(32, 2),\n (32, 16, 2, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv3d(self):\n self._check_lazy_conv(nn.Conv3d, nn.LazyConv3d, torch.nn.functional.conv3d,\n (32, 2), (192, 16, 8, 7, 6), (32, 16, 2, 2, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv3d_pickle(self):\n self._check_lazy_conv_pickle(nn.Conv3d, nn.LazyConv3d, (32, 2), (192, 16, 8, 7, 6),\n (32, 16, 2, 2, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv3d_state(self):\n self._check_lazy_conv_state(lambda: nn.Conv3d(16, 32, 2),\n lambda: nn.LazyConv3d(32, 2),\n (32, 16, 2, 2, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv_transposed1d(self):\n self._check_lazy_conv(nn.ConvTranspose1d, nn.LazyConvTranspose1d, torch.nn.functional.conv_transpose1d,\n (32, 2), (192, 16, 50), (16, 32, 
2), (32,))\n\n @suppress_warnings\n def test_lazy_conv_transpose1d_pickle(self):\n self._check_lazy_conv_pickle(nn.ConvTranspose1d, nn.LazyConvTranspose1d, (32, 2),\n (192, 16, 50), (16, 32, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv_transpose1d_state(self):\n self._check_lazy_conv_state(lambda: nn.ConvTranspose1d(16, 32, 2),\n lambda: nn.LazyConvTranspose1d(32, 2),\n (16, 32, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv_transpose2d(self):\n self._check_lazy_conv(nn.ConvTranspose2d, nn.LazyConvTranspose2d, torch.nn.functional.conv_transpose2d,\n (32, 2), (192, 16, 8, 6), (16, 32, 2, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv_transpose2d_pickle(self):\n self._check_lazy_conv_pickle(nn.ConvTranspose2d, nn.LazyConvTranspose2d, (32, 2),\n (192, 16, 8, 6), (16, 32, 2, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv_transpose2d_state(self):\n self._check_lazy_conv_state(lambda: nn.ConvTranspose2d(16, 32, 2),\n lambda: nn.LazyConvTranspose2d(32, 2),\n (16, 32, 2, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv_transpose3d(self):\n self._check_lazy_conv(nn.ConvTranspose3d, nn.LazyConvTranspose3d, torch.nn.functional.conv_transpose3d,\n (32, 2), (192, 16, 8, 7, 6), (16, 32, 2, 2, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv_transpose3d_pickle(self):\n self._check_lazy_conv_pickle(nn.ConvTranspose3d, nn.LazyConvTranspose3d, (32, 2),\n (192, 16, 8, 7, 6), (16, 32, 2, 2, 2), (32,))\n\n @suppress_warnings\n def test_lazy_conv_transpose3d_state(self):\n self._check_lazy_conv_state(lambda: nn.ConvTranspose3d(16, 32, 2),\n lambda: nn.LazyConvTranspose3d(32, 2),\n (16, 32, 2, 2, 2), (32,))\n\n def _check_lazy_norm(self, cls, lazy_cls, input_shape):\n for affine in [False, True]:\n for track_running_stats in [False, True]:\n lazy_module = lazy_cls(affine=affine, track_running_stats=track_running_stats)\n\n if affine:\n self.assertIsInstance(lazy_module.weight, UninitializedParameter)\n self.assertIsInstance(lazy_module.bias, 
UninitializedParameter)\n if track_running_stats:\n self.assertIsInstance(lazy_module.running_mean, UninitializedBuffer)\n self.assertIsInstance(lazy_module.running_var, UninitializedBuffer)\n\n input = torch.ones(*input_shape)\n lazy_output = lazy_module(input)\n self.assertIsInstance(lazy_module, cls)\n self.assertNotIsInstance(lazy_module, lazy_cls)\n\n num_features = input_shape[1]\n module = cls(num_features, affine=affine, track_running_stats=track_running_stats)\n expected_output = module(input)\n\n self.assertEqual(lazy_output, expected_output)\n if module.weight is not None:\n self.assertEqual(lazy_module.weight.shape, module.weight.shape)\n self.assertEqual(lazy_module.weight, module.weight)\n if module.bias is not None:\n self.assertEqual(lazy_module.bias.shape, module.bias.shape)\n self.assertEqual(lazy_module.bias, module.bias)\n if module.running_mean is not None:\n self.assertEqual(lazy_module.running_mean.shape, module.running_mean.shape)\n self.assertEqual(lazy_module.running_mean, module.running_mean)\n if module.running_var is not None:\n self.assertEqual(lazy_module.running_var.shape, module.running_var.shape)\n self.assertEqual(lazy_module.running_var, module.running_var)\n if module.num_batches_tracked is not None:\n self.assertEqual(lazy_module.num_batches_tracked.shape, module.num_batches_tracked.shape)\n self.assertEqual(lazy_module.num_batches_tracked, module.num_batches_tracked)\n\n def _check_lazy_norm_pickle(self, cls, lazy_cls, input_shape):\n for affine in [False, True]:\n for track_running_stats in [False, True]:\n module = lazy_cls(affine=affine, track_running_stats=track_running_stats)\n module = pickle.loads(pickle.dumps(module))\n\n self.assertIsInstance(module, lazy_cls)\n if affine:\n self.assertIsInstance(module.weight, UninitializedParameter)\n self.assertIsInstance(module.bias, UninitializedParameter)\n if track_running_stats:\n self.assertIsInstance(module.running_mean, UninitializedBuffer)\n 
self.assertIsInstance(module.running_var, UninitializedBuffer)\n\n input = torch.ones(*input_shape)\n module(input) # fully materialized\n module = pickle.loads(pickle.dumps(module))\n\n self.assertNotIsInstance(module, lazy_cls)\n self.assertIsInstance(module, cls)\n if affine:\n self.assertNotIsInstance(module.weight, UninitializedParameter)\n self.assertNotIsInstance(module.bias, UninitializedParameter)\n if track_running_stats:\n self.assertNotIsInstance(module.running_mean, UninitializedBuffer)\n self.assertNotIsInstance(module.running_var, UninitializedBuffer)\n\n def _check_lazy_batchnorm_state(self, cls, lazy_cls):\n module = cls(10)\n lazy_module = lazy_cls(affine=True, track_running_stats=True)\n lazy_module.load_state_dict(module.state_dict())\n # Parameters have been initialized but the module won't become a full\n # Conv one until the first iteration. This is due to\n # limitations on the state_dict loading logic\n self.assertFalse(lazy_module.has_uninitialized_params())\n self.assertEqual(lazy_module.weight.shape, (10,))\n self.assertEqual(lazy_module.bias.shape, (10,))\n self.assertEqual(lazy_module.running_mean.shape, (10,))\n self.assertEqual(lazy_module.running_var.shape, (10,))\n\n module = cls(10)\n lazy_module = lazy_cls()\n with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):\n module.load_state_dict(lazy_module.state_dict())\n\n def _check_lazy_instancenorm_state(self, cls, lazy_cls):\n for affine in [False, True]:\n for track_running_stats in [False, True]:\n module = cls(10, affine=affine, track_running_stats=track_running_stats)\n lazy_module = lazy_cls(affine=affine, track_running_stats=track_running_stats)\n lazy_module.load_state_dict(module.state_dict())\n # Parameters have been initialized but the module won't become a full\n # InstanceNorm one until the first iteration. 
This is due to\n # limitations on the state_dict loading logic\n self.assertFalse(lazy_module.has_uninitialized_params())\n if affine:\n self.assertEqual(lazy_module.weight.shape, (10,))\n self.assertEqual(lazy_module.bias.shape, (10,))\n if track_running_stats:\n self.assertEqual(lazy_module.running_mean.shape, (10,))\n self.assertEqual(lazy_module.running_var.shape, (10,))\n\n module = cls(10, affine=True, track_running_stats=True)\n lazy_module = lazy_cls(affine=True, track_running_stats=True)\n with self.assertRaisesRegex(RuntimeError, 'shape of an uninitialized'):\n module.load_state_dict(lazy_module.state_dict())\n\n def test_lazy_batchnorm1d(self):\n self._check_lazy_norm(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 3, 6))\n self._check_lazy_norm(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 6))\n\n def test_lazy_batchnorm1d_pickle(self):\n self._check_lazy_norm_pickle(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 3, 6))\n self._check_lazy_norm_pickle(nn.BatchNorm1d, nn.LazyBatchNorm1d, (16, 6))\n\n def test_lazy_batchnorm1d_state(self):\n self._check_lazy_batchnorm_state(nn.BatchNorm1d, nn.LazyBatchNorm1d)\n self._check_lazy_batchnorm_state(nn.BatchNorm1d, nn.LazyBatchNorm1d)\n\n def test_lazy_batchnorm2d(self):\n self._check_lazy_norm(nn.BatchNorm2d, nn.LazyBatchNorm2d, (16, 3, 6, 7))\n\n def test_lazy_batchnorm2d_pickle(self):\n self._check_lazy_norm_pickle(nn.BatchNorm2d, nn.LazyBatchNorm2d, (16, 3, 6, 7))\n\n def test_lazy_batchnorm2d_state(self):\n self._check_lazy_batchnorm_state(nn.BatchNorm2d, nn.LazyBatchNorm2d)\n self._check_lazy_batchnorm_state(nn.BatchNorm2d, nn.LazyBatchNorm2d)\n\n def test_lazy_batchnorm3d(self):\n self._check_lazy_norm(nn.BatchNorm3d, nn.LazyBatchNorm3d, (16, 3, 6, 7, 8))\n\n def test_lazy_batchnorm3d_pickle(self):\n self._check_lazy_norm_pickle(nn.BatchNorm3d, nn.LazyBatchNorm3d, (16, 3, 6, 7, 8))\n\n def test_lazy_batchnorm3d_state(self):\n self._check_lazy_batchnorm_state(nn.BatchNorm3d, nn.LazyBatchNorm3d)\n 
self._check_lazy_batchnorm_state(nn.BatchNorm3d, nn.LazyBatchNorm3d)\n\n def test_lazy_instancenorm1d(self):\n self._check_lazy_norm(nn.InstanceNorm1d, nn.LazyInstanceNorm1d, (16, 3, 6))\n\n def test_lazy_instancenorm1d_pickle(self):\n self._check_lazy_norm_pickle(nn.InstanceNorm1d, nn.LazyInstanceNorm1d, (16, 3, 6))\n\n def test_lazy_instancenorm1d_state(self):\n self._check_lazy_instancenorm_state(nn.InstanceNorm1d, nn.LazyInstanceNorm1d)\n self._check_lazy_instancenorm_state(nn.InstanceNorm1d, nn.LazyInstanceNorm1d)\n\n def test_lazy_instancenorm2d(self):\n self._check_lazy_norm(nn.InstanceNorm2d, nn.LazyInstanceNorm2d, (16, 3, 6, 7))\n\n def test_lazy_instancenorm2d_pickle(self):\n self._check_lazy_norm_pickle(nn.InstanceNorm2d, nn.LazyInstanceNorm2d, (16, 3, 6, 7))\n\n def test_lazy_instancenorm2d_state(self):\n self._check_lazy_instancenorm_state(nn.InstanceNorm2d, nn.LazyInstanceNorm2d)\n self._check_lazy_instancenorm_state(nn.InstanceNorm2d, nn.LazyInstanceNorm2d)\n\n def test_lazy_instancenorm3d(self):\n self._check_lazy_norm(nn.InstanceNorm3d, nn.LazyInstanceNorm3d, (16, 3, 6, 7, 8))\n\n def test_lazy_instancenorm3d_pickle(self):\n self._check_lazy_norm_pickle(nn.InstanceNorm3d, nn.LazyInstanceNorm3d, (16, 3, 6, 7, 8))\n\n def test_lazy_instancenorm3d_state(self):\n self._check_lazy_instancenorm_state(nn.InstanceNorm3d, nn.LazyInstanceNorm3d)\n self._check_lazy_instancenorm_state(nn.InstanceNorm3d, nn.LazyInstanceNorm3d)\n\n @suppress_warnings\n def test_materialize_dtype(self):\n module = LazyModule()\n module.register_parameter('test_param', UninitializedParameter())\n module.test_param.materialize(10)\n self.assertTrue(module.test_param.dtype == torch.float64)\n module = LazyModule()\n module.register_parameter('test_param', UninitializedParameter())\n module.half()\n module.test_param.materialize(10)\n self.assertTrue(module.test_param.dtype == torch.float16)\n\n @unittest.skipIf(not TEST_CUDA, 'CUDA not available')\n @suppress_warnings\n def 
test_materialize_device(self):\n module = LazyModule()\n module.register_parameter('test_param', UninitializedParameter())\n module.test_param.materialize(10)\n self.assertTrue(module.test_param.device.type == 'cpu')\n module = LazyModule()\n module.register_parameter('test_param', UninitializedParameter())\n module.cuda()\n module.test_param.materialize(10)\n self.assertTrue(module.test_param.device.type == 'cuda')\n\n @suppress_warnings\n def test_chained_initialization(self):\n class MyNetwork(torch.nn.Module):\n def __init__(self):\n super(MyNetwork, self).__init__()\n self.linear_1 = torch.nn.LazyLinear(15)\n self.linear_2 = torch.nn.LazyLinear(10)\n\n def forward(self, x):\n y = self.linear_1(x)\n return self.linear_2(y)\n\n net = MyNetwork()\n net(torch.ones(5, 10))\n self.assertTrue(net.linear_1.weight.shape == (15, 10))\n self.assertTrue(net.linear_1.bias.shape == (15,))\n self.assertTrue(net.linear_2.weight.shape == (10, 15))\n self.assertTrue(net.linear_2.bias.shape == (10,))\n\n @suppress_warnings\n def test_optimizer_pass(self):\n optimizers = [torch.optim.Adadelta, torch.optim.Adagrad, torch.optim.Adam,\n torch.optim.AdamW, torch.optim.Adamax,\n torch.optim.ASGD, torch.optim.SGD, torch.optim.Rprop,\n torch.optim.RMSprop, torch.optim.LBFGS]\n\n def run_step(module, optim):\n self.assertIsInstance(optim.param_groups[0]['params'][0], UninitializedParameter)\n module.test_param.materialize(10)\n self.assertIsInstance(optim.param_groups[0]['params'][0], Parameter)\n self.assertNotIsInstance(optim.param_groups[0]['params'][0], UninitializedParameter)\n for p in module.parameters():\n p.grad = torch.rand_like(p)\n if isinstance(optim, torch.optim.LBFGS):\n optim.step(lambda: 1.0)\n else:\n optim.step()\n\n for optim_cls in optimizers:\n module = LazyModule()\n module.register_parameter('test_param', UninitializedParameter())\n if optim_cls is torch.optim.SGD:\n optim = optim_cls(module.parameters(), lr=0.0)\n elif optim_cls is torch.optim.Adagrad:\n with 
self.assertRaisesRegex(ValueError, 'uninitialized parameter'):\n optim = optim_cls(module.parameters())\n continue\n else:\n optim = optim_cls(module.parameters())\n run_step(module, optim)\n\n @suppress_warnings\n def test_weight_norm(self):\n m = nn.LazyLinear(7)\n with self.assertRaisesRegex(ValueError, 'have uninitialized parameters.'):\n m = torch.nn.utils.weight_norm(m)\n\n @suppress_warnings\n def test_spectral_norm(self):\n m = nn.LazyLinear(7)\n with self.assertRaisesRegex(ValueError, 'have uninitialized parameters.'):\n m = torch.nn.utils.spectral_norm(m)\n\n @suppress_warnings\n def test_invalid_functions(self):\n param = torch.nn.parameter.UninitializedParameter()\n with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):\n torch.empty_like(param)\n\n with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):\n torch.add(param, param)\n\n with self.assertRaisesRegex(ValueError, 'uninitialized parameter'):\n param + param\n\nclass TestFunctionalPickle(TestCase):\n\n # issue gh-38137\n def test_pickle_softsign(self):\n # Make sure it does not throw an exception\n s = pickle.dumps(F.softsign)\n\nclass TestStateDictHooks(TestCase):\n\n def test_load_state_dict_pre_hook(self):\n\n m = nn.Linear(10, 10)\n m_state_dict = m.state_dict()\n\n m_load = nn.Linear(10, 10)\n\n hook_called = 0\n\n def hook_without_module(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n self.assertEqual(m_state_dict, state_dict)\n nonlocal hook_called\n hook_called += 1\n\n def hook_with_module(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n self.assertEqual(m_state_dict, state_dict)\n self.assertTrue(m_load is module)\n nonlocal hook_called\n hook_called += 1\n\n hook_called = 0\n m_load._register_load_state_dict_pre_hook(hook_without_module)\n m_load.load_state_dict(m_state_dict)\n self.assertEqual(1, hook_called)\n\n hook_called = 0\n 
m_load._register_load_state_dict_pre_hook(hook_with_module, True)\n m_load.load_state_dict(m_state_dict)\n self.assertEqual(2, hook_called)\n\n def test_load_state_dict_module_pre_hook(self):\n hook_called = 0\n\n # Test with module instance method as hook\n class MyModule(nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.foo = torch.nn.Parameter(torch.rand(10))\n\n def my_pre_load_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n assert [] == error_msgs\n assert [] == unexpected_keys\n assert [] == missing_keys\n assert strict\n nonlocal hook_called\n hook_called += 1\n\n def my_pre_load_hook_with_module(\n self,\n module,\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n ):\n assert [] == error_msgs\n assert [] == unexpected_keys\n assert [] == missing_keys\n assert strict\n assert self is module\n nonlocal hook_called\n hook_called += 1\n\n # Test that hooks registered on a submodule are also called\n # appropriately, i.e. 
with the submodule as module argument in\n # my_pre_load_hook_with_module.\n class MyModuleContainer(nn.Module):\n def __init__(self, mod):\n super().__init__()\n self.mod = mod\n\n for ctor in [MyModuleContainer, lambda x: x]:\n m = ctor(MyModule())\n state_dict = m.state_dict()\n if isinstance(m, MyModuleContainer):\n mod = m.mod\n else:\n mod = m\n\n hook_called = 0\n mod._register_load_state_dict_pre_hook(\n mod.my_pre_load_hook\n )\n m.load_state_dict(state_dict)\n self.assertEqual(1, hook_called)\n\n hook_called = 0\n mod._register_load_state_dict_pre_hook(\n mod.my_pre_load_hook_with_module, True\n )\n m.load_state_dict(state_dict)\n self.assertEqual(2, hook_called)\n\n def test_load_state_dict_post_hook(self):\n hook_called = 0\n\n class MyModule(nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n self.foo = torch.nn.Parameter(torch.rand(10))\n\n def my_post_load_hook(self, module, incompatible_keys):\n assert module is self\n nonlocal hook_called\n incompatible_keys.missing_keys.append(\"foo\")\n incompatible_keys.unexpected_keys.append(\"bar\")\n hook_called += 1\n\n nested = MyModule()\n wrapped = nn.ModuleList([nested])\n handle = nested.register_load_state_dict_post_hook(\n nested.my_post_load_hook,\n )\n # Hook must be called even if it is wrapped\n ret = wrapped.load_state_dict(wrapped.state_dict(), strict=False)\n self.assertEqual(hook_called, 1)\n # Ensure that the hook modified missing_keys and unexpected_keys\n missing = ret.missing_keys\n unexpected = ret.unexpected_keys\n self.assertEqual(missing, [\"foo\"])\n self.assertEqual(unexpected, [\"bar\"])\n # When called with strict=True, the error raised should mention the\n # missing and unexpected keys the hook added.\n with self.assertRaisesRegex(RuntimeError, \"foo.*\\n.*bar\"):\n wrapped.load_state_dict(wrapped.state_dict(), strict=True)\n self.assertEqual(hook_called, 2)\n # Removing the hook via handle.remove() should cause it not to\n # fire anymore.\n handle.remove()\n # 
Hook did not run so it should not have added any keys\n ret = wrapped.load_state_dict(wrapped.state_dict(), strict=False)\n self.assertEqual(ret.missing_keys, [])\n self.assertEqual(ret.unexpected_keys, [])\n # hook_called should not have been incremented\n self.assertEqual(hook_called, 2)\n\n def load_hook_clear_incompatible(module, incompatible_keys):\n incompatible_keys.missing_keys.clear()\n incompatible_keys.unexpected_keys.clear()\n\n nested.register_load_state_dict_post_hook(load_hook_clear_incompatible)\n state_dict = wrapped.state_dict()\n state_dict[\"extra\"] = torch.ones(1)\n # load state_dict with strict=True should not throw.\n ret = wrapped.load_state_dict(state_dict, strict=True)\n # explicitly ensure that the post hook clearned out incompatible_keys\n self.assertEqual([], ret.missing_keys)\n self.assertEqual([], ret.unexpected_keys)\n\n\ninstantiate_device_type_tests(TestNNDeviceType, globals())\ninstantiate_parametrized_tests(TestNN)\n\nif __name__ == '__main__':\n run_tests()\n" ]
[ [ "torch.nn.Hardshrink", "torch.zeros", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "torch.where", "torch.device", "torch.autograd.gradcheck", "torch.nn.utils.parametrizations.orthogonal", "torch.sqrt", "torch.nn.functional.max_unpool1d", "torch.nn.HuberLoss", "torch.nn.functional.channel_shuffle", "torch.nn.ReplicationPad1d", "torch.nn.ReflectionPad3d", "torch.nn.utils.skip_init", "torch.nn.Bilinear", "torch.cudnn_convolution_add_relu", "torch.testing._internal.common_utils.run_tests", "torch.nn.functional.max_unpool2d", "torch.nn.utils.convert_conv2d_weight_memory_format", "torch.nn.ConvTranspose3d", "torch.nn.init.trunc_normal_", "torch.nn.functional.cross_entropy", "torch.nn.utils.rnn.PackedSequence", "torch.testing._internal.common_utils.instantiate_parametrized_tests", "torch.nn.functional.fractional_max_pool3d", "torch.nn.functional.threshold_", "torch.nn.utils.rnn.pad_sequence", "torch.nn.utils.rnn.unpad_sequence", "torch.nn.functional.adaptive_avg_pool1d", "torch.nn.utils.rnn.pad_packed_sequence", "torch.nn.L1Loss", "torch.ao.quantization.get_default_qconfig", "torch.testing.assert_allclose", "torch.nn.utils.remove_weight_norm", "torch.linalg.qr", "torch.nn.functional.adaptive_avg_pool3d", "torch.nn.functional.sigmoid", "torch.testing._internal.common_device_type.deviceCountAtLeast", "torch.nn.utils.prune._compute_nparams_toprune", "torch.nn.AdaptiveMaxPool1d", "torch.nn.functional.elu_", "torch.testing._internal.common_utils.skipIfRocmVersionLessThan", "torch.nn.functional.conv1d", "torch.nn.functional.multi_head_attention_forward", "torch.nn.utils.prune.l1_unstructured", "numpy.random.rand", "torch.nn.utils.remove_spectral_norm", "torch.nn.LSTM", "torch.cosine_similarity", "torch.nn.Softshrink", "torch.nan_to_num", "torch.testing._internal.common_nn.CriterionTest", "torch.nn.functional.unfold", "torch.nn.utils.parametrize.transfer_parametrizations_and_params", "torch.testing._internal.common_utils.parametrize", "torch.nn.AvgPool1d", 
"torch.randn_like", "torch.nn.init.uniform_", "torch.nn.utils.fusion.fuse_conv_bn_eval", "torch.testing._internal.common_cuda.tf32_on", "torch.testing._internal.common_cuda.tf32_on_and_off", "torch.nn.TripletMarginWithDistanceLoss", "torch.nn.Embedding.from_pretrained", "torch.nn.AvgPool3d", "torch.jit.trace", "torch.linalg.matrix_rank", "torch.from_numpy", "torch.isfinite", "torch.nn.functional.pad", "numpy.isnan", "torch.testing._internal.common_utils.freeze_rng_state", "torch.nn.ModuleList", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.Conv3d", "torch.log", "torch.nn.functional.conv_tbc", "torch.nn.utils.clip_grad_norm_", "torch.backends.cudnn.flags", "torch.nn.functional.kl_div", "torch.nn.FractionalMaxPool3d", "torch.layer_norm", "torch.nn.functional.smooth_l1_loss", "torch.allclose", "torch.nn.MultiMarginLoss", "torch.inference_mode", "torch.nn.Threshold", "torch.nn.SmoothL1Loss", "torch.nn.BatchNorm1d", "torch.nn.functional.adaptive_max_pool3d", "torch.nn.parameter.UninitializedParameter", "torch.convolution", "torch.nn.utils.prune.L1Unstructured", "torch.nn.LayerNorm", "torch.overrides.has_torch_function", "torch.nn.functional.multilabel_soft_margin_loss", "torch.autograd.forward_ad.dual_level", "torch.nn.MaxUnpool3d", "torch.nn.EmbeddingBag", "torch.randn", "torch.equal", "torch.nn.utils.parametrizations.spectral_norm", "torch.nn.functional.hardswish", "torch.nn.utils.prune.is_pruned", "torch.backends.mkldnn.flags", "torch.backends.mkldnn.is_available", "torch.ones_like", "numpy.zeros", "torch.testing._internal.common_utils.get_function_arglist", "torch.empty_like", "torch.full", "torch.nn.functional.avg_pool2d", "torch.nn.Conv2d", "torch.nn.functional.mse_loss", "torch.nn.grad.conv1d_input", "torch.conv2d", "torch.nn.ReflectionPad2d", "torch.pdist", "torch.backends.cudnn.is_available", "torch.nn.Unfold", "torch.nn.grad.conv2d_input", "torch.nn.functional.fractional_max_pool2d", "torch.testing._internal.common_utils.TemporaryFileName", 
"numpy.expand_dims", "torch.nn.utils.parametrize.cached", "torch.nn.LazyConv2d", "torch.sum", "numpy.seterr", "torch.nn.BCEWithLogitsLoss", "torch.nn.utils.prune.CustomFromMask", "torch.testing._internal.common_utils.set_default_dtype", "torch.nn.utils.prune.RandomStructured", "torch.nn.init.eye_", "torch.einsum", "torch.nn.LSTMCell", "torch.nn.functional.cosine_embedding_loss", "torch.rand", "torch.nn.TransformerEncoder", "torch.native_dropout", "torch.testing.assert_close", "torch.nn.functional.gumbel_softmax", "torch._VF._add_relu", "torch.nn.functional.triplet_margin_with_distance_loss", "torch.nn.functional.softshrink", "torch.constant_pad_nd", "torch.nn.functional.ctc_loss", "torch.testing._internal.common_utils.gradcheck", "torch.random.set_rng_state", "torch.nn.Dropout2d", "torch.nn.functional.margin_ranking_loss", "torch.nn.ELU", "torch.nn.LocalResponseNorm", "torch.nn.LazyConv3d", "torch.cuda.synchronize", "torch.nn.AdaptiveMaxPool2d", "torch.nn.utils.prune._validate_pruning_amount_init", "torch.nn.EmbeddingBag.from_pretrained", "torch.nn.functional.relu", "torch.arange", "torch.nn.utils.rnn.pack_sequence", "torch.nn.ConvTranspose2d", "torch.nn.functional.conv2d", "torch.nn.utils.spectral_norm", "torch.cuda.empty_cache", "torch.nn.TransformerEncoderLayer", "torch.nn.functional.cosine_similarity", "torch.nn.utils.prune.identity", "torch.nn.functional.normalize", "torch.nn.MaxUnpool2d", "torch.nn.init.sparse_", "torch.nn.init.dirac_", "torch.nn.utils.prune.random_unstructured", "torch.nn.functional.max_pool1d", "numpy.dot", "torch.rand_like", "numpy.squeeze", "torch.nn.utils.rnn.unpack_sequence", "torch.nn.functional.avg_pool3d", "numpy.random.randn", "torch.nn.utils.prune.PruningContainer", "torch.jit.script", "torch.autograd.forward_ad.make_dual", "torch.nn.functional.softmin", "torch.testing._internal.common_utils._assertGradAndGradgradChecks", "torch.nn.functional.linear", "torch.nn.functional.softplus", "torch.nn.Sequential", "torch.sigmoid", 
"torch.ops.aten.convolution", "torch.testing._internal.common_dtype.integral_types", "torch.nn.functional.leaky_relu_", "numpy.random.random", "torch.nn.Flatten", "torch.testing._internal.common_device_type.skipCUDAIfRocmVersionLessThan", "torch.testing._internal.common_device_type.dtypesIfCUDA", "torch.nn.functional.one_hot", "torch.nn.init.kaiming_normal_", "torch.all", "torch.nn.utils.parametrize.remove_parametrizations", "torch.randint", "torch.nn.functional.poisson_nll_loss", "numpy.sqrt", "torch.nn.PixelUnshuffle", "torch.nn.functional.silu", "torch.nn.parallel._functions.Broadcast.apply", "torch.nn.GroupNorm", "torch.testing._internal.common_cuda.tf32_off", "torch.nn.utils.prune._validate_pruning_amount", "torch.nn.functional.triplet_margin_loss", "torch.fbgemm_linear_fp16_weight", "torch.nn.init.xavier_normal_", "torch.nn.utils.clip_grad_value_", "torch.nn.Linear", "torch.nn.LazyConvTranspose3d", "torch.nn.BatchNorm2d", "torch.nn.KLDivLoss", "torch.nn.functional.grid_sample", "torch.nn.functional.avg_pool1d", "torch._C._select_conv_backend", "torch.abs", "torch.repeat_interleave", "torch.testing._internal.common_dtype.floating_types_and", "torch.nn.CTCLoss", "torch.nn.functional.threshold", "torch.autograd.backward", "torch.nn.LazyConvTranspose1d", "torch.tensor", "torch.nn.functional.adaptive_max_pool2d", "torch.linalg.solve", "torch.nn.LogSigmoid", "torch.nonzero", "torch.LongTensor", "torch.mv", "torch.testing._internal.common_device_type.dtypes", "torch.nn.functional.pairwise_distance", "torch.nn.functional.hardtanh", "torch.stack", "torch.cuda.device_count", "scipy.stats.kstest", "torch.ops.aten._convolution_double_backward", "torch.nn.AdaptiveLogSoftmaxWithLoss.__call__", "torch.nn.Dropout3d", "torch.nn.Tanh", "numpy.ones", "torch.nn.ReLU", "torch.cat", "torch.nn.Embedding", "torch.nn.functional.interpolate", "torch.testing._internal.common_nn.NewModuleTest", "torch.finfo", "torch.nn.Softplus", "torch.add", "torch.nn.InstanceNorm1d", 
"torch.nn.TransformerDecoder", "torch.nn.MaxPool1d", "numpy.repeat", "torch.nn.AdaptiveLogSoftmaxWithLoss", "torch.DoubleTensor", "torch.nn.grad.conv3d_input", "torch.nn.Parameter", "numpy.linalg.inv", "torch.nn.modules.module.register_module_forward_hook", "torch.__future__.set_overwrite_module_params_on_conversion", "torch.nn.LazyConv1d", "numpy.testing.assert_allclose", "torch.nn.Hardsigmoid", "torch.nn.Unflatten", "torch.nn.GELU", "torch.nn.functional.gaussian_nll_loss", "torch.nn.functional.multilabel_margin_loss", "torch.nn.AdaptiveMaxPool3d", "torch.nn.MultiLabelMarginLoss", "torch.testing._internal.common_device_type.precisionOverride", "torch.nn.Softmax", "torch.nn.functional.nll_loss", "torch.nn.functional.mish", "torch.nn.ParameterDict", "torch.nn.LazyLinear", "torch.nn.functional.huber_loss", "torch.nn.functional.max_unpool3d", "torch.nn.functional.affine_grid", "torch.nn.CrossEntropyLoss", "torch.eye", "torch.testing._internal.common_dtype.get_all_math_dtypes", "torch.testing._internal.common_utils.gradgradcheck", "torch.nn.GRUCell", "torch.get_default_dtype", "torch._nnpack_spatial_convolution", "torch.nn.functional.max_pool2d", "torch.testing._internal.common_utils.download_file", "torch.nn.BCELoss", "torch.nn.ParameterList", "torch.batch_norm_stats", "torch.nn.functional.pdist", "torch.nn.utils.prune.remove", "torch.nn.functional.bilinear", "torch._nnpack_available", "torch._masked_softmax", "numpy.prod", "torch.nn.ConvTranspose1d", "torch.nn.DataParallel", "torch.nn.modules.module.register_module_forward_pre_hook", "torch.nn.functional.dropout", "torch.nn.GRU", "torch.nn.ModuleDict", "numpy.matmul", "torch.backends.cudnn.version", "numpy.full", "torch.testing._internal.common_dtype.floating_and_complex_types_and", "torch.testing._internal.common_device_type.get_all_device_types", "torch.autograd.grad", "torch.nn.init.constant_", "torch.nn.functional.binary_cross_entropy_with_logits", "torch.nn.Module", "torch.exp", "torch.nn.AvgPool2d", 
"torch.nn.init.normal_", "torch.nn.RNNCell", "numpy.array", "numpy.sum", "torch.nn.functional.log_softmax", "torch.nn.TripletMarginLoss", "torch.testing.make_tensor", "torch.testing._internal.common_device_type.skipCUDAIfCudnnVersionLessThan", "torch.nn.functional.l1_loss", "torch.nn.utils.prune.ln_structured", "torch.nn.Fold", "torch.nn.Hardswish", "torch.nn.RNN", "torch.nested_tensor", "torch.nn.LazyConvTranspose2d", "torch.cuda.is_available", "torch.nn.ReplicationPad2d", "torch.testing._internal.common_cuda.tf32_is_not_fp32", "torch.nn.MultiheadAttention", "torch.nn.functional.max_pool3d", "numpy.reshape", "torch.nn.TransformerDecoderLayer", "torch.nn.Sigmoid", "torch.nn.utils.parametrize.type_before_parametrizations", "torch.nn.functional.elu", "torch.nn.utils.parametrize.register_parametrization", "torch.nn.functional.conv3d", "torch.zeros_like", "torch.nn.functional.logsigmoid", "torch.nn.utils.prune.global_unstructured", "torch.nn.ReflectionPad1d", "torch.nn.functional.fold", "torch.nn.utils.parametrize.is_parametrized", "torch.nn.utils.weight_norm", "torch.manual_seed", "torch.log_softmax", "torch.matmul", "torch.nn.Upsample", "torch.nn.init.orthogonal_", "torch.testing._internal.common_device_type.skipCUDAIf", "torch.testing._internal.common_utils.subtest", "torch.batch_norm_backward_elemt", "torch.nn.functional.hinge_embedding_loss", "torch.load", "torch.set_default_dtype", "torch.testing._internal.common_device_type.largeTensorTest", "torch.nn.PairwiseDistance", "torch.save", "numpy.random.randint", "torch.nn.init.calculate_gain", "torch.ones", "torch.nn.utils.prune.LnStructured", "torch.relu", "torch.nn.FractionalMaxPool2d", "torch.nn.modules.utils.consume_prefix_in_state_dict_if_present", "torch.set_printoptions", "torch.nn.PixelShuffle", "torch.nn.LeakyReLU", "torch.nn.functional.leaky_relu", "numpy.transpose", "torch.nn.functional.embedding", "torch.linalg.svd", "torch.cudnn_convolution_relu", "torch.random.get_rng_state", 
"torch.nn.AdaptiveAvgPool3d", "torch.testing._internal.hypothesis_utils.tensor", "torch.nn.init.normal", "torch.mean", "torch.nn.functional.softmax", "numpy.amax", "torch.nn.ReplicationPad3d", "torch.nn.Transformer", "torch.nn.functional.adaptive_max_pool1d", "torch.no_grad", "torch.Size", "torch.nn.Dropout", "torch._grid_sampler_2d_cpu_fallback", "torch.nn.functional.embedding_bag", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.MaxUnpool1d", "torch.nn.modules.module.register_module_backward_hook", "torch.nn.NLLLoss", "torch.nn.parameter.UninitializedBuffer", "torch.empty", "torch.dropout", "torch.nn.PReLU", "torch.nn.functional.multi_margin_loss", "torch.randint_like", "torch.fbgemm_pack_gemm_matrix_fp16", "torch.nn.Conv1d", "torch.nn.AdaptiveAvgPool1d", "torch.flip", "torch.isnan", "torch.nn.init.kaiming_uniform_", "torch.nn.MaxPool2d", "torch.nn.MaxPool3d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.init.xavier_uniform_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
denneb1/pylayers
[ "6aaa06175061a9120044c955b44e9168e9c7ee36", "6aaa06175061a9120044c955b44e9168e9c7ee36" ]
[ "pylayers/location/geometric/constraints/tdoa.py", "pylayers/antprop/rays.py" ]
[ "\"\"\"\n\n.. autoclass:: TDOA\n :members:\n\n\"\"\"\n# -*- coding:Utf-8 -*-\n#####################################################################\n#This file is part of RGPA.\n\n#Foobar is free software: you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation, either version 3 of the License, or\n#(at your option) any later version.\n\n#Foobar is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#GNU General Public License for more details.\n\n#You should have received a copy of the GNU General Public License\n#along with Foobar. If not, see <http://www.gnu.org/licenses/>.\n\n#-------------------------------------------------------------------\n#authors :\n#Nicolas AMIOT : [email protected]\n#Bernard UGUEN : [email protected]\n#Mohamed LAARAIEDH : [email protected]\n#####################################################################\nimport numpy as np\nimport scipy as sp\nfrom copy import copy\nfrom pylayers.location.geometric.util.boxn import *\nfrom pylayers.location.geometric.constraints.constraint import *\n\n\nclass TDOA(Constraint):\n \"\"\" TDOA Constraint\n\n Description and evaluation of TDOA constraints\n\n Parameters\n ----------\n\n value : float\n Constraint value in ns. \n std : np.array\n Value standard deviation in ns. 
\n vcw : float\n scale factor.\n p : np.array 2 x ndim\n constraint centers\n\n Attributes\n ----------\n\n drange : difference of distance conversion from time self.value.\n sstd : difference of distance conversion from time self.std\n runable : True NOT USED\n evaluated :False NOT USED\n self.Id : Constraint ID\n\n from annulus bound:\n min : minimum value of observable\n max : maximum value of observable\n mean : mean value of observable\n\n Methods\n -------\n\n annulus_bound(self) : Compute the minimum and maximum distance of the enclosing annulus of the constraint\n tdoa_box(vcw) : find the enclosing box of TDOA constraint for a given vcw\n rescale(self,vcw) : rescale contraint boundary with a given scale factor 'vcw'\n inclusive(self,b) : Is constraint center is inside a box ?\n valid(self,b) : Test if Lbox is compatible with the constraint\n valid_v(self,lv) : Test if a liste of a vertexes from a box is compatible with the constraint. vertexes are obtained thanks to LBoxN.\n bd2coordinates()\n estvol(self) : Constraint Volume estimation\n\n See Also\n --------\n\n pylayers.location.geometric.constraints.Constraint\n\n\n \"\"\"\n def __init__(self, id='0', value=np.array(([45])), std=np.array((4.0)), vcw=3, p=np.array([[0, 0, 0], [10, 10, 10]]), origin={}):\n Constraint.__init__(self, type='TDOA', id=id, p=p, origin=origin)\n self.tdoa_axes(p)\n self.f = self.nv / 2\n self.Dmax = self.nv\n self.value = min(value, 2 * self.f / 0.3)\n self.value = max(self.value, -2 * self.f / 0.3)\n self.std = std\n self.vcw = vcw\n self.drange = self.value * 0.3\n self.sstd = self.std * 0.3\n self.tdoa_box(vcw)\n self.annulus_bound()\n\n\n def update(self):\n \"\"\" update constraint information\n \"\"\"\n # if self.p.any():\n # self.runable = True\n # else:\n # self.runable = False\n self.updc('p',value=self.p)\n self.updc('value',value=self.value)\n self.updc('std',value=self.std)\n self.sstd=self.std * 0.3\n self.range=self.value *0.3\n self.rescale(self.vcw)\n 
self.evaluated = False\n self.annulus_bound()\n\n def tdoa_axes(self, p):\n \"\"\"triedre [vn,wn,tn], support of the contraint\n \"\"\"\n #\n # Dmax\n #\n\n #\n # La boite du tdoa est tronquee\n #\n self.F1 = p[0, :]\n self.F2 = p[1, :]\n #\n #\n #\n v = self.F2 - self.F1\n self.nv = np.sqrt(np.dot(v, v))\n vn = v / (self.nv * 1.0)\n\n if self.ndim > 2:\n if np.abs(v[2]) < 0.9:\n w = np.array([v[1], -v[0], 0])\n else:\n w = np.array([v[2], 0, -v[0]])\n nw = np.sqrt(np.dot(w, w))\n wn = w / (nw * 1.0)\n tn = np.cross(vn, wn)\n self.triedre = [wn, tn, vn] # [z,x,y]\n else:\n w = np.array([v[1], -v[0]])\n nw = np.sqrt(np.dot(w, w))\n wn = w / (nw * 1.0)\n self.triedre = [wn, vn]\n\n def tdoa_box(self, vcw):\n \"\"\"create the inclusive box for a given vcw\n \"\"\"\n\n if self.ndim == 3:\n wn = self.triedre[0]\n tn = self.triedre[1]\n vn = self.triedre[2]\n\n if self.ndim == 2:\n wn = self.triedre[0]\n vn = self.triedre[1]\n\n eps = vcw * self.sstd\n delta = self.drange\n deltap = min(delta + eps, self.nv)\n deltam = max(delta - eps, -self.nv)\n c = delta / 2.\n cp = deltap / 2.\n cm = deltam / 2.\n arge = self.f ** 2 - c ** 2\n argep = self.f ** 2 - cp ** 2\n argem = self.f ** 2 - cm ** 2\n try:\n e = np.sqrt(arge)\n except:\n pdb.set_trace()\n ep = np.sqrt(argep)\n em = np.sqrt(argem)\n\n if cp < 0:\n pp = self.F1 + (self.f + cp) * vn\n else:\n if ep > 0:\n offset = (cp * np.sqrt((self.Dmax / ep) ** 2 + 1) - self.f)\n #print \"ep >0 : offset \",offset\n else:\n offset = -self.Dmax\n pp = self.F2 + offset * vn\n\n if cm < 0:\n if em > 0:\n offset = (cm * np.sqrt((self.Dmax / em) ** 2 + 1) + self.f)\n #print \"em >0 : offset \",offset\n else:\n offset = self.Dmax\n pm = self.F1 + offset * vn\n else:\n pm = self.F2 - (self.f - cm) * vn\n\n if self.ndim == 3:\n p1 = pp + self.Dmax * wn - self.Dmax * tn\n p2 = pp + self.Dmax * wn + self.Dmax * tn\n p3 = pp - self.Dmax * wn + self.Dmax * tn\n p4 = pp - self.Dmax * wn - self.Dmax * tn\n p5 = pm + self.Dmax * wn - self.Dmax 
* tn\n p6 = pm + self.Dmax * wn + self.Dmax * tn\n p7 = pm - self.Dmax * wn + self.Dmax * tn\n p8 = pm - self.Dmax * wn - self.Dmax * tn\n pquad = np.vstack((p1, p2, p3, p4, p5, p6, p7, p8))\n\n if self.ndim == 2:\n p1 = pp + self.Dmax * wn\n p2 = pp - self.Dmax * wn\n p3 = pm + self.Dmax * wn\n p4 = pm - self.Dmax * wn\n pquad = np.vstack((p1, p2, p3, p4))\n\n imin = np.min(pquad, axis=0)\n imax = np.max(pquad, axis=0)\n\n self.ep = ep\n self.em = em\n self.cp = cp\n self.cm = cm\n\n self.lbox = LBoxN(\n [BoxN(np.vstack((imin, imax)), ndim=np.shape(self.p)[1])])\n\n def annulus_bound(self):\n \"\"\" Compute the minimum and maximum distance of the enclosing annulus of the constraint for a given self.vcw\n \"\"\"\n if self.value > 0:\n self.cmin = self.drange - self.vcw * self.sstd\n self.cmax = self.drange + self.vcw * self.sstd\n else:\n self.cmin = self.drange + self.vcw * self.sstd\n self.cmax = self.drange - self.vcw * self.sstd\n\n self.mean = (self.cmin + self.cmax) / 2\n\n def repart(self, DD):\n \"\"\"\n \"\"\"\n return(1. 
/ (self.sstd * np.sqrt(2 * np.pi)) * np.exp(-(DD - self.mean) ** 2 / (2 * self.sstd ** 2)))\n\n def rescale(self, vcw):\n \"\"\"\n rescale(vcw) : rescale constraint with vcw factor\n \"\"\"\n self.vcw = vcw\n #print self.vcw\n# pdb.set_trace()\n self.tdoa_box(self.vcw)\n # print 'TDOA', self.vcw\n #self.estvol() <= TO BE DONE IN TDOA\n\n # def inclusive(self, b):\n # \"\"\"A box b is inclusive for the constraint if self.p is included in the box\n\n # Parameters\n # ----------\n\n # b : BoxN\n # test if self.p is included in box b\n\n # \"\"\"\n # if b.inbox(self.p):\n # return True\n # else:\n # return False\n\n def valid(self, b):\n \"\"\"\n valid(b) : check if box b is valid for the given constraint\n\n A box is valid if it not not valid\n\n A box is not valid if all distances are greater than rangemax\n or all distances are less than rangemin\n \"\"\"\n\n v = b.bd2coord()\n P0 = np.outer(np.ones(len(v)), self.p[0, :])\n P1 = np.outer(np.ones(len(v)), self.p[1, :])\n F1v = np.sqrt(np.sum((P0 - v) * (P0 - v), axis=1))\n F2v = np.sqrt(np.sum((P1 - v) * (P1 - v), axis=1))\n D = (F1v - F2v)\n\n if self.value > 0:\n DDcmin = sum(D >= self.cmin)\n DDcmax = sum(D <= self.cmax)\n else:\n DDcmin = sum(D >= self.cmax)\n DDcmax = sum(D <= self.cmin)\n\n if DDcmin + DDcmax > 15:\n return(True)\n elif (DDcmin < 1) | (DDcmax < 1): # si toute points sont inf a cmin ou sup a cmax\n return('out')\n else:\n return(False)\n\n def valid_v(self, v):\n \"\"\"check if vertex are valid for the given constraint\n\n A box is valid if it not not valid\n\n A box is not valid if all distances are greater than rangemax\n or all distances are less than rangemin\n \"\"\"\n ppb = pow(2, len(self.p[0, :]))\n nbbox = int(len(v) / ppb)\n DDbound = np.zeros((4, len(v)), dtype='bool')\n TB = np.zeros((4, nbbox), dtype='bool')\n\n P0 = np.outer(np.ones(len(v)), self.p[0, :])\n P1 = np.outer(np.ones(len(v)), self.p[1, :])\n F1v = np.sqrt(np.sum((P0 - v) * (P0 - v), axis=1))\n F2v = np.sqrt(np.sum((P1 - 
v) * (P1 - v), axis=1))\n DD = (F1v - F2v)\n\n# if self.value > 0:\n# DD2 = (D>=self.cmin) & (D<=self.cmax)\n# else :\n# DD2 = (D>=self.cmax) & (D<=self.cmin)\n\n if self.value > 0:\n # calculate all distance from constraint origin to all vertexes\n #DD = np.sqrt(np.sum(D*D,axis=1))\n # for each box , find the vertex closest to the constraint origin and the farest.\n T = np.array((np.min(DD.reshape(nbbox, ppb),\n axis=1), np.max(DD.reshape(nbbox, ppb), axis=1)))\n\n TB[0, :] = (T[0, :] <= self.cmin)\n TB[1, :] = (T[0, :] <= self.cmax)\n TB[2, :] = (T[1, :] <= self.cmin)\n TB[3, :] = (T[1, :] <= self.cmax)\n DDbound[0, :] = (DD >= self.cmin)\n DDbound[1, :] = (DD <= self.cmax)\n\n else:\n # calculate all distance from constraint origin to all vertexes\n #DD = np.sqrt(np.sum(D*D,axis=1))\n # for each box , find the vertex closest to the constraint origin and the farest.\n T = np.array((np.min(DD.reshape(nbbox, ppb),\n axis=1), np.max(DD.reshape(nbbox, ppb), axis=1)))\n\n TB[0, :] = (T[0, :] <= self.cmax)\n TB[1, :] = (T[0, :] <= self.cmin)\n TB[2, :] = (T[1, :] <= self.cmax)\n TB[3, :] = (T[1, :] <= self.cmin)\n DDbound[0, :] = (DD >= self.cmax)\n DDbound[1, :] = (DD <= self.cmin)\n\n return DDbound, TB\n\n#\n# return DD2\n def inclusive(self, b):\n \"\"\" A box b is inclusive for the constraint if self.p is included in the box\n\n \"\"\"\n if b.inbox(self.p[0]) | b.inbox(self.p[1]):\n return True\n else:\n return False\n", "# -*- coding: latin1 -*-\nfrom __future__ import print_function\n\"\"\"\n.. currentmodule:: pylayers.antprop.rays\n\n.. 
autosummary::\n :members:\n\n\"\"\"\nimport doctest\nimport os\nimport sys\nimport glob\ntry:\n# from tvtk.api import tvtk\n# from mayavi.sources.vtk_data_source import VTKDataSource\n from mayavi import mlab\nexcept:\n print('Layout:Mayavi is not installed')\nimport pdb\nimport os\nimport copy\nif sys.version_info.major==2:\n import ConfigParser\nelse:\n import configparser\nimport glob\nimport doctest\nimport networkx as nx\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport struct as stru\nimport pylayers.util.geomutil as geu\nimport pylayers.util.pyutil as pyu\nfrom pylayers.util.project import *\nfrom pylayers.antprop.interactions import *\nfrom pylayers.antprop.slab import *\nfrom pylayers.antprop.channel import Ctilde\nfrom pylayers.gis.layout import Layout\nimport pylayers.signal.bsignal as bs\nimport shapely.geometry as shg\nimport h5py\nimport operator\n\n\nclass Rays(PyLayers, dict):\n \"\"\" Class handling a set of rays\n\n Attributes\n ----------\n\n pTx : np.array\n transmitter (3,)\n pRx : np.array\n receiver (3,)\n B : IntB\n B0 : IntB\n I : Interactions\n I.I : np.array\n (f,nI,3,3)\n I.T : IntT\n I.T.A : np.array\n (f,iT,3,3)\n I.R : IntR\n I.R.A : np.array\n (f,iR,3,3)\n I.D : IntD\n I.D.A : np.array\n (f,iD,3,3)\n Lfilename : string\n Layout name\n delays : np.array\n ray delays\n dis : np.array\n ray distance = delays*0.3\n nray : int\n number of rays\n evaluated : boolean\n are rays evaluated ?\n is3D : boolean\n are rays 2d or 3d rays ?\n isbased : boolean\n locbas has been applied ?\n filles : boolean\n filled has been applied ?\n los : boolean\n Line of sight boolean\n fGHz : np.array\n frequency points for evaluation\n origin_sig_name : string\n signature file which produces the rays\n\n\n Notes\n -----\n\n The Rays object is obtained from a signature.\n It is a container for a set of rays between a source\n and a target point defining a radio link.\n\n Once a Rays object has been 
    obtained in 2D, it is transformed
    in 3D via the **to3D** method. This method takes two parameters :
    the height from floor to ceil, and the number N of
    multiple reflections to account for.

    Once the 3d rays have been calculated,
    the local basis are evaluated along those rays. This is
    done through the **locbas** method

    Once the local basis have been calculated the different
    interactions along rays can be informed via the **fillinter**
    method.

    Once the interactions are informed the field along rays can
    be evaluated via the **eval** method

    """
    def __init__(self, pTx, pRx):
        """ object constructor

        Parameters
        ----------

        pTx : np.array
            transmitter coordinates
        pRx : np.array
            receiver coordinates

        """

        self.pTx = pTx
        self.pRx = pRx
        # counters for 3D and 2D rays; updated later by to3D()
        self.nray = 0
        self.nray2D = 0
        self.raypt = 0
        # processing-state flags, switched on by the successive
        # pipeline steps (to3D -> locbas -> fillinter -> eval)
        self.los = False
        self.is3D = False
        self.isbased = False
        self.filled = False
        self.evaluated = False

    def __len__(self):
        """ Return the total number of rays.

        Sums the third dimension of the 'sig' array over all
        interaction groups (keys of the dict).
        """
        Nray = 0
        for k in self.keys():
            sh = np.shape(self[k]['sig'])
            Nray = Nray + sh[2]
        return Nray


    # NOTE(review): dead commented-out __add__ implementation kept
    # for reference; uses py2-only has_key and is unfinished.
    # def __add__(self,r):

    #     if (not r.is3D) and (not r.isbased) and (not self.is3D) and (not self.isbased) :
    #         raise AttributeError('both Ray structures must be 3D and based to be added')


    #     for ni in r:
    #         if self.has_key(ni):
    #             import ipdb
    #             ipdb.set_trace()
    #             # check if som rays already exists
    #             # if so, don't add them
    #             lur = np.array([])
    #             for ur in range(self[ni]['pt'].shape[2]):
    #                 udifferent = np.where(np.all(np.all(r[ni]['pt'][...,ur][...,None]!=self[ni]['pt'],axis=0),axis=0))[0]
    #                 lur = np.hstack((lur,udifferent ))
    #             import ipdb
    #             ipdb.set_trace()

    #             self[ni]['pt'] = np.concatenate((self[ni]['pt'],r[ni]['pt']),axis=2)
    #             self[ni]['sig'] = np.concatenate((self[ni]['sig'],r[ni]['sig']),axis=2)
    #             self[ni]['si'] = np.concatenate((self[ni]['si'],r[ni]['si']),axis=1)
    #             self[ni]['rayidx'] = np.concatenate((self[ni]['rayidx'],r[ni]['rayidx']),axis=0)
    #             self[ni]['dis'] = np.concatenate((self[ni]['dis'],r[ni]['dis']),axis=0)
    #             self[ni]['vsi'] = np.concatenate((self[ni]['vsi'],r[ni]['vsi']),axis=1)
    #             self[ni]['nbrays'] += 1
    #             if ni != 0:
    #                 self[ni]['BiN'] = np.concatenate((self[ni]['BiN'],r[ni]['BiN']),axis=2)
    #                 self[ni]['Bi'] = np.concatenate((self[ni]['Bi'],r[ni]['Bi']),axis=3)
    #                 self[ni]['Bo'] = np.concatenate((self[ni]['Bo'],r[ni]['Bo']),axis=3)
    #                 self[ni]['Bo0'] = np.concatenate((self[ni]['Bo0'],r[ni]['Bo0']),axis=2)
    #                 self[ni]['scpr'] = np.concatenate((self[ni]['scpr'],r[ni]['scpr']),axis=1)
    #                 self[ni]['norm'] = np.concatenate((self[ni]['norm'],r[ni]['norm']),axis=2)

    #                 self[ni]['B'] = np.concatenate((self[ni]['B'],r[ni]['B']),axis=3)
    #                 self[ni]['aod'] = np.concatenate((self[ni]['aod'],r[ni]['aod']),axis=1)
    #                 self[ni]['aoa'] = np.concatenate((self[ni]['aoa'],r[ni]['aoa']),axis=1)
    #                 self[ni]['theta'] = np.concatenate((self[ni]['theta'],r[ni]['theta']),axis=1)

    #                 if r[ni].has_key('diffidx'):
    #                     if self[ni].has_key('diffidx'):
    #                         self[ni]['diffidx'] = np.concatenate((self[ni]['diffidx'],r[ni]['diffidx']))
    #                         self[ni]['diffvect'] = np.concatenate((self[ni]['diffvect'],r[ni]['diffvect']),axis=1)
    #                         self[ni]['diffslabs'].append(r[ni]['diffslabs'])

    #                     else:
    #                         self[ni]['diffidx'] = r['diffidx']
    #                         self[ni]['diffvect'] = r['diffvect']
    #                         self[ni]['diffslabs'] = r['diffslabs']

    #         else:
    #             self[ni]=r[ni]






    def __repr__(self):
        """ Build a human-readable summary of the ray structure.

        Shows 3D/2D kind, per-group ray counts, state flags,
        Tx/Rx positions and, for 2D rays, the full signatures.
        """
        s = ''
        ni = 0
        nl = 0
        lgi = list(self.keys())
        lgi.sort()
        if self.is3D:
            s = self.__class__.__name__ + '3D\n' + '----------'+'\n'

            for k in lgi:
                r = self[k]['rayidx']
                nr = len(r)
                s = s + str(k)+' / '+str(nr)+ ' : '+str(r)+'\n'
                # ni/nl accumulate interaction and segment counts
                # (computed but not displayed)
                ni = ni + nr*k
                nl = nl + nr*(2*k+1)
            nray2D = self.nray2D
        else:
            s = self.__class__.__name__ + '2D\n' + '----------'+'\n'
            nray2D = len(self)

        if self.los:
            s = s + "LOS "
        if self.isbased:
            s = s + "based "
        if self.filled:
            s = s + "filled "

        s = s + '\n'
        s = s + 'N2Drays : '+ str(nray2D) + '\n'
        if hasattr(self,'nb_origin_sig'):
            s = s + 'from '+ str(self.nb_origin_sig) + ' signatures\n'
            s = s + '#Rays/#Sig: '+ str(nray2D/(1.*self.nb_origin_sig) )

        s = s + '\npTx : '+ str(self.pTx) + '\npRx : ' + str(self.pRx)+'\n'

        if not self.is3D:
            ray_cpt = 0
            for k in lgi:
                #sk = np.shape(self[k]['sig'])[2]
                s = s + str(k) + ':\n'
                sig = self[k]['sig'][0,:]
                sha0 = sig.shape[0]
                sha1 = sig.shape[1]
                #pdb.set_trace()
                for l in np.arange(sha1):
                    s = s + ' '+str(ray_cpt)+':'
                    ray_cpt +=1
                    for n in np.arange(sha0):
                        s = s + ' '+str(sig[n,l])
                    s = s+'\n'
                #pdb.set_trace()
                #s = s + str(sk) + 'rays with' + str(k) + ' interactions'


        return(s)




    def saveh5(self,idx=0):
        """ save rays in hdf5 format

        Parameters
        ----------

        idx : int
            index appended to the filename (``<filename>_<idx>.h5``)

        See Also
        --------

        loadh5

        """

        filename = self.filename+'_'+str(idx)
        filenameh5=pyu.getlong(filename+'.h5',pstruc['DIRR3D'])



        # try/except to avoid loosing the h5 file if
        # read/write error
        try:
            f=h5py.File(filenameh5,'w')
            # keys not saved as attribute of h5py file
            notattr = ['I','B','B0','delays','dis']
            for a in self.__dict__.keys():
                if a not in notattr:
                    f.attrs[a]=getattr(self,a)

            # one h5 group per interaction-group key, one dataset per entry
            for k in self.keys():
                f.create_group(str(k))
                for kk in self[k].keys():
                    if kk == 'sig2d':
                        # Need to find an efficient way to save the signatures
                        # 2d which have created the rays
                        pass
                    elif kk == 'nbrays':
                        f[str(k)].create_dataset(kk,shape=(1,),data=np.array([self[k][kk]]))
                    else:
                        f[str(k)].create_dataset(kk,shape=np.shape(self[k][kk]),data=self[k][kk])
            f.close()
        except:
            # NOTE(review): bare except; f may be unbound if File() itself failed
            f.close()
            raise NameError('Rays: issue when writting h5py file')
        print(filenameh5)


    def loadh5(self,filename=[],idx=0):
        """ load rays hdf5 format

        Parameters
        ----------

        filename : str, optional
            explicit h5 filename; when [] it is derived from
            ``self.filename`` and *idx*
        idx : int

        """
        if filename == []:
            filenameh5 = self.filename+'_'+str(idx)+'.h5'
        else :
            filenameh5 = filename

        filename=pyu.getlong(filenameh5,pstruc['DIRR3D'])
        print(filename)

        # try/except to avoid loosing the h5 file if
        # read/write error
        try:
            f = h5py.File(filename,'r')
            # group names are integer keys serialized as str -> eval back to int
            for k in f.keys():
                self.update({eval(k):{}})
                for kk in f[k].keys():
                    self[eval(k)].update({kk:f[k][str(kk)][:]})

            for a,va in f.attrs.items():
                setattr(self,a,va)
            f.close()

        except:

            f.close()
            raise NameError('Rays: issue when reading h5py file')

        # fill if save was filled
        # temporary solution in order to avoid
        # creating save for Interactions classes

        if self.filled:
            #Lname = self.Lfilename
            Lname = '_'.join(self.filename.split('_')[0:-1]) + '.lay'
            #Lname = self.filename.split('_')[0] + '.lay'
            L=Layout(Lname)
            self.fillinter(L)

        if self.evaluated:
            # NOTE(review): calls val() while the class docstring refers to
            # an **eval** method — confirm which is the intended API
            return self.val(self.fGHz)

    def _saveh5(self,filenameh5,grpname):
        """ Save rays h5py format compliant with Links Class

        Parameters
        ----------

        filenameh5 : string
            filename of the h5py file (from Links Class)
        grpname : string
            groupname of the h5py file (from Links Class)

        See Also
        --------

        pylayers.simul.links

        """

        filenameh5=pyu.getlong(filenameh5,pstruc['DIRLNK'])
        # try/except to avoid loosing the h5 file if
        # read/write error
        #try:

        fh5=h5py.File(filenameh5,'a')
        # 3D rays are stored under 'ray/<grpname>', 2D under 'ray2/<grpname>'
        if self.is3D:
            if not grpname in fh5['ray'].keys():
                fh5['ray'].create_group(grpname)
            else :
                print('ray/'+grpname +'already exists in '+filenameh5)
            f = fh5['ray/'+grpname]


        else:
            if not grpname in fh5['ray2'].keys():
                fh5['ray2'].create_group(grpname)
            else :
                print('ray2/'+grpname +'already exists in '+filenameh5)
            f = fh5['ray2/'+grpname]
        # keys not saved as attribute of h5py file
        notattr = ['I','B','B0','dis']
        for a in self.__dict__.keys():
            if a not in notattr:
                if type(a)==str:
                    # NOTE(review): encode() result is discarded — this
                    # statement has no effect
                    a.encode('utf-8')
                if a=='_luw':
                    la = [ x.encode('utf8') for x in getattr(self,a) ]
                    f.attrs[a] = la
                else:
                    f.attrs[a] = getattr(self,a)

        for k in self.keys():
            f.create_group(str(k))
            for kk in self[k].keys():
                if kk == 'sig2d':
                    # Need to find an efficient way to save the signatures
                    # 2d which have created the rays
                    pass
                elif kk == 'nbrays':
                    f[str(k)].create_dataset(kk,shape=(1,),data=np.array([self[k][kk]]))
                else:
                    if kk=='diffslabs':
                        # slab names must be byte strings for h5py storage
                        ldiffslabs = [ x.encode('utf8') for x in self[k][kk] ]
                        f[str(k)].create_dataset(kk,shape=np.shape(self[k][kk]),data=ldiffslabs)
                    else:
                        f[str(k)].create_dataset(kk,shape=np.shape(self[k][kk]),data=self[k][kk])
        fh5.close()
        #except:
        #    fh5.close()
        #    raise NameError('Rays: issue when writting h5py file')

    def _loadh5(self,filenameh5,grpname,**kwargs):
        """ load rays h5py format compliant with Links Class

        Parameters
        ----------

        filenameh5 : string
            filename of the h5py file (from Links Class)
        grpname : string
            groupname of the h5py file (from Links Class)
        kwargs may contain a L: layout object
            if L = [] the layout is loaded from the layout name stored
            into the h5 file
            if L = Layout the layout passed in arg is used

        See Also
        --------

        pylayers.simul.links

        """


        filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
        # try/except to avoid loosing the h5 file if
        # read/write error
        try:
            fh5=h5py.File(filename,'r')

            if self.is3D:
                argfile = 'ray/'+grpname
            else:
                argfile = 'ray2/'+grpname

            f = fh5[argfile]

            # restore integer group keys and their datasets
            for k in f.keys():
                self.update({eval(k):{}})
                for kk in f[k].keys():
                    self[eval(k)].update({kk:f[k][str(kk)][:]})

            for a,va in f.attrs.items():
                setattr(self,a,va)


            fh5.close()

        except:

            fh5.close()
            raise NameError('Rays: issue when reading h5py file')

        # fill if save was filled
        # temporary solution in order to avoid
        # creating save for Interactions classes

        if self.filled:
            if 'L' in kwargs:
                self.L=kwargs['L']
            else:
                self.L = Layout(self.Lfilename,bbuild=True)
                try:
                    self.L.dumpr()
                except:
                    # no dumped graphs available : rebuild and dump them
                    self.L.build()
                    self.L.dumpw()
            # L=Layout(self.Lfilename,bbuild=True)
            self.fillinter(self.L)

        # if self.evaluated:
        #     return self.eval(self.fGHz)


    def reciprocal(self):
        """ switch tx and rx

        Returns a new Rays object with pTx/pRx exchanged and the
        points and signatures of every interaction group reversed.
        """

        r = Rays(self.pRx,self.pTx)
        r.is3D = self.is3D
        r.nray = self.nray
        r.origin_sig_name = self.origin_sig_name
        r.nb_origin_sig = self.nb_origin_sig

        for k in self:
            r[k]={}
            # reverse the interaction order (axis 1)
            r[k]['pt']=self[k]['pt'][:,::-1,:]
            r[k]['sig']=self[k]['sig'][:,::-1,:]
        return(r)


    def check_reciprocity(self,r):
        """ check ray reciprocity in comparing two reciprocal rays

        Parameters
        ----------

        r : rays reciprocal to self

        Raises AssertionError when any reciprocity property fails.
        """
        # permutation of all termination points
        assert (self.pTx==r.pRx).all()
        assert (self.pRx==r.pTx).all()
        # for all group of interctions
        for k in self:
            # same distances
            assert (np.allclose(self[k]['dis'],r[k]['dis']))
            # same points when reading from right to left
            assert (np.allclose(self[k]['pt'],r[k]['pt'][:,::-1,:]))
            # same signature reading from right to left
            assert (np.allclose(self[k]['sig'],r[k]['sig'][:,::-1,:]))
            # if local basis have been evaluated
            if (self.isbased) & (r.isbased):
                #assert (np.allclose(self[k]['nstrwall'],r[k]['nstrwall'][:,::-1,:]))
                assert (np.allclose(self[k]['norm'],r[k]['norm'][:,::-1,:])), 'interaction block:' + str(k)
                #assert ((np.mod(self[k]['aoa']-r[k]['aod'],2*np.pi)==0).all())
                #assert ((np.mod(self[k]['aod']-r[k]['aoa'],2*np.pi)==0).all())
                # 1st output basis is equal to last input basis of the reciprocal ray
                assert (np.allclose(self[k]['Bo0'],r[k]['BiN'])), 'interaction block:' + str(k)
                # last input basis is equal to 1st output basis of the reciprocal ray
                assert (np.allclose(self[k]['BiN'],r[k]['Bo0'])), 'interaction block:' + str(k)
                # vsi vectors are inversed
                assert (np.allclose(self[k]['vsi'],-r[k]['vsi'][:,::-1,:])), 'interaction block:' + str(k)
                assert (np.allclose(abs(self[k]['scpr']),abs(r[k]['scpr'][::-1,:]))), 'interaction block:' + str(k)
                assert (np.allclose(self[k]['theta'],r[k]['theta'][::-1,:])), 'interaction block:' + str(k)
                assert (np.allclose(self[k]['Bi'],r[k]['Bo'][:,:,::-1,:])), 'interaction block:' + str(k)
                assert (np.allclose(self[k]['Bo'],r[k]['Bi'][:,:,::-1,:])), 'interaction block:' + str(k)
                assert (np.allclose(self[k]['B'],r[k]['B'][:,:,::-1,:].swapaxes(0,1))), 'interaction block:' + str(k)

        if self.evaluated :

            for ir in range(self.nray):

                iint1 = self.ray(ir)
                iint2 = r.ray(ir)

                # check Interactions
                A1 = self.I.I[:, iint1, :, :]
                A2 = r.I.I[:, iint2, :, :][:,::-1,:,:]
                # NOTE(review): pdb.set_trace() as assert message triggers
                # the debugger on failure (debug aid left in place)
                assert np.allclose(A1,A2),pdb.set_trace()

                # check bases
                # ray 1 : B0   | B[0] | B[1] | B[2] | B[3] | B[4]
                # ray 2 : B[4] | B[3] | B[2] | B[1] | B[0] | B0
                assert np.allclose(self.B0.data[ir,:,:],r.B.data[iint2,:,:][-1,:,:].swapaxes(1,0))
                assert np.allclose(r.B0.data[ir,:,:],self.B.data[iint1,:,:][-1,:,:].swapaxes(1,0))
                assert np.allclose(self.B.data[iint1,:,:][:-1],r.B.data[iint2,:,:][:-1][::-1,:,:].swapaxes(2,1))



    def sort(self):
        """ sort rays

        TODO : not finished

        """
        # NOTE(review): result of argsort is unused — method is a stub
        u = np.argsort(self.dis)


    def rayfromtyp_order(self,nD=[1],nR=[1],nT=[1],llo='&&'):
        """
        Return rays from a given type (R|T|D) to a given order
        ( number of interaction)

        list logic operator : llo ['op0op1']

        nD <op0> nR <op1> nT


        Parameters
        ----------

        nD = list|int
            requested number of Diffraction
        nR = list|int
            requested number of Reflection
        nT = list|int
            requested number of Transmission
        llo = list logic operator [op0,op1]
            nD <op0> nR <op1> nT


        Returns
        -------

        lr : list
            list of ray index matching the typ & order conditions



        """

        # NOTE: mutable default arguments are shared across calls;
        # they are only read here, never mutated
        if not isinstance(nD,list):
            nD=[nD]
        if not isinstance(nR,list):
            nR=[nR]
        if not isinstance(nT,list):
            nT=[nT]

        # map textual logic operators onto their functional form
        op = {'and':operator.and_,
              'or':operator.or_,
              '&':operator.and_,
              '|':operator.or_,
              }


        lr=[]
        for ur,r in enumerate(range(self.nray)):
            # li : list of interaction types ('R','T','D') of ray r
            li = self.ray2ityp(r)
            nRli = li.count('R')
            nTli = li.count('T')
            nDli = li.count('D')



            cD = (nDli in nD)
            cR = (nRli in nR)
            cT = (nTli in nT)

            # if (nDli in nD) and (nRli in nR) and (nTli in nT) :
            # combine conditions as  (cD op0 cR) op1 cT
            if op[llo[1].lower()]( op[llo[0].lower()](cD,cR) , cT):
                lr.append(r)
            elif (self.los) and (1 in nT ) and (0 in nD) and (0 in nR) and (ur == 0):
                # special case : keep the LOS ray (index 0) when present
                lr.append(r)
        return lr


    def extract_typ_order(self,L,nD=[1],nR=[1],nT=[1],llo='&&'):
        """ Extract group of rays from a certain type (R|T|D)
            at a order ( <=> given number of interaction)

        list logic operator : llo [op0,op1]

        nD <op0> nR <op1> nT


        Parameters
        ----------

        L : Layout
        nD = list|int
            requested number of Diffraction
        nR = list|int
            requested number of Reflection
        nT = list|int
            requested number of Transmission
        llo = list logic operator [op0,op1]
            nD <op0> nR <op1> nT

        Returns
        -------

        R : Rays object
            New Rays object containing rays matching
            the typ/order conditions


        """

        lr = self.rayfromtyp_order(nD=nD,nR=nR,nT=nT,llo=llo)
        return self.extract(lr,L)


    def extract(self,lnr,L):
        """ Extract a group of rays

        Parameters
        ----------

        lnr : list of rays indexes
        L : Layout

        Returns a new Rays object restricted to the rays in *lnr*,
        with local bases and interactions re-evaluated.
        """


        if not isinstance(lnr,list):
            lnr=[lnr]

        r = Rays(self.pTx,self.pRx)
        r.is3D = self.is3D

        for unr,nr in enumerate(lnr):
            #r.nray2D =
            #r.nb_origin_sig = 1

            # ni : number of interactions of ray nr (its group key)
            # ur : position of ray nr inside group ni
            ni = self.ray2nbi(nr)
            ur = np.where(self[ni]['rayidx']==nr)[0][0]




            if ni == 0:
                los = True
            else:
                los = False

            if 'D' in self.typ(nr):
                diff=True
            else:
                diff=False


            if 'diffvect' in self[ni]:
                # check if the ray has diffraction interaction
                inter = self.ray2iidx(nr)[:,0]
                uD = np.where([i in inter for i in self[ni]['diffidx']])[0]
            else:
                uD=[]

            diffkey = ['diffvect','diffidx','diffslabs']

            # cray : per-ray slice of the group-ni entries
            cray = {}


            for k in self[ni].keys():

                if ni ==0:

                    # LOS group : copy the whole entry as-is
                    cray = self[ni]
                    break

                elif k not in ['nbrays','rayidx','dis','nstrwall','nstrswall']:
                    tab = self[ni][k]
                    if type(tab)==np.ndarray and k not in diffkey:
                        try:
                            # keep only the slice of ray ur, preserving rank
                            cray[k] = tab[...,ur][...,np.newaxis]
                        except:
                            import ipdb
                            ipdb.set_trace()
                    if diff :
                        if k in diffkey :
                            if k != 'diffslabs':
                                cray[k]=tab[...,uD][...,np.newaxis]
                            else:
                                if len(uD)>0 :
                                    cray[k]=[tab[uD]]
                                else:
                                    cray[k]=[]


            cray['nbrays'] = unr+1 # keep only one ray
            r.nray = unr+1
            #cray['rayidx']=np.array([self[ni]['rayidx'][nr]]) # ray index in the whole structure
            cray['rayidx'] = np.array([unr])
            cray['dis'] = np.array([self[ni]['dis'][ur]])


            if ni in r:
                # group ni already present in the output :
                # concatenate this ray onto the existing arrays

                # R[ni]['sig2d'].append(self[k]['sig2d'][ur])

                if not los :
                    r[ni]['BiN'] = np.concatenate((r[ni]['BiN'],cray['BiN']),axis=2)
                    r[ni]['Bo'] = np.concatenate((r[ni]['Bo'],cray['Bo']),axis=3)
                    r[ni]['Bi'] = np.concatenate((r[ni]['Bi'],cray['Bi']),axis=3)


                    if diff:
                        if 'diffidx' in r[ni]:
                            r[ni]['diffidx'] = np.concatenate((r[ni]['diffidx'],cray['diffidx']))
                            r[ni]['diffvect'] = np.concatenate((r[ni]['diffvect'],cray['diffvect']),axis=1)
                            r[ni]['diffslabs'].append(cray['diffslabs'])

                        else:
                            r[ni]['diffidx'] = cray['diffidx']
                            r[ni]['diffvect'] = cray['diffvect']
                            r[ni]['diffslabs'] = cray['diffslabs']

                r[ni]['nbrays'] += 1
                r[ni]['B'] = np.concatenate((r[ni]['B'], cray['B']), axis=3)

                # NOTE(review): concatenation axes presumably mirror each
                # array's ray axis (last dim) — confirm against fillinter/locbas
                r[ni]['pt'] = np.concatenate((r[ni]['pt'], cray['pt']), axis=2)
                r[ni]['rayidx'] = np.concatenate((r[ni]['rayidx'], cray['rayidx']), axis=0)
                r[ni]['Bo0'] = np.concatenate((r[ni]['Bo0'],cray['Bo0']), axis=2)

                r[ni]['scpr'] = np.concatenate((r[ni]['scpr'], cray['scpr']), axis=1)
                r[ni]['aod'] = np.concatenate((r[ni]['aod'], cray['aod']), axis=1)
                r[ni]['si'] = np.concatenate((r[ni]['si'], cray['si']), axis=1)
                r[ni]['sig'] = np.concatenate((r[ni]['sig'], cray['sig']), axis=2)
                # r[ni]['sig2d'] = np.concatenate((r[ni]['sig2d'],cray['sig2d']),axis=2)
                r[ni]['aoa'] = np.concatenate((r[ni]['aoa'], cray['aoa']), axis=1)
                r[ni]['vsi'] = np.concatenate((r[ni]['vsi'], cray['vsi']), axis=2)
                r[ni]['theta'] = np.concatenate((r[ni]['theta'], cray['theta']), axis=1)
                r[ni]['norm'] = np.concatenate((r[ni]['norm'], cray['norm']), axis=2)
                r[ni]['dis'] = np.concatenate((r[ni]['dis'], cray['dis']), axis=0)

            else:
                # first ray seen for this group
                r[ni] = cray

            # r[ni]['rays'] = to be done HERE


        # recompute bases and interactions on the extracted subset
        r.locbas(L)
        r.fillinter(L)
        return(r)

    def extract_old(self,nr,L):
        """ Extract a single ray

        Parameters
        ----------

        nr : ray index
        L : Layout

        """

        r = Rays(self.pTx,self.pRx)
        r.is3D = self.is3D

        r.nray2D = 1
        r.nb_origin_sig = 1

        #ni = self._ray2nbi[nr]
        #ur = np.where(self[ni]['rayidx']==nr)[0][0]

        ni,ur = self.ir2a(nr)

        if 'D' in self.typ(nr):
            diff=True
        else:
            diff=False

        if 'diffvect' in self[ni]:
            # check if the ray has diffraction interaction
            inter = self.ray2iidx(nr)[:,0]
            uD = np.where([i in inter for i in self[ni]['diffidx']])[0]
        else:
            uD=[]

        diffkey = ['diffvect','diffidx','diffslabs']

        r[ni] = {}
        for k in self[ni].keys():
            if k not in ['nbrays','rayidx','dis','nstrwall','nstrswall']:
                tab = self[ni][k]
                if type(tab)==np.ndarray and k not in diffkey:
                    r[ni][k] = tab[...,ur][...,np.newaxis]
                if diff :
                    if k in diffkey :
                        if k != 'diffslabs':
                            r[ni][k]=tab[...,uD][...,np.newaxis]
                        else:
                            if len(uD)>0 :
                                r[ni][k]=tab[uD]
                            else:
                                r[ni][k]=[]

        # NOTE(review): key 'nrays' here vs 'nbrays' in extract() — confirm
        r[ni]['nrays'] = 1 # keep only one ray
        r.nray = 1
        #r[ni]['rayidx']=np.array([self[ni]['rayidx'][nr]]) # ray index in the whole structure
        r[ni]['rayidx'] = np.array([0])
        r[ni]['dis'] = np.array([self[ni]['dis'][ur]])
        r.locbas(L)
        r.fillinter(L)
        return(r)



    def show(self,**kwargs):
        """ plot 2D rays within the simulated environment

        Parameters
        ----------

        rlist : list (default []= all rays)
            list of indices of ray in interaction group
        graph : string t
            type of graph to be displayed
            's','r','t',..
        fig : figure
        ax : axis
        L : Layout
        alpha : float
            1
        linewidth : float
            0.1
        color : string
            'black'
        ms : int
            marker size : 5
        layout : boolean
            True
        points : boolean
            True
        ER : ray energy

        """
        defaults = {'rlist': [],
                    'fig': [],
                    'ax': [],
                    'L': [],
                    'graph': 's',
                    'color': 'black',
                    'alpha': 1,
                    'linewidth': 0.5,
                    'ms': 5,
                    'vmin':0,
                    'vmax':-70,
                    'cmap': plt.cm.hot_r,
                    'layout': True,
                    'points': True,
                    'labels': False,
                    'bcolorbar': False
                    }

        for key, value in defaults.items():
            if key not in kwargs:
                kwargs[key] = value

        if kwargs['fig'] ==[]:
            fig = plt.figure()

        if kwargs['ax'] ==[]:
            ax = fig.add_subplot(111)

        #
        # display the Layout
        #
        if kwargs['layout'] == True:
            if kwargs['L'] != []:
                fig,ax = kwargs['L'].showG(**kwargs)
            else :
                raise AttributeError('Please give a Layout file as argument')
        else:
            fig = kwargs['fig']
            ax = kwargs['ax']
        #
        # display Tx and Rx
        #
        if kwargs['points'] ==True:
            # Tx as red circle, Rx as green circle
            ax.plot(self.pTx[0], self.pTx[1], 'or',ms=kwargs['ms'])
            ax.plot(self.pRx[0], self.pRx[1], 'og',ms=kwargs['ms'])
        # i=-1 all rays
        # else block of interactions i
        # plot all rays
        if kwargs['rlist'] == []:

            # list of group of interactions
            lgrint = self.keys()

            for i in lgrint:
                # list of rays
                lray = range(len(self[i]['pt'][0, 0, :]))

                #if self.filled :
                #    ax.set_title('rays index :'+ str(self[i]['rayidx']))

                for j in lray:

                    addr_ray = (i,j)
                    index_ray = self.a2ir(addr_ray)

                    # full polyline : Tx + interaction points + Rx (x,y only)
                    ray = np.hstack((self.pTx[0:2].reshape((2, 1)),
                                     np.hstack((self[i]['pt'][0:2, :, j],
                                                self.pRx[0:2].reshape((2, 1))))
                                     ))

                    if 'ER' not in kwargs:
                        ax.plot(ray[0, :], ray[1, :],
                                alpha = kwargs['alpha'],
                                color = kwargs['color'],
                                linewidth = kwargs['linewidth'])
                    else:
                        # NOTE(review): ER and E are not defined in this
                        # scope — this branch would raise NameError; confirm
                        # intended source of the ray-energy array
                        EdB = 10*np.log10(ER[index_ray])
                        ERdB = 10*np.log10(E)
                        vscale = 1.-(max(ERdB)-EdB)/(max(ERdB)-min(ERdB))
                        linewidth = 3*vscale
                        alpha = vscale
                        cmap = cm.hot
                        color = cmap(vscale)
                        ax.plot(ray[0, :], ray[1, :],
                                alpha = alpha,
                                color = color,
                                linewidth = linewidth)

                ax.axis('off')
                #if self.filled :
                #    ax.set_title('rays index :'+ str(self[i]['rayidx'][lray]))
        else:
            rlist = kwargs['rlist']
            # 3D ray
            if self.is3D:
                # map requested ray indices to (group, local index) pairs
                nbi = self._ray2nbi[rlist]
                nr = np.array((nbi,rlist))
                unb = np.unique(nr[0,:])
                unr = {int(i):np.where(nr[0,:]==i)[0] for i in unb}

                for i in unb:
                    raynb = (nr[1,unr[i]]).astype(int)
                    nbr = len(raynb)
                    ptidx = [np.where(self[i]['rayidx']==x)[0][0] for x in raynb]
                    for j in ptidx:

                        ray = np.hstack((self.pTx[0:2].reshape((2, 1)),
                                         np.hstack((self[i]['pt'][0:2, :, j],
                                                    self.pRx[0:2].reshape((2, 1))))
                                         ))
                        ax.plot(ray[0, :], ray[1, :],
                                alpha = kwargs['alpha'],
                                color = kwargs['color'],
                                linewidth = kwargs['linewidth'])
                ax.axis('off')
            # 2D ray
            else:
                for i in rlist:
                    lray = range(len(self[i]['pt'][0, 0, :]))
                    #if self.filled :
                    #    ax.set_title('rays index :'+ str(self[i]['rayidx']))
                    for j in lray:
                        ray = np.hstack((self.pTx[0:2].reshape((2, 1)),
                                         np.hstack((self[i]['pt'][0:2, :, j],
                                                    self.pRx[0:2].reshape((2, 1))))
                                         ))
                        ax.plot(ray[0, :], ray[1, :],
                                alpha=kwargs['alpha'],
                                color=kwargs['color'],
                                linewidth=kwargs['linewidth'])
                    ax.axis('off')

        if kwargs['bcolorbar']:
            # axes : left , bottom , width , height
            sm = plt.cm.ScalarMappable(cmap = kwargs['cmap'], norm = plt.Normalize(vmin=kwargs['vmin'],vmax=kwargs['vmax']))
            sm._A = [] # necessary set_array
            cax = fig.add_axes([0.18,0.35, 0.35, 0.025])
            #cb = plt.colorbar(sm,cax=cax,orientation='horizontal')
            cb = plt.colorbar(sm,cax=cax,orientation='horizontal')
            cb.ax.tick_params(labelsize=24)
            cb.set_label('Level (dB)', fontsize=24)

        return(fig,ax)

    def mirror(self, H=3, N=1, za = [], zb= []):
        """ mirror a ray termination

        Parameters
        ----------

        H : float
            ceil height (default 3m)
            if H=0 only floor reflection is calculated (outdoor case)
            if H=-1 floor and ceil reflection are inhibited (2D test case)
        N : int
            handle the number of mirror reflexions

        za : float
            height of the point where the parametrization starts ( e.g. pTx[2])

        zb : float
            height of the point where the parametrization ends ( e.g. pRx[2])


        Returns
        -------

        d : dict
            k : zm  v: alpham
            k : zp  v: alphap

        Examples
        --------

        >>> ptx = np.array([1,1,1.5])
        >>> prx = np.array([2,2,1.2])
        >>> r = Rays(ptx,prx)
        >>> d = r.mirror()
        >>> d[-1.5]
        array([ 0.55555556])

        Notes
        -----

        d is a dictionnary whose keys are heights along the vertical from where
        are emanating the reflected rays. Values of d are the parameterization
        (0< () <1) along the ray where are situated the different reflection
        points.


        """



        # km / kp : mirror-image orders below / above the slab
        km = np.arange(-N+1, N+1, 1)
        kp = np.arange(-N, N+1, 1)
        #
        # heights of transmitter and receiver
        #
        if za == []:
            za=self.pTx[2]
        if zb == []:
            zb=self.pRx[2]
        ht = za
        hr = zb

        assert (hr<H or H==0 or H == -1),"mirror : receiver higher than ceil height"
        assert (ht<H or H==0 or H == -1),"mirror : transmitter higher than ceil height"

        # heights of the image sources (method of images)
        zkp = 2*kp*H + ht
        zkm = 2*km*H - ht

        d = {}
        if H>0:
            for zm in zkm:
                if zm < 0:
                    bup = H
                    pas = H
                    km = int(np.ceil(zm/H))
                else:
                    bup = 0
                    pas = -H
                    km = int(np.floor(zm/H))
                # crossing heights between image source and receiver
                thrm = np.arange(km*H, bup, pas)
                d[zm] = abs(thrm-zm)/abs(hr-zm)

            for zp in zkp:
                if zp < 0:
                    bup = H
                    pas = H
                    kp = int(np.ceil(zp/H))
                else:
                    bup = 0
                    pas = -H
                    kp = int(np.floor(zp/H))
                thrp = np.arange(kp*H, bup, pas)
                d[zp] = abs(thrp-zp)/abs(hr-zp)
        elif H==0:
            # outdoor : single floor reflection only
            d[-ht] = np.array([ht/(ht+hr)])
            d[ht] = np.array([])
        elif H==-1:
            # 2D test case : no vertical reflections at all
            d[ht] = np.array([])
        # print "zp",zp
        # print "kp",kp
        # print "thrp",thrp
        # print "alphap",d[zp]

        return d

    def to3D(self, L, H=3, N=1, rmoutceilR=True):
        """ transform 2D ray to 3D ray

        Parameters
        ----------

        L : Layout object

        H : float
            ceil height (default 3m)
            if H= 0 only floor reflection is calculated (outdoor case)
            if H=-1 floor and ceil reflection are inhibited (2D test case)
        N : int
            number of mirror reflexions
        rmoutceilR :
            bool
            Remove ceil reflexions in cycles (Gt nodes)
            with indoor=False attribute

        Returns
        -------

        r3d : Rays

        See Also
        --------

        mirror

        """

        if H==-1:
            rmoutceilR=False

        tx = self.pTx
        rx = self.pRx

        #
        # Phase 1 : calculate Tx images height and parameterization in the
        # vertical plane
        #

        d = self.mirror(H=H, N=N, za=tx[2], zb=rx[2])

        #
        # Elimination of invalid diffraction point
        # If the diffaction point is a separation between 2 air wall
        # it should be removed.


        #
        # Phase 2 : calculate 2D parameterization in the horizontal plane
        #

        # for all group of interactions
        for i in self:

            pts = self[i]['pt'][0:2, :, :]
            sig = self[i]['sig']

            if pts.shape[2]!=0:
                # broadcasting of t and r
                t = self.pTx[0:2].reshape((2, 1, 1)) * \
                    np.ones((1, 1, len(pts[0, 0, :])))
                r = self.pRx[0:2].reshape((2, 1, 1)) * \
                    np.ones((1, 1, len(pts[0, 0, :])))
                pts1 = np.hstack((t, np.hstack((pts, r))))
            else:
                t = self.pTx[0:2].reshape((2, 1, 1))
                r = self.pRx[0:2].reshape((2, 1, 1))
                pts1 = np.hstack((t,r))
            # append t and r to interaction points in 2D


            # segment vectors between consecutive points
            si1 = pts1[:, 1:, :] - pts1[:, :-1, :]
            # array of all ray segments distances
            si = np.sqrt(np.sum(si1 * si1, axis=0))
            # array of cumulative distance of 2D ray
            al1 = np.cumsum(si, axis=0)

            # initialize parameterization parameter alpha
            self[i]['alpha'] = np.zeros(np.shape(si[:-1, :]))

            for j in range(len(self[i]['alpha'][:, 0])):
                # get alpha : normalized curvilinear abscissa of point j
                self[i]['alpha'][j, :] = np.sum(si[0:j+1, :], axis=0) \
                        /np.sum(si, axis=0)
                # get z coordinate by linear interpolation Tx->Rx
                self[i]['pt'][2, j, :] = tx[2] + self[i]['alpha'][j, :] \
                    * (rx[2] - tx[2])

        #
        # Phase 3 : Initialize 3D rays dictionnary
        #
        r3d = Rays(tx, rx)
        r3d.los = self.los
        r3d.is3D = True
        r3d.nray2D = len(self)
        r3d.nb_origin_sig = self.nb_origin_sig
        #
        # Phase 4 : Fill 3D rays information
        #
        # Two nested loops
        #
        #   for all interaction group
        #       for all type of 3D rays
        #          1) extension
        #          2) sort
        #          3) coordinates as a function of parameter
        #
        for k in self:   # for all interaction group k
            # k = int(k)
            # Number of rays in interaction group k
            Nrayk = np.shape(self[k]['alpha'])[1]

            # get 2D horizontal parameterization
            a1 = self[k]['alpha']

            #if (k==1):
            #    pdb.set_trace()
            # get 2D signature
            sig = self[k]['sig']
            #print "signatures 2D ",sig
            #print "----"
            sigsave = copy.copy(sig)
            # add parameterization of tx and rx (0,1)
            a1 = np.concatenate((np.zeros((1, Nrayk)), a1, np.ones((1, Nrayk))))
            # reshape signature in adding tx and rx

            if sig.shape[0]!=0:
                sig = np.hstack((np.zeros((2, 1, Nrayk), dtype=int),
                                 sig,
                                 np.zeros((2, 1, Nrayk), dtype=int)))  # add signature of Tx and Rx (0,0))
            else:
                sig = np.hstack((np.zeros((2, 1, Nrayk), dtype=int),
                                 np.zeros((2, 1, Nrayk), dtype=int)))
            # broadcast tx and rx
            Tx = tx.reshape(3, 1, 1)*np.ones((1, 1, Nrayk))
            Rx = rx.reshape(3, 1, 1)*np.ones((1, 1, Nrayk))

            if k!=0:
                # pte is the sequence of point in 3D ndim =3 ( ndim x k x Nrayk)
                pte = self[k]['pt']
                # ndim x k+2 x Nrayk
                pte = np.hstack((Tx, pte, Rx))
            else:
                pte = np.hstack((Tx, Rx))

            # extension
            for l in d:   # for each vertical pattern (C,F,CF,FC,....)
                #print k,l,d[l]
                Nint = len(d[l])   # number of additional interaction
                #if ((k==1) & (l==5.0)):print
                if Nint > 0:    # if new interaction ==> need extension
                    # a1e : extended horizontal+vertical parameterization
                    a1e = np.concatenate((a1, d[l].reshape(len(d[l]), 1)*
                                          np.ones((1, Nrayk))))
                    # get sorted indices
                    ks = np.argsort(a1e, axis=0)
                    # a1es : extended sorted horizontal + vertical parameterization
                    a1es = np.sort(a1e, axis=0)

                    # #### Check if it exists the same parameter value in the horizontal plane
                    # #### and the vertical plane. Move parameter if so.

                    da1es = np.diff(a1es,axis=0)
                    pda1es = np.where(da1es<1e-10)
                    # nudge duplicated parameters to keep a strict ordering
                    a1es[pda1es]=a1es[pda1es]-1e-3


                    # prepare an extended sequence of points ( ndim x (Nint+k+2) x Nrayk )
                    ptee = np.hstack((pte, np.zeros((3, Nint, Nrayk))))

                    #
                    # Boolean ceil/floor detector
                    #
                    # u is 4 (floor interaction )
                    #      5 (ceil interaction )
                    # depending on the vertical pattern l.
                    #
                    # l <0 corresponds to last reflexion on floor
                    # l >0 corresponds to last reflexion on ceil
                    #
                    # u =0 (floor) or 1 (ceil)
                    # if l < 0:
                    #     u = np.mod(range(Nint), 2)
                    # else:
                    #     u = 1 - np.mod(range(Nint), 2)


                    if l < 0 and Nint%2 ==1:   # l<0 Nint odd
                        u = np.mod(range(Nint), 2)

                    elif l > 0 and Nint%2 ==1:   # l>0 Nint odd
                        u = 1 - np.mod(range(Nint), 2)


                    elif l < 0 and Nint%2 ==0:   # l<0 Nint even
                        u = 1 - np.mod(range(Nint), 2)

                    elif l > 0 and Nint%2 ==0:   # l>0 Nint even
                        u = np.mod(range(Nint), 2)

                    #
                    # shift to interaction-type codes : 4 = floor , 5 = ceil
                    u = u + 4
                    #
                    # At that point we introduce the signature of the new
                    # introduced points on the ceil and/or floor.
                    #
                    # A signature is composed of two lines
                    # esigs sup line : interaction number
                    # esigi inf line : interaction type
                    #
                    esigs = np.zeros((1, Nint, Nrayk), dtype=int)
                    esigi = u.reshape(1, Nint, 1)* np.ones((1, 1, Nrayk), dtype=int)
                    # esig : extension of the signature
                    esig = np.vstack((esigs, esigi))
                    # sige : signature extended ( 2 x (Nint+k+2) x Nrayk )
                    sige = np.hstack((sig, esig))

                    #
                    # 2 x (Nint+k+2) x Nrayk
                    #
                    # sort extended sequence of points
                    # and extended sequence of signatures with the sorting
                    # index ks obtained from argsort of merge parametization
                    #
                    # sequence of extended sorted points
                    #
                    ptees = ptee[:, ks, range(Nrayk)]
                    siges = sige[:, ks, range(Nrayk)]

                    # extended and sorted signature
                    iint_f, iray_f = np.where(siges[ 1, :] == 4)  # floor interaction
                    iint_c, iray_c = np.where(siges[ 1, :] == 5)  # ceil interaction
                    #print siges
                    #
                    # find the list of the previous and next point around the
                    # new ceil or floor point. The case of successive ceil or
                    # floor reflexion make
                    #
                    # All preceding points which are not ceils or floors,
                    # and all following points which are not ceil or floor
                    # reflexion points.
                    #
                    # In order to account for both the ray and the interaction
                    # group involved, a tuple concatenating the floor/ceil
                    # interaction index and the ray index of the associated
                    # group must be passed (hence the zip).
                    #
                    # This sequence of instructions fixes bug #133
                    #
                    # Previously there was an assumption of immediate
                    # succession of an informed 2D point.
                    #
                    try:
                        iintm_f = [ np.where( (siges[1,0:x[0],x[1]]!=4) &
                                              (siges[1,0:x[0],x[1]]!=5))[0][-1]
                                    for x in zip(iint_f,iray_f) ]
                        iintp_f = [ np.where( (siges[1,x[0]:,x[1]]!=4) &
                                              (siges[1,x[0]:,x[1]]!=5))[0][0]+x[0]
                                    for x in zip(iint_f,iray_f) ]
                        iintm_c = [ np.where( (siges[1,0:x[0],x[1]]!=4) &
                                              (siges[1,0:x[0],x[1]]!=5))[0][-1]
                                    for x in zip(iint_c,iray_c) ]
                        iintp_c = [ np.where( (siges[1,x[0]:,x[1]]!=4) &
                                              (siges[1,x[0]:,x[1]]!=5))[0][0]+x[0]
                                    for x in zip(iint_c,iray_c) ]
                    except:
                        pdb.set_trace()

                    # Update coordinate in the horizontal plane
                    #
                    #
                    # The new interaction ceil or floor has no coordinates in
                    # the horizontal plane.
                    # Those coordinates are evaluated first by finding a sub
                    # parameterization of the point with respect to the two
                    # known adjascent interaction point j-1 and j+1 (Thales)
                    #

                    #iintm_f = iint_f - 1
                    #iintp_f = iint_f + 1

                    #iintm_c = iint_c - 1
                    #iintp_c = iint_c + 1


                    #
                    # If there are floor points
                    #

                    if len(iint_f)>0:
                        a1esm_f = a1es[iintm_f, iray_f]
                        a1esc_f = a1es[iint_f, iray_f]
                        a1esp_f = a1es[iintp_f, iray_f]


                        pteesm_f = ptees[0:2, iintm_f, iray_f]
                        pteesp_f = ptees[0:2, iintp_f, iray_f]

                        # linear interpolation between neighbours (Thales)
                        coeff_f = (a1esc_f-a1esm_f)/(a1esp_f-a1esm_f)

                        ptees[0:2, iint_f, iray_f] = pteesm_f + coeff_f*(pteesp_f-pteesm_f)

                    #
                    # If there are ceil points
                    #
                    if len(iint_c)>0:
                        a1esm_c = a1es[iintm_c, iray_c]
                        a1esc_c = a1es[iint_c, iray_c]
                        a1esp_c = a1es[iintp_c, iray_c]

                        pteesm_c = ptees[0:2, iintm_c, iray_c]
                        pteesp_c = ptees[0:2, iintp_c, iray_c]

                        coeff_c = (a1esc_c-a1esm_c)/(a1esp_c-a1esm_c)
                        ptees[0:2, iint_c, iray_c] = pteesm_c + coeff_c*(pteesp_c-pteesm_c)

                    if H != 0:
                        # fold the unfolded (mirror) height back into [0,H]
                        z = np.mod(l+a1es*(rx[2]-l), 2*H)
                        pz = np.where(z > H)
                        z[pz] = 2*H-z[pz]
                        ptees[2, :] = z
                    # case where ceil reflection are inhibited
                    elif H==0:
                        z = abs(l+a1es*(rx[2]-l))
                        # pz = np.where(z > H)
                        # z[pz] = 2*H-z[pz]
                        ptees[2, :] = z

                # recopy old 2D parameterization (no extension)
                else:
                    a1es = a1
                    ks = np.argsort(a1es, axis=0)
                    ptees = pte
                    # fixing bug
                    siges = copy.copy(sig)
                    #print siges

                #---------------------------------
                # handling multi segment (iso segments)
                # Height of reflexion interaction
                # Height of diffraction interaction
                #---------------------------------
                #
                # ptes (3 x i+2 x r )
                if len(L.lsss)>0:
                    #
                    # lsss : list of sub segments ( iso segments siges)
                    # lnss : list of diffaction point involving

                    lsss = np.array(L.lsss)
                    lnss = np.array(L.lnss)

                    # array of structure element (nstr) with TxRx extension (nstr=0)
                    anstr = siges[0,:,:]
                    # type of interaction
                    typi = siges[1,:,:]

                    # lss : list of subsegments in the current signature
                    #
                    # scalability : avoid a loop over all the subsegments in lsss
                    #
                    lss = [ x for x in lsss if x in anstr.ravel()]

                    ray_to_delete = []
                    for s in lss:
                        u = np.where(anstr==s)
                        if len(u)>0:
                            # zs : interaction heights on subsegment s ;
                            # drop rays whose height is outside the segment
                            # vertical extent
                            zs = ptees[2,u[0],u[1]]
                            zinterval = L.Gs.node[s]['z']
                            unot_in_interval = ~((zs<=zinterval[1]) & (zs>=zinterval[0]))
                            ray_to_delete.extend(u[1][unot_in_interval])

                    # lns : list of diffraction points in the current signature
                    #       with involving multi segments (iso)
                    # scalability : avoid a loop over all the points in lnss
                    #
                    lns = [ x for x in lnss if x in anstr.ravel()]

                    #
                    # loop over multi diffraction points
                    #
                    for npt in lns:
                        # diffraction cornet in espoo.lay
                        #if npt==-225:
                        #    import ipdb
                        #    ipdb.set_trace()

                        u = np.where(anstr==npt)
                        if len(u)>0:
                            # height of the diffraction point
                            zp = ptees[2,u[0],u[1]]

                            #
                            # At which couple of segments belongs this height ?
                            # get_diffslab function answers that question
                            #

                            ltu_seg,ltu_slab = L.get_diffslab(npt,zp)

                            #
                            # delete rays where diffraction point is connected to
                            # 2 AIR segments
                            #

                            [ray_to_delete.append(u[1][i]) for i in range(len(zp))
                             if ((ltu_slab[i][0]=='AIR') & (ltu_slab[i][1]=='AIR'))]
                            # #zinterval = L.Gs.node[s]['z']
                            # # if (zs<=zinterval[1]) & (zs>=zinterval[0]):
                            # if ((tu_slab[0]!='AIR') & (tu_slab[1]!='AIR')):
                            #     #print(npt , zp)
                            #     pass
                            # else:
                            #     ray_to_delete.append(u[1][0])

                    # # nstr : structure number
                    # nstr = np.delete(nstr,ray_to_delete,axis=1)
                    # typi : type of interaction
                    typi = np.delete(typi,ray_to_delete,axis=1)
                    # 3d sequence of points
                    ptees = np.delete(ptees,ray_to_delete,axis=2)
                    # extended (floor/ceil) signature
                    siges = np.delete(siges,ray_to_delete,axis=2)

                if rmoutceilR:
                    # 1 determine Ceil reflexion index
                    # uc (inter x ray)
                    uc = np.where(siges[1,:,:]==5)
                    ptc = ptees[:,uc[0],uc[1]]
                    if len(uc[0]) !=0:
                        P = shg.MultiPoint(ptc[:2,:].T)
                        # to determine the cycle where ceil reflexions append
                        # uinter(nb pt x nb cycles)
                        mapnode = list(L.Gt.nodes())
                        uinter = np.array([[L.Gt.node[x]['polyg'].contains(p) for x in mapnode if x>0] for p in P])
                        # import ipdb
                        # ipdb.set_trace()
                        #[plt.scatter(p.xy[0],p.xy[1],c='r') for up,p in enumerate(P) if uinter[0,up]]
                        #[ plt.scatter(p.xy[0],p.xy[1],c='r') for up,p in enumerate(P) if uinter[0,up]]
                        # find points are indoor/outdoor cycles
                        upt,ucy = np.where(uinter)
                        uout = np.where([not L.Gt.node[mapnode[u+1]]['indoor'] for u in ucy])[0] #ucy+1 is to manage cycle 0
                        # 3 remove ceil reflexions of outdoor cycles
                        if len(uout)>0:
                            ptees = np.delete(ptees,uc[1][uout],axis=2)
                            siges = np.delete(siges,uc[1][uout],axis=2)
                            sigsave = np.delete(sigsave,uc[1][uout],axis=2)

                # accumulate the extended rays into group k+Nint of r3d
                if k+Nint in r3d:
                    r3d[k+Nint]['pt'] = np.dstack((r3d[k+Nint]['pt'], ptees))
                    r3d[k+Nint]['sig'] = np.dstack((r3d[k+Nint]['sig'], siges))
                    r3d[k+Nint]['sig2d'].append(sigsave)
                else:
                    if ptees.shape[2]!=0:
                        r3d[k+Nint] = {}
                        r3d[k+Nint]['pt'] = ptees
                        r3d[k+Nint]['sig'] = siges
                        r3d[k+Nint]['sig2d'] = [sigsave]
        # ax=plt.gca()
        # uu = np.where(ptees[2,...]==3.0)
        # ax.plot(ptees[0,uu[0],uu[1]],ptees[1,uu[0],uu[1]],'ok')
        # import ipdb
        # ipdb.set_trace()
        #
        # Add Line Of Sight ray information
        #   pt = [tx,rx]
        #   sig = [0,0]
        #
        #pdb.set_trace()
        # if (self.los) & (np.sqrt(np.sum((tx-rx)**2)) !=0) :
        #     r3d[0] = {}
        #     r3d[0]['sig'] = np.zeros((2,2,1))
        #     r3d[0]['sig2d'] = np.zeros((2,2,1))
        #     r3d[0]['pt'] = np.zeros((3,2,1))
        #     r3d[0]['pt'][:,0,:] = tx[:,np.newaxis]
        #     r3d[0]['pt'][:,1,:] = rx[:,np.newaxis]

        # r3d.nray = reduce(lambda x,y : y + np.shape(r3d[x]['sig'])[2],lnint)
        # count total number of ray
        # evaluate length of ray segment
        #
        # vsi
        # si
        # dis
        #
        val =0

        for k in r3d.keys():
            nrayk = np.shape(r3d[k]['sig'])[2]
            r3d[k]['nbrays'] = nrayk
            # global ray indices, contiguous across groups
            r3d[k]['rayidx'] = np.arange(nrayk)+val
            r3d.nray = r3d.nray + nrayk
            val=r3d[k]['rayidx'][-1]+1

            # 3 : x,y,z
            # i : interaction index
            # r : ray index
            #
            # k : group of interactions index
            #
            v = r3d[k]['pt'][:, 1:, :]-r3d[k]['pt'][:, 0:-1, :]
            lsi = np.sqrt(np.sum(v*v, axis=0))
            rlength = np.sum(lsi,axis=0)
            if (lsi.any()==0):
                pdb.set_trace()
            if not (lsi.all()>0):
                pdb.set_trace()
            #assert(lsi.all()>0)

            # NOTE(review): len(np.where(...)) is always 1 (tuple length),
            # so this condition never fires — confirm intent
            if (len(np.where(lsi==0.))==0) :
                pdb.set_trace()

            #
            # sort rays w.r.t their length
            #

            u = np.argsort(rlength)
            r3d[k]['pt'] = r3d[k]['pt'][:,:,u]
            r3d[k]['sig'] = r3d[k]['sig'][:,:,u]
            #r3d[k]['sig2d'] = r3d[k]['sig2d'][:,:,u]
            si = v/lsi   # ndim , nint - 1 , nray

            # vsi : 3 x (i+1) x r
            r3d[k]['vsi'] = si[:,:,u]

            # si : (i+1) x
            # si : (i+1) x r — per-hop segment lengths, columns reordered by u
            r3d[k]['si'] = lsi[:,u]
            # dis : ,r — total ray length (sorted ascending by u)
            r3d[k]['dis'] = rlength[u]

        # propagation delay per ray : distance / 0.3
        # assumes distance in meters and delay in ns (c ~ 0.3 m/ns) — TODO confirm units
        r3d.delays = np.zeros((r3d.nray))
        for k in r3d.keys():
            ir = r3d[k]['rayidx']
            r3d.delays[ir] = r3d[k]['dis']/0.3

        # bookkeeping : remember originating signature and layout file names
        r3d.origin_sig_name = self.origin_sig_name
        r3d.Lfilename = L._filename
        r3d.filename = L._filename.split('.')[0] + '_' + str(r3d.nray)
        return(r3d)


    def get_rays_slabs(self,L,ir):
        """ return the slab names for a given interaction block

        Parameters
        ----------

        L : Layout
        ir : int
            interaction block index

        Returns
        -------

        numpy array of slab name strings with shape (i, r)
        i : number of interactions of the block
        r : number of rays

        Notes
        -----

        Interactions whose structure number is not a positive segment
        index (0 : Tx/Rx extremities, negative values : points) are
        mapped to the placeholder '_'.

        """

        # NOTE(review): (t!=0) and (t>0) is logically equivalent to t>0
        v=np.vectorize( lambda t: L.Gs.node[t]['name'] if (t!=0) and (t>0) else '_')
        return v(self[ir]['sig'][0])


    def remove_aw(self,L):
        """ remove AIR interactions from rays

        Parameters
        ----------

        L : Layout

        Notes
        -----

        Builds a new Rays structure where 'AIR' / '_AIR' interactions
        are removed and the hop lengths (si) on either side of an air
        wall are merged into a single hop.
        """
        # def consecutive(data, stepsize=1):
        #     return np.split(data, np.where(np.diff(data) != stepsize)[0]+1)


        # new Rays object sharing Tx/Rx and all attributes of self
        R = Rays(self.pTx,self.pRx)
        R.__dict__.update(self.__dict__)
        # R.is3D=True
        # R.nray = self.nray
        # R.nray2D = self.nray2D
        # R.nray2D = self.nray2D
        # R.nray2D = self.nray2D

        for k in self:
            # k : number of interactions of the group
            lr = self[k]['sig'].shape[1]

            # inter : slab name of every interaction of the block (i x r)
            inter = self.get_rays_slabs(L,k)

            # iterate over rays (columns of inter)
            for ur,r in enumerate(inter.T):

                # mask of the interactions which are NOT air walls
                not_air_mask = ~((r =='_AIR') | (r == 'AIR' ))
                nb_air = sum(~not_air_mask)
                if nb_air != 0 :
                    # number of interactions once air walls are removed
                    new_bi = k-nb_air
                    # +2 : add tx & rx interaction
                    # -1 : 2 interactions correspond to 1 distance
                    lsi = new_bi + 2 - 1
                    si = np.zeros(lsi)
                    si_old = self[k]['si'][:,ur]

                    vsi = np.zeros((3,lsi))
                    vsi_old = self[k]['vsi'][...,ur]

                    # keep only non-air interactions in signature and points
                    sig = self[k]['sig'][:,not_air_mask,ur][...,None]
                    # sig2d = self[k]['sig2d'][0][...,ur]
                    pt = self[k]['pt'][:,not_air_mask,ur][...,None]

                    u = 0
                    si_aw = 0

                    # merge hop lengths separated by air walls
                    for uold,b in enumerate(not_air_mask[1:]):
                        if b:
                            # update new si with sum of all
                            # distance from preceding airwall
                            si[u] = si_old[uold] + si_aw
                            # keep vsi from the last airwall
airwall\n # because vsi don't change on an airwall\n vsi[:,u] = vsi_old[:,uold] \n u += 1\n si_aw=0\n else:\n si_aw += si_old[uold]\n si = si[...,None]\n vsi = vsi[...,None]\n dis = np.array([np.sum(si)])\n assert np.allclose(dis,np.sum(si_old))\n\n\n\n else:\n # no air wall case, fill R with self values\n new_bi = k\n pt = self[k]['pt'][...,ur][...,None]\n sig = self[k]['sig'][...,ur][...,None]\n # sig2d = self[k]['sig2d'][0][...,ur]\n si = self[k]['si'][:,ur][:,None]\n vsi = self[k]['vsi'][...,ur][...,None]\n dis = np.array([self[k]['dis'][ur]])\n\n if new_bi in R:\n\n # R[new_bi]['sig2d'].append(self[k]['sig2d'][ur])\n R[new_bi]['pt'] = np.concatenate((R[new_bi]['pt'],pt),axis=2)\n R[new_bi]['sig'] = np.concatenate((R[new_bi]['sig'],sig),axis=2)\n R[new_bi]['rayidx'] = np.concatenate((R[new_bi]['rayidx'],np.array([self[k]['rayidx'][ur]])))\n R[new_bi]['si'] = np.concatenate((R[new_bi]['si'],si),axis=1)\n R[new_bi]['vsi'] = np.concatenate((R[new_bi]['vsi'],vsi),axis=2)\n R[new_bi]['dis'] = np.concatenate((R[new_bi]['dis'],dis),axis=0)\n else:\n R[new_bi] = {}\n # R[new_bi]['sig2d'] = [self[k]['sig2d'][0][...,ur]]\n R[new_bi]['pt'] = pt\n R[new_bi]['sig'] = sig\n R[new_bi]['rayidx'] = np.array([self[k]['rayidx'][ur]])\n R[new_bi]['si'] = si\n R[new_bi]['vsi'] = vsi\n R[new_bi]['dis'] = dis\n\n if 0 in R:\n R.los=True\n\n X = [[R[k]['rayidx'][u] for u in range(len(R[k]['rayidx']))] for k in R]\n R._rayidx_aw = sum(X,[])\n\n return R\n\n def length(self,typ=2):\n \"\"\" calculate length of rays\n\n Parameters\n ----------\n\n typ : int\n men1 : length of all segments\n 2 : accumulated length\n \"\"\"\n dk = {}\n for k in self: # for all interaction group k\n # 3 x Ni-1 x Nr\n vk = self[k]['pt'][:,1:,:]-self[k]['pt'][:,0:-1,:]\n d1 = np.sqrt(np.sum(vk*vk,axis=0))\n d2 = np.sum(d1,axis=0)\n if typ==1:\n dk[k] = d1\n if typ==2:\n dk[k] = d2\n return(dk)\n\n def simplify(self):\n if not self.is3D:\n return None\n\n for ir in self:\n print(self[ik]['si'])\n\n def 
locbas(self, L):\n \"\"\" calculate ray local bas\n\n Parameters\n ----------\n\n L : Layout\n\n Notes\n -----\n\n This method adds for each group of interactions the following members\n\n norm : np.array\n 3 x i x r (interaction vector)\n nstrwall : np.array\n nstr of interactions\n vsi : np.array\n 3 x (i+1) x r\n aod : np.array\n 2 x r\n aoa : np.array\n 2 x r\n BoO : np.array\n 3 x 3 x r\n Bi : np.array\n 3 x 3 x r\n Bo : np.array\n 3 x 3 x r\n BiN : np.array\n 3 x 3 x r\n scpr : np.array\n i x r\n theta : np.array\n i x r\n rays : int\n nbrays : int\n rayidx : np.array\n diffslabs : list\n diffvect : np.array\n (phi0,phi,beta,NN)\n\n\n \"\"\"\n\n #\n # extract normal in np.array\n #\n\n # nsegment x 3\n norm = np.array(list(nx.get_node_attributes(L.Gs,'norm').values()))\n\n # nsegment x k\n key = np.array(list(dict(nx.get_node_attributes(L.Gs,'norm')).keys()))\n\n # maximum number for refering to segment\n # not to be confused with a segment number\n\n nsmax = max(L.Gs.node.keys())\n\n mapping = np.zeros(nsmax+1, dtype=int)\n\n mapping[key] = np.arange(len(key), dtype=int)\n\n #\n # Structure number : nstr\n # the structure number is < 0 for points\n # > 0 for segments\n # A segment can have several subsegments (until 100)\n # nstrs is the nstr of the segment if subsegment :\n # nstr is the glabal which allows to recover the slab values\n #\n\n idx = np.array(())\n if self.los:\n idxts = 1\n nbrayt = 1\n else:\n idxts = 0\n nbrayt = 0\n\n # list of used wedges\n luw=[]\n\n lgi = list(self.keys())\n lgi.sort()\n for k in lgi:\n #\n # k is the number of interactions in the block\n #\n #print(k,self[11]['rayidx'])\n if k != 0:\n\n # structure number (segment or point)\n # nstr : i x r\n nstr = self[k]['sig'][0, 1:-1, :]\n\n # ityp : i x r\n ityp = self[k]['sig'][1, 1:-1, :]\n\n # nstr of underlying segment\n # position of interaction corresponding to a sub segment\n # print nstr\n #\n # uss : index of subsegment\n # subsegments are not nodes of Gs but have 
positive nstr index\n #\n\n uss = np.where(nstr > nsmax)\n\n # print uss\n\n nstrs = copy.copy(nstr)\n #\n # if subsegments have been found\n #\n if len(uss) >0:\n ind = nstr[uss]- nsmax-1\n nstrs[uss] = np.array(L.lsss)[ind]\n # print nstr\n #print nstrs\n #pdb.set_trace()\n nray = np.shape(nstr)[1]\n\n uwall = np.where((ityp == 2) | (ityp == 3))\n udiff = np.where((ityp == 1))\n ufloor= np.where((ityp == 4))\n uceil = np.where((ityp == 5))\n\n nstrwall = nstr[uwall[0], uwall[1]] # nstr of walls\n nstrswall = nstrs[uwall[0], uwall[1]] # nstrs of walls\n\n self[k]['nstrwall'] = nstrwall # store nstr without subsegment\n self[k]['nstrswall'] = nstrswall # store nstr with subsegment\n self[k]['norm'] = np.zeros((3, k, nray)) # 3 x int x nray\n\n # norm : 3 x i x r\n #\n # norm is the vector associated to the interaction\n # For the diffraction case the normal is replaced by the unit\n # vector along the wedge directed upward.\n #\n\n self[k]['norm'][:, uwall[0], uwall[1]] = norm[mapping[nstrswall],:].T\n self[k]['norm'][2, ufloor[0], ufloor[1]] = np.ones(len(ufloor[0]))\n self[k]['norm'][2, uceil[0], uceil[1]] = -np.ones(len(uceil[0]))\n self[k]['norm'][2, udiff[0], udiff[1]] = np.ones(len(udiff[0]))\n\n normcheck = np.sum(self[k]['norm']*self[k]['norm'],axis=0)\n\n assert normcheck.all()>0.99,pdb.set_trace()\n\n\n\n # 3 : x,y,z\n # i : interaction index\n # r : ray index\n #\n # k : group of interactions index\n #\n #v = self[k]['pt'][:, 1:, :]-self[k]['pt'][:, 0:-1, :]\n #lsi = np.sqrt(np.sum(v*v, axis=0))\n #if (lsi.any()==0):\n # pdb.set_trace()\n #assert(lsi.all()>0)\n #if (len(np.where(lsi==0.))==0) :\n # pdb.set_trace()\n\n #si = v/lsi # ndim , nint - 1 , nray\n\n # si : 3 x (i+1) x r\n si = self[k]['vsi']\n\n # si : (i+1) x r\n #self[k]['si'] = lsi\n #self[k]['dis'] = np.sum(lsi,axis=0)\n\n # normal : 3 x i x r\n vn = self[k]['norm']\n # s_in : 3 x i x r\n s_in = si[:, 0:-1, :]\n\n # s_out : 3 x i x r\n s_out = si[:, 1:, :]\n\n #\n # AOD (rad)\n #\n\n # th : 
,r\n thd = np.arccos(si[2, 0, :])\n\n # ph : ,r\n phd = np.arctan2(si[1, 0, :], si[0, 0, :])\n\n # aod : 2 x r (radians)\n self[k]['aod'] = np.vstack((thd, phd))\n\n # eth : 3 x r\n eth = np.array([np.cos(thd) * np.cos(phd),\n np.cos(thd) * np.sin(phd),\n -np.sin(thd)])\n # eph : 3 x r\n eph = np.array([-np.sin(phd),\n np.cos(phd),\n np.zeros(len(phd))])\n\n # Bo0 : 3 x 3 x r\n Bo0 = np.concatenate((si[:, 0, None, :],\n eth[:, None, :],\n eph[:, None, :]), axis=1)\n\n self[k]['Bo0'] = Bo0\n\n #\n # scalar product si . norm\n #\n # vn : 3 x i x r\n # s_in : 3 x i x r\n\n #\n # scpr : i x r\n #\n\n scpr = np.sum(vn*si[:,0:-1,:], axis=0)\n self[k]['scpr'] = scpr\n self[k]['theta'] = np.arccos(abs(scpr)) # *180/np.pi\n\n def fix_colinear(w):\n \"\"\"\n w : vector\n \"\"\"\n nw = np.sqrt(np.sum(w*w, axis=0))\n u = np.where(nw==0)\n if len(u[0])!=0:\n logger.debug('colinear situation detected')\n if (u[0].any() or u[1].any()) \\\n or (u[0].any()==0 or u[1].any()==0):\n\n uu = np.array([u[0],u[1]]).T\n #determine which interaction and rays\n #present the colinearity issue\n uvv = abs(vn[2,uu[:,0],uu[:,1]])>0.99\n # uv : nbi x nbr colinear index\n uv = uu[uvv]\n # uh : nbi x nbr anti-colinear index\n uh = uu[np.logical_not(uvv)]\n try:\n #fix w for colinear index\n w[:,uv[:,0],uv[:,1]] = np.array(([1,0,0]))[:,None]\n # update normal\n nw[uv[:,0],uv[:,1]] = np.sqrt(np.sum(\n w[:,uv[:,0],uh[:,1]]*w[:,uv[:,0],uv[:,1]],axis=0))\n except:\n pass\n try:\n # fix w for anti-colinear index\n w[:,uh[:,0],uh[:,1]] = np.array(([0,0,1]))[:,None]\n # update normal\n nw[uh[:,0],uh[:,1]] = \\\n np.sqrt(np.sum(w[:,uh[:,0],uh[:,1]]*w[:,uh[:,0],uh[:,1]],axis=0))\n except:\n pass\n return w, nw\n #\n # Warning need to handle singular case when s_in // vn\n #\n # w : 3 x i x r\n #\n w = np.cross(s_in, vn, axisa=0, axisb=0, axisc=0)\n\n # nw : i x r\n w, nw = fix_colinear(w)\n\n wn = w/nw\n v = np.cross(wn, s_in, axisa=0, axisb=0, axisc=0)\n\n es_in = np.expand_dims(s_in, axis=1)\n\n ew = 
np.expand_dims(wn, axis=1)\n ev = np.expand_dims(v, axis=1)\n\n # Bi 3 x 3 x i x r\n Bi = np.concatenate((es_in,ew,ev),axis=1)\n # self[k]['Bi'] 3 x 3 x i x r\n self[k]['Bi'] = Bi\n ################################\n\n w = np.cross(s_out, vn, axisa=0, axisb=0, axisc=0)\n\n w, nw = fix_colinear(w)\n #wn = w/np.sqrt(np.sum(w*w, axis=0))\n wn = w/nw\n\n v = np.cross(wn, s_out, axisa=0, axisb=0, axisc=0)\n\n es_out = np.expand_dims(s_out, axis=1)\n ew = np.expand_dims(wn, axis=1)\n ev = np.expand_dims(v, axis=1)\n\n # Bi 3 x 3 x i x r\n Bo = np.concatenate((es_out,ew,ev),axis=1)\n\n # self[k]['Bo'] 3 x 3 x i x r\n self[k]['Bo'] = Bo\n #\n # AOA (rad)\n #\n\n # th : ,r\n # fix doa/dod reciprocity\n #th = np.arccos(si[2, -1, :])\n tha = np.arccos(si[2, -1, :])\n\n # th : ,r\n #ph = np.arctan2(si[1, -1, :], si[0, -1, :])\n pha = np.arctan2(si[1, -1, :], si[0, -1, :])\n\n # aoa : 2 x r (radians)\n self[k]['aoa'] = np.vstack((tha, pha))\n eth = np.array([np.cos(tha) * np.cos(pha),\n np.cos(tha) * np.sin(pha),\n -np.sin(tha)])\n eph = np.array([-np.sin(pha),\n np.cos(pha),\n np.zeros(len(pha))])\n # Bo0 : 3 x 3 x r\n BiN = np.concatenate((si[:,-1,None,:],\n eth[:, None, :],\n eph[:, None, :]), axis=1)\n\n\n self[k]['BiN'] = BiN\n #self[k]['BiN'] = np.concatenate((-si[:,-1,np.newaxis,:],eth[:,np.newaxis,:],\n # eph[:,np.newaxis,:]),axis=1)\n\n # Creation of B from Bi and Bo\n # is done after the potential diffraction\n # computation\n\n ## index creation\n ##################\n # create index for retrieving interactions\n\n # integer offset : total size idx\n\n idxts = idxts + idx.size\n\n idx = idxts + np.arange(ityp.size).reshape(np.shape(ityp),order='F')\n\n nbray = np.shape(idx)[1]\n\n self[k]['rays'] = idx\n self[k]['nbrays'] = nbray\n self[k]['rayidx'] = nbrayt + np.arange(nbray)\n\n # create a numpy array to relate the ray index to its corresponding\n # number of interactions\n #pdb.set_trace()\n _ray2nbi = np.ones((nbray), dtype=int)\n\n\n try:\n self._ray2nbi = 
np.hstack((self._ray2nbi,_ray2nbi))\n except:\n self._ray2nbi = _ray2nbi\n\n\n self._ray2nbi[self[k]['rayidx']] = k\n nbrayt = nbrayt + nbray\n self.raypt = self.raypt + self[k]['nbrays']\n\n #################################\n # Start diffraction specific case\n #################################\n\n if len(udiff[0]) != 0 :\n Z = np.where(ityp.T==1)\n udiff=Z[1],Z[0]\n\n # diffseg,udiffseg = np.unique(nstr[udiff],return_inverse=True)\n diffupt=nstr[udiff]\n # position of diff seg (- because iupnt accept > 0 reference to points)\n #\n # TO BE FIXED\n #\n #ptdiff = L.pt[:,L.iupnt[-diffupt]]\n ptdiff = np.array([ (L.Gs.pos[x][0],L.Gs.pos[x][1]) for x in diffupt ]).T\n\n self[k]['diffidx'] = idx[udiff[0],udiff[1]]\n # get tail head position of seg associated to diff point\n lair = L.name['AIR'] + L.name['_AIR']\n #aseg = map(lambda x : filter(lambda y : y not in lair,\n # nx.neighbors(L.Gs,x)),\n # diffupt)\n\n aseg = [ [ y for y in nx.neighbors(L.Gs,x) if y not in lair ] for x in diffupt ]\n\n #manage flat angle : diffraction by flat segment e.g. 
door limitation)\n [aseg[ix].extend(x) for ix,x in enumerate(aseg) if len(x)==1]\n # get points positions\n #pdb.set_trace()\n pts = np.array([ L.seg2pts([x[0],x[1]]) for x in aseg ])\n\n #self[k]['diffslabs']=[str(L.sl[L.Gs.node[x[0]]['name']])+'_'\n # + str(L.sl[L.Gs.node[x[1]]['name']]]) for x in aseg]\n self[k]['diffslabs']=[ L.Gs.node[x[0]]['name']+'@'\n + L.Gs.node[x[1]]['name'] for x in aseg]\n\n uwl = np.unique(self[k]['diffslabs']).tolist()\n luw.extend(uwl)\n\n\n pt1 = pts[:,0:2,0] #tail seg1\n ph1 = pts[:,2:4,0] #head seg1\n pt2 = pts[:,0:2,1] #tail seg2\n ph2 = pts[:,2:4,1] #head seg2\n\n\n #pts is (nb_diffraction_points x 4 x 2)\n #- The dimension 4 represent the 2x2 points: t1,h1 and t2,h2\n # tail and head of segment 1 and 2 respectively\n # a segment\n #- The dimension 2 is x,y\n #\n # The following aims to determine which tails and heads of\n # segments associated to a given diffraction point\n # are connected\n #\n #\n\n # point diff is pt1\n updpt1 = np.where(np.sum(ptdiff.T==pt1,axis=1)==2)[0]\n # point diff is ph1\n updph1 = np.where(np.sum(ptdiff.T==ph1,axis=1)==2)[0]\n\n # point diff is pt2\n updpt2 = np.where(np.sum(ptdiff.T==pt2,axis=1)==2)[0]\n\n # point diff is ph2\n updph2 = np.where(np.sum(ptdiff.T==ph2,axis=1)==2)[0]\n\n pa = np.empty((len(diffupt),2))\n pb = np.empty((len(diffupt),2))\n\n ####seg 1 :\n #if pt1 diff point => ph1 is the other point\n pa[updpt1]= ph1[updpt1]\n #if ph1 diff point => pt1 is the other point\n pa[updph1]= pt1[updph1]\n ####seg 2 :\n #if pt2 diff point => ph2 is the other point\n pb[updpt2]= ph2[updpt2]\n #if ph2 diff point => pt2 is the other point\n pb[updph2]= pt2[updph2]\n\n pt = ptdiff.T\n\n # NN : (nb_diffraction_points)\n # alpha wegde (a.k.a. 
wedge parameters, a.k.a wedge aperture)\n\n NN = (360.-geu.sector(pa.T,pb.T,pt.T))/180.\n # NN = (2.-NN)*np.pi\n\n #angle between face 0, diffraction point and s_in\n #s_in[:2,udiff[0],udiff[1]] :\n # s_in of insteractions udiff (2D) restricted to diffraction points\n vptpa = pt-pa\n vptpan = vptpa.T / np.sqrt(np.sum((vptpa)*(vptpa),axis=1))\n # vpapt= pa-pt # papt : direction vector of face 0\n # vpaptn = vpapt.T / np.sqrt(np.sum((vpapt)*(vpapt),axis=1))\n sid = s_in[:,udiff[0],udiff[1]] #s_in restricted to diff\n sod = s_out[:,udiff[0],udiff[1]] #s_out restricted to diff\n vnormz = self[k]['norm'][:, udiff[0], udiff[1]]\n\n\n #phi0 = arccos(dot(sid*vpavptn))\n # phi0 = geu.vecang(sid[:2],vpaptn)\n uleft = geu.isleft(pa.T,pt.T,pb.T)\n phi0 = geu.vecang(vptpan,sid[:2])\n phi0[~uleft] = geu.vecang(sid[:2,~uleft],vptpan[:,~uleft])\n # phi0 = np.arccos(np.sum(sid[:2]*vpaptn,axis=0))\n\n #phi = arccos(dot(sod*vpavptn))\n # phi = np.arccos(np.sum(-sod[:2]*vpaptn,axis=0))\n phi = geu.vecang(vptpan,-sod[:2])\n phi[~uleft] = geu.vecang(-sod[:2,~uleft],vptpan[:,~uleft])\n # beta\n #it is important to check if the sid comes from left or right\n #to this end assume that sid vector is composed\n #of 2 point : (0,0) and sid\n # compared to the position of the diffraction point in x\n # with an elevation=0\n sidxz = sid[[0,2]]\n vnormxz = vnormz[[0,2]]\n zero = np.zeros((2,ptdiff.shape[1]))\n zdiff = np.vstack((ptdiff[0],zero[0]))\n left = geu.isleft(zero,sidxz,zdiff)\n beta = np.arccos(np.sum(vnormz*sid,axis=0))\n\n # self[k]['diffvect'] is (4 x Nb_rays )\n # for axis 0 lenght 4 represent :\n # 0 => phi0\n # 1 => phi\n # 2 => beta\n # 3 => N (wedge parameter)\n self[k]['diffvect']=np.array((phi0,phi,beta,NN))\n\n ######\n #Bi diffract\n #####\n #w is the \\perp \\soft in diff\n w = np.cross(-sid, vnormz, axisa=0, axisb=0, axisc=0)\n\n # nw : i x r\n w, nw = fix_colinear(w)\n\n wn = w/nw\n # Handling channel reciprocity s_in --> -s_in\n #v = np.cross(wn, s_in, axisa=0, axisb=0, 
axisc=0)\n v = np.cross(wn, -sid, axisa=0, axisb=0, axisc=0)\n\n e_sid = np.expand_dims(-sid, axis=1)\n ew = np.expand_dims(wn, axis=1)\n ev = np.expand_dims(v, axis=1)\n\n # Bid 3 x 3 x (i,r)diff\n Bid = np.concatenate((e_sid,ev, ew), axis=1)\n\n #update Bi for diffracted rays\n Bi[:,:,udiff[0],udiff[1]] = Bid\n ######\n #Bo diffract\n #####\n w = np.cross(sod,vnormz, axisa=0, axisb=0, axisc=0)\n\n w, nw = fix_colinear(w)\n wn = w/nw\n\n #wn = w/np.sqrt(np.sum(w*w, axis=0))\n v = np.cross(wn, sod, axisa=0, axisb=0, axisc=0)\n\n e_sod = np.expand_dims(sod, axis=1)\n ew = np.expand_dims(wn, axis=1)\n ev = np.expand_dims(v, axis=1)\n # Bod 3 x 3 x (i,r)diff\n Bod = np.concatenate((e_sod,ev, ew), axis=1)\n\n #update Bo for diffracted rays\n Bo[:,:,udiff[0],udiff[1]] = Bod\n #################################\n # End of diffraction specific case\n ##################################\n\n\n #\n # pasting (Bo0,B,BiN)\n #\n\n # B : 3 x 3 x i x r\n\n Bo = np.concatenate((Bo0[:, :, np.newaxis, :], Bo), axis=2)\n Bi = np.concatenate((Bi, BiN[:, :, np.newaxis, :]), axis=2)\n\n # B : 3 x 3 x i x r\n\n self[k]['B'] = np.einsum('xv...,xw...->vw...', Bi, Bo)\n #self[k]['B'] = np.einsum('vx...,xw...->vw...', Bi, Bo)\n\n #BiN = np.array([si[:,-1,:], eth, eph]) # ndim x 3 x Nray\n #self[k]['BiN']=BiN\n # self[k]['B']=np.sum(self[k]['Bi'][:2,:2,np.newaxis]*self[k]['Bo'][np.newaxis,:2,:2],axis=1)\n\n\n # if los exists\n else :\n self[k]['nstrwall'] = np.array(())\n self[k]['norm'] = np.array(())\n si = np.sqrt(np.sum((self[0]['pt'][:,0]-self[0]['pt'][:,1])**2,axis=0))\n self[k]['si'] = np.vstack((si,0.))\n self[k]['vsi'] = (self[0]['pt'][:,1]-self[0]['pt'][:,0])/si\n self[k]['dis'] = np.array((si))\n\n vsi = self[k]['vsi']\n thd = np.arccos(vsi[2])\n phd = np.arctan2(vsi[1], vsi[0])\n\n self[k]['aod'] = np.vstack((thd, phd))\n self[k]['Bo0'] = np.array(())\n self[k]['scpr'] = np.array(())\n self[k]['theta'] = np.zeros((1,1))\n\n #\n # The following derivation of the doa is the actual 
chosen angle convention\n # Those angles are relative to natural spherical coordinates system in the gcs of the scene.\n #\n # for a LOS path :\n # tha = pi - thd\n # pha = phd - pi\n #\n #self[k]['aoa'] = np.vstack((np.pi-thd, phd-np.pi))\n self[k]['aoa'] = np.vstack((thd,phd))\n E = np.eye(2)[:,:,np.newaxis,np.newaxis]\n self[k]['B'] = np.dstack((E,E))\n ze = np.array([0])\n self[k]['rays'] = np.array(([[0]]))\n self[k]['nbrays'] = 1\n self[k]['rayidx'] = ze\n self.raypt = 1\n self._ray2nbi = ze\n self._luw = np.unique(luw).tolist()\n self.isbased = True\n\n def fillinter(self, L, append=False):\n \"\"\" fill ray interactions\n\n Parameters\n ----------\n\n L : Layout\n append : Boolean\n If True append new rays to existing structure\n\n\n Notes\n -------\n\n This method adds the following members\n\n I : Interactions\n B : IntB\n B0 : IntB\n\n \"\"\"\n\n # reinitialized ray pointer if not in append mode\n if not append:\n self.raypt = 0\n\n # stacked interactions\n I = Interactions(slab=L.sl)\n\n # rotation basis\n B = IntB(slab=L.sl)\n B0 = IntB(slab=L.sl)\n\n # # LOS Interaction\n # Los = IntL()\n\n # Reflexion\n R = IntR(slab=L.sl)\n\n # Transmission\n T = IntT(slab=L.sl)\n\n # Diffraction\n D = IntD(slab=L.sl)\n\n idx = np.array(())\n if self.los:\n idxts = 1\n nbrayt = 1\n else:\n idxts = 0\n nbrayt = 0\n\n\n # Transform dictionnary of slab name to array\n # slv = nx.get_node_attributes(L.Gs, \"name\").values()\n # slk = nx.get_node_attributes(L.Gs, \"name\").keys()\n # find all material used in simulation\n #uslv = np.unique(L.sla[1:])\n uslv = L.sl.keys()\n #\n # add CEIL and FLOOR\n #\n #uslv = np.hstack((uslv, np.array(('CEIL', 'FLOOR'))))\n\n # create reverse dictionnary with all material as a key\n # and associated point/segment as a value\n\n #dsla = {}\n #for s in uslv:\n # dsla[s] = np.where(s == np.array(slv))[0]\n\n nsmax = max(L.Gs.node.keys())\n #sla = np.zeros((nsmax+1), dtype='S20')\n\n # array type str with more than 1 character\n # warning 
use zeros instead of empty because slab zero\n # is virtually used before assigning correct slab to ceil and floor\n\n #\n # sla is an array of string.\n # each value of Gs node is the index of the corresponding slab\n #\n\n #sla[slk] = np.array(slv)\n\n R.dusl = dict.fromkeys(uslv, np.array((), dtype=int))\n T.dusl = dict.fromkeys(uslv, np.array((), dtype=int))\n\n #to be specified and limited to used wedges\n if hasattr(self,'_luw'):\n D.dusl = dict.fromkeys(self._luw, np.array((), dtype=int))\n\n # transmission/reflection slab array\n tsl = np.array(())\n rsl = np.array(())\n # diffraction wedge list\n dw = np.array(())\n\n # loop on group of interactions\n for k in self:\n\n if k !=0:\n\n uR = uT = uD = uRf = uRc = 0.\n\n # structure number (segment or point)\n # nstr : i x r\n nstr = self[k]['sig'][0, 1:-1, :]\n\n # ityp : i x r\n ityp = self[k]['sig'][1, 1:-1, :]\n\n # theta : i x r ( related to interactions )\n theta = self[k]['theta']\n\n # (i+1) x r\n si = self[k]['si']\n # distance in\n s_in = si[0:-1,:]\n # distance in\n s_out = si[1:,:]\n\n if 'diffvect' in self[k]:\n dvec = self[k]['diffvect']\n ldsl = self[k]['diffslabs']\n dix = self[k]['diffidx']\n\n\n ## flatten information\n ######################\n\n # flatten nstr (1 dimension)\n # size1 = i x r\n size1 = nstr.size\n\n # flatten ityp (method faster than np.ravel() )\n nstrf = np.reshape(nstr,size1,order='F')\n itypf = ityp.reshape(size1,order='F')\n thetaf = theta.reshape(size1,order='F')\n #sif = si[0, :, :].reshape(si[0, :, :].size)\n\n # ## index creation / already done in rays.locbas\n # ##################\n # # create index for retrieving interactions\n\n # # integer offset : total size idx\n\n # idxts = idxts + idx.size\n\n # idx = idxts + np.arange(ityp.size).reshape(np.shape(ityp),order='F')\n\n # nbray = np.shape(idx)[1]\n\n # self[k]['rays'] = idx\n # self[k]['nbrays'] = nbray\n # self[k]['rayidx'] = nbrayt + np.arange(nbray)\n # # create a numpy array to relate the ray index to its 
corresponding\n # # number of interactions\n\n # # _ray2nbi = np.ones((nbray))\n\n # #try:\n # # self._ray2nbi=np.hstack((self._ray2nbi,_ray2nbi))\n # #except:\n # # self._ray2nbi=_ray2nbi\n\n # #self._ray2nbi[self[k]['rayidx']] = k\n # nbrayt = nbrayt + nbray\n # #self.raypt = self.raypt + self[k]['nbrays']\n\n idxf = self[k]['rays'].reshape(self[k]['rays'].size,order='F')\n # (i+1)xr\n # \n\n size2 = si[:, :].size\n nbray = self[k]['nbrays']\n # TODO\n # dirty fix\n # nbray is either an int or an array. why ?\n if type(nbray)==np.ndarray:\n nbray=nbray[0]\n\n # ,(i+1)xr\n # sif = si[:, :].reshape(size2,order='F') # TO BE REMOVE\n s_inf = s_in[:, :].reshape(ityp.size,order='F')\n s_outf = s_out[:, :].reshape(ityp.size,order='F')\n\n # 2x2,(i+1)xr\n\n #\n # self[k]['B'] 3 x 3 x i x r\n #\n # first unitary matrix (3x3xr)\n b0 = self[k]['B'][:,:,0,:]\n # first unitary matrix 1:\n # dimension i and r are merged\n b = self[k]['B'][:,:,1:,:].reshape(3, 3, size2-nbray,order='F')\n\n\n ## find used slab\n ##################\n # find slab type for the rnstr\n # nstrf is a number of slab\n # this is a problem for handling subsegment\n #\n\n # seek for interactions position\n ################################\n\n uD = np.where((itypf == 1))[0]\n uR = np.where((itypf == 2))[0]\n uT = np.where((itypf == 3))[0]\n uRf = np.where((itypf == 4))[0]\n uRc = np.where((itypf == 5))[0]\n\n # assign floor and ceil slab\n ############################\n\n slT = [ L.Gs.node[x]['name'] for x in nstrf[uT] ]\n slR = [ L.Gs.node[x]['name'] for x in nstrf[uR] ]\n\n # WARNING\n # in future versions floor and ceil could be different for each cycle.\n # this information would be directly obtained from L.Gs\n # then the two following lines would have to be modified\n\n slRf = np.array(['FLOOR']*len(uRf))\n slRc = np.array(['CEIL']*len(uRc))\n\n\n # Fill the used slab\n #####################\n\n tsl = np.hstack((tsl, slT))\n rsl = np.hstack((rsl, slR, slRf, slRc))\n if 'diffvect' in self[k]:\n dw = 
np.hstack((dw,self[k]['diffslabs']))\n ## for s in uslv:\n ##\n ## T.dusl[s]=np.hstack((T.dusl[s],len(T.idx) + np.where(sl[uT]==s)[0]))\n ## R.dusl[s]=np.hstack((R.dusl[s],len(R.idx) + np.where(sl[uR]==s)[0]))\n ## R.dusl['FLOOR']=np.hstack((R.dusl['FLOOR'],len(R.idx)+len(uR) + np.where(sl[uRf]=='FLOOR')[0]))\n # R.dusl['CEIL']=np.hstack((R.dusl['CEIL'],len(R.idx)+len(uR)+len(uRf) +\n # np.where(sl[uRc]=='CEIL')[0]))\n\n # Basis\n # Hugr issue with B index\n # Friedman version Bs was entering in the index\n # maybe B can have the same index that interactions\n # but this must be managed when evaluation of CIR is made\n\n # BU 10/4/2013\n # .. todo: This is no longer idxf the good index\n # why the transposition b is first 2x2x(i+1)xr\n # idxf is (ixr)\n #\n # need to check how B is used in eval()\n #\n # Warning\n # -------\n # B.idx refers to an interaction index\n # whereas B0.idx refers to a ray number\n # B.stack(data=b.T, idx=idxf)\n # B0.stack(data=b0.T,idx=self[k]['rayidx'])\n\n B.stack(data=b.T, idx=idxf)\n B0.stack(data=b0.T,idx=self[k]['rayidx'])\n ### Reflexion\n ############\n ### wall reflexion\n #(theta, s_in,s_out)\n\n R.stack(data=np.array((thetaf[uR], s_inf[uR], s_outf[uR])).T,\n idx=idxf[uR])\n # floor reflexion\n R.stack(data=np.array((thetaf[uRf], s_inf[uRf], s_outf[uRf])).T,\n idx=idxf[uRf])\n # ceil reflexion\n R.stack(data=np.array((thetaf[uRc], s_inf[uRc], s_outf[uRc])).T,\n idx=idxf[uRc])\n\n # R.stack(data=np.array((thetaf[uR], sif[uR], sif[uR+1])).T,\n # idx=idxf[uR])\n # # floor reflexion\n # R.stack(data=np.array((thetaf[uRf], sif[uRf], sif[uRf+1])).T,\n # idx=idxf[uRf])\n # # ceil reflexion\n # R.stack(data=np.array((thetaf[uRc], sif[uRc], sif[uRc+1])).T,\n # idx=idxf[uRc])\n\n ### sl[idxf[uT]]\n # Transmision\n ############\n # (theta, s_in,s_out)\n # T.stack(data=np.array((thetaf[uT], sif[uT], sif[uT+1])).T, idx=idxf[uT])\n T.stack(data=np.array((thetaf[uT], s_inf[uT], s_outf[uT])).T, idx=idxf[uT])\n\n ###\n #Diffraction\n 
#phi0,phi,si,sd,N,mat0,matN,beta\n #\n\n if 'diffvect' in self[k]:\n # self[k]['diffvect'] = ((phi0,phi,beta,N) x (nb_rayxnb_interactions) )\n #si and so are stacked at the end of self[k]['diffvect']\n #as well:\n #data = (6 x (nb_rayxnb_interactions) )\n # ((phi0,phi,beta,N,sin,sout) x (nb_rayxnb_interactions) )\n data = np.vstack((self[k]['diffvect'],s_inf[uD],s_outf[uD]))\n D.stack(data=data.T,idx=self[k]['diffidx'])#idxf[uD])\n\n elif self.los:\n ze = np.array([0])\n #self[k]['rays'] = np.array(([[0]]))\n #self[k]['nbrays'] = 1\n #self[k]['rayidx'] = ze\n #self.raypt = 1\n #self._ray2nbi=ze\n B.stack(data=np.eye(3)[np.newaxis,:,:], idx=ze)\n B0.stack(data=np.eye(3)[np.newaxis,:,:],idx=ze)\n\n if len(tsl)>0:\n T.create_dusl(tsl)\n if len(rsl)>0:\n R.create_dusl(rsl)\n if len(dw)>0:\n D.create_dusl(dw)\n # create interactions structure\n self.I = I\n self.I.add([T, R, D])\n # create rotation base B\n self.B = B\n # create rotation base B0\n self.B0 = B0\n\n self.filled = True\n\n def eval(self,fGHz=np.array([2.4]),bfacdiv=False,ib=[]):\n \"\"\" field evaluation of rays\n\n Parameters\n ----------\n\n fGHz : array\n frequency in GHz\n ib : list of interactions block\n\n \"\"\"\n\n #print 'Rays evaluation'\n\n self.fGHz=fGHz\n\n # evaluation of all interactions\n #\n # core calculation of all interactions is done here\n #\n \n self.I.eval(fGHz)\n\n # if np.isnan(self.I.I).any():\n # pdb.set_trace()\n # evaluation of base B (2x2)\n # B and B0 do no depend on frequency\n # just an axis extension (np.newaxis)\n #pdb.set_trace()\n\n # 1 x i x 3 x 3\n B = self.B.data[np.newaxis,...]\n B = B.swapaxes(2,3)\n # 1 x r x 3 x 3\n B0 = self.B0.data[np.newaxis,...]\n B0 = B0.swapaxes(2,3)\n\n # Ct : f x r x 3 x 3\n Ct = np.zeros((self.I.nf, self.nray, 3, 3), dtype=complex)\n\n # delays : ,r\n self.delays = np.zeros((self.nray))\n\n # dis : ,r\n self.dis = np.zeros((self.nray))\n\n #nf : number of frequency point\n nf = self.I.nf\n\n aod= np.empty((2,self.nray))\n aoa= 
np.empty((2,self.nray))\n # loop on interaction blocks\n if ib==[]:\n ib=self.keys()\n \n # loop over group of interactions\n for l in ib:\n # ir : ray index\n\n ir = self[l]['rayidx']\n aoa[:,ir]=self[l]['aoa']\n aod[:,ir]=self[l]['aod']\n if l != 0:\n # l stands for the number of interactions\n r = self[l]['nbrays']\n # dirty fix should not be an array\n if type(r)==np.ndarray:\n r = r[0]\n # reshape in order to have a 1D list of index\n # reshape ray index\n rrl = self[l]['rays'].reshape(r*l,order='F')\n # get the corresponding evaluated interactions\n #\n # reshape error can be tricky to debug.\n #\n # f , r , l , 2 , 2\n A = self.I.I[:, rrl, :, :].reshape(self.I.nf, r, l, 3, 3)\n # get the corresponding unitary matrix B\n # 1 , r , l , 2 , 2\n #Bl = B[:, rrl, :, :].reshape(self.I.nf, r, l, 2, 2,order='F')\n Bl = B[:, rrl, :, :].reshape(1, r, l, 3, 3)\n # get the first unitary matrix B0l\n B0l = B0[:,ir,:, :]\n # get alpha\n alpha = self.I.alpha[rrl].reshape(r, l,order='F')\n # # get gamma\n gamma = self.I.gamma[rrl].reshape(r, l,order='F')\n # # get si0\n si0 = self.I.si0[rrl].reshape(r, l,order='F')\n # # get sout\n sout = self.I.sout[rrl].reshape(r, l,order='F')\n\n try:\n del Z\n except:\n pass\n\n\n #print \"\\nrays\",ir\n #print \"-----------------------\"\n ## loop on all the interactions of ray with l interactions\n for i in range(0, l):\n\n\n ############################################\n ## # Divergence factor D\n ### not yet implementented\n ############################################\n# if i == 0:\n# pdb.set_trace()\n# D0 = 1./si0[:,1]\n# rho1 = si0[:,1]*alpha[:,i]\n# rho2 = si0[:,1]*alpha[:,i]*gamma[:,i]\n# D =np.sqrt(\n# ( (rho1 ) / (rho1 + sout[:,i]) )\n# *( (rho2) / (rho2 + sout[:,i])))\n# D=D*D0\n# rho1=rho1+(sout[:,i]*alpha[:,i])\n# rho2=rho2+(sout[:,i]*alpha[:,i]*gamma[:,i])\n#\n# ## gerer le loss\n# if np.isnan(D).any():\n# p=np.nonzero(np.isnan(D))[0]\n# D[p]=1./sout[p,1]\n# else :\n# D=np.sqrt(\n# ( (rho1 ) / (rho1 + sout[:,i]) )\n# *( 
(rho2) / (rho2 + sout[:,i])))\n#\n# rho1=rho1+(sout[:,i]*alpha[:,i])\n# rho2=rho2+(sout[:,i]*alpha[:,i]*gamma[:,i])\n ############################################\n\n # A0 (X dot Y)\n # | | |\n # v v v\n ##########################\n ## B # I # B # I # B #\n ##########################\n # \\_____/ \\______/\n # | |\n # Atmp(i) Atmp(i+1)\n #\n # Z=Atmp(i) dot Atmp(i+1)\n\n #X = A [:, :, i, :, :]\n #Y = Bl[:, :, i, :, :]\n # pdb.set_trace()\n if i == 0:\n ## First Basis added\n Atmp = A[:, :, i, :, :]\n B00 = B0l[:, :, :, :]\n Z = np.sum(Atmp[..., :, :, np.newaxis]\n *B00[..., np.newaxis, :, :], axis=-2)\n else:\n Atmp = A[:, :, i, :, :]\n BB = Bl[:, :, i-1, :, :]\n Ztmp = np.sum(Atmp[..., :, :, np.newaxis]\n *BB[..., np.newaxis, :, :], axis=-2)\n\n\n Z = np.sum(Ztmp[..., :, :, np.newaxis]\n *Z[..., np.newaxis, :, :], axis=-2)\n\n if i == l-1:\n BB = Bl[:, :, i, :, :]\n Z = np.sum(BB[..., :, :, np.newaxis]\n *Z[..., np.newaxis, :, :], axis=-2)\n\n\n # fill the C tilde MDA\n\n Ct[:,ir, :, :] = Z[:, :, :, :]\n\n #\n if bfacdiv:\n Ct[:,ir, :, :] = Ct[:, ir, :, :]*1./(self[l]['dis'][np.newaxis, :, np.newaxis, np.newaxis])\n else:\n Ct[:,ir, :, :] = Ct[:, ir, :, :]*1./(self[l]['dis'][np.newaxis, :, np.newaxis, np.newaxis])\n self.delays[ir] = self[l]['dis']/0.3\n self.dis[ir] = self[l]['dis']\n #\n # true LOS when no interaction\n #\n if self.los:\n Ct[:,0, :, :]= np.eye(3,3)[None,None,:,:]\n #self[0]['dis'] = self[0]['si'][0]\n # Fris\n Ct[:,0, :, :] = Ct[:,0, :, :]*1./(self[0]['dis'][None, :, None, None])\n self.delays[0] = self[0]['dis']/0.3\n self.dis[0] = self[0]['dis']\n\n\n # To be corrected in a future version\n #\n # Ct : nf , Nray , theta , phi \n #\n # to \n #\n # Ct : Nray x nf , theta , phi \n #\n Ct = np.swapaxes(Ct, 1, 0)\n\n #c11 = Ct[:,:,0,0]\n #c12 = Ct[:,:,0,1]\n #c21 = Ct[:,:,1,0]\n #c22 = Ct[:,:,1,1]\n \n c11 = Ct[:,:,1,1]\n c12 = Ct[:,:,1,2]\n c21 = Ct[:,:,2,1]\n c22 = Ct[:,:,2,2]\n\n\n #\n # Construction of the Ctilde propagation channel structure\n 
#\n Cn = Ctilde()\n\n # Cn.Cpp = bs.FUsignal(self.I.fGHz, c11)\n # Cn.Cpt = bs.FUsignal(self.I.fGHz, c12)\n # Cn.Ctp = bs.FUsignal(self.I.fGHz, c21)\n # Cn.Ctt = bs.FUsignal(self.I.fGHz, c22)\n Cn.Ctt = bs.FUsignal(self.I.fGHz, c11)\n Cn.Ctp = bs.FUsignal(self.I.fGHz, c12)\n Cn.Cpt = bs.FUsignal(self.I.fGHz, c21)\n Cn.Cpp = bs.FUsignal(self.I.fGHz, c22)\n\n Cn.nfreq = self.I.nf\n Cn.nray = self.nray\n Cn.tauk = self.delays\n Cn.fGHz = self.I.fGHz\n # r x 2\n Cn.tang = aod.T\n Cn.tangl = aod.T\n # r x 2\n #\n # recover angle of arrival convention \n #\n Cn.rang = np.hstack([np.pi-aoa.T[:,[0]],aoa.T[:,[1]]-np.pi]) \n Cn.rangl = np.hstack([np.pi-aoa.T[:,[0]],aoa.T[:,[1]]-np.pi])\n # add aoa and aod\n\n self.evaluated = True\n\n return(Cn)\n\n\n def rayfromseg(self,ls):\n ''' DEPRECATED \n use raysfromnstr instead\n '''\n DeprecationWarning('function name update: use raysfromnstr instead')\n return self.rayfromnstr(ls)\n\n def rayfromnstr(self,ls):\n \"\"\" returns the indexes of rays for a given interaction list\n \"\"\"\n\n if not isinstance(ls,list):\n ls = [ls]\n\n lur = []\n for k in self:\n aib = self[k]['sig'][0,...]\n for i in ls :\n # import ipdb\n # ipdb.set_trace()\n ui, ur = np.where(aib == i)\n lur.extend(self[k]['rayidx'][ur].tolist())\n return np.sort(lur)\n\n def rayfromdelay(self,t0=0,t1=[]):\n \"\"\" returns the indexes of rays between 2 timestamps t0 and t1\n \"\"\"\n if t1 == []:\n t1 = self.delays.max()\n u = np.where((self.delays>t0) & (self.delays<t1))[0]\n return u\n\n\n\n\n\n def ray2slab(self,L,ir):\n \"\"\" return the slabs for a given interaction index \n\n\n Parameters\n ----------\n\n L : Layout\n ir : interaction block\n\n Returns\n -------\n\n numpy array of slabs strings at the shape (ir,r)\n ir : number of interactions ( of the interaction block)\n r : number of rays\n\n \"\"\"\n\n v=np.vectorize( lambda t: L.Gs.node[t]['name'] if (t!=0) and (t>0) else '_')\n return v(self[ir]['sig'][0])\n\n\n def ray(self, r):\n \"\"\" returns the 
index of interactions of r\n\n Parameters\n ----------\n\n r : integer\n ray index\n\n Returns\n -------\n\n ir : nd.array\n index of interactions of r\n\n Examples\n --------\n\n \"\"\"\n raypos = np.nonzero(self[self._ray2nbi[r]]['rayidx'] == r)[0]\n return(self[self._ray2nbi[r]]['rays'][:,raypos][:,0])\n\n def ir2a(self,ir):\n \"\"\" index ray 2 address ray\n\n Parameters\n ----------\n ir : integer\n \n Returns\n -------\n (ni,ux) : tuple address (group of interactions, index)\n\n \"\"\"\n assert ir < self.nray, \"wrong ray index\"\n ni = self._ray2nbi[ir]\n ur = np.where(self[ni]['rayidx']==ir)[0][0]\n return(ni,ur)\n\n def a2ir(self,t):\n \"\"\" address ray 2 index ray\n \n Parameters\n ----------\n t = (ni,ux) : tuple address (group of interactions, index)\n ray address \n \n Returns\n -------\n ir : integer\n ray index \n\n \"\"\"\n assert t[0] in self.keys(), \"wrong number of interactions\"\n ir = self[t[0]]['rayidx'][t[1]]\n return(ir)\n\n\n def ray2ityp(self,r):\n \"\"\" return interaction type for a given ray\n\n\n Parameters\n ----------\n\n r : integer\n ray index\n\n\n Returns\n -------\n \n lt : list\n list of type of interactions\n\n \"\"\"\n\n di = {1:'D',2:'R',3:'T',4:'R',5:'R'}\n sig = self.ray2sig(r)\n sig = sig[1,1:-1]\n return [di[s] for s in sig]\n\n\n def ray2nbi(self,r):\n \"\"\" Get interaction block/number of interactions of a given ray\n\n Parameters\n ----------\n\n r : integer\n ray index\n\n Returns\n -------\n\n nbi : int\n interaction block number\n \"\"\"\n i = self._ray2nbi[r]\n return i \n\n def ray2iidx(self,ir):\n \"\"\" Get interactions index of a given ray\n\n Parameters\n ----------\n\n ir : integer\n ray index\n\n Returns\n -------\n\n iidx : array\n interaction index \n \"\"\"\n unbi = self.ray2nbi(ir)\n ur = np.where(self[unbi]['rayidx']==ir)[0]\n return self[unbi]['rays'][:,ur]\n\n\n def ray2sig(self,ir):\n \"\"\" get signature to corresponding ray\n \"\"\"\n unbi = self.ray2nbi(ir)\n ur = 
np.where(self[unbi]['rayidx']==ir)[0]\n return self[unbi]['sig'][:,:,ur].squeeze()\n\n def ray2sig2d(self,ir):\n \"\"\" get signature to corresponding ray\n \"\"\"\n sig = self.ray2sig(ir)\n sig = sig.squeeze()\n sig = sig[:,1:-1] # remove extremal 0\n unfc = np.where(sig[1,:]<4)[0]# index floor cell\n sig2d = sig[:,unfc]\n return sig2d\n\n def ray2inter(self,ir,L,Si):\n \"\"\" get interaction list (Gi style) from a ray\n\n Parameters\n ----------\n\n ir : ray index\n L : Layout\n Si : Signatures object\n\n \"\"\"\n sig = self.ray2sig2d(ir)\n return Si.sig2inter(L,sig)\n\n\n def slab_nb(self, ir):\n \"\"\" returns the slab numbers of r\n\n Parameters\n ----------\n\n ir : integer\n ray index\n\n Returns\n -------\n\n isl : slabs number\n\n\n \"\"\"\n\n raypos = np.nonzero(self[self._ray2nbi[ir]]['rayidx'] == ir)[0]\n return(self[self._ray2nbi[ir]]['sig'][0,1:-1,raypos[0]])\n\n def vis(self,ir,L):\n typ = ['Tx'] + self.typ(ir) + ['Rx'] \n slab_nb = self.slab_nb(ir)\n slab_nb = np.insert(slab_nb,0,0)\n slab_nb = np.insert(slab_nb,len(slab_nb),0)\n nbi = self._ray2nbi[ir]\n raypos = np.nonzero(self[nbi]['rayidx'] == ir)[0]\n pt = self[nbi]['pt'][:,:,raypos]\n tz = pt[2].ravel()\n slab = [ L.Gs.node[x]['name'] for x in slab_nb if x > 0]\n st = ''\n for t in typ:\n st = st + t+' ' \n print(st)\n st = ''\n for s in slab_nb:\n st = st + str(s)+' ' \n print(st)\n st = ''\n for z in tz:\n st = st + str(z)+' ' \n print(st)\n print(slab)\n\n def typ(self, ir,fromR=True):\n \"\"\" returns interactions list type of a given ray\n\n Parameters\n ----------\n\n ir : integer\n ray index\n fromR : bool\n True : get information from signature in R\n False: get information in R.I\n\n \"\"\"\n #\n # In this function we can see that the ceil and floor \n # are hard coded as reflection. 
This is going to evolve \n # for implementation of multi floor \n #\n if fromR:\n di = {0:'L',1:'D',2:'R',3:'T',4:'R',5:'R'}\n nbi = self._ray2nbi[ir]\n raypos = np.nonzero(self[nbi]['rayidx'] == ir)[0]\n inter = self[nbi]['sig'][1,1:-1,raypos][0]\n return [di[i] for i in inter]\n else:\n a = self.ray(r)\n return(self.I.typ[a])\n\n def dump(self,ir,L,ifGHz=0,filename='dumpray.ray'):\n \"\"\" dump the full information of a ray in a file \n \"\"\"\n nbi = self._ray2nbi[ir] \n ur = np.where(self[nbi]['rayidx']==ir)[0][0]\n fd=open(filename,'w')\n fd.write('ray #'+str(ir)+'\\n')\n fd.write(str(ur)+ ' th ray from the group of ' + str(nbi)+' Interactions' +'\\n')\n cy_a = L.pt2cy(self.pTx)\n cy_b = L.pt2cy(self.pRx)\n\n #fd.write('Tx #'+str(self.pTx)+'\\n')\n #fd.write('Rx #'+str(self.pRx)+'\\n')\n if self.evaluated:\n ray = self.ray(ir)\n typ = self.typ(ir)\n slabnb = self.slab_nb(ir)\n fd.write(' ray #'+str(ray)+'\\n')\n #fd.write(' typ #'+str(typ)+'\\n')\n fd.write(' slab #'+str(slabnb)+'\\n')\n for k in range(nbi+2):\n if k==0:\n fd.write('Tx : ')\n elif k==(nbi+1):\n fd.write('Rx : ')\n else:\n six = slabnb[k-1]\n if six==0:\n slabname='FLOOR'\n cyc =[-2,-3]\n else:\n slabname = L.Gs.node[six]['name']\n cyc = L.Gs.node[six]['ncycles']\n if typ[k-1]=='T':\n fd.write('T '+slabname +' ('+str(six)+','+str(cyc[0])+','+str(cyc[1])+')')\n if typ[k-1]=='R':\n fd.write('R '+slabname +' ('+str(six)+',)')\n if typ[k-1]=='D':\n fd.write('D ('+str(six)+') :')\n\n fd.write(str(self[nbi]['pt'][:,k,ur])+'\\n' )\n if k==0:\n fd.write(' '+str(cy_a)+'\\n')\n elif k==(nbi+1):\n fd.write(' '+str(cy_b)+'\\n')\n if k==0:\n for l in range(3):\n if l<2:\n fd.write('\\t'+str(self[nbi]['Bo0'][l,:,ur])\n +'\\t'+str(self[nbi]['B'][l,:,0,ur])+'\\n')\n else:\n fd.write('\\t'+str(self[nbi]['Bo0'][l,:,ur]) +'\\n')\n elif k==(nbi+1):\n for l in range(3):\n fd.write('\\t'+str(self[nbi]['BiN'][l,:,ur])+'\\n')\n else:\n for l in range(3):\n if l<2:\n 
fd.write('\\t'+str(self[nbi]['Bi'][l,:,k-1,ur])+'\\t'+\n str(self[nbi]['Bo'][l,:,k-1,ur])\n +'\\t'+str(self[nbi]['B'][l,:,k-1,ur])+'\\n')\n else:\n fd.write('\\t'+str(self[nbi]['Bi'][l,:,k-1,ur])+'\\t'+\n str(self[nbi]['Bo'][l,:,k-1,ur])+'\\n')\n\n\n fd.close()\n\n\n def info(self,ir,ifGHz=0,bB=True,matrix=False):\n \"\"\" provides information for a given ray r\n\n Parameters\n ----------\n\n ir : int\n ray index\n ifGHz : int\n frequency index\n bB: boolean\n display Basis\n matrix :\n display matrix \n \"\"\"\n\n if self.evaluated:\n print('-------------------------')\n print('Informations of ray #', ir)\n print('-------------------------\\n')\n\n ray = self.ray(ir)\n typ = self.typ(ir)\n slabnb = self.slab_nb(ir)\n # if there is a diffraction, phi0, phi, beta are shown\n if 'D' in typ:\n diff =True\n print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7}, {5:10}, {6:10}, {7:4}, {8:4}, {9:4}'\\\n .format('Index',\n 'type',\n 'slab', \n 'nstr' ,\n 'th(rad)',\n 'alpha',\n 'gamma2',\n 'phi0',\n 'phi',\n 'beta'))\n else :\n diff =False\n print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7}, {5:10}, {6:10}'\\\n .format('Index',\n 'type',\n 'slab',\n 'nstr',\n 'th(rad)',\n 'alpha',\n 'gamma2'))\n print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7.2}, {5:10.2}, {6:10.2}'\\\n .format(ir, 'B0','-', '-', '-', '-', '-'))\n\n for iidx, i in enumerate(typ):\n # import ipdb\n # ipdb.set_trace()\n if i == 'T' or i == 'R' or i =='D':\n I = getattr(self.I, i)\n for slab in I.dusl.keys():\n # print slab\n midx = I.dusl[slab]\n # print midx\n Iidx = np.array((I.idx))[midx]\n\n if i != 'D':\n th = I.data[I.dusl[slab], 0]\n gamma = I.gamma[midx]\n alpha = I.alpha[midx]\n else : \n # from IPython.core.debugger import Tracer\n # Tracer()()\n th=['-']*max(max(Iidx),1)\n gamma = ['NC']*max(max(Iidx),1)\n alpha = ['NC']*max(max(Iidx),1)\n udiff = np.where(self.I.D.idx==ray[iidx])[0]\n phi0 = self.I.D.phi0[udiff][0]\n phi=self.I.D.phi[udiff][0]\n beta=self.I.D.beta[udiff][0]\n for ii, Ii in enumerate(Iidx):\n if Ii == 
ray[iidx]:\n if i=='D': \n print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7.2}, {5:10}, {6:10}, {7:3.4}, {8:3.4}, {9:3.4}'\\\n .format(Ii, i, slab, slabnb[iidx], th[ii], alpha[ii], gamma[ii],phi0,phi,beta))\n else:\n print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7.2}, {5:10.2}, {6:10.2}'\\\n .format(Ii, i, slab, slabnb[iidx], th[ii], alpha[ii], gamma[ii]))\n\n else:\n if bB:\n print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7.2}, {5:10.2}, {6:10.2}'.format(ray[iidx], 'B', '-', '-', '-', '-', '-'))\n # print '{0:5} , {1:4}, {2:10}, {3:7}, {4:10}, {5:10}'.format(ray[iidx], i, '-', '-', '-', '-')\n\n if matrix:\n print('\\n----------------------------------------')\n print(' Matrix of ray #', ir, 'at f=', self.I.fGHz[ifGHz])\n print('----------------------------------------')\n lmat = []\n ltran = []\n if bB:\n print('rotation matrix#', 'type: B0')\n\n B0 = self.B0.data[ir,:,:]\n addr = self.ir2a(ir)\n Bo0 = self[addr[0]]['Bo0'][:,:,addr[1]] \n Bi1 = self[addr[0]]['Bi'][:,:,0,addr[1]] \n U = np.dot(Bi1.T,Bo0)\n assert np.allclose(B0,U) \n lmat.append(B0)\n ltran.append(B0)\n print(B0)\n for iidx, i in enumerate(typ):\n print('interaction #', ray[iidx], 'type:', i)\n # f x l x 2 x 2\n I = self.I.I[ifGHz, ray[iidx], :, :]\n print(I)\n lmat.append(I)\n\n if bB:\n print('rotation matrix#',[ray[iidx]], 'type: B')\n B = self.B.data[ray[iidx], :, :]\n print(B) \n lmat.append(B)\n ltran.append(B)\n # evaluate matrix product\n PM0=np.eye(3)\n PM1=np.eye(3)\n for m in lmat[::-1]:\n PM0=np.dot(PM0,m)\n for m in ltran[::-1]:\n PM1=np.dot(PM1,m)\n print(\"matrix product with interactions (dB)\")\n print(20*np.log10(np.abs(PM0[1,1])),' ',20*np.log10(np.abs(PM0[1,2])))\n print(20*np.log10(np.abs(PM0[2,1])),' ',20*np.log10(np.abs(PM0[2,2])))\n print(\"matrix product without interactions (dB)\")\n print(20*np.log10(np.abs(PM1[1,1])),' ',20*np.log10(np.abs(PM1[1,2])))\n print(20*np.log10(np.abs(PM1[2,1])),' ',20*np.log10(np.abs(PM1[2,2])))\n return(PM0)\n\n else:\n print('\\nto display matrix, use 
matrix=True on call')\n else:\n print('Rays have not been evaluated yet')\n\n def signature(self, u , typ='full'):\n \"\"\" extract ray signature\n\n Parameters\n ----------\n\n u : tuple orr int \n if tuple addr \n if int index\n\n Returns\n -------\n\n sig : ndarray\n\n Notes\n -----\n\n Signature of a ray is store as a member\n\n r[nint]['sig']\n\n \"\"\"\n if type(u)==tuple:\n addr = u \n else:\n addr = self.ir2a(u) \n if typ=='full':\n sig = self[addr[0]]['sig'][:,:,addr[1]]\n else:\n pass\n return(sig)\n\n def show3d(self,\n ray,\n bdis=True,\n bbas=False,\n bstruc=True,\n col=np.array([1, 0, 1]),\n id=0,\n linewidth=1):\n \"\"\" plot a set of 3D rays\n\n Parameters\n ----------\n\n ray :\n block : int\n interaction block\n bdis : Boolean\n if False return .vect filename (True)\n bbas : Boolean\n display local basis (False)\n bstruc : Boolean\n display structure (True)\n col : ndarray() 1x3\n color of the ray ([1,0,1])\n id : Integer\n id of the ray (default 0)\n linewidth : Integer\n default 1\n\n \"\"\"\n\n filerac = pyu.getlong(\"ray\" + str(id), pstruc['DIRGEOM'])\n _filerac = pyu.getshort(filerac)\n filename_list = filerac + '.list'\n filename_vect = filerac + '.vect'\n\n try:\n fo = open(filename_vect, \"w\")\n except:\n raise NameError(filename)\n\n fo.write(\"appearance { linewidth %d }\\n\" % linewidth)\n\n fo.write(\"VECT\\n\")\n\n fo.write(\"1 %d 1\\n\\n\" % len(ray[0, :]))\n fo.write(\"%d\\n\" % len(ray[0, :]))\n fo.write(\"1\\n\")\n for i in range(len(ray[0, :])):\n fo.write(\"%g %g %g\\n\" % (ray[0, i], ray[1, i],\n ray[2, i]))\n # fo.write(\"%d %d %d 0\\n\" % (col[0],col[1],col[2]))\n fo.write(\"%g %g %g 0\\n\" % (col[0], col[1], col[2]))\n fo.close()\n\n #\n # Ajout des bases locales\n #\n\n\n fo = open(filename_list, \"w\")\n fo.write(\"LIST\\n\")\n fo.write(\"{<\" + filename_vect + \"}\\n\")\n if (bstruc):\n # fo.write(\"{<strucTxRx.off}\\n\")\n fo.write(\"{<\" + _filestr + \".off}\\n\")\n filename = filename_list\n fo.close()\n\n if 
(bdis):\n #\n # Geomview Visualisation\n #\n chaine = \"geomview -nopanel -b 1 1 1 \" + filename + \\\n \" 2>/dev/null &\"\n os.system(chaine)\n else:\n return(filename)\n\n def _show3(self,L=[],rlist=[],newfig=False,cmap='hot',**kwargs):\n \"\"\" plot 3D rays in environment using Mayavi\n\n Parameters\n ----------\n\n L : Layout object\n Layout to be displayed\n rlist : list\n list of index rays\n newfig : boolean (default: False)\n if true create a new mayavi figure\n else : use the current\n ER: Ray energy \n\n \"\"\"\n\n if newfig:\n mlab.clf()\n f = mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))\n else :\n f = mlab.gcf()\n # view=mlab.view()\n\n\n if L != []:\n try:\n L._filename\n except:\n raise NameError('L argument must be a layout object')\n\n L._show3()\n\n if 'ER' in kwargs:\n ER = kwargs['ER']\n color_range = np.linspace( 0, 1., len(ER))#np.linspace( 0, np.pi, len(ER))\n uER = ER.argsort()[::-1]\n colors= color_range[uER]\n\n if rlist ==[]:\n nbi = self.keys()\n for i in nbi:\n r = range(np.shape(self[i]['pt'])[2])\n ridx = self[i]['rayidx']\n # number of rays\n nbr = len(r) \n # current number of interactions\n cnbi = i + 2\n\n # import ipdb\n # ipdb.set_trace()\n pt = self[i]['pt'][:,:,r].reshape(3,cnbi*nbr,order='F')\n l0 = np.array([np.arange(0,cnbi-1)+k*cnbi for k in range(nbr)]).ravel()\n l1 = l0+1\n connection = np.vstack((l0,l1)).T\n if 'ER' in kwargs:\n rc = np.repeat(colors[ridx],cnbi)\n rc[::cnbi]=0\n src = mlab.pipeline.scalar_scatter(pt[0,:], pt[1,:], pt[2,:],rc,colormap=cmap)\n else: \n src = mlab.pipeline.scalar_scatter(pt[0,:], pt[1,:], pt[2,:])\n\n src.mlab_source.dataset.lines=connection\n src.update()\n lines = mlab.pipeline.stripper(src)\n mlab.pipeline.surface(lines,opacity=0.5,colormap=cmap)\n f.children[-1].name='Rays with ' + str(i) + 'interactions'\n else :\n\n nbi = self._ray2nbi[rlist]\n nr = np.array((nbi,rlist))\n unb = np.unique(nr[0,:])\n unr = {int(i):np.where(nr[0,:]==i)[0] for i in unb}\n\n for i in unb:\n raynb = 
(nr[1,unr[i]]).astype(int)\n nbr=len(raynb)\n ptidx = [np.where(self[i]['rayidx']==x)[0][0] for x in raynb]\n # current number of interactions\n cnbi = i + 2\n \n pt = self[i]['pt'][:,:,ptidx].reshape(3,cnbi*nbr,order='F')\n\n # lines = np.arange(cnbi*nbr).reshape(cnbi,nbr)\n lines = np.arange(cnbi*nbr).reshape(nbr,cnbi)\n\n # mesh = tvtk.PolyData(points=pt.T, polys=lines)\n mesh = tvtk.PolyData(points=pt.T, polys=lines)\n mlab.pipeline.surface(mlab.pipeline.extract_edges(mesh),\n color=(0, 0, 0), )\n f.children[-1].name='Rays with ' + str(int(i)) + 'interactions'\n\n # mlab.view(view[0],view[1],view[2],view[3])\n return(f)\n\n def show3(self,\n L=[],\n bdis=True,\n bstruc=True,\n bbasi = False,\n bbaso = False,\n id=0,\n ilist=[],\n raylist=[],centered=True):\n \"\"\" plot 3D rays within the simulated environment\n\n Parameters\n ----------\n\n bdis : boolean\n True\n bstruc : boolean\n True\n bbasi : boolean\n display input basis of each interaction of rays\n bbaso : boolean\n display ouput basis of each interaction of rays\n id : int\n L : Layout object\n Layout to be displayed\n ilist : list of group of interactions\n raylist : list of index rays\n centered : boolean\n if True center the layout before display\n\n\n \"\"\"\n\n try:\n L._filename\n except:\n raise NameError('L argument must be a layout object')\n\n if not centered:\n pg=np.array([[0],[0],[0]])\n\n strucname= L._filename.split('.')[0]\n pg = L.geomfile(centered=centered)\n pg = np.hstack((pg,0.)).reshape(3,1)\n\n if ilist == []:\n ilist = self.keys()\n pTx = self.pTx.reshape((3, 1))-pg\n pRx = self.pRx.reshape((3, 1))-pg\n filename = pyu.getlong(\"grRay\" + str(id) + \".list\", pstruc['DIRGEOM'])\n fo = open(filename, \"w\")\n fo.write(\"LIST\\n\")\n if bstruc:\n fo.write(\"{<\"+strucname+\".off}\\n\")\n if bbasi:\n if not self.isbased:\n raise NameError('Bases have not been computed (self.locbas(Layout)')\n else: \n base_listi = geu.Geomlist('baselisti',clear=True)\n 
base_listi.append(\"LIST\\n\")\n if bbaso:\n if not self.isbased:\n raise NameError('Bases have not been computed (self.locbas(Layout)')\n else: \n base_listo = geu.Geomlist('baselisto',clear=True)\n base_listo.append(\"LIST\\n\")\n\n # fo.write(\"{<strucTxRx.off}\\n\")\n\n k = 0\n for i in ilist:\n if raylist == []:\n rlist = range(np.shape(self[i]['pt'])[2])\n else:\n rlist = raylist\n for j in rlist:\n ray = np.hstack((pTx,np.hstack((self[i]['pt'][:, :, j]-pg, pRx))))\n # ray = rays[i]['pt'][:,:,j]\n col = np.array([0, 0, 0])\n # print ray\n fileray = self.show3d(ray=ray, bdis=False,\n bstruc=False, col=col, id=k)\n k += 1\n fo.write(\"{< \" + fileray + \" }\\n\")\n if bbasi:\n for inter in range(i):\n filebi = 'bi_' + str(j) + '_' + str(i) + '_' +str(inter)\n basi = geu.GeomVect(filebi)\n basi.geomBase(self[i]['Bi'][:,:,inter,j],pt=self[i]['pt'][:,inter+1,j]-pg[:,0])\n base_listi.append(\"{<\" + filebi +'.vect' \"}\\n\")\n filebi = 'bi_' + str(j) + '_' + str(i) + '_' +str(inter-1)\n basi = geu.GeomVect(filebi)\n basi.geomBase(self[i]['BiN'][:,:,j],pt=self[i]['pt'][:,-1,j]-pg[:,0])\n base_listi.append(\"{<\" + filebi +'.vect' \"}\\n\")\n if bbaso:\n for inter in range(i):\n filebo = 'bo_' + str(j) + '_' + str(i) + '_' +str(inter)\n baso = geu.GeomVect(filebo)\n baso.geomBase(self[i]['Bo'][:,:,inter,j],pt=self[i]['pt'][:,inter+1,j]-pg[:,0])\n base_listo.append(\"{<\" + filebo +'.vect' \"}\\n\")\n filebo = 'bo_' + str(j) + '_' + str(i) + '_' +str(inter+1)\n baso = geu.GeomVect(filebo)\n baso.geomBase(self[i]['Bo0'][:,:,j],pt=self[i]['pt'][:,0,j]-pg[:,0])\n base_listo.append(\"{<\" + filebo +'.vect' \"}\\n\")\n if bbasi:\n fo.write(\"{< \" + \"baselisti.list}\\n\")\n if bbaso: \n fo.write(\"{< \" + \"baselisto.list}\\n\")\n\n fo.close()\n if (bdis):\n chaine = \"geomview \" + filename + \" 2>/dev/null &\"\n os.system(chaine)\n else:\n return(filename)\n\n\nif __name__ == \"__main__\":\n doctest.testmod()\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.abs", "numpy.min", "numpy.max", "numpy.shape", "numpy.cross", "numpy.exp", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.vstack" ], [ "numpy.dot", "numpy.expand_dims", "numpy.einsum", "numpy.vstack", "numpy.cumsum", "numpy.concatenate", "numpy.arctan2", "numpy.cross", "numpy.where", "numpy.hstack", "numpy.swapaxes", "numpy.allclose", "numpy.unique", "numpy.reshape", "numpy.arange", "numpy.eye", "numpy.sin", "numpy.ceil", "numpy.diff", "numpy.insert", "numpy.repeat", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.logical_not", "numpy.nonzero", "numpy.arccos", "matplotlib.pyplot.Normalize", "numpy.delete", "numpy.log10", "numpy.floor", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.abs", "numpy.cos", "numpy.sort", "numpy.ones", "numpy.dstack", "matplotlib.pyplot.colorbar", "numpy.vectorize", "numpy.shape", "numpy.mod", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ikostina/openvino_training_extensions
[ "b320b41ecf6b8b4952f59c6e1d7eb5148a7149ee" ]
[ "tensorflow_toolkit/vehicle_attributes/infer_ie.py" ]
[ "from __future__ import print_function\nimport sys\nimport os\nfrom argparse import ArgumentParser\nimport logging as log\nimport numpy as np\nimport cv2\n\nfrom openvino.inference_engine import IENetwork, IEPlugin\n\ndef normalized_to_absolute(prediction):\n colorcar = np.zeros((1, 1, 3), dtype=np.uint8)\n for i in range(3):\n if prediction[i] < 0:\n colorcar[0, 0, i] = 0\n elif prediction[i] > 1:\n colorcar[0, 0, i] = 255\n else:\n colorcar[0, 0, i] = prediction[i]*255\n return colorcar\n\ndef annotation_to_type(restype):\n if restype == 0:\n vehtype = 'car'\n elif restype == 1:\n vehtype = 'bus'\n elif restype == 2:\n vehtype = 'truck'\n elif restype == 3:\n vehtype = 'van'\n else:\n vehtype = 'undefined'\n return vehtype\n\ndef build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", help=\"Path to an .xml file with a trained model.\", required=True, type=str)\n parser.add_argument(\"-l\", \"--cpu_extension\",\n help=\"MKLDNN (CPU)-targeted custom layers. \\\n Absolute path to a shared library with the kernels implementation\",\n type=str, default=None)\n parser.add_argument(\"-pp\", \"--plugin_dir\", help=\"Path to a plugin folder\", type=str, default=None)\n parser.add_argument(\"-d\", \"--device\",\n help=\"Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. 
Sample \"\n \"will look for a suitable plugin for device specified (CPU by default)\", default=\"CPU\",\n type=str)\n parser.add_argument('input_image', help='Image with a vehicle')\n return parser\n\n# pylint: disable=too-many-locals\ndef main():\n log.basicConfig(format=\"[ %(levelname)s ] %(message)s\", level=log.INFO, stream=sys.stdout)\n args = build_argparser().parse_args()\n\n model_xml = args.model\n model_bin = os.path.splitext(model_xml)[0] + \".bin\"\n log.info(\"Initializing plugin for %s device...\", args.device)\n plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)\n if args.cpu_extension and 'CPU' in args.device:\n plugin.add_cpu_extension(args.cpu_extension)\n # Read IR\n log.info(\"Reading IR...\")\n net = IENetwork.from_ir(model=model_xml, weights=model_bin)\n if \"CPU\" in plugin.device:\n supported_layers = plugin.get_supported_layers(net)\n not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]\n if not_supported_layers:\n log.error(\"Following layers are not supported by the plugin for specified device %s:\\n %s\",\n plugin.device, ', '.join(not_supported_layers))\n log.error(\"Please try to specify cpu extensions library path in sample's command line parameters using -l \"\n \"or --cpu_extension command line argument\")\n sys.exit(1)\n assert len(net.inputs.keys()) == 1, \"Sample supports only single input topologies\"\n assert len(net.outputs) == 2, \"Sample supports two output topologies\"\n input_blob = next(iter(net.inputs))\n log.info(\"Loading IR to the plugin...\")\n exec_net = plugin.load(network=net, num_requests=2)\n # Read and pre-process input image\n _, _, height, width = net.inputs[input_blob].shape\n del net\n frame = cv2.imread(args.input_image)\n in_frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_CUBIC)\n in_frame = in_frame.transpose((2, 0, 1)) # Change data layout from HWC to CHW\n\n res = exec_net.infer({input_blob: in_frame})\n vtype = 
annotation_to_type(np.argmax(res['resnet_v1_10/type'][0]))\n\n colorcar = normalized_to_absolute(res['resnet_v1_10/color'][0])\n rgb_color = cv2.cvtColor(colorcar, cv2.COLOR_LAB2BGR)[0, 0].tolist()\n\n img = cv2.imread(args.input_image)\n cv2.rectangle(img, (0, 0), (30, 30), rgb_color, -1)\n font = cv2.FONT_HERSHEY_PLAIN\n cv2.putText(img, vtype, (0, 15), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.imshow('Vehicle_attributes', img)\n _ = cv2.waitKey(0)\n del exec_net\n del plugin\nif __name__ == '__main__':\n sys.exit(main() or 0)\n" ]
[ [ "numpy.argmax", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
OolongQian/SimpleView
[ "4eb0d2518de94ed095d3398223df0dba487d7841", "4eb0d2518de94ed095d3398223df0dba487d7841" ]
[ "ScanObjectNN/SpiderCNN/draw_cmat.py", "ScanObjectNN/PointCNN/train.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport argparse\nimport socket\nimport importlib\nimport time\nimport os\nimport scipy.misc\nimport sys\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = BASE_DIR\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, 'models'))\nsys.path.append(os.path.join(BASE_DIR, 'utils'))\nimport provider\nimport pc_util\nsys.path.append(os.path.join(BASE_DIR, '..'))\nimport data_utils\n\nimport itertools\nimport scipy.stats as stats\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')\nparser.add_argument('--model', default='spidercnn_cls_xyz', help='Model name: dgcnn [default: dgcnn]')\nparser.add_argument('--batch_size', type=int, default=1, help='Batch Size during training [default: 1]')\nparser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]')\n\nparser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')\nparser.add_argument('--dump_dir', default='confusion_matrix/', help='dump folder path [dump]')\nparser.add_argument('--with_bg', default = True, help='Whether to have background or not [default: True]')\nparser.add_argument('--norm', default = True, help='Whether to normalize data or not [default: False]')\nparser.add_argument('--center_data', default = False, help='Whether to explicitly center the data [default: False]')\n\nparser.add_argument('--test_file', default = 'h5_files/main_split/test_objectdataset_augmentedrot_scale75.h5', help='Location of test file')\n\nFLAGS = parser.parse_args()\n\n\nDATA_DIR = os.path.join(ROOT_DIR, '../../../../')\nBATCH_SIZE = FLAGS.batch_size\nNUM_POINT = FLAGS.num_point\nMODEL_PATH = FLAGS.model_path\nGPU_INDEX = FLAGS.gpu\nMODEL = 
importlib.import_module(FLAGS.model) # import network module\nDUMP_DIR = FLAGS.dump_dir\nif not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)\nLOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')\nLOG_FOUT.write(str(FLAGS)+'\\n')\n\nWITH_BG = FLAGS.with_bg\nNORMALIZED = FLAGS.norm\nTEST_FILE = FLAGS.test_file\nCENTER_DATA = FLAGS.center_data\n\nNUM_CLASSES = 15\nSHAPE_NAMES = [line.rstrip() for line in \\\n open( '../training_data/shape_names_ext.txt')] \n\nHOSTNAME = socket.gethostname()\n\n\nnp.random.seed(0)\n\nprint(\"Normalized: \"+str(NORMALIZED))\nprint(\"Center Data: \"+str(CENTER_DATA))\n\nif (\".h5\" in TEST_FILE):\n TEST_DATA, TEST_LABELS = data_utils.load_h5(TEST_FILE)\nelse:\n TEST_DATA, TEST_LABELS = data_utils.load_data(TEST_FILE, NUM_POINT, with_bg_pl = WITH_BG) \n\nif (CENTER_DATA):\n TEST_DATA = data_utils.center_data(TEST_DATA)\n\nif (NORMALIZED):\n TEST_DATA = data_utils.normalize_data(TEST_DATA)\n\ndef log_string(out_str):\n LOG_FOUT.write(out_str+'\\n')\n LOG_FOUT.flush()\n print(out_str)\n\ndef evaluate(num_votes):\n is_training = False\n \n with tf.device('/gpu:'+str(GPU_INDEX)):\n pointclouds_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))\n labels_pl = tf.placeholder(tf.int32, shape=(BATCH_SIZE))\n is_training_pl = tf.placeholder(tf.bool, shape=())\n\n # simple model\n pred = MODEL.get_model(pointclouds_pl, is_training_pl)\n loss = MODEL.get_loss(pred, labels_pl)\n \n # Add ops to save and restore all the variables.\n saver = tf.train.Saver()\n \n # Create a session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n config.log_device_placement = True\n sess = tf.Session(config=config)\n\n # Restore variables from disk.\n saver.restore(sess, MODEL_PATH)\n log_string(\"Model restored.\")\n\n ops = {'pointclouds_pl': pointclouds_pl,\n 'labels_pl': labels_pl,\n 'is_training_pl': is_training_pl,\n 'pred': pred,\n 'loss': loss}\n\n eval_one_epoch(sess, ops, 
num_votes)\n\n \ndef eval_one_epoch(sess, ops, num_votes=1, topk=1):\n error_cnt = 0\n is_training = False\n total_correct = 0\n total_seen = 0\n loss_sum = 0\n total_seen_class = [0 for _ in range(NUM_CLASSES)]\n total_correct_class = [0 for _ in range(NUM_CLASSES)]\n fout = open(os.path.join(DUMP_DIR, 'pred_label.txt'), 'w')\n\n if (\".h5\" in TEST_FILE):\n current_data, current_label = data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, NUM_POINT)\n else:\n current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, NUM_POINT)\n\n current_label = np.squeeze(current_label)\n\n num_batches = current_data.shape[0]//BATCH_SIZE\n\n current_pred = []\n \n for batch_idx in range(num_batches):\n start_idx = batch_idx * BATCH_SIZE\n end_idx = (batch_idx+1) * BATCH_SIZE\n cur_batch_size = end_idx - start_idx\n \n # Aggregating BEG\n batch_loss_sum = 0 # sum of losses for the batch\n batch_pred_sum = np.zeros((cur_batch_size, NUM_CLASSES)) # score for classes\n batch_pred_classes = np.zeros((cur_batch_size, NUM_CLASSES)) # 0/1 for classes\n for vote_idx in range(num_votes):\n rotated_data = provider.rotate_point_cloud_by_angle(current_data[start_idx:end_idx, :, :],\n vote_idx/float(num_votes) * np.pi * 2)\n feed_dict = {ops['pointclouds_pl']: rotated_data,\n ops['labels_pl']: current_label[start_idx:end_idx],\n ops['is_training_pl']: is_training}\n loss_val, pred_val = sess.run([ops['loss'], ops['pred']],\n feed_dict=feed_dict)\n\n batch_pred_sum += pred_val\n batch_pred_val = np.argmax(pred_val, 1)\n for el_idx in range(cur_batch_size):\n batch_pred_classes[el_idx, batch_pred_val[el_idx]] += 1\n batch_loss_sum += (loss_val * cur_batch_size / float(num_votes))\n # pred_val_topk = np.argsort(batch_pred_sum, axis=-1)[:,-1*np.array(range(topk))-1]\n # pred_val = np.argmax(batch_pred_classes, 1)\n pred_val = np.argmax(batch_pred_sum, 1)\n # Aggregating END\n \n correct = np.sum(pred_val == current_label[start_idx:end_idx])\n # correct = 
np.sum(pred_val_topk[:,0:topk] == label_val)\n total_correct += correct\n total_seen += cur_batch_size\n loss_sum += batch_loss_sum\n\n for i in range(start_idx, end_idx):\n l = current_label[i]\n total_seen_class[l] += 1\n total_correct_class[l] += (pred_val[i-start_idx] == l)\n fout.write('%s, %s\\n' % (SHAPE_NAMES[pred_val[i-start_idx]], SHAPE_NAMES[l]))\n \n current_pred.append(pred_val[i-start_idx])\n \n log_string('total seen: %d' % (total_seen)) \n log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))\n log_string('eval accuracy: %f' % (total_correct / float(total_seen)))\n log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))\n \n class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)\n for i, name in enumerate(SHAPE_NAMES):\n log_string('%10s:\\t%0.3f' % (name, class_accuracies[i]))\n \n #Plot confusion matrix\n current_pred = np.array(current_pred)\n groundtruth = current_label.flatten()\n predictions = current_pred.flatten()\n\n mat = confusion_matrix(groundtruth, predictions)\n\n plt.style.use('seaborn-paper')\n plt.rcParams[\"figure.figsize\"] = (10,10)\n ax = plt.subplot(111)\n cmap = plt.cm.Reds\n mat = mat.astype('float') / mat.sum(axis=1)[:, np.newaxis]\n mat = np.nan_to_num(mat, copy=True)\n\n plt.imshow(mat, interpolation='nearest', cmap=cmap)\n # cbar = plt.colorbar(fraction=0.03, pad=0.05, aspect=30)\n # cbar.ax.tick_params(labelsize=10)\n tick_marks = np.arange(len(SHAPE_NAMES))\n plt.xticks(tick_marks, SHAPE_NAMES, rotation=90)\n plt.yticks(tick_marks, SHAPE_NAMES)\n\n plt.ylabel('Ground truth')\n plt.xlabel('Prediction')\n\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(36)\n\n plt.tight_layout()\n plt.savefig(os.path.join(DUMP_DIR,'matrix.pdf'))\n plt.show() \n\nif __name__=='__main__':\n with tf.Graph().as_default():\n evaluate(num_votes=1)\n 
LOG_FOUT.close()\n", "#!/usr/bin/python3\n\"\"\"Training and Validation On Classification Task.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport math\nimport random\nimport shutil\nimport argparse\nimport importlib\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = BASE_DIR\nsys.path.append(os.path.join(BASE_DIR, '..'))\nDATA_DIR = os.path.join(ROOT_DIR, '../../../../')\nimport data_utils\nimport numpy as np\nimport pointfly as pf\nimport tensorflow as tf\nfrom datetime import datetime\nimport provider\nimport h5py\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', type=int, default=1, help='GPU to use [default: GPU 0]')\nparser.add_argument('--load_ckpt', '-l', help='Path to a check point file for load')\n\nparser.add_argument('--log_dir', '-s', default='log/', help='Path to folder for saving check points and summary')\nparser.add_argument('--with_bg', default = True, help='Whether to have background or not [default: True]')\nparser.add_argument('--norm', default = True, help='Whether to normalize data or not [default: False]')\nparser.add_argument('--center_data', default = True, help='Whether to explicitly center the data [default: False]')\nparser.add_argument('--num_class', type=int, default = 15, help='Number of classes to classify.')\n\nparser.add_argument('--train_file', default = 'h5_files/main_split/training_objectdataset_augmentedrot_scale75.h5', help='Location of training file')\nparser.add_argument('--test_file', default = 'h5_files/main_split/test_objectdataset_augmentedrot_scale75.h5', help='Location of test file')\n\nparser.add_argument('--model', '-m', default = 'pointcnn_cls', help='Model to use')\nparser.add_argument('--setting', '-x', default = 'modelnet_x3_l4', help='Setting to use')\nparser.add_argument('--epochs', help='Number of training epochs (default defined in setting)', 
type=int)\nparser.add_argument('--batch_size', help='Batch size (default defined in setting)', type=int)\nparser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')\n\nargs = parser.parse_args()\n\n\nGPU_INDEX = args.gpu\n\ntime_string = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\nroot_folder = args.log_dir\nif not os.path.exists(root_folder):\n os.makedirs(root_folder)\n\nWITH_BG = args.with_bg\nNORMALIZED = args.norm\nTRAIN_FILE = args.train_file\nTEST_FILE = args.test_file\nCENTER_DATA = args.center_data\n\nLOG_FOUT = open(os.path.join(root_folder, 'log_train.txt'), 'w')\nLOG_FOUT.write(str(args)+'\\n')\n\ndef log_string(out_str):\n LOG_FOUT.write(out_str+'\\n')\n LOG_FOUT.flush()\n print(out_str)\n\n\nmodel = importlib.import_module(args.model)\nsetting_path = os.path.join(os.path.dirname(__file__), args.model)\nsys.path.append(setting_path)\nsetting = importlib.import_module(args.setting)\n\nnum_epochs = args.epochs or setting.num_epochs\nbatch_size = args.batch_size or setting.batch_size\nsample_num = args.num_point\nstep_val = setting.step_val\nrotation_range = setting.rotation_range\nrotation_range_val = setting.rotation_range_val\nscaling_range = setting.scaling_range\nscaling_range_val = setting.scaling_range_val\njitter = setting.jitter\njitter_val = setting.jitter_val\npool_setting_val = None if not hasattr(setting, 'pool_setting_val') else setting.pool_setting_val\npool_setting_train = None if not hasattr(setting, 'pool_setting_train') else setting.pool_setting_train\n\n# Prepare inputs\nlog_string('{}-Preparing datasets...'.format(datetime.now()))\n\nNUM_CLASSES = args.num_class\nprint(\"Number of Classes: \"+str(NUM_CLASSES))\nprint(\"Normalized: \"+str(NORMALIZED))\nprint(\"Center Data: \"+str(CENTER_DATA))\n\nif (\".h5\" in TRAIN_FILE):\n TRAIN_DATA, TRAIN_LABELS = data_utils.load_h5(TRAIN_FILE)\nelse:\n TRAIN_DATA, TRAIN_LABELS = data_utils.load_data(TRAIN_FILE, sample_num, with_bg_pl = WITH_BG)\n\nif 
(\".h5\" in TEST_FILE):\n TEST_DATA, TEST_LABELS = data_utils.load_h5(TEST_FILE)\nelse:\n TEST_DATA, TEST_LABELS = data_utils.load_data(TEST_FILE, sample_num, with_bg_pl = WITH_BG) \n\nif (CENTER_DATA):\n TRAIN_DATA = data_utils.center_data(TRAIN_DATA)\n TEST_DATA = data_utils.center_data(TEST_DATA)\n\nif (NORMALIZED):\n TRAIN_DATA = data_utils.normalize_data(TRAIN_DATA)\n TEST_DATA = data_utils.normalize_data(TEST_DATA)\n\nnum_train = len(TRAIN_DATA)\nnum_val = len(TEST_DATA)\nprint('{}-{:d}/{:d} training/validation samples.'.format(datetime.now(), num_train, num_val))\n\ndef train():\n with tf.Graph().as_default():\n with tf.device('/gpu:'+str(GPU_INDEX)):\n # Placeholders\n xforms = tf.placeholder(tf.float32, shape=(None, 3, 3), name=\"xforms\")\n rotations = tf.placeholder(tf.float32, shape=(None, 3, 3), name=\"rotations\")\n jitter_range = tf.placeholder(tf.float32, shape=(1), name=\"jitter_range\")\n global_step = tf.Variable(0, trainable=False, name='global_step')\n is_training_pl = tf.placeholder(tf.bool, name='is_training')\n\n pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, sample_num, 3), name='data_train')\n labels_pl = tf.placeholder(tf.int32, shape=(batch_size), name='label_train')\n\n points_augmented = pf.augment(pointclouds_pl, xforms, jitter_range)\n net = model.Net(points=points_augmented, features=None, is_training=is_training_pl, setting=setting)\n logits = net.logits\n probs = tf.nn.softmax(logits, name='probs')\n predictions = tf.argmax(probs, axis=-1, name='predictions')\n\n labels_2d = tf.expand_dims(labels_pl, axis=-1, name='labels_2d')\n labels_tile = tf.tile(labels_2d, (1, tf.shape(logits)[1]), name='labels_tile')\n loss_op = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(labels=labels_tile, logits=logits))\n\n tf.summary.scalar('loss', loss_op)\n # with tf.name_scope('metrics'):\n # loss_mean_op, loss_mean_update_op = tf.metrics.mean(loss_op)\n # t_1_acc_op, t_1_acc_update_op = tf.metrics.accuracy(labels_tile, 
predictions)\n # t_1_per_class_acc_op, t_1_per_class_acc_update_op = tf.metrics.mean_per_class_accuracy(labels_tile,\n # predictions,\n # setting.num_class)\n # reset_metrics_op = tf.variables_initializer([var for var in tf.local_variables()\n # if var.name.split('/')[0] == 'metrics'])\n\n # _ = tf.summary.scalar('loss/train', tensor=loss_mean_op, collections=['train'])\n # _ = tf.summary.scalar('t_1_acc/train', tensor=t_1_acc_op, collections=['train'])\n # _ = tf.summary.scalar('t_1_per_class_acc/train', tensor=t_1_per_class_acc_op, collections=['train'])\n\n # _ = tf.summary.scalar('loss/val', tensor=loss_mean_op, collections=['val'])\n # _ = tf.summary.scalar('t_1_acc/val', tensor=t_1_acc_op, collections=['val'])\n # _ = tf.summary.scalar('t_1_per_class_acc/val', tensor=t_1_per_class_acc_op, collections=['val'])\n\n lr_exp_op = tf.train.exponential_decay(setting.learning_rate_base, global_step, setting.decay_steps,\n setting.decay_rate, staircase=True)\n lr_clip_op = tf.maximum(lr_exp_op, setting.learning_rate_min)\n _ = tf.summary.scalar('learning_rate', tensor=lr_clip_op, collections=['train'])\n reg_loss = setting.weight_decay * tf.losses.get_regularization_loss()\n if setting.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate=lr_clip_op, epsilon=setting.epsilon)\n elif setting.optimizer == 'momentum':\n optimizer = tf.train.MomentumOptimizer(learning_rate=lr_clip_op, momentum=setting.momentum, use_nesterov=True)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(loss_op + reg_loss, global_step=global_step)\n\n # Create a session\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n config.log_device_placement = False\n sess = tf.Session(config=config)\n\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n\n saver = tf.train.Saver(max_to_keep=None)\n\n # backup all 
code\n # code_folder = os.path.abspath(os.path.dirname(__file__))\n # shutil.copytree(code_folder, os.path.join(root_folder)\n\n folder_ckpt = root_folder\n # if not os.path.exists(folder_ckpt):\n # os.makedirs(folder_ckpt)\n\n folder_summary = os.path.join(root_folder, 'summary')\n if not os.path.exists(folder_summary):\n os.makedirs(folder_summary) \n\n parameter_num = np.sum([np.prod(v.shape.as_list()) for v in tf.trainable_variables()])\n print('{}-Parameter number: {:d}.'.format(datetime.now(), parameter_num))\n\n sess.run(init_op)\n\n # saver.restore(sess, os.path.join(folder_ckpt, \"model.ckpt\"))\n # log_string(\"Model restored.\") \n\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(os.path.join(folder_summary, 'train'),\n sess.graph)\n test_writer = tf.summary.FileWriter(os.path.join(folder_summary, 'test'))\n\n ops = {'pointclouds_pl': pointclouds_pl,\n 'labels_pl': labels_pl,\n 'is_training_pl': is_training_pl,\n 'pred': probs,\n 'loss': loss_op,\n 'train_op': train_op,\n 'merged': merged,\n 'step': global_step,\n 'xforms': xforms,\n 'rotations': rotations,\n 'jitter_range': jitter_range}\n\n for epoch in range(num_epochs):\n log_string('**** EPOCH %03d ****' % (epoch))\n sys.stdout.flush()\n \n train_one_epoch(sess, ops, train_writer)\n eval_one_epoch(sess, ops, test_writer)\n \n # Save the variables to disk.\n # if epoch % 10 == 0:\n save_path = saver.save(sess, os.path.join(folder_ckpt, \"model.ckpt\"))\n log_string(\"Model saved in file: %s\" % save_path) \n\ndef train_one_epoch(sess, ops, train_writer):\n is_training = True\n\n #get current data, shuffle and set to numpy array with desired num_points\n # current_data, current_label = data_utils.get_current_data(TRAIN_DATA, TRAIN_LABELS, sample_num)\n # current_data, current_label = data_utils.get_current_data_h5(TRAIN_DATA, TRAIN_LABELS, sample_num)\n if (\".h5\" in TRAIN_FILE):\n current_data, current_label = data_utils.get_current_data_h5(TRAIN_DATA, TRAIN_LABELS, 
sample_num)\n else:\n current_data, current_label = data_utils.get_current_data(TRAIN_DATA, TRAIN_LABELS, sample_num)\n\n current_label = np.squeeze(current_label)\n\n num_batches = current_data.shape[0]//batch_size\n total_correct = 0\n total_seen = 0\n loss_sum = 0\n for batch_idx in range(num_batches):\n start_idx = batch_idx * batch_size\n end_idx = (batch_idx+1) * batch_size\n\n xforms_np, rotations_np = pf.get_xforms(batch_size,\n rotation_range=rotation_range,\n scaling_range=scaling_range,\n order=setting.rotation_order)\n\n # Augment batched point clouds by rotation and jittering\n feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],\n ops['labels_pl']: current_label[start_idx:end_idx],\n ops['is_training_pl']: is_training,\n ops['xforms']: xforms_np,\n ops['rotations']: rotations_np,\n ops['jitter_range']: np.array([jitter])}\n\n summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'],\n ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)\n\n train_writer.add_summary(summary, step)\n pred_val = np.sum(pred_val, axis=1)\n pred_val = np.argmax(pred_val, 1)\n # print(pred_val)\n # print(current_label[start_idx:end_idx])\n\n correct = np.sum(pred_val == current_label[start_idx:end_idx])\n total_correct += correct\n total_seen += batch_size\n loss_sum += loss_val\n\n \n log_string('mean loss: %f' % (loss_sum / float(num_batches)))\n log_string('accuracy: %f' % (total_correct / float(total_seen)))\n\ndef eval_one_epoch(sess, ops, test_writer):\n is_training = False\n total_correct = 0\n total_seen = 0\n loss_sum = 0\n total_seen_class = [0 for _ in range(NUM_CLASSES)]\n total_correct_class = [0 for _ in range(NUM_CLASSES)]\n\n # current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, sample_num)\n # current_data, current_label = data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, sample_num)\n if (\".h5\" in TEST_FILE):\n current_data, current_label = 
data_utils.get_current_data_h5(TEST_DATA, TEST_LABELS, sample_num)\n else:\n current_data, current_label = data_utils.get_current_data(TEST_DATA, TEST_LABELS, sample_num)\n\n current_label = np.squeeze(current_label)\n\n num_batches = current_data.shape[0]//batch_size\n \n for batch_idx in range(num_batches):\n start_idx = batch_idx * batch_size\n end_idx = (batch_idx+1) * batch_size\n\n xforms_np, rotations_np = pf.get_xforms(batch_size,\n rotation_range=rotation_range_val,\n scaling_range=scaling_range_val,\n order=setting.rotation_order)\n\n # Augment batched point clouds by rotation and jittering\n feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],\n ops['labels_pl']: current_label[start_idx:end_idx],\n ops['is_training_pl']: is_training,\n ops['xforms']: xforms_np,\n ops['rotations']: rotations_np,\n ops['jitter_range']: np.array([jitter_val])}\n\n summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'],\n ops['loss'], ops['pred']], feed_dict=feed_dict)\n\n pred_val = np.sum(pred_val, axis=1)\n pred_val = np.argmax(pred_val, 1)\n correct = np.sum(pred_val == current_label[start_idx:end_idx])\n total_correct += correct\n total_seen += batch_size\n loss_sum += (loss_val*batch_size)\n for i in range(start_idx, end_idx):\n l = current_label[i]\n total_seen_class[l] += 1\n total_correct_class[l] += (pred_val[i-start_idx] == l)\n \n log_string('eval mean loss: %f' % (loss_sum / float(total_seen)))\n log_string('eval accuracy: %f'% (total_correct / float(total_seen)))\n log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)))) \n\nif __name__ == '__main__':\n train()\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.squeeze", "sklearn.metrics.confusion_matrix", "numpy.nan_to_num", "tensorflow.Graph", "matplotlib.pyplot.tight_layout", "tensorflow.ConfigProto", "matplotlib.pyplot.subplot", "numpy.argmax", "tensorflow.Session", "tensorflow.train.Saver", "numpy.zeros", "matplotlib.pyplot.style.use", "tensorflow.placeholder", "numpy.array", "matplotlib.pyplot.show", "numpy.sum", "matplotlib.pyplot.xticks", "matplotlib.pyplot.ylabel", "numpy.random.seed", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks" ], [ "tensorflow.control_dependencies", "numpy.squeeze", "tensorflow.train.AdamOptimizer", "tensorflow.summary.scalar", "tensorflow.Graph", "tensorflow.Variable", "tensorflow.get_collection", "tensorflow.train.exponential_decay", "tensorflow.ConfigProto", "numpy.argmax", "tensorflow.train.MomentumOptimizer", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.trainable_variables", "tensorflow.argmax", "tensorflow.losses.sparse_softmax_cross_entropy", "tensorflow.shape", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.summary.merge_all", "numpy.array", "numpy.sum", "tensorflow.nn.softmax", "tensorflow.local_variables_initializer", "tensorflow.losses.get_regularization_loss", "tensorflow.maximum", "tensorflow.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
XuezheMax/apollo
[ "852991fd769f80689415abee8653c0a5eedbab40" ]
[ "language_model/train_1bw.py" ]
[ "import os\nimport sys\n\ncurrent_path = os.path.dirname(os.path.realpath(__file__))\nroot_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nsys.path.append(root_path)\n\nimport argparse\nimport random\nimport pickle\nimport math\nimport json\nimport numpy as np\nimport torch\nfrom utils import clip_grad_norm_\nfrom torch.optim import SGD, Adam\nfrom torch.optim.lr_scheduler import MultiStepLR\n\nfrom language_model.model_word_ada.lm import LM\nfrom language_model.model_word_ada.dataset import LargeDataset, EvalDataset\nfrom optim import RAdamW, Apollo, AdaHessian, AdaBelief\n\n\ndef logging(info, logfile=None):\n print(info)\n if logfile is not None:\n print(info, file=logfile)\n logfile.flush()\n\n\ndef get_optimizer(opt, learning_rate, parameters, lr_decay, decay_rate, milestone, warmup_updates, init_lr, rebound):\n if opt == 'sgd':\n optimizer = SGD(parameters, lr=learning_rate, momentum=0.9, weight_decay=0., nesterov=True)\n elif opt == 'radam':\n optimizer = RAdamW(parameters, lr=learning_rate, betas=(0.9, 0.999), weight_decay=0.)\n elif opt == 'adam':\n optimizer = Adam(parameters, lr=learning_rate, betas=(0.9, 0.999), weight_decay=0.)\n elif opt == 'adabelief':\n optimizer = AdaBelief(parameters, lr=learning_rate, betas=(0.9, 0.999), eps=1e-12, weight_decay=0.)\n elif opt == 'apollo':\n optimizer = Apollo(parameters, lr=learning_rate, beta=0.9, eps=1e-4, rebound=rebound,\n warmup=warmup_updates, init_lr=init_lr, weight_decay=0.)\n elif opt == 'adahessian':\n optimizer = AdaHessian(parameters, lr=learning_rate, betas=(0.9, 0.999), eps=1e-4,\n warmup=warmup_updates, init_lr=init_lr, weight_decay=0.)\n else:\n raise ValueError('unknown optimizer: {}'.format(opt))\n\n opt_param = 'lr decay={} {}, decay rate={:.3f}'.format(lr_decay, milestone, decay_rate)\n scheduler = MultiStepLR(optimizer, milestones=milestone, gamma=decay_rate)\n\n if opt == 'apollo':\n opt_param += ', rebound={}'.format(rebound)\n if opt in ['apollo', 'adahessian']:\n 
opt_param += ', warmup={}, init_lr={:.1e}'.format(warmup_updates, init_lr)\n return optimizer, scheduler, opt_param\n\n\ndef evaluate(args, data_loader, lm_model):\n logging('evaluating', args.log)\n lm_model.eval()\n\n iterator = data_loader.get_tqdm()\n hx = None\n device = args.device\n total_loss = 0\n total_count = 0\n for word_t, label_t in iterator:\n word_t = word_t.to(device)\n label_t = label_t.to(device).reshape(-1)\n count = label_t.size(0)\n loss, hx = lm_model(word_t, label_t, hx=hx)\n total_loss += count * loss.item()\n total_count += count\n\n ppl = math.exp(total_loss / total_count)\n return ppl\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_folder', default='data/billionwords/one_billion/')\n parser.add_argument('--batch_size', type=int, default=128)\n parser.add_argument('--sequence_length', type=int, default=20)\n parser.add_argument('--hid_dim', type=int, default=2048)\n parser.add_argument('--word_dim', type=int, default=300)\n parser.add_argument('--num_layers', type=int, default=2)\n parser.add_argument('--dropout', type=float, default=0.1)\n parser.add_argument('--epochs', type=int, default=14)\n parser.add_argument('--clip', type=float, default=0)\n parser.add_argument('--clip_mode', choices=['total', 'each'], default='total')\n parser.add_argument('--opt', choices=['sgd', 'adam', 'radam', 'adabelief', 'apollo', 'adahessian'], help='optimizer', required=True)\n parser.add_argument('--rnn_unit', choices=['gru', 'lstm', 'rnn'], default='lstm')\n parser.add_argument('--lr', type=float, required=True)\n parser.add_argument('--lr_decay', choices=['milestone'], default='milestone', help='Decay rate of learning rate')\n parser.add_argument('--decay_rate', type=float, default=0.1, help='Decay rate of learning rate')\n parser.add_argument('--milestone', type=int, nargs='+', default=[12, 18], help='Decrease learning rate at these epochs.')\n parser.add_argument('--rebound', 
choices=['constant', 'belief'], default='constant', help='type of recified bound of diagonal hessian')\n parser.add_argument('--warmup_updates', type=int, default=0, metavar='N', help='number of updates to warm up (default: 0)')\n parser.add_argument('--init_lr', type=float, default=0, help='initial learning rate')\n parser.add_argument('--cutoffs', nargs='+', default=[60000, 100000, 640000])\n parser.add_argument('--interval', type=int, default=1000)\n parser.add_argument('--model_path', help='path for saving model file.', required=True)\n parser.add_argument('--seed', type=int, default=None, metavar='S', help='random seed (default: None)')\n parser.add_argument('--run', type=int, default=1, metavar='N', help='number of runs for the experiment')\n parser.add_argument('--recover', action='store_true', help='recover the model from disk.')\n args = parser.parse_args()\n\n model_path = args.model_path\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n if args.recover:\n args.log = open(os.path.join(model_path, 'log.run{}.txt'.format(args.run)), 'a')\n else:\n args.log = open(os.path.join(model_path, 'log.run{}.txt'.format(args.run)), 'w')\n args.checkpoint_name = os.path.join(model_path, 'checkpoint{}.tar'.format(args.run))\n\n args.cuda = torch.cuda.is_available()\n random_seed = args.seed\n if random_seed is not None:\n if args.recover:\n random_seed += random.randint(0, 1024)\n random.seed(random_seed)\n np.random.seed(random_seed)\n torch.manual_seed(random_seed)\n torch.cuda.manual_seed(random_seed)\n\n device = torch.device('cuda', 0) if args.cuda else torch.device('cpu')\n if args.cuda:\n torch.cuda.set_device(device)\n\n if args.opt == 'adahessian':\n torch.backends.cudnn.enabled = False\n\n logging(\"Args: \" + str(args), args.log)\n\n logging('loading dataset')\n dataset = pickle.load(open(args.dataset_folder + 'test.pk', 'rb'))\n w_map, test_data, range_idx = dataset['w_map'], dataset['test_data'], dataset['range']\n\n train_loader = 
LargeDataset(args.dataset_folder, range_idx, args.batch_size, args.sequence_length)\n test_loader = EvalDataset(test_data, args.batch_size)\n\n logging('building model')\n lm_model = LM(len(w_map), args.word_dim, args.rnn_unit, args.num_layers, args.hid_dim, dropout=args.dropout, cutoffs=args.cutoffs)\n lm_model.to(device)\n args.device = device\n\n logging('# of Parameters: %d' % sum([param.numel() for param in lm_model.parameters()]), args.log)\n\n opt = args.opt\n epochs = args.epochs\n clip = args.clip\n clip_mode = args.clip_mode\n lr_warmup = args.warmup_updates\n init_lr = args.init_lr\n lr_decay = args.lr_decay\n decay_rate = args.decay_rate\n milestone = args.milestone\n rebound = args.rebound\n optimizer, scheduler, opt_param = get_optimizer(opt, args.lr, lm_model.parameters(), warmup_updates=lr_warmup, init_lr=init_lr,\n lr_decay=lr_decay, decay_rate=decay_rate, milestone=milestone, rebound=rebound)\n create_graph = args.opt == 'adahessian'\n\n if args.recover:\n checkpoint_name = args.checkpoint_name\n print(f\"loading from checkpoint {checkpoint_name}\")\n checkpoint = torch.load(checkpoint_name, map_location=args.device)\n lm_model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n\n start_epoch = checkpoint['epoch']\n batch_index = checkpoint['batch_index']\n best_epoch = checkpoint['best_epoch']\n best_ppl = checkpoint['best_ppl']\n numbers = checkpoint['numbers']\n train_loss = checkpoint['train_loss']\n del checkpoint\n with torch.no_grad():\n logging('Evaluating after resuming model...', args.log)\n test_ppl = evaluate(args, test_loader, lm_model)\n logging('test_ppl: {} @ epoch: {}, best_ppl: {} @ epoch: {}'.format(test_ppl, start_epoch - 1, best_ppl, best_epoch), args.log)\n else:\n start_epoch = 1\n batch_index = 0\n best_epoch = 0\n best_ppl = float('inf')\n numbers = {'train ppl': [], 'test ppl': []}\n train_loss = 0\n\n for epoch in 
range(start_epoch, epochs + 1):\n lr = scheduler.get_last_lr()[0]\n logging('#' * 90, args.log)\n logging('Epoch: {}/{} ({}, lr={:.6f}, clip={:.1f} ({}), {})'.format(epoch, epochs, opt, lr, clip, clip_mode, opt_param), args.log)\n iterator = train_loader.get_tqdm()\n full_epoch_loss = 0\n lm_model.train()\n\n hx = None\n for word_t, label_t in iterator:\n optimizer.zero_grad()\n\n if 1 == train_loader.cur_idx:\n hx = None\n\n word_t = word_t.to(device, non_blocking=True)\n label_t = label_t.to(device, non_blocking=True).reshape(-1)\n\n loss, hx = lm_model(word_t, label_t, hx=hx)\n train_loss += loss.item()\n if batch_index > 0 and batch_index % args.interval == 0:\n train_ppl = math.exp(train_loss / args.interval)\n logging('epoch_ppl: {} lr: {} @ batch_index: {}'.format(train_ppl, lr, batch_index), args.log)\n train_loss = 0\n numbers['train ppl'].append(train_ppl)\n\n batch_index += 1\n loss.backward(create_graph=create_graph)\n if clip > 0.:\n clip_grad_norm_(lm_model.parameters(), clip, mode=clip_mode)\n optimizer.step()\n\n scheduler.step()\n\n with torch.no_grad():\n test_ppl = evaluate(args, test_loader, lm_model)\n if test_ppl < best_ppl:\n best_ppl = test_ppl\n best_epoch = epoch\n logging('test_ppl: {} @ epoch: {}, best_ppl: {} @ epoch: {}'.format(test_ppl, epoch, best_ppl, best_epoch), args.log)\n numbers['test ppl'].append(test_ppl)\n json.dump(numbers, open(os.path.join(args.model_path, 'values.run{}.json'.format(args.run)), 'w'))\n\n # save checkpoint\n checkpoint_name = args.checkpoint_name\n torch.save({'epoch': epoch + 1,\n 'batch_index': batch_index,\n 'model': lm_model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'scheduler': scheduler.state_dict(),\n 'best_epoch': best_epoch,\n 'best_ppl': best_ppl,\n 'train_loss': train_loss,\n 'numbers': numbers},\n checkpoint_name)\n" ]
[ [ "torch.optim.lr_scheduler.MultiStepLR", "torch.optim.Adam", "numpy.random.seed", "torch.cuda.manual_seed", "torch.cuda.set_device", "torch.manual_seed", "torch.load", "torch.no_grad", "torch.cuda.is_available", "torch.optim.SGD", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
edbeeching/transformers
[ "b18dfd95e1f60ae65a959a7b255fc06522170d1b" ]
[ "examples/pytorch/translation/run_translation.py" ]
[ "#!/usr/bin/env python\n# coding=utf-8\n# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for sequence to sequence.\n\"\"\"\n# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.\n\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import Optional\n\nimport datasets\nimport numpy as np\nfrom datasets import load_dataset, load_metric\n\nimport transformers\nfrom transformers import (\n AutoConfig,\n AutoModelForSeq2SeqLM,\n AutoTokenizer,\n DataCollatorForSeq2Seq,\n HfArgumentParser,\n M2M100Tokenizer,\n MBart50Tokenizer,\n MBart50TokenizerFast,\n MBartTokenizer,\n MBartTokenizerFast,\n Seq2SeqTrainer,\n Seq2SeqTrainingArguments,\n default_data_collator,\n set_seed,\n)\nfrom transformers.trainer_utils import get_last_checkpoint\nfrom transformers.utils import check_min_version\nfrom transformers.utils.versions import require_version\n\n\n# Will error if the minimal version of Transformers is not installed. 
Remove at your own risks.\ncheck_min_version(\"4.18.0.dev0\")\n\nrequire_version(\"datasets>=1.8.0\", \"To fix: pip install -r examples/pytorch/translation/requirements.txt\")\n\nlogger = logging.getLogger(__name__)\n\n# A list of all multilingual tokenizer which require src_lang and tgt_lang attributes.\nMULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast, M2M100Tokenizer]\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Where to store the pretrained models downloaded from huggingface.co\"},\n )\n use_fast_tokenizer: bool = field(\n default=True,\n metadata={\"help\": \"Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.\"},\n )\n model_revision: str = field(\n default=\"main\",\n metadata={\"help\": \"The specific model version to use (can be a branch name, tag name or commit id).\"},\n )\n use_auth_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Will use the token generated when running `transformers-cli login` (necessary to use this script \"\n \"with private models).\"\n },\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n source_lang: str = field(default=None, metadata={\"help\": \"Source language id for translation.\"})\n target_lang: str = 
field(default=None, metadata={\"help\": \"Target language id for translation.\"})\n\n dataset_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The name of the dataset to use (via the datasets library).\"}\n )\n dataset_config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"The configuration name of the dataset to use (via the datasets library).\"}\n )\n train_file: Optional[str] = field(default=None, metadata={\"help\": \"The input training data file (a jsonlines).\"})\n validation_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"An optional input evaluation data file to evaluate the metrics (sacreblue) on \"\n \"a jsonlines file.\"\n },\n )\n test_file: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"An optional input test data file to evaluate the metrics (sacreblue) on \" \"a jsonlines file.\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and evaluation sets\"}\n )\n preprocessing_num_workers: Optional[int] = field(\n default=None,\n metadata={\"help\": \"The number of processes to use for the preprocessing.\"},\n )\n max_source_length: Optional[int] = field(\n default=1024,\n metadata={\n \"help\": \"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n max_target_length: Optional[int] = field(\n default=128,\n metadata={\n \"help\": \"The maximum total sequence length for target text after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\"\n },\n )\n val_max_target_length: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"The maximum total sequence length for validation target text after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded. 
Will default to `max_target_length`.\"\n \"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used \"\n \"during ``evaluate`` and ``predict``.\"\n },\n )\n pad_to_max_length: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to pad all samples to model maximum sentence length. \"\n \"If False, will pad the samples dynamically when batching to the maximum length in the batch. More \"\n \"efficient on GPU but very bad for TPU.\"\n },\n )\n max_train_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of training examples to this \"\n \"value if set.\"\n },\n )\n max_eval_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of evaluation examples to this \"\n \"value if set.\"\n },\n )\n max_predict_samples: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"For debugging purposes or quicker training, truncate the number of prediction examples to this \"\n \"value if set.\"\n },\n )\n num_beams: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Number of beams to use for evaluation. 
This argument will be passed to ``model.generate``, \"\n \"which is used during ``evaluate`` and ``predict``.\"\n },\n )\n ignore_pad_token_for_loss: bool = field(\n default=True,\n metadata={\n \"help\": \"Whether to ignore the tokens corresponding to padded labels in the loss computation or not.\"\n },\n )\n source_prefix: Optional[str] = field(\n default=None, metadata={\"help\": \"A prefix to add before every source text (useful for T5 models).\"}\n )\n forced_bos_token: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The token to force as the first generated token after the :obj:`decoder_start_token_id`.\"\n \"Useful for multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token \"\n \"needs to be the target language token.(Usually it is the target language token)\"\n },\n )\n\n def __post_init__(self):\n if self.dataset_name is None and self.train_file is None and self.validation_file is None:\n raise ValueError(\"Need either a dataset name or a training/validation file.\")\n elif self.source_lang is None or self.target_lang is None:\n raise ValueError(\"Need to specify the source language and the target language.\")\n\n # accepting both json and jsonl file extensions, as\n # many jsonlines files actually have a .json extension\n valid_extensions = [\"json\", \"jsonl\"]\n\n if self.train_file is not None:\n extension = self.train_file.split(\".\")[-1]\n assert extension in valid_extensions, \"`train_file` should be a jsonlines file.\"\n if self.validation_file is not None:\n extension = self.validation_file.split(\".\")[-1]\n assert extension in valid_extensions, \"`validation_file` should be a jsonlines file.\"\n if self.val_max_target_length is None:\n self.val_max_target_length = self.max_target_length\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of 
concerns.\n\n parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a json file,\n # let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n handlers=[logging.StreamHandler(sys.stdout)],\n )\n\n log_level = training_args.get_process_log_level()\n logger.setLevel(log_level)\n datasets.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.set_verbosity(log_level)\n transformers.utils.logging.enable_default_handler()\n transformers.utils.logging.enable_explicit_format()\n\n # Log on each process the small summary:\n logger.warning(\n f\"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}\"\n + f\"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}\"\n )\n logger.info(f\"Training/evaluation parameters {training_args}\")\n\n if data_args.source_prefix is None and model_args.model_name_or_path in [\n \"t5-small\",\n \"t5-base\",\n \"t5-large\",\n \"t5-3b\",\n \"t5-11b\",\n ]:\n logger.warning(\n \"You're running a t5 model but didn't provide a source prefix, which is expected, e.g. 
with \"\n \"`--source_prefix 'translate English to German: ' `\"\n )\n\n # Detecting last checkpoint.\n last_checkpoint = None\n if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:\n last_checkpoint = get_last_checkpoint(training_args.output_dir)\n if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists and is not empty. \"\n \"Use --overwrite_output_dir to overcome.\"\n )\n elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:\n logger.info(\n f\"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change \"\n \"the `--output_dir` or add `--overwrite_output_dir` to train from scratch.\"\n )\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n # Get the datasets: you can either provide your own JSON training and evaluation files (see below)\n # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/\n # (the dataset will be downloaded automatically from the datasets Hub).\n #\n # For translation, only JSON files are supported, with one field named \"translation\" containing two keys for the\n # source and target languages (unless you adapt what follows).\n #\n # In distributed training, the load_dataset function guarantee that only one local process can concurrently\n # download the dataset.\n if data_args.dataset_name is not None:\n # Downloading and loading a dataset from the hub.\n raw_datasets = load_dataset(\n data_args.dataset_name,\n data_args.dataset_config_name,\n cache_dir=model_args.cache_dir,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n else:\n data_files = {}\n if data_args.train_file is not None:\n data_files[\"train\"] = data_args.train_file\n extension = data_args.train_file.split(\".\")[-1]\n if data_args.validation_file 
is not None:\n data_files[\"validation\"] = data_args.validation_file\n extension = data_args.validation_file.split(\".\")[-1]\n if data_args.test_file is not None:\n data_files[\"test\"] = data_args.test_file\n extension = data_args.test_file.split(\".\")[-1]\n raw_datasets = load_dataset(\n extension,\n data_files=data_files,\n cache_dir=model_args.cache_dir,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at\n # https://huggingface.co/docs/datasets/loading_datasets.html.\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can concurrently\n # download model & vocab.\n config = AutoConfig.from_pretrained(\n model_args.config_name if model_args.config_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,\n cache_dir=model_args.cache_dir,\n use_fast=model_args.use_fast_tokenizer,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n model = AutoModelForSeq2SeqLM.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n revision=model_args.model_revision,\n use_auth_token=True if model_args.use_auth_token else None,\n )\n\n model.resize_token_embeddings(len(tokenizer))\n\n # Set decoder_start_token_id\n if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):\n if isinstance(tokenizer, MBartTokenizer):\n model.config.decoder_start_token_id = 
tokenizer.lang_code_to_id[data_args.target_lang]\n else:\n model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)\n\n if model.config.decoder_start_token_id is None:\n raise ValueError(\"Make sure that `config.decoder_start_token_id` is correctly defined\")\n\n prefix = data_args.source_prefix if data_args.source_prefix is not None else \"\"\n\n # Preprocessing the datasets.\n # We need to tokenize inputs and targets.\n if training_args.do_train:\n column_names = raw_datasets[\"train\"].column_names\n elif training_args.do_eval:\n column_names = raw_datasets[\"validation\"].column_names\n elif training_args.do_predict:\n column_names = raw_datasets[\"test\"].column_names\n else:\n logger.info(\"There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.\")\n return\n\n # For translation we set the codes of our source and target languages (only useful for mBART, the others will\n # ignore those attributes).\n if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):\n assert data_args.target_lang is not None and data_args.source_lang is not None, (\n f\"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --source_lang and \"\n \"--target_lang arguments.\"\n )\n\n tokenizer.src_lang = data_args.source_lang\n tokenizer.tgt_lang = data_args.target_lang\n\n # For multilingual translation models like mBART-50 and M2M100 we need to force the target language token\n # as the first generated token. 
We ask the user to explicitly provide this as --forced_bos_token argument.\n forced_bos_token_id = (\n tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None\n )\n model.config.forced_bos_token_id = forced_bos_token_id\n\n # Get the language codes for input/target.\n source_lang = data_args.source_lang.split(\"_\")[0]\n target_lang = data_args.target_lang.split(\"_\")[0]\n\n # Temporarily set max_target_length for training.\n max_target_length = data_args.max_target_length\n padding = \"max_length\" if data_args.pad_to_max_length else False\n\n if training_args.label_smoothing_factor > 0 and not hasattr(model, \"prepare_decoder_input_ids_from_labels\"):\n logger.warning(\n \"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for\"\n f\"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory\"\n )\n\n def preprocess_function(examples):\n inputs = [ex[source_lang] for ex in examples[\"translation\"]]\n targets = [ex[target_lang] for ex in examples[\"translation\"]]\n inputs = [prefix + inp for inp in inputs]\n model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)\n\n # Setup the tokenizer for targets\n with tokenizer.as_target_tokenizer():\n labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)\n\n # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore\n # padding in the loss.\n if padding == \"max_length\" and data_args.ignore_pad_token_for_loss:\n labels[\"input_ids\"] = [\n [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels[\"input_ids\"]\n ]\n\n model_inputs[\"labels\"] = labels[\"input_ids\"]\n return model_inputs\n\n if training_args.do_train:\n if \"train\" not in raw_datasets:\n raise ValueError(\"--do_train requires a train dataset\")\n 
train_dataset = raw_datasets[\"train\"]\n if data_args.max_train_samples is not None:\n max_train_samples = min(len(train_dataset), data_args.max_train_samples)\n train_dataset = train_dataset.select(range(max_train_samples))\n with training_args.main_process_first(desc=\"train dataset map pre-processing\"):\n train_dataset = train_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n desc=\"Running tokenizer on train dataset\",\n )\n\n if training_args.do_eval:\n max_target_length = data_args.val_max_target_length\n if \"validation\" not in raw_datasets:\n raise ValueError(\"--do_eval requires a validation dataset\")\n eval_dataset = raw_datasets[\"validation\"]\n if data_args.max_eval_samples is not None:\n max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)\n eval_dataset = eval_dataset.select(range(max_eval_samples))\n with training_args.main_process_first(desc=\"validation dataset map pre-processing\"):\n eval_dataset = eval_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n desc=\"Running tokenizer on validation dataset\",\n )\n\n if training_args.do_predict:\n max_target_length = data_args.val_max_target_length\n if \"test\" not in raw_datasets:\n raise ValueError(\"--do_predict requires a test dataset\")\n predict_dataset = raw_datasets[\"test\"]\n if data_args.max_predict_samples is not None:\n max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)\n predict_dataset = predict_dataset.select(range(max_predict_samples))\n with training_args.main_process_first(desc=\"prediction dataset map pre-processing\"):\n predict_dataset = predict_dataset.map(\n preprocess_function,\n batched=True,\n num_proc=data_args.preprocessing_num_workers,\n 
remove_columns=column_names,\n load_from_cache_file=not data_args.overwrite_cache,\n desc=\"Running tokenizer on prediction dataset\",\n )\n\n # Data collator\n label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id\n if data_args.pad_to_max_length:\n data_collator = default_data_collator\n else:\n data_collator = DataCollatorForSeq2Seq(\n tokenizer,\n model=model,\n label_pad_token_id=label_pad_token_id,\n pad_to_multiple_of=8 if training_args.fp16 else None,\n )\n\n # Metric\n metric = load_metric(\"sacrebleu\")\n\n def postprocess_text(preds, labels):\n preds = [pred.strip() for pred in preds]\n labels = [[label.strip()] for label in labels]\n\n return preds, labels\n\n def compute_metrics(eval_preds):\n preds, labels = eval_preds\n if isinstance(preds, tuple):\n preds = preds[0]\n decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)\n if data_args.ignore_pad_token_for_loss:\n # Replace -100 in the labels as we can't decode them.\n labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n\n # Some simple post-processing\n decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)\n\n result = metric.compute(predictions=decoded_preds, references=decoded_labels)\n result = {\"bleu\": result[\"score\"]}\n\n prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]\n result[\"gen_len\"] = np.mean(prediction_lens)\n result = {k: round(v, 4) for k, v in result.items()}\n return result\n\n # Initialize our Trainer\n trainer = Seq2SeqTrainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset if training_args.do_train else None,\n eval_dataset=eval_dataset if training_args.do_eval else None,\n tokenizer=tokenizer,\n data_collator=data_collator,\n compute_metrics=compute_metrics if training_args.predict_with_generate else None,\n )\n\n # Training\n if 
training_args.do_train:\n checkpoint = None\n if training_args.resume_from_checkpoint is not None:\n checkpoint = training_args.resume_from_checkpoint\n elif last_checkpoint is not None:\n checkpoint = last_checkpoint\n train_result = trainer.train(resume_from_checkpoint=checkpoint)\n trainer.save_model() # Saves the tokenizer too for easy upload\n\n metrics = train_result.metrics\n max_train_samples = (\n data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)\n )\n metrics[\"train_samples\"] = min(max_train_samples, len(train_dataset))\n\n trainer.log_metrics(\"train\", metrics)\n trainer.save_metrics(\"train\", metrics)\n trainer.save_state()\n\n # Evaluation\n results = {}\n max_length = (\n training_args.generation_max_length\n if training_args.generation_max_length is not None\n else data_args.val_max_target_length\n )\n num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix=\"eval\")\n max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)\n metrics[\"eval_samples\"] = min(max_eval_samples, len(eval_dataset))\n\n trainer.log_metrics(\"eval\", metrics)\n trainer.save_metrics(\"eval\", metrics)\n\n if training_args.do_predict:\n logger.info(\"*** Predict ***\")\n\n predict_results = trainer.predict(\n predict_dataset, metric_key_prefix=\"predict\", max_length=max_length, num_beams=num_beams\n )\n metrics = predict_results.metrics\n max_predict_samples = (\n data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)\n )\n metrics[\"predict_samples\"] = min(max_predict_samples, len(predict_dataset))\n\n trainer.log_metrics(\"predict\", metrics)\n trainer.save_metrics(\"predict\", metrics)\n\n if 
trainer.is_world_process_zero():\n if training_args.predict_with_generate:\n predictions = tokenizer.batch_decode(\n predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True\n )\n predictions = [pred.strip() for pred in predictions]\n output_prediction_file = os.path.join(training_args.output_dir, \"generated_predictions.txt\")\n with open(output_prediction_file, \"w\", encoding=\"utf-8\") as writer:\n writer.write(\"\\n\".join(predictions))\n\n kwargs = {\"finetuned_from\": model_args.model_name_or_path, \"tasks\": \"translation\"}\n if data_args.dataset_name is not None:\n kwargs[\"dataset_tags\"] = data_args.dataset_name\n if data_args.dataset_config_name is not None:\n kwargs[\"dataset_args\"] = data_args.dataset_config_name\n kwargs[\"dataset\"] = f\"{data_args.dataset_name} {data_args.dataset_config_name}\"\n else:\n kwargs[\"dataset\"] = data_args.dataset_name\n\n languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None]\n if len(languages) > 0:\n kwargs[\"language\"] = languages\n\n if training_args.push_to_hub:\n trainer.push_to_hub(**kwargs)\n else:\n trainer.create_model_card(**kwargs)\n\n return results\n\n\ndef _mp_fn(index):\n # For xla_spawn (TPUs)\n main()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.mean", "numpy.where", "numpy.count_nonzero" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kimbyeolhee/ML-DL-Algorithms-Study
[ "c8fb919083707dc43d400da35a4d176cc3af56fe", "c8fb919083707dc43d400da35a4d176cc3af56fe" ]
[ "ML/Naive Bayes/naive bayes implementaion/dataset.py", "DL/CNN/mnist_classification/models/fc_model.py" ]
[ "import pandas as pd\r\n\r\ndef load_data():\r\n training_sentences = [[], []]\r\n\r\n # 데이터 로드 및 Na값 제거\r\n df = pd.read_csv('./ratings.txt', header=0, delimiter='\\t')\r\n df = df.dropna(axis=0)\r\n df.reset_index(drop=True, inplace=True)\r\n\r\n for i in range(len(df)):\r\n if df['label'][i] == 0:\r\n training_sentences[0].append(df['document'][i])\r\n else:\r\n training_sentences[1].append(df['document'][i])\r\n return [' '.join(training_sentences[0]), ' '.join(training_sentences[1])]\r\n\r\nif __name__ == '__main__':\r\n temp = load_data()\r\n\r\n print(temp[0][:10])\r\n print(temp[1][:10])", "import torch\r\nimport torch.nn as nn\r\n\r\nclass FullyConnectedClassifier(nn.Module):\r\n\r\n def __init__(self, input_size, output_size):\r\n self.input_size = input_size\r\n self.output_size = output_size\r\n\r\n super().__init__()\r\n\r\n self.layers = nn.Sequential(\r\n nn.Linear(input_size, 500),\r\n nn.LeakyReLU(),\r\n nn.BatchNorm1d(500),\r\n nn.Linear(500, 400),\r\n nn.LeakyReLU(),\r\n nn.BatchNorm1d(400),\r\n nn.Linear(400, 300),\r\n nn.LeakyReLU(),\r\n nn.BatchNorm1d(300),\r\n nn.Linear(300, 200),\r\n nn.LeakyReLU(),\r\n nn.BatchNorm1d(200),\r\n nn.Linear(200, 100),\r\n nn.LeakyReLU(),\r\n nn.BatchNorm1d(100),\r\n nn.Linear(100, 50),\r\n nn.LeakyReLU(),\r\n nn.BatchNorm1d(50),\r\n nn.Linear(50, output_size),\r\n nn.LogSoftmax(dim = -1),\r\n )\r\n\r\n def forwrad(self, x):\r\n y = self.layers(x)\r\n\r\n return y" ]
[ [ "pandas.read_csv" ], [ "torch.nn.Linear", "torch.nn.LogSoftmax", "torch.nn.BatchNorm1d", "torch.nn.LeakyReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DeepanshS/csdmpy
[ "bd4e138b10694491113b10177a89305697f1752c", "bd4e138b10694491113b10177a89305697f1752c" ]
[ "tests/numpy_wrapper/dimension_reduction_test.py", "csdmpy/helper_functions.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Test for the csdm object\n 1) sum, mean, var, std, prod.\n\"\"\"\nimport numpy as np\nimport pytest\n\nimport csdmpy as cp\n\ndata = np.random.rand(50 * 15).reshape(15, 5, 10)\na = cp.new()\n\ndim = [\n {\"type\": \"linear\", \"count\": 10, \"increment\": \"1\"},\n {\"type\": \"linear\", \"count\": 5, \"increment\": \"1\"},\n {\"type\": \"linear\", \"count\": 15, \"increment\": \"1\"},\n]\ndv = {\"type\": \"internal\", \"components\": [data.ravel()], \"quantity_type\": \"scalar\"}\n\na.dimensions += dim\na.add_dependent_variable(dv)\n\n\ndef test_exceptions():\n error = r\"Index/Indices are expected as integer\"\n with pytest.raises(TypeError, match=error):\n a.sum(axis=0.2)\n\n error = r\"Index/Indices are expected as integer\"\n with pytest.raises(TypeError, match=error):\n a.sum(axis=(1, 0.2))\n\n error = r\"The `index` 4 cannot be greater than the total number of dimensions - 1\"\n with pytest.raises(IndexError, match=error):\n a.sum(axis=4)\n\n\ndef test_sum():\n dimensions = [0, 1, 2]\n i = [[1, 2], [0, 2], [0, 1]]\n assert np.allclose(\n np.sum(a=a, axis=0).dependent_variables[0].components, data.sum(axis=-1)\n )\n assert np.allclose(\n np.sum(a, 0).dependent_variables[0].components, data.sum(axis=-1)\n )\n assert np.allclose(\n np.sum(a, 1).dependent_variables[0].components, data.sum(axis=-2)\n )\n for i_, dimension in zip(i, dimensions):\n b = a.sum(axis=dimension)\n components = b.dependent_variables[0].components[0]\n assert np.allclose(components, data.sum(axis=-dimension - 1))\n assert b.dimensions[0] == a.dimensions[i_[0]]\n assert b.dimensions[1] == a.dimensions[i_[1]]\n\n dimensions = [(0, 1), [0, 2], (1, 2)]\n i = [2, 1, 0]\n for i_, dimension in zip(i, dimensions):\n b = a.sum(axis=dimension)\n components = b.dependent_variables[0].components[0]\n dim_ = tuple(-i - 1 for i in dimension)\n assert np.allclose(components, data.sum(axis=dim_))\n assert b.dimensions[0] == a.dimensions[i_]\n\n b = a.sum()\n assert 
np.allclose(b, data.sum())\n\n assert np.allclose(a.sum(-1).dependent_variables[0].components, data.sum(axis=0))\n\n\ndef test_mean():\n dimensions = [0, 1, 2]\n i = [[1, 2], [0, 2], [0, 1]]\n for i_, dimension in zip(i, dimensions):\n b = a.mean(axis=dimension)\n components = b.dependent_variables[0].components[0]\n assert np.allclose(components, data.mean(axis=-dimension - 1))\n assert b.dimensions[0] == a.dimensions[i_[0]]\n assert b.dimensions[1] == a.dimensions[i_[1]]\n\n dimensions = [(0, 1), [0, 2], (1, 2)]\n i = [2, 1, 0]\n for i_, dimension in zip(i, dimensions):\n b = a.mean(axis=dimension)\n components = b.dependent_variables[0].components[0]\n dim_ = tuple(-i - 1 for i in dimension)\n assert np.allclose(components, data.mean(axis=dim_))\n assert b.dimensions[0] == a.dimensions[i_]\n\n b = a.mean()\n assert np.allclose(b, data.mean())\n\n\ndef test_var():\n dimensions = [0, 1, 2]\n i = [[1, 2], [0, 2], [0, 1]]\n for i_, dimension in zip(i, dimensions):\n b = a.var(axis=dimension)\n components = b.dependent_variables[0].components[0]\n assert np.allclose(components, data.var(axis=-dimension - 1))\n assert b.dimensions[0] == a.dimensions[i_[0]]\n assert b.dimensions[1] == a.dimensions[i_[1]]\n\n dimensions = [(0, 1), [0, 2], (1, 2)]\n i = [2, 1, 0]\n for i_, dimension in zip(i, dimensions):\n b = a.var(axis=dimension)\n components = b.dependent_variables[0].components[0]\n dim_ = tuple(-i - 1 for i in dimension)\n assert np.allclose(components, data.var(axis=dim_))\n assert b.dimensions[0] == a.dimensions[i_]\n\n b = a.var()\n assert np.allclose(b, data.var())\n\n\ndef test_std():\n dimensions = [0, 1, 2]\n i = [[1, 2], [0, 2], [0, 1]]\n for i_, dimension in zip(i, dimensions):\n b = a.std(axis=dimension)\n components = b.dependent_variables[0].components[0]\n assert np.allclose(components, data.std(axis=-dimension - 1))\n assert b.dimensions[0] == a.dimensions[i_[0]]\n assert b.dimensions[1] == a.dimensions[i_[1]]\n\n dimensions = [(0, 1), [0, 2], (1, 
2)]\n i = [2, 1, 0]\n for i_, dimension in zip(i, dimensions):\n b = a.std(axis=dimension)\n components = b.dependent_variables[0].components[0]\n dim_ = tuple(-i - 1 for i in dimension)\n assert np.allclose(components, data.std(axis=dim_))\n assert b.dimensions[0] == a.dimensions[i_]\n\n b = a.std()\n assert np.allclose(b, data.std())\n\n\ndef test_prod():\n dimensions = [0, 1, 2]\n i = [[1, 2], [0, 2], [0, 1]]\n for i_, dimension in zip(i, dimensions):\n b = a.prod(axis=dimension)\n components = b.dependent_variables[0].components[0]\n assert np.allclose(components, data.prod(axis=-dimension - 1))\n assert b.dimensions[0] == a.dimensions[i_[0]]\n assert b.dimensions[1] == a.dimensions[i_[1]]\n\n dimensions = [(0, 1), [0, 2], (1, 2)]\n i = [2, 1, 0]\n for i_, dimension in zip(i, dimensions):\n b = a.prod(axis=dimension)\n components = b.dependent_variables[0].components[0]\n dim_ = tuple(-i - 1 for i in dimension)\n assert np.allclose(components, data.prod(axis=dim_))\n assert b.dimensions[0] == a.dimensions[i_]\n\n b = a.prod()\n assert np.allclose(b, data.prod())\n", "# -*- coding: utf-8 -*-\n\"\"\"Helper functions.\"\"\"\nfrom copy import deepcopy\nfrom warnings import warn\n\nimport matplotlib.projections as proj\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.image import NonUniformImage\n\n\n__author__ = \"Deepansh J. 
Srivastava\"\n__email__ = \"[email protected]\"\n\nscalar = [\"scalar\", \"vector_1\", \"pixel_1\", \"matrix_1_1\", \"symmetric_matrix_1\"]\n\n\ndef _get_label_from_dv(dv, i):\n \"\"\"Return label along with the unit of the dependent variable\n\n Args:\n dv: DependentVariable object.\n i: integer counter.\n \"\"\"\n name, unit = dv.name, dv.unit\n name = name if name != \"\" else str(i)\n label = f\"{name} / ({unit})\" if unit != \"\" else name\n return label\n\n\nclass CSDMAxes(plt.Axes):\n \"\"\"A custom CSDM data plot axes.\"\"\"\n\n name = \"csdm\"\n\n def plot(self, csdm, *args, **kwargs):\n \"\"\"Generate a figure axes using the `plot` method from the matplotlib library.\n\n Apply to all 1D datasets with single-component dependent-variables. For\n multiple dependent variables, the data from individual dependent-variables is\n plotted on the same figure.\n\n Args:\n csdm: A CSDM object of a one-dimensional dataset.\n kwargs: Additional keyword arguments for the matplotlib plot() method.\n\n Example\n -------\n\n >>> ax = plt.subplot(projection='csdm') # doctest: +SKIP\n >>> ax.plot(csdm_object) # doctest: +SKIP\n >>> plt.show() # doctest: +SKIP\n \"\"\"\n if csdm.__class__.__name__ != \"CSDM\":\n return super().plot(csdm, *args, **kwargs)\n\n return self._call_1D(csdm, \"plot\", *args, **kwargs)\n\n def scatter(self, csdm, *args, **kwargs):\n \"\"\"Generate a figure axes using the `scatter` method from the matplotlib\n library.\n\n Apply to all 1D datasets with single-component dependent-variables. 
For\n multiple dependent variables, the data from individual dependent-variables is\n plotted on the same figure.\n\n Args:\n csdm: A CSDM object of a one-dimensional dataset.\n kwargs: Additional keyword arguments for the matplotlib plot() method.\n\n Example\n -------\n\n >>> ax = plt.subplot(projection='csdm') # doctest: +SKIP\n >>> ax.scatter(csdm_object) # doctest: +SKIP\n >>> plt.show() # doctest: +SKIP\n \"\"\"\n if csdm.__class__.__name__ != \"CSDM\":\n return super().scatter(csdm, *args, **kwargs)\n\n return self._call_1D(csdm, \"scatter\", *args, **kwargs)\n\n def imshow(self, csdm, origin=\"lower\", *args, **kwargs):\n \"\"\"Generate a figure axes using the `imshow` method from the matplotlib library.\n\n Apply to all 2D datasets with either single-component (scalar),\n three-components (pixel_3), or four-components (pixel_4) dependent-variables.\n For single-component (scalar) dependent-variable, a colormap image is produced.\n For three-components (pixel_3) dependent-variable, an RGB image is produced.\n For four-components (pixel_4) dependent-variable, an RGBA image is produced.\n\n For multiple dependent variables, the data from individual dependent-variables\n is plotted on the same figure.\n\n Args:\n csdm: A CSDM object of a two-dimensional dataset with scalar, pixel_3, or\n pixel_4 quantity_type dependent variable.\n origin: The matplotlib `origin` argument. In matplotlib, the default is\n 'upper'. 
In csdmpy, however, the default to 'lower'.\n kwargs: Additional keyword arguments for the matplotlib imshow() method.\n\n Example\n -------\n\n >>> ax = plt.subplot(projection='csdm') # doctest: +SKIP\n >>> ax.imshow(csdm_object) # doctest: +SKIP\n >>> plt.show() # doctest: +SKIP\n\n \"\"\"\n if csdm.__class__.__name__ != \"CSDM\":\n return super().imshow(csdm, *args, **kwargs)\n\n x = csdm.dimensions\n\n if x[0].type == \"linear\" and x[1].type == \"linear\":\n return self._call_uniform_2D_image(csdm, origin=origin, *args, **kwargs)\n\n def contour(self, csdm, *args, **kwargs):\n \"\"\"Generate a figure axes using the `contour` method from the matplotlib\n library.\n\n Apply to all 2D datasets with a single-component (scalar) dependent-variables.\n For multiple dependent variables, the data from individual dependent-variables\n is plotted on the same figure.\n\n Args:\n csdm: A CSDM object of a two-dimensional dataset with scalar dependent\n variable.\n kwargs: Additional keyword arguments for the matplotlib contour() method.\n\n Example\n -------\n\n >>> ax = plt.subplot(projection='csdm') # doctest: +SKIP\n >>> ax.contour(csdm_object) # doctest: +SKIP\n >>> plt.show() # doctest: +SKIP\n\n \"\"\"\n if csdm.__class__.__name__ != \"CSDM\":\n return super().contour(csdm, *args, **kwargs)\n\n x = csdm.dimensions\n\n if x[0].type == \"linear\" and x[1].type == \"linear\":\n return self._call_uniform_2D_contour(csdm, \"contour\", *args, **kwargs)\n\n def contourf(self, csdm, *args, **kwargs):\n \"\"\"Generate a figure axes using the `contourf` method from the matplotlib\n library.\n\n Apply to all 2D datasets with a single-component (scalar) dependent-variables.\n For multiple dependent variables, the data from individual dependent-variables\n is plotted on the same figure.\n\n Args:\n csdm: A CSDM object of a two-dimensional dataset with scalar dependent\n variable.\n kwargs: Additional keyword arguments for the matplotlib contourf() method.\n\n Example\n -------\n\n 
>>> ax = plt.subplot(projection='csdm') # doctest: +SKIP\n >>> ax.contourf(csdm_object) # doctest: +SKIP\n >>> plt.show() # doctest: +SKIP\n \"\"\"\n if csdm.__class__.__name__ != \"CSDM\":\n return super().contour(csdm, *args, **kwargs)\n\n x = csdm.dimensions\n\n if x[0].type == \"linear\" and x[1].type == \"linear\":\n return self._call_uniform_2D_contour(csdm, \"contourf\", *args, **kwargs)\n\n def _call_1D(self, csdm, fn, *args, **kwargs):\n _check_1D_dataset(csdm)\n x = csdm.dimensions\n z = csdm.split()\n one = True if len(z) == 1 else False\n legend = False\n for i, item in enumerate(z):\n x_, y_ = item.to_list()\n # dv will always be at index 0 because we called the object.split() before.\n dv = item.dependent_variables[0]\n\n kwargs_ = deepcopy(kwargs)\n # add a default label if not provided by the user.\n if \"label\" not in kwargs_.keys():\n kwargs_[\"label\"] = dv.name if one else _get_label_from_dv(dv, i)\n if kwargs_[\"label\"] != \"\":\n legend = True\n\n if fn == \"plot\":\n r_plt = super().plot(x_, y_, *args, **kwargs_)\n if fn == \"scatter\":\n r_plt = super().scatter(x_, y_, *args, **kwargs_)\n\n self.set_xlim(x[0].coordinates.value.min(), x[0].coordinates.value.max())\n self.set_xlabel(x[0].axis_label)\n\n ylabel = dv.axis_label[0] if one else \"dimensionless\"\n self.set_ylabel(ylabel)\n # self.grid(color=\"gray\", linestyle=\"--\", linewidth=0.5)\n\n if legend:\n self.legend()\n\n return r_plt\n\n def _call_uniform_2D_contour(self, csdm, fn, *args, **kwargs):\n _check_2D_scalar_dataset(csdm)\n kw_keys = kwargs.keys()\n\n # set extent\n x = csdm.dimensions\n x0, x1 = x[0].coordinates.value, x[1].coordinates.value\n\n # add cmap for multiple dependent variables.\n cmaps_bool = False\n if \"cmaps\" in kw_keys:\n cmaps_bool = True\n cmaps = kwargs.pop(\"cmaps\")\n\n one = True if len(csdm.dependent_variables) == 1 else False\n\n for i, dv in enumerate(csdm.dependent_variables):\n y = dv.components\n if dv.quantity_type == \"scalar\":\n if 
cmaps_bool:\n kwargs[\"cmap\"] = cmaps[i]\n\n if fn == \"contour\":\n r_plt = super().contour(x0, x1, y[0], *args, **kwargs)\n if fn == \"contourf\":\n r_plt = super().contourf(x0, x1, y[0], *args, **kwargs)\n\n self.set_xlim(x0.min(), x0.max())\n self.set_ylim(x1.min(), x1.max())\n self.set_xlabel(x[0].axis_label)\n self.set_ylabel(x[1].axis_label)\n if one:\n self.set_title(dv.name)\n return r_plt\n\n def _call_uniform_2D_image(self, csdm, *args, **kwargs):\n _check_2D_scalar_and_pixel_dataset(csdm)\n\n kw_keys = kwargs.keys()\n\n # set extent\n x = csdm.dimensions\n x0, x1 = x[0].coordinates.value, x[1].coordinates.value\n extent = [x0[0], x0[-1], x1[0], x1[-1]]\n if kwargs[\"origin\"] == \"upper\":\n extent = [x0[0], x0[-1], x1[-1], x1[0]]\n if \"extent\" not in kw_keys:\n kwargs[\"extent\"] = extent\n\n # add cmap for multiple dependent variables.\n cmaps_bool = False\n if \"cmaps\" in kw_keys:\n cmaps_bool = True\n cmaps = kwargs.pop(\"cmaps\")\n\n one = True if len(csdm.dependent_variables) == 1 else False\n\n for i, dv in enumerate(csdm.dependent_variables):\n y = dv.components\n if dv.quantity_type == \"scalar\":\n if cmaps_bool:\n kwargs[\"cmap\"] = cmaps[i]\n\n r_plt = super().imshow(y[0], *args, **kwargs)\n\n if dv.quantity_type == \"pixel_3\":\n r_plt = super().imshow(np.moveaxis(y.copy(), 0, -1), *args, **kwargs)\n\n if dv.quantity_type == \"pixel_4\":\n r_plt = super().imshow(np.moveaxis(y.copy(), 0, -1), *args, **kwargs)\n\n self.set_xlabel(x[0].axis_label)\n self.set_ylabel(x[1].axis_label)\n if one:\n self.set_title(dv.name)\n return r_plt\n\n\ntry:\n proj.register_projection(CSDMAxes)\nexcept NameError:\n pass\n\n\ndef _check_1D_dataset(csdm):\n x, y = csdm.dimensions, csdm.dependent_variables\n\n message = (\n \"The function requires a 1D dataset with single-component dependent \"\n \"variables. 
For multiple dependent-variables, the data from all the \"\n \"dependent variables are ploted on the same figure.\"\n )\n if len(x) != 1:\n raise Exception(message)\n for y_ in y:\n if len(y_.components) != 1:\n raise Exception(message)\n\n\ndef _check_2D_scalar_and_pixel_dataset(csdm):\n x, y = csdm.dimensions, csdm.dependent_variables\n\n message = (\n \"The function requires a 2D dataset with a single-component (scalar), \"\n \"three components (pixel_3), or four components (pixel_4) dependent \"\n \"variables. The pixel_3 produces an RGB image while pixel_4, a RGBA image.\"\n )\n if len(x) != 2:\n raise Exception(message)\n for y_ in y:\n if len(y_.components) not in [1, 3, 4]:\n raise Exception(message)\n\n\ndef _check_2D_scalar_dataset(csdm):\n x, y = csdm.dimensions, csdm.dependent_variables\n\n message = (\n \"The function requires a 2D dataset with a single-component (scalar), \"\n \"dependent variables.\"\n )\n if len(x) != 2:\n raise Exception(message)\n for y_ in y:\n if len(y_.components) != 1:\n raise Exception(message)\n\n\n# --------- cp plot functions ---------- #\n\n\ndef _preview(data, reverse_axis=None, range_=None, **kwargs):\n \"\"\"Quick display of the data.\"\"\"\n if reverse_axis is not None:\n kwargs[\"reverse_axis\"] = reverse_axis\n\n if range_ is None:\n range_ = [[None, None], [None, None]]\n\n x = data.dimensions\n y = data.dependent_variables\n y_len = len(y)\n y_grid = int(y_len / 2) + 1\n\n if len(x) == 0:\n raise NotImplementedError(\n \"Preview of zero dimensional datasets is not implemented.\"\n )\n\n if len(x) > 2:\n raise NotImplementedError(\n \"Preview of three or higher dimensional datasets \" \"is not implemented.\"\n )\n\n if np.any([x[i].type == \"labeled\" for i in range(len(x))]):\n raise NotImplementedError(\"Preview of labeled dimensions is not implemented.\")\n\n fig = plt.gcf()\n if y_len <= 2:\n ax = fig.subplots(y_grid)\n ax = [[ax]] if y_len == 1 else [ax]\n else:\n ax = fig.subplots(y_grid, 2)\n\n if len(x) == 
1:\n one_d_plots(ax, x, y, range_, **kwargs)\n\n if len(x) == 2:\n two_d_plots(ax, x, y, range_, **kwargs)\n\n return fig\n\n\ndef one_d_plots(ax, x, y, range_, **kwargs):\n \"\"\"A collection of possible 1D plots.\"\"\"\n for i, y_item in enumerate(y):\n i0 = int(i / 2)\n j0 = int(i % 2)\n ax_ = ax[i0][j0]\n\n if y_item.quantity_type in scalar:\n oneD_scalar(x, y_item, ax_, range_, **kwargs)\n if \"vector\" in y_item.quantity_type:\n vector_plot(x, y_item, ax_, range_, **kwargs)\n # if \"audio\" in y_item.quantity_type:\n # audio(x, y, i, fig, ax, **kwargs)\n\n\ndef two_d_plots(ax, x, y, range_, **kwargs):\n \"\"\"A collection of possible 2D plots.\"\"\"\n for i, y_item in enumerate(y):\n i0 = int(i / 2)\n j0 = int(i % 2)\n ax_ = ax[i0][j0]\n\n if y_item.quantity_type == \"pixel_3\":\n warn(\"This method interprets the `pixel_3` dataset as an RGB image.\")\n RGB_image(x, y_item, ax_, range_, **kwargs)\n\n if y_item.quantity_type in scalar:\n twoD_scalar(x, y_item, ax_, range_, **kwargs)\n\n if \"vector\" in y_item.quantity_type:\n vector_plot(x, y_item, ax_, range_, **kwargs)\n\n\ndef oneD_scalar(x, y, ax, range_, **kwargs):\n reverse = [False]\n if \"reverse_axis\" in kwargs.keys():\n reverse = kwargs[\"reverse_axis\"]\n kwargs.pop(\"reverse_axis\")\n\n components = y.components.shape[0]\n for k in range(components):\n ax.plot(x[0].coordinates, y.components[k], **kwargs)\n ax.set_xlim(x[0].coordinates.value.min(), x[0].coordinates.value.max())\n ax.set_xlabel(f\"{x[0].axis_label} - 0\")\n ax.set_ylabel(y.axis_label[0])\n ax.set_title(\"{0}\".format(y.name))\n ax.grid(color=\"gray\", linestyle=\"--\", linewidth=0.5)\n\n ax.set_xlim(range_[0])\n ax.set_ylim(range_[1])\n\n if reverse[0]:\n ax.invert_xaxis()\n\n\ndef twoD_scalar(x, y, ax, range_, **kwargs):\n reverse = [False, False]\n if \"reverse_axis\" in kwargs.keys():\n reverse = kwargs[\"reverse_axis\"]\n kwargs.pop(\"reverse_axis\")\n\n x0 = x[0].coordinates.value\n x1 = x[1].coordinates.value\n y00 = 
y.components[0]\n extent = [x0[0], x0[-1], x1[0], x1[-1]]\n if \"extent\" not in kwargs.keys():\n kwargs[\"extent\"] = extent\n\n if x[0].type == \"linear\" and x[1].type == \"linear\":\n if \"origin\" not in kwargs.keys():\n kwargs[\"origin\"] = \"lower\"\n if \"aspect\" not in kwargs.keys():\n kwargs[\"aspect\"] = \"auto\"\n\n cs = ax.imshow(y00, **kwargs)\n else:\n if \"interpolation\" not in kwargs.keys():\n kwargs[\"interpolation\"] = \"nearest\"\n\n cs = NonUniformImage(ax, **kwargs)\n cs.set_data(x0, x1, y00)\n ax.images.append(cs)\n\n cbar = ax.figure.colorbar(cs, ax=ax)\n cbar.ax.minorticks_off()\n cbar.set_label(y.axis_label[0])\n ax.set_xlim([extent[0], extent[1]])\n ax.set_ylim([extent[2], extent[3]])\n ax.set_xlabel(f\"{x[0].axis_label} - 0\")\n ax.set_ylabel(f\"{x[1].axis_label} - 1\")\n ax.set_title(\"{0}\".format(y.name))\n ax.grid(color=\"gray\", linestyle=\"--\", linewidth=0.5)\n\n ax.set_xlim(range_[0])\n ax.set_ylim(range_[1])\n\n if reverse[0]:\n ax.invert_xaxis()\n if reverse[1]:\n ax.invert_yaxis()\n\n\ndef vector_plot(x, y, ax, range_, **kwargs):\n reverse = [False, False]\n if \"reverse_axis\" in kwargs.keys():\n reverse = kwargs[\"reverse_axis\"]\n kwargs.pop(\"reverse_axis\")\n\n x0 = x[0].coordinates.value\n if len(x) == 2:\n x1 = x[1].coordinates.value\n else:\n x1 = np.zeros(1)\n\n x0, x1 = np.meshgrid(x0, x1)\n u1 = y.components[0]\n v1 = y.components[1]\n\n if \"pivot\" not in kwargs.keys():\n kwargs[\"pivot\"] = \"middle\"\n ax.quiver(x0, x1, u1, v1, **kwargs)\n ax.set_xlabel(f\"{x[0].axis_label} - 0\")\n ax.set_xlim(x[0].coordinates.value.min(), x[0].coordinates.value.max())\n if len(x) == 2:\n ax.set_ylim(x[1].coordinates.value.min(), x[1].coordinates.value.max())\n ax.set_ylabel(f\"{x[1].axis_label} - 1\")\n if reverse[1]:\n ax.invert_yaxis()\n else:\n ax.set_ylim([-y.components.max(), y.components.max()])\n ax.set_title(\"{0}\".format(y.name))\n ax.grid(color=\"gray\", linestyle=\"--\", linewidth=0.5)\n\n 
ax.set_xlim(range_[0])\n ax.set_ylim(range_[1])\n\n if reverse[0]:\n ax.invert_xaxis()\n\n\ndef RGB_image(x, y, ax, range_, **kwargs):\n reverse = [False, False]\n if \"reverse_axis\" in kwargs.keys():\n reverse = kwargs[\"reverse_axis\"]\n kwargs.pop(\"reverse_axis\")\n\n y0 = y.components\n ax.imshow(np.moveaxis(y0 / y0.max(), 0, -1), **kwargs)\n ax.set_title(\"{0}\".format(y.name))\n\n ax.set_xlim(range_[0])\n ax.set_ylim(range_[1])\n\n if reverse[0]:\n ax.invert_xaxis()\n if reverse[1]:\n ax.invert_yaxis()\n\n\n# def audio(x, y, i0, fig, ax):\n# try:\n# SOUND = 1\n# import sounddevice as sd\n# except ImportError:\n# SOUND = 0\n# string = (\n# \"Module 'sounddevice' is not installed. All audio data files will \"\n# \"not be played. To enable audio files, install 'sounddevice' using\"\n# \" 'pip install sounddevice'.\"\n# )\n# warn(string)\n\n# plot1D(x, y, i0, ax)\n# if SOUND == 1:\n# data_max = y[i0].components.max()\n# sd.play(0.9 * y[i0].components.T / data_max, 1 / x[0].increment.to(\"s\").value)\n" ]
[ [ "numpy.sum", "numpy.random.rand" ], [ "matplotlib.projections.register_projection", "matplotlib.pyplot.gcf", "numpy.meshgrid", "numpy.zeros", "matplotlib.image.NonUniformImage" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CCHiggins/statsmodels
[ "300b6fba90c65c8e94b4f83e04f7ae1b0ceeac2e", "300b6fba90c65c8e94b4f83e04f7ae1b0ceeac2e", "300b6fba90c65c8e94b4f83e04f7ae1b0ceeac2e" ]
[ "examples/python/statespace_arma_0.py", "statsmodels/discrete/count_model.py", "docs/source/plots/graphics_functional_fboxplot.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# DO NOT EDIT\n# Autogenerated from the notebook statespace_arma_0.ipynb.\n# Edit the notebook and then sync the output with this file.\n#\n# flake8: noqa\n# DO NOT EDIT\n\n# # Autoregressive Moving Average (ARMA): Sunspots data\n\n# This notebook replicates the existing ARMA notebook using the\n# `statsmodels.tsa.statespace.SARIMAX` class rather than the\n# `statsmodels.tsa.ARMA` class.\n\nimport numpy as np\nfrom scipy import stats\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport statsmodels.api as sm\n\nfrom statsmodels.graphics.api import qqplot\n\n# ## Sunspots Data\n\nprint(sm.datasets.sunspots.NOTE)\n\ndta = sm.datasets.sunspots.load_pandas().data\n\ndta.index = pd.Index(pd.date_range(\"1700\", end=\"2009\", freq=\"A-DEC\"))\ndel dta[\"YEAR\"]\n\ndta.plot(figsize=(12, 4))\n\nfig = plt.figure(figsize=(12, 8))\nax1 = fig.add_subplot(211)\nfig = sm.graphics.tsa.plot_acf(dta.values.squeeze(), lags=40, ax=ax1)\nax2 = fig.add_subplot(212)\nfig = sm.graphics.tsa.plot_pacf(dta, lags=40, ax=ax2)\n\narma_mod20 = sm.tsa.statespace.SARIMAX(dta, order=(2, 0, 0),\n trend='c').fit(disp=False)\nprint(arma_mod20.params)\n\narma_mod30 = sm.tsa.statespace.SARIMAX(dta, order=(3, 0, 0),\n trend='c').fit(disp=False)\n\nprint(arma_mod20.aic, arma_mod20.bic, arma_mod20.hqic)\n\nprint(arma_mod30.params)\n\nprint(arma_mod30.aic, arma_mod30.bic, arma_mod30.hqic)\n\n# * Does our model obey the theory?\n\nsm.stats.durbin_watson(arma_mod30.resid)\n\nfig = plt.figure(figsize=(12, 4))\nax = fig.add_subplot(111)\nax = plt.plot(arma_mod30.resid)\n\nresid = arma_mod30.resid\n\nstats.normaltest(resid)\n\nfig = plt.figure(figsize=(12, 4))\nax = fig.add_subplot(111)\nfig = qqplot(resid, line='q', ax=ax, fit=True)\n\nfig = plt.figure(figsize=(12, 8))\nax1 = fig.add_subplot(211)\nfig = sm.graphics.tsa.plot_acf(resid, lags=40, ax=ax1)\nax2 = fig.add_subplot(212)\nfig = sm.graphics.tsa.plot_pacf(resid, lags=40, ax=ax2)\n\nr, q, p = 
sm.tsa.acf(resid, fft=True, qstat=True)\ndata = np.c_[r[1:], q, p]\nindex = pd.Index(range(1, q.shape[0] + 1), name=\"lag\")\ntable = pd.DataFrame(data, columns=[\"AC\", \"Q\", \"Prob(>Q)\"], index=index)\nprint(table)\n\n# * This indicates a lack of fit.\n\n# * In-sample dynamic prediction. How good does our model do?\n\npredict_sunspots = arma_mod30.predict(start='1990', end='2012', dynamic=True)\n\nfig, ax = plt.subplots(figsize=(12, 8))\ndta.loc['1950':].plot(ax=ax)\npredict_sunspots.plot(ax=ax, style='r')\n\n\ndef mean_forecast_err(y, yhat):\n return y.sub(yhat).mean()\n\n\nmean_forecast_err(dta.SUNACTIVITY, predict_sunspots)\n", "__all__ = [\"ZeroInflatedPoisson\", \"ZeroInflatedGeneralizedPoisson\",\n \"ZeroInflatedNegativeBinomialP\"]\n\nimport warnings\nimport numpy as np\nimport statsmodels.base.model as base\nimport statsmodels.base.wrapper as wrap\nimport statsmodels.regression.linear_model as lm\nfrom statsmodels.discrete.discrete_model import (DiscreteModel, CountModel,\n Poisson, Logit, CountResults,\n L1CountResults, Probit,\n _discrete_results_docs,\n _validate_l1_method,\n GeneralizedPoisson,\n NegativeBinomialP)\nfrom statsmodels.distributions import zipoisson, zigenpoisson, zinegbin\nfrom statsmodels.tools.numdiff import approx_fprime, approx_hess\nfrom statsmodels.tools.decorators import cache_readonly\nfrom statsmodels.tools.sm_exceptions import ConvergenceWarning\nfrom statsmodels.compat.pandas import Appender\n\n\n_doc_zi_params = \"\"\"\n exog_infl : array_like or None\n Explanatory variables for the binary inflation model, i.e. for\n mixing probability model. 
If None, then a constant is used.\n offset : array_like\n Offset is added to the linear prediction with coefficient equal to 1.\n exposure : array_like\n Log(exposure) is added to the linear prediction with coefficient\n equal to 1.\n inflation : {'logit', 'probit'}\n The model for the zero inflation, either Logit (default) or Probit\n \"\"\"\n\n\nclass GenericZeroInflated(CountModel):\n __doc__ = \"\"\"\n Generic Zero Inflated Model\n\n %(params)s\n %(extra_params)s\n\n Attributes\n ----------\n endog : ndarray\n A reference to the endogenous response variable\n exog : ndarray\n A reference to the exogenous design.\n exog_infl : ndarray\n A reference to the zero-inflated exogenous design.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : _doc_zi_params + base._missing_param_doc}\n\n def __init__(self, endog, exog, exog_infl=None, offset=None,\n inflation='logit', exposure=None, missing='none', **kwargs):\n super(GenericZeroInflated, self).__init__(endog, exog, offset=offset,\n exposure=exposure,\n missing=missing, **kwargs)\n\n if exog_infl is None:\n self.k_inflate = 1\n self._no_exog_infl = True\n self.exog_infl = np.ones((endog.size, self.k_inflate),\n dtype=np.float64)\n else:\n self.exog_infl = exog_infl\n self.k_inflate = exog_infl.shape[1]\n self._no_exog_infl = False\n\n if len(exog.shape) == 1:\n self.k_exog = 1\n else:\n self.k_exog = exog.shape[1]\n\n self.infl = inflation\n if inflation == 'logit':\n self.model_infl = Logit(np.zeros(self.exog_infl.shape[0]),\n self.exog_infl)\n self._hessian_inflate = self._hessian_logit\n elif inflation == 'probit':\n self.model_infl = Probit(np.zeros(self.exog_infl.shape[0]),\n self.exog_infl)\n self._hessian_inflate = self._hessian_probit\n\n else:\n raise ValueError(\"inflation == %s, which is not handled\"\n % inflation)\n\n self.inflation = inflation\n self.k_extra = self.k_inflate\n\n if len(self.exog) != len(self.exog_infl):\n raise ValueError('exog and exog_infl have different number of'\n 
'observation. `missing` handling is not supported')\n\n infl_names = ['inflate_%s' % i for i in self.model_infl.data.param_names]\n self.exog_names[:] = infl_names + list(self.exog_names)\n self.exog_infl = np.asarray(self.exog_infl, dtype=np.float64)\n\n self._init_keys.extend(['exog_infl', 'inflation'])\n self._null_drop_keys = ['exog_infl']\n\n def _get_exogs(self):\n \"\"\"list of exogs, for internal use in post-estimation\n \"\"\"\n return (self.exog, self.exog_infl)\n\n def loglike(self, params):\n \"\"\"\n Loglikelihood of Generic Zero Inflated model.\n\n Parameters\n ----------\n params : array_like\n The parameters of the model.\n\n Returns\n -------\n loglike : float\n The log-likelihood function of the model evaluated at `params`.\n See notes.\n\n Notes\n -----\n .. math:: \\\\ln L=\\\\sum_{y_{i}=0}\\\\ln(w_{i}+(1-w_{i})*P_{main\\\\_model})+\n \\\\sum_{y_{i}>0}(\\\\ln(1-w_{i})+L_{main\\\\_model})\n where P - pdf of main model, L - loglike function of main model.\n \"\"\"\n return np.sum(self.loglikeobs(params))\n\n def loglikeobs(self, params):\n \"\"\"\n Loglikelihood for observations of Generic Zero Inflated model.\n\n Parameters\n ----------\n params : array_like\n The parameters of the model.\n\n Returns\n -------\n loglike : ndarray\n The log likelihood for each observation of the model evaluated\n at `params`. See Notes for definition.\n\n Notes\n -----\n .. 
math:: \\\\ln L=\\\\ln(w_{i}+(1-w_{i})*P_{main\\\\_model})+\n \\\\ln(1-w_{i})+L_{main\\\\_model}\n where P - pdf of main model, L - loglike function of main model.\n\n for observations :math:`i=1,...,n`\n \"\"\"\n params_infl = params[:self.k_inflate]\n params_main = params[self.k_inflate:]\n\n y = self.endog\n w = self.model_infl.predict(params_infl)\n\n w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)\n llf_main = self.model_main.loglikeobs(params_main)\n zero_idx = np.nonzero(y == 0)[0]\n nonzero_idx = np.nonzero(y)[0]\n\n llf = np.zeros_like(y, dtype=np.float64)\n llf[zero_idx] = (np.log(w[zero_idx] +\n (1 - w[zero_idx]) * np.exp(llf_main[zero_idx])))\n llf[nonzero_idx] = np.log(1 - w[nonzero_idx]) + llf_main[nonzero_idx]\n\n return llf\n\n @Appender(DiscreteModel.fit.__doc__)\n def fit(self, start_params=None, method='bfgs', maxiter=35,\n full_output=1, disp=1, callback=None,\n cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):\n if start_params is None:\n offset = getattr(self, \"offset\", 0) + getattr(self, \"exposure\", 0)\n if np.size(offset) == 1 and offset == 0:\n offset = None\n start_params = self._get_start_params()\n\n if callback is None:\n # work around perfect separation callback #3895\n callback = lambda *x: x\n\n mlefit = super(GenericZeroInflated, self).fit(start_params=start_params,\n maxiter=maxiter, disp=disp, method=method,\n full_output=full_output, callback=callback,\n **kwargs)\n\n zipfit = self.result_class(self, mlefit._results)\n result = self.result_class_wrapper(zipfit)\n\n if cov_kwds is None:\n cov_kwds = {}\n\n result._get_robustcov_results(cov_type=cov_type,\n use_self=True, use_t=use_t, **cov_kwds)\n return result\n\n @Appender(DiscreteModel.fit_regularized.__doc__)\n def fit_regularized(self, start_params=None, method='l1',\n maxiter='defined_by_method', full_output=1, disp=1, callback=None,\n alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,\n qc_tol=0.03, **kwargs):\n\n 
_validate_l1_method(method)\n\n if np.size(alpha) == 1 and alpha != 0:\n k_params = self.k_exog + self.k_inflate\n alpha = alpha * np.ones(k_params)\n\n extra = self.k_extra - self.k_inflate\n alpha_p = alpha[:-(self.k_extra - extra)] if (self.k_extra\n and np.size(alpha) > 1) else alpha\n if start_params is None:\n offset = getattr(self, \"offset\", 0) + getattr(self, \"exposure\", 0)\n if np.size(offset) == 1 and offset == 0:\n offset = None\n start_params = self.model_main.fit_regularized(\n start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=0, callback=callback,\n alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params\n start_params = np.append(np.ones(self.k_inflate), start_params)\n cntfit = super(CountModel, self).fit_regularized(\n start_params=start_params, method=method, maxiter=maxiter,\n full_output=full_output, disp=disp, callback=callback,\n alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,\n size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)\n\n discretefit = self.result_class_reg(self, cntfit)\n return self.result_class_reg_wrapper(discretefit)\n\n def score_obs(self, params):\n \"\"\"\n Generic Zero Inflated model score (gradient) vector of the log-likelihood\n\n Parameters\n ----------\n params : array_like\n The parameters of the model\n\n Returns\n -------\n score : ndarray, 1-D\n The score vector of the model, i.e. 
the first derivative of the\n loglikelihood function, evaluated at `params`\n \"\"\"\n params_infl = params[:self.k_inflate]\n params_main = params[self.k_inflate:]\n\n y = self.endog\n w = self.model_infl.predict(params_infl)\n w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)\n score_main = self.model_main.score_obs(params_main)\n llf_main = self.model_main.loglikeobs(params_main)\n llf = self.loglikeobs(params)\n zero_idx = np.nonzero(y == 0)[0]\n nonzero_idx = np.nonzero(y)[0]\n\n mu = self.model_main.predict(params_main)\n\n # TODO: need to allow for complex to use CS numerical derivatives\n dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)\n dldw = np.zeros_like(self.exog_infl, dtype=np.float64)\n\n dldp[zero_idx,:] = (score_main[zero_idx].T *\n (1 - (w[zero_idx]) / np.exp(llf[zero_idx]))).T\n dldp[nonzero_idx,:] = score_main[nonzero_idx]\n\n if self.inflation == 'logit':\n dldw[zero_idx,:] = (self.exog_infl[zero_idx].T * w[zero_idx] *\n (1 - w[zero_idx]) *\n (1 - np.exp(llf_main[zero_idx])) /\n np.exp(llf[zero_idx])).T\n dldw[nonzero_idx,:] = -(self.exog_infl[nonzero_idx].T *\n w[nonzero_idx]).T\n elif self.inflation == 'probit':\n return approx_fprime(params, self.loglikeobs)\n\n return np.hstack((dldw, dldp))\n\n def score(self, params):\n return self.score_obs(params).sum(0)\n\n def _hessian_main(self, params):\n pass\n\n def _hessian_logit(self, params):\n params_infl = params[:self.k_inflate]\n params_main = params[self.k_inflate:]\n\n y = self.endog\n w = self.model_infl.predict(params_infl)\n w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)\n score_main = self.model_main.score_obs(params_main)\n llf_main = self.model_main.loglikeobs(params_main)\n llf = self.loglikeobs(params)\n zero_idx = np.nonzero(y == 0)[0]\n nonzero_idx = np.nonzero(y)[0]\n\n hess_arr = np.zeros((self.k_inflate, self.k_exog + self.k_inflate))\n\n pmf = np.exp(llf)\n\n #d2l/dw2\n for i in range(self.k_inflate):\n for j in range(i, -1, 
-1):\n hess_arr[i, j] = ((\n self.exog_infl[zero_idx, i] * self.exog_infl[zero_idx, j] *\n (w[zero_idx] * (1 - w[zero_idx]) * ((1 -\n np.exp(llf_main[zero_idx])) * (1 - 2 * w[zero_idx]) *\n np.exp(llf[zero_idx]) - (w[zero_idx] - w[zero_idx]**2) *\n (1 - np.exp(llf_main[zero_idx]))**2) /\n pmf[zero_idx]**2)).sum() -\n (self.exog_infl[nonzero_idx, i] * self.exog_infl[nonzero_idx, j] *\n w[nonzero_idx] * (1 - w[nonzero_idx])).sum())\n\n #d2l/dpdw\n for i in range(self.k_inflate):\n for j in range(self.k_exog):\n hess_arr[i, j + self.k_inflate] = -(score_main[zero_idx, j] *\n w[zero_idx] * (1 - w[zero_idx]) *\n self.exog_infl[zero_idx, i] / pmf[zero_idx]).sum()\n\n return hess_arr\n\n def _hessian_probit(self, params):\n pass\n\n def hessian(self, params):\n \"\"\"\n Generic Zero Inflated model Hessian matrix of the loglikelihood\n\n Parameters\n ----------\n params : array_like\n The parameters of the model\n\n Returns\n -------\n hess : ndarray, (k_vars, k_vars)\n The Hessian, second derivative of loglikelihood function,\n evaluated at `params`\n\n Notes\n -----\n \"\"\"\n hess_arr_main = self._hessian_main(params)\n hess_arr_infl = self._hessian_inflate(params)\n\n if hess_arr_main is None or hess_arr_infl is None:\n return approx_hess(params, self.loglike)\n\n dim = self.k_exog + self.k_inflate\n\n hess_arr = np.zeros((dim, dim))\n\n hess_arr[:self.k_inflate,:] = hess_arr_infl\n hess_arr[self.k_inflate:,self.k_inflate:] = hess_arr_main\n\n tri_idx = np.triu_indices(self.k_exog + self.k_inflate, k=1)\n hess_arr[tri_idx] = hess_arr.T[tri_idx]\n\n return hess_arr\n\n def predict(self, params, exog=None, exog_infl=None, exposure=None,\n offset=None, which='mean', y_values=None):\n \"\"\"\n Predict response variable or other statistic given exogenous variables.\n\n Parameters\n ----------\n params : array_like\n The parameters of the model.\n exog : ndarray, optional\n Explanatory variables for the main count model.\n If ``exog`` is None, then the data from the model 
will be used.\n exog_infl : ndarray, optional\n Explanatory variables for the zero-inflation model.\n ``exog_infl`` has to be provided if ``exog`` was provided unless\n ``exog_infl`` in the model is only a constant.\n offset : ndarray, optional\n Offset is added to the linear predictor of the mean function with\n coefficient equal to 1.\n Default is zero if exog is not None, and the model offset if exog\n is None.\n exposure : ndarray, optional\n Log(exposure) is added to the linear predictor with coefficient\n equal to 1. If exposure is specified, then it will be logged by\n the method. The user does not need to log it first.\n Default is one if exog is is not None, and it is the model exposure\n if exog is None.\n which : str (optional)\n Statitistic to predict. Default is 'mean'.\n\n - 'mean' : the conditional expectation of endog E(y | x),\n i.e. exp of linear predictor.\n - 'linear' : the linear predictor of the mean function.\n - 'var' : returns the estimated variance of endog implied by the\n model.\n - 'mean-main' : mean of the main count model\n - 'prob-main' : probability of selecting the main model.\n The probability of zero inflation is ``1 - prob-main``.\n - 'mean-nonzero' : expected value conditional on having observation\n larger than zero, E(y | X, y>0)\n - 'prob-zero' : probability of observing a zero count. P(y=0 | x)\n - 'prob' : probabilities of each count from 0 to max(endog), or\n for y_values if those are provided. 
This is a multivariate\n return (2-dim when predicting for several observations).\n\n y_values : array_like\n Values of the random variable endog at which pmf is evaluated.\n Only used if ``which=\"prob\"``\n \"\"\"\n no_exog = False\n if exog is None:\n no_exog = True\n exog = self.exog\n\n if exog_infl is None:\n if no_exog:\n exog_infl = self.exog_infl\n else:\n if self._no_exog_infl:\n exog_infl = np.ones((len(exog), 1))\n else:\n exog_infl = np.asarray(exog_infl)\n if exog_infl.ndim == 1 and self.k_inflate == 1:\n exog_infl = exog_infl[:, None]\n\n if exposure is None:\n if no_exog:\n exposure = getattr(self, 'exposure', 0)\n else:\n exposure = 0\n else:\n exposure = np.log(exposure)\n\n if offset is None:\n if no_exog:\n offset = getattr(self, 'offset', 0)\n else:\n offset = 0\n\n params_infl = params[:self.k_inflate]\n params_main = params[self.k_inflate:]\n\n prob_main = 1 - self.model_infl.predict(params_infl, exog_infl)\n\n lin_pred = np.dot(exog, params_main[:self.exog.shape[1]]) + exposure + offset\n\n # Refactor: This is pretty hacky,\n # there should be an appropriate predict method in model_main\n # this is just prob(y=0 | model_main)\n tmp_exog = self.model_main.exog\n tmp_endog = self.model_main.endog\n tmp_offset = getattr(self.model_main, 'offset', False)\n tmp_exposure = getattr(self.model_main, 'exposure', False)\n self.model_main.exog = exog\n self.model_main.endog = np.zeros((exog.shape[0]))\n self.model_main.offset = offset\n self.model_main.exposure = exposure\n llf = self.model_main.loglikeobs(params_main)\n self.model_main.exog = tmp_exog\n self.model_main.endog = tmp_endog\n # tmp_offset might be an array with elementwise equality testing\n #if np.size(tmp_offset) == 1 and tmp_offset[0] == 'no':\n if tmp_offset is False:\n del self.model_main.offset\n else:\n self.model_main.offset = tmp_offset\n #if np.size(tmp_exposure) == 1 and tmp_exposure[0] == 'no':\n if tmp_exposure is False:\n del self.model_main.exposure\n else:\n 
self.model_main.exposure = tmp_exposure\n # end hack\n\n prob_zero = (1 - prob_main) + prob_main * np.exp(llf)\n\n if which == 'mean':\n return prob_main * np.exp(lin_pred)\n elif which == 'mean-main':\n return np.exp(lin_pred)\n elif which == 'linear':\n return lin_pred\n elif which == 'mean-nonzero':\n return prob_main * np.exp(lin_pred) / (1 - prob_zero)\n elif which == 'prob-zero':\n return prob_zero\n elif which == 'prob-main':\n return prob_main\n elif which == 'var':\n mu = np.exp(lin_pred)\n return self._predict_var(params, mu, 1 - prob_main)\n elif which == 'prob':\n return self._predict_prob(params, exog, exog_infl, exposure,\n offset, y_values=y_values)\n else:\n raise ValueError('which = %s is not available' % which)\n\n def _derivative_predict(self, params, exog=None, transform='dydx'):\n \"\"\"NotImplemented\n \"\"\"\n raise NotImplementedError\n\n def _derivative_exog(self, params, exog=None, transform=\"dydx\",\n dummy_idx=None, count_idx=None):\n \"\"\"NotImplemented\n \"\"\"\n raise NotImplementedError\n\n def _deriv_mean_dparams(self, params):\n \"\"\"\n Derivative of the expected endog with respect to the parameters.\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n\n Returns\n -------\n The value of the derivative of the expected endog with respect\n to the parameter vector.\n \"\"\"\n params_infl = params[:self.k_inflate]\n params_main = params[self.k_inflate:]\n\n w = self.model_infl.predict(params_infl)\n w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)\n mu = self.model_main.predict(params_main)\n\n score_infl = self.model_infl._deriv_mean_dparams(params_infl)\n score_main = self.model_main._deriv_mean_dparams(params_main)\n\n dmat_infl = - mu[:, None] * score_infl\n dmat_main = (1 - w[:, None]) * score_main\n\n dmat = np.column_stack((dmat_infl, dmat_main))\n return dmat\n\n def _deriv_score_obs_dendog(self, params):\n \"\"\"derivative of score_obs w.r.t. 
endog\n\n Parameters\n ----------\n params : ndarray\n parameter at which score is evaluated\n\n Returns\n -------\n derivative : ndarray_2d\n The derivative of the score_obs with respect to endog.\n \"\"\"\n raise NotImplementedError\n\n # The below currently does not work, discontinuity at zero\n # see https://github.com/statsmodels/statsmodels/pull/7951#issuecomment-996355875 # noqa\n from statsmodels.tools.numdiff import _approx_fprime_scalar\n endog_original = self.endog\n\n def f(y):\n if y.ndim == 2 and y.shape[1] == 1:\n y = y[:, 0]\n self.endog = y\n self.model_main.endog = y\n sf = self.score_obs(params)\n self.endog = endog_original\n self.model_main.endog = endog_original\n return sf\n\n ds = _approx_fprime_scalar(self.endog[:, None], f, epsilon=1e-2)\n\n return ds\n\n\nclass ZeroInflatedPoisson(GenericZeroInflated):\n __doc__ = \"\"\"\n Poisson Zero Inflated Model\n\n %(params)s\n %(extra_params)s\n\n Attributes\n ----------\n endog : ndarray\n A reference to the endogenous response variable\n exog : ndarray\n A reference to the exogenous design.\n exog_infl : ndarray\n A reference to the zero-inflated exogenous design.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : _doc_zi_params + base._missing_param_doc}\n\n def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,\n inflation='logit', missing='none', **kwargs):\n super(ZeroInflatedPoisson, self).__init__(endog, exog, offset=offset,\n inflation=inflation,\n exog_infl=exog_infl,\n exposure=exposure,\n missing=missing, **kwargs)\n self.model_main = Poisson(self.endog, self.exog, offset=offset,\n exposure=exposure)\n self.distribution = zipoisson\n self.result_class = ZeroInflatedPoissonResults\n self.result_class_wrapper = ZeroInflatedPoissonResultsWrapper\n self.result_class_reg = L1ZeroInflatedPoissonResults\n self.result_class_reg_wrapper = L1ZeroInflatedPoissonResultsWrapper\n\n def _hessian_main(self, params):\n params_infl = params[:self.k_inflate]\n 
params_main = params[self.k_inflate:]\n\n y = self.endog\n w = self.model_infl.predict(params_infl)\n w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)\n score = self.score(params)\n zero_idx = np.nonzero(y == 0)[0]\n nonzero_idx = np.nonzero(y)[0]\n\n mu = self.model_main.predict(params_main)\n\n hess_arr = np.zeros((self.k_exog, self.k_exog))\n\n coeff = (1 + w[zero_idx] * (np.exp(mu[zero_idx]) - 1))\n\n #d2l/dp2\n for i in range(self.k_exog):\n for j in range(i, -1, -1):\n hess_arr[i, j] = ((\n self.exog[zero_idx, i] * self.exog[zero_idx, j] *\n mu[zero_idx] * (w[zero_idx] - 1) * (1 / coeff -\n w[zero_idx] * mu[zero_idx] * np.exp(mu[zero_idx]) /\n coeff**2)).sum() - (mu[nonzero_idx] * self.exog[nonzero_idx, i] *\n self.exog[nonzero_idx, j]).sum())\n\n return hess_arr\n\n def _predict_prob(self, params, exog, exog_infl, exposure, offset,\n y_values=None):\n params_infl = params[:self.k_inflate]\n params_main = params[self.k_inflate:]\n\n if y_values is None:\n y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))\n\n if len(exog_infl.shape) < 2:\n transform = True\n w = np.atleast_2d(\n self.model_infl.predict(params_infl, exog_infl))[:, None]\n else:\n transform = False\n w = self.model_infl.predict(params_infl, exog_infl)[:, None]\n\n w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)\n mu = self.model_main.predict(params_main, exog,\n offset=offset)[:, None]\n result = self.distribution.pmf(y_values, mu, w)\n return result[0] if transform else result\n\n def _predict_var(self, params, mu, prob_infl):\n \"\"\"predict values for conditional variance V(endog | exog)\n\n Parameters\n ----------\n params : array_like\n The model parameters. 
This is only used to extract extra params\n like dispersion parameter.\n mu : array_like\n Array of mean predictions for main model.\n prob_inlf : array_like\n Array of predicted probabilities of zero-inflation `w`.\n\n Returns\n -------\n Predicted conditional variance.\n \"\"\"\n w = prob_infl\n var_ = (1 - w) * mu * (1 + w * mu)\n return var_\n\n def _get_start_params(self):\n start_params = self.model_main.fit(disp=0, method=\"nm\").params\n start_params = np.append(np.ones(self.k_inflate) * 0.1, start_params)\n return start_params\n\n def get_distribution(self, params, exog=None, exog_infl=None,\n exposure=None, offset=None):\n \"\"\"Get frozen instance of distribution based on predicted parameters.\n\n Parameters\n ----------\n params : array_like\n The parameters of the model.\n exog : ndarray, optional\n Explanatory variables for the main count model.\n If ``exog`` is None, then the data from the model will be used.\n exog_infl : ndarray, optional\n Explanatory variables for the zero-inflation model.\n ``exog_infl`` has to be provided if ``exog`` was provided unless\n ``exog_infl`` in the model is only a constant.\n offset : ndarray, optional\n Offset is added to the linear predictor of the mean function with\n coefficient equal to 1.\n Default is zero if exog is not None, and the model offset if exog\n is None.\n exposure : ndarray, optional\n Log(exposure) is added to the linear predictor of the mean\n function with coefficient equal to 1. If exposure is specified,\n then it will be logged by the method. 
The user does not need to\n log it first.\n Default is one if exog is is not None, and it is the model exposure\n if exog is None.\n\n Returns\n -------\n Instance of frozen scipy distribution subclass.\n \"\"\"\n mu = self.predict(params, exog=exog, exog_infl=exog_infl,\n exposure=exposure, offset=offset, which=\"mean-main\")\n w = self.predict(params, exog=exog, exog_infl=exog_infl,\n exposure=exposure, offset=offset, which=\"prob-main\")\n\n distr = self.distribution(mu[:, None], 1 - w[:, None])\n return distr\n\n\nclass ZeroInflatedGeneralizedPoisson(GenericZeroInflated):\n __doc__ = \"\"\"\n Zero Inflated Generalized Poisson Model\n\n %(params)s\n %(extra_params)s\n\n Attributes\n ----------\n endog : ndarray\n A reference to the endogenous response variable\n exog : ndarray\n A reference to the exogenous design.\n exog_infl : ndarray\n A reference to the zero-inflated exogenous design.\n p : scalar\n P denotes parametrizations for ZIGP regression.\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : _doc_zi_params +\n \"\"\"p : float\n dispersion power parameter for the GeneralizedPoisson model. p=1 for\n ZIGP-1 and p=2 for ZIGP-2. 
Default is p=2\n \"\"\" + base._missing_param_doc}\n\n def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,\n inflation='logit', p=2, missing='none', **kwargs):\n super(ZeroInflatedGeneralizedPoisson, self).__init__(endog, exog,\n offset=offset,\n inflation=inflation,\n exog_infl=exog_infl,\n exposure=exposure,\n missing=missing, **kwargs)\n self.model_main = GeneralizedPoisson(self.endog, self.exog,\n offset=offset, exposure=exposure, p=p)\n self.distribution = zigenpoisson\n self.k_exog += 1\n self.k_extra += 1\n self.exog_names.append(\"alpha\")\n self.result_class = ZeroInflatedGeneralizedPoissonResults\n self.result_class_wrapper = ZeroInflatedGeneralizedPoissonResultsWrapper\n self.result_class_reg = L1ZeroInflatedGeneralizedPoissonResults\n self.result_class_reg_wrapper = L1ZeroInflatedGeneralizedPoissonResultsWrapper\n\n def _get_init_kwds(self):\n kwds = super(ZeroInflatedGeneralizedPoisson, self)._get_init_kwds()\n kwds['p'] = self.model_main.parameterization + 1\n return kwds\n\n def _predict_prob(self, params, exog, exog_infl, exposure, offset,\n y_values=None):\n params_infl = params[:self.k_inflate]\n params_main = params[self.k_inflate:]\n\n p = self.model_main.parameterization + 1\n if y_values is None:\n y_values = np.atleast_2d(np.arange(0, np.max(self.endog)+1))\n\n if len(exog_infl.shape) < 2:\n transform = True\n w = np.atleast_2d(\n self.model_infl.predict(params_infl, exog_infl))[:, None]\n else:\n transform = False\n w = self.model_infl.predict(params_infl, exog_infl)[:, None]\n\n w[w == 1.] = np.nextafter(1, 0)\n mu = self.model_main.predict(params_main, exog,\n exposure=exposure, offset=offset)[:, None]\n result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)\n return result[0] if transform else result\n\n def _predict_var(self, params, mu, prob_infl):\n \"\"\"predict values for conditional variance V(endog | exog)\n\n Parameters\n ----------\n params : array_like\n The model parameters. 
This is only used to extract extra params\n like dispersion parameter.\n mu : array_like\n Array of mean predictions for main model.\n prob_inlf : array_like\n Array of predicted probabilities of zero-inflation `w`.\n\n Returns\n -------\n Predicted conditional variance.\n \"\"\"\n alpha = params[-1]\n w = prob_infl\n p = self.model_main.parameterization\n var_ = (1 - w) * mu * ((1 + alpha * mu**p)**2 + w * mu)\n return var_\n\n def _get_start_params(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=ConvergenceWarning)\n start_params = ZeroInflatedPoisson(self.endog, self.exog,\n exog_infl=self.exog_infl).fit(disp=0).params\n start_params = np.append(start_params, 0.1)\n return start_params\n\n @Appender(ZeroInflatedPoisson.get_distribution.__doc__)\n def get_distribution(self, params, exog=None, exog_infl=None,\n exposure=None, offset=None):\n\n p = self.model_main.parameterization + 1\n mu = self.predict(params, exog=exog, exog_infl=exog_infl,\n exposure=exposure, offset=offset, which=\"mean-main\")\n w = self.predict(params, exog=exog, exog_infl=exog_infl,\n exposure=exposure, offset=offset, which=\"prob-main\")\n distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])\n return distr\n\n\nclass ZeroInflatedNegativeBinomialP(GenericZeroInflated):\n __doc__ = \"\"\"\n Zero Inflated Generalized Negative Binomial Model\n\n %(params)s\n %(extra_params)s\n\n Attributes\n ----------\n endog : ndarray\n A reference to the endogenous response variable\n exog : ndarray\n A reference to the exogenous design.\n exog_infl : ndarray\n A reference to the zero-inflated exogenous design.\n p : scalar\n P denotes parametrizations for ZINB regression. p=1 for ZINB-1 and\n p=2 for ZINB-2. Default is p=2\n \"\"\" % {'params' : base._model_params_doc,\n 'extra_params' : _doc_zi_params +\n \"\"\"p : float\n dispersion power parameter for the NegativeBinomialP model. p=1 for\n ZINB-1 and p=2 for ZINM-2. 
Default is p=2\n \"\"\" + base._missing_param_doc}\n\n def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,\n inflation='logit', p=2, missing='none', **kwargs):\n super(ZeroInflatedNegativeBinomialP, self).__init__(endog, exog,\n offset=offset,\n inflation=inflation,\n exog_infl=exog_infl,\n exposure=exposure,\n missing=missing, **kwargs)\n self.model_main = NegativeBinomialP(self.endog, self.exog,\n offset=offset, exposure=exposure, p=p)\n self.distribution = zinegbin\n self.k_exog += 1\n self.k_extra += 1\n self.exog_names.append(\"alpha\")\n self.result_class = ZeroInflatedNegativeBinomialResults\n self.result_class_wrapper = ZeroInflatedNegativeBinomialResultsWrapper\n self.result_class_reg = L1ZeroInflatedNegativeBinomialResults\n self.result_class_reg_wrapper = L1ZeroInflatedNegativeBinomialResultsWrapper\n\n def _get_init_kwds(self):\n kwds = super(ZeroInflatedNegativeBinomialP, self)._get_init_kwds()\n kwds['p'] = self.model_main.parameterization\n return kwds\n\n def _predict_prob(self, params, exog, exog_infl, exposure, offset,\n y_values=None):\n params_infl = params[:self.k_inflate]\n params_main = params[self.k_inflate:]\n\n p = self.model_main.parameterization\n if y_values is None:\n y_values = np.arange(0, np.max(self.endog)+1)\n\n if len(exog_infl.shape) < 2:\n transform = True\n w = np.atleast_2d(\n self.model_infl.predict(params_infl, exog_infl))[:, None]\n else:\n transform = False\n w = self.model_infl.predict(params_infl, exog_infl)[:, None]\n\n w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)\n mu = self.model_main.predict(params_main, exog,\n exposure=exposure, offset=offset)[:, None]\n result = self.distribution.pmf(y_values, mu, params_main[-1], p, w)\n return result[0] if transform else result\n\n def _predict_var(self, params, mu, prob_infl):\n \"\"\"predict values for conditional variance V(endog | exog)\n\n Parameters\n ----------\n params : array_like\n The model parameters. 
This is only used to extract extra params\n like dispersion parameter.\n mu : array_like\n Array of mean predictions for main model.\n prob_inlf : array_like\n Array of predicted probabilities of zero-inflation `w`.\n\n Returns\n -------\n Predicted conditional variance.\n \"\"\"\n alpha = params[-1]\n w = prob_infl\n p = self.model_main.parameterization\n var_ = (1 - w) * mu * (1 + alpha * mu**(p - 1) + w * mu)\n return var_\n\n def _get_start_params(self):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=ConvergenceWarning)\n start_params = self.model_main.fit(disp=0, method='nm').params\n start_params = np.append(np.zeros(self.k_inflate), start_params)\n return start_params\n\n @Appender(ZeroInflatedPoisson.get_distribution.__doc__)\n def get_distribution(self, params, exog=None, exog_infl=None,\n exposure=None, offset=None):\n\n p = self.model_main.parameterization\n mu = self.predict(params, exog=exog, exog_infl=exog_infl,\n exposure=exposure, offset=offset, which=\"mean-main\")\n w = self.predict(params, exog=exog, exog_infl=exog_infl,\n exposure=exposure, offset=offset, which=\"prob-main\")\n\n distr = self.distribution(mu[:, None], params[-1], p, 1 - w[:, None])\n return distr\n\n\nclass ZeroInflatedResults(CountResults):\n\n def get_prediction(self, exog=None, exog_infl=None, exposure=None,\n offset=None, which='mean', average=False,\n agg_weights=None, y_values=None,\n transform=True, row_labels=None):\n\n import statsmodels.base._prediction_inference as pred\n\n pred_kwds = {\n 'exog_infl': exog_infl,\n 'exposure': exposure,\n 'offset': offset,\n 'y_values': y_values,\n }\n\n res = pred.get_prediction_delta(self, exog=exog, which=which,\n average=average,\n agg_weights=agg_weights,\n pred_kwds=pred_kwds)\n return res\n\n def get_influence(self):\n \"\"\"\n Influence and outlier measures\n\n See notes section for influence measures that do not apply for\n zero inflated models.\n\n Returns\n -------\n MLEInfluence\n The 
instance has methods to calculate the main influence and\n outlier measures as attributes.\n\n See Also\n --------\n statsmodels.stats.outliers_influence.MLEInfluence\n\n Notes\n -----\n ZeroInflated models have functions that are not differentiable\n with respect to sample endog if endog=0. This means that generalized\n leverage cannot be computed in the usual definition.\n\n Currently, both the generalized leverage, in `hat_matrix_diag`\n attribute and studetized residuals are not available. In the influence\n plot generalized leverage is replaced by a hat matrix diagonal that\n only takes combined exog into account, computed in the same way as\n for OLS. This is a measure for exog outliers but does not take\n specific features of the model into account.\n \"\"\"\n # same as sumper in DiscreteResults, only added for docstring\n from statsmodels.stats.outliers_influence import MLEInfluence\n return MLEInfluence(self)\n\n\nclass ZeroInflatedPoissonResults(ZeroInflatedResults):\n __doc__ = _discrete_results_docs % {\n \"one_line_description\": \"A results class for Zero Inflated Poisson\",\n \"extra_attr\": \"\"}\n\n @cache_readonly\n def _dispersion_factor(self):\n mu = self.predict(which='linear')\n w = 1 - self.predict() / np.exp(self.predict(which='linear'))\n return (1 + w * np.exp(mu))\n\n def get_margeff(self, at='overall', method='dydx', atexog=None,\n dummy=False, count=False):\n \"\"\"Get marginal effects of the fitted model.\n\n Not yet implemented for Zero Inflated Models\n \"\"\"\n raise NotImplementedError(\"not yet implemented for zero inflation\")\n\n\nclass L1ZeroInflatedPoissonResults(L1CountResults, ZeroInflatedPoissonResults):\n pass\n\n\nclass ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(ZeroInflatedPoissonResultsWrapper,\n ZeroInflatedPoissonResults)\n\n\nclass L1ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(L1ZeroInflatedPoissonResultsWrapper,\n 
L1ZeroInflatedPoissonResults)\n\n\nclass ZeroInflatedGeneralizedPoissonResults(ZeroInflatedResults):\n __doc__ = _discrete_results_docs % {\n \"one_line_description\": \"A results class for Zero Inflated Generalized Poisson\",\n \"extra_attr\": \"\"}\n\n @cache_readonly\n def _dispersion_factor(self):\n p = self.model.model_main.parameterization\n alpha = self.params[self.model.k_inflate:][-1]\n mu = np.exp(self.predict(which='linear'))\n w = 1 - self.predict() / mu\n return ((1 + alpha * mu**p)**2 + w * mu)\n\n def get_margeff(self, at='overall', method='dydx', atexog=None,\n dummy=False, count=False):\n \"\"\"Get marginal effects of the fitted model.\n\n Not yet implemented for Zero Inflated Models\n \"\"\"\n raise NotImplementedError(\"not yet implemented for zero inflation\")\n\n\nclass L1ZeroInflatedGeneralizedPoissonResults(L1CountResults,\n ZeroInflatedGeneralizedPoissonResults):\n pass\n\n\nclass ZeroInflatedGeneralizedPoissonResultsWrapper(\n lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(ZeroInflatedGeneralizedPoissonResultsWrapper,\n ZeroInflatedGeneralizedPoissonResults)\n\n\nclass L1ZeroInflatedGeneralizedPoissonResultsWrapper(\n lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(L1ZeroInflatedGeneralizedPoissonResultsWrapper,\n L1ZeroInflatedGeneralizedPoissonResults)\n\n\nclass ZeroInflatedNegativeBinomialResults(ZeroInflatedResults):\n __doc__ = _discrete_results_docs % {\n \"one_line_description\": \"A results class for Zero Inflated Generalized Negative Binomial\",\n \"extra_attr\": \"\"}\n\n @cache_readonly\n def _dispersion_factor(self):\n p = self.model.model_main.parameterization\n alpha = self.params[self.model.k_inflate:][-1]\n mu = np.exp(self.predict(which='linear'))\n w = 1 - self.predict() / mu\n return (1 + alpha * mu**(p-1) + w * mu)\n\n def get_margeff(self, at='overall', method='dydx', atexog=None,\n dummy=False, count=False):\n \"\"\"Get marginal effects of the fitted model.\n\n Not yet implemented for Zero 
Inflated Models\n \"\"\"\n raise NotImplementedError(\"not yet implemented for zero inflation\")\n\n\nclass L1ZeroInflatedNegativeBinomialResults(L1CountResults,\n ZeroInflatedNegativeBinomialResults):\n pass\n\n\nclass ZeroInflatedNegativeBinomialResultsWrapper(\n lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(ZeroInflatedNegativeBinomialResultsWrapper,\n ZeroInflatedNegativeBinomialResults)\n\n\nclass L1ZeroInflatedNegativeBinomialResultsWrapper(\n lm.RegressionResultsWrapper):\n pass\nwrap.populate_wrapper(L1ZeroInflatedNegativeBinomialResultsWrapper,\n L1ZeroInflatedNegativeBinomialResults)\n", "# -*- coding: utf-8 -*-\n\"\"\"\n\nCreated on Fri May 04 11:10:51 2012\n\nAuthor: Ralf Gommers\n\n\"\"\"\n\n#Load the El Nino dataset. Consists of 60 years worth of Pacific Ocean sea\n#surface temperature data.\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport statsmodels.api as sm\n\ndata = sm.datasets.elnino.load()\n\n#Create a functional boxplot. We see that the years 1982-83 and 1997-98 are\n#outliers; these are the years where El Nino (a climate pattern\n#characterized by warming up of the sea surface and higher air pressures)\n#occurred with unusual intensity.\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nres = sm.graphics.fboxplot(data.raw_data.iloc[:, 1:], wfactor=2.58,\n labels=data.raw_data.iloc[:, 0].astype(int),\n ax=ax)\n\nax.set_xlabel(\"Month of the year\")\nax.set_ylabel(\"Sea surface temperature (C)\")\nax.set_xticks(np.arange(13, step=3) - 1)\nax.set_xticklabels([\"\", \"Mar\", \"Jun\", \"Sep\", \"Dec\"])\nax.set_xlim([-0.2, 11.2])\n\n#plt.show()\n" ]
[ [ "scipy.stats.normaltest", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.plot", "pandas.date_range", "matplotlib.pyplot.figure" ], [ "numpy.hstack", "numpy.log", "numpy.dot", "numpy.nonzero", "numpy.triu_indices", "numpy.asarray", "numpy.ones", "numpy.finfo", "numpy.max", "numpy.append", "numpy.size", "numpy.zeros_like", "numpy.column_stack", "numpy.exp", "numpy.zeros", "numpy.nextafter" ], [ "numpy.arange", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bradyrx/xskillscore
[ "6521f9d114edf012a2829e5ac7f423190bb8de4a" ]
[ "xskillscore/tests/test_mask_skipna.py" ]
[ "import numpy as np\nimport pytest\nimport xarray as xr\n\nfrom xskillscore.core.deterministic import (\n mae,\n mape,\n median_absolute_error,\n mse,\n pearson_r,\n pearson_r_p_value,\n r2,\n rmse,\n smape,\n spearman_r,\n spearman_r_p_value,\n)\n\n# Should only have masking issues when pulling in masked\n# grid cells over space.\nAXES = ('time', 'lat', 'lon', ['lat', 'lon'], ['time', 'lat', 'lon'])\n\ndistance_metrics = [mae, mse, median_absolute_error, mape, smape, rmse]\ncorrelation_metrics = [\n pearson_r,\n r2,\n pearson_r_p_value,\n spearman_r,\n spearman_r_p_value,\n]\n\n\[email protected]\ndef a():\n time = xr.cftime_range('2000-01-01', '2000-01-03', freq='D')\n lats = np.arange(4)\n lons = np.arange(5)\n data = np.random.rand(len(time), len(lats), len(lons))\n da = xr.DataArray(data, coords=[time, lats, lons], dims=['time', 'lat', 'lon'])\n return da\n\n\[email protected]\ndef b(a):\n b = a.copy()\n b.values = np.random.rand(a.shape[0], a.shape[1], a.shape[2])\n return b\n\n\ndef mask_land_data(da):\n \"\"\"Masks sample data arbitrarily like a block of land.\"\"\"\n da.data[:, 1:3, 1:3] = np.nan\n return da\n\n\[email protected]('metric', correlation_metrics + distance_metrics)\[email protected]('dim', AXES)\ndef test_metrics_masked(a, b, dim, metric):\n \"\"\"Test for all distance-based metrics whether result of skipna does not\n contain any nans when applied along dim with nans.\"\"\"\n a_masked = mask_land_data(a)\n b_masked = mask_land_data(b)\n res_skipna = metric(a_masked, b_masked, dim, skipna=True)\n res_no_skipna = metric(a_masked, b_masked, dim, skipna=False)\n\n if 'lon' in dim or 'lat' in dim: # metric is applied along axis with nans\n # res_skipna shouldnt have nans\n if metric not in [spearman_r_p_value, pearson_r_p_value]:\n assert not np.isnan(res_skipna).any()\n # res_no_skipna should have different result then skipna\n assert (res_no_skipna != res_skipna).any()\n else: # metric is applied along axis without nans\n 
res_skipna_where_masked = res_skipna.isel(lon=[1, 2], lat=[1, 2])\n res_no_skipna_where_masked = res_no_skipna.isel(lon=[1, 2], lat=[1, 2])\n\n assert np.isnan(res_skipna_where_masked).all()\n assert np.isnan(res_no_skipna_where_masked).all()\n # res_skipna should have a few nans\n assert np.isnan(res_skipna).any()\n # res_no_skipna should have a few nans\n assert np.isnan(res_no_skipna).any()\n # # res_no_skipna should have different result then skipna\n assert (res_no_skipna != res_skipna).any()\n" ]
[ [ "numpy.isnan", "numpy.arange", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mmabadal/dgcnn
[ "bb2e4c23917c9b47ee96519baeef1d25b6c2cdf0" ]
[ "sem_seg/indoor3d_util.py" ]
[ "import numpy as np\nimport glob\nimport os\nimport sys\n\n\n\ndef get_info_classes(cls_path):\n\n classes = []\n colors = []\n\n for line in open(cls_path):\n data = line.split()\n classes.append(data[0])\n colors.append([int(data[1]), int(data[2]), int(data[3])])\n\n labels = {cls: i for i, cls in enumerate(classes)}\n\n label2color = {classes.index(cls): colors[classes.index(cls)] for cls in classes}\n\n return classes, labels, label2color\n\n# -----------------------------------------------------------------------------\n# CONVERT ORIGINAL DATA TO OUR DATA_LABEL FILES\n# -----------------------------------------------------------------------------\n\ndef collect_point_label(anno_path, out_filename, cls_path, file_format='txt'):\n \"\"\" Convert original dataset files to data_label file (each line is XYZRGBL).\n We aggregated all the points from each instance in the room.\n\n Args:\n anno_path: path to annotations. e.g. Area_1/office_2/Annotations/\n out_filename: path to save collected points and labels (each line is XYZRGBL)\n file_format: txt or numpy, determines what file format to save.\n Returns:\n None\n Note:\n the points are shifted before save, the most negative point is now at origin.\n \"\"\"\n points_list = []\n\n g_classes, g_class2label, g_label2color = get_info_classes(cls_path)\n\n #print(g_class2label)\n\n \n\n for f in glob.glob(os.path.join(anno_path, '*.txt')):\n name = os.path.basename(f).split('.')[0]\n bits = name.split('_')\n cls = bits[0]\n\n points = np.loadtxt(f)\n labels = np.ones((points.shape[0],1)) * g_class2label[cls]\n points_list.append(np.concatenate([points, labels], 1)) # Nx7\n \n data_label = np.concatenate(points_list, 0)\n xyz_min = np.amin(data_label, axis=0)[0:3]\n data_label[:, 0:3] -= xyz_min\n \n if file_format=='txt':\n fout = open(out_filename, 'w')\n for i in range(data_label.shape[0]):\n fout.write('%f %f %f %d %d %d %d\\n' % \\\n (data_label[i,0], data_label[i,1], data_label[i,2],\n data_label[i,3], 
data_label[i,4], data_label[i,5],\n data_label[i,6]))\n fout.close()\n elif file_format=='numpy':\n np.save(out_filename, data_label)\n else:\n print('ERROR!! Unknown file format: %s, please use txt or numpy.' % \\\n (file_format))\n exit()\n\ndef point_label_to_obj(input_filename, out_filename, cls_path, label_color=True):\n \"\"\" For visualization of a room from data_label file,\n\tinput_filename: each line is X Y Z R G B L\n\tout_filename: OBJ filename,\n visualize input file by coloring point with label color\n easy_view: only visualize furnitures and floor\n \"\"\"\n\n g_classes, g_class2label, g_label2color = get_info_classes(cls_path)\n\n data_label = np.loadtxt(input_filename)\n data = data_label[:, 0:6]\n label = data_label[:, -1].astype(int)\n fout = open(out_filename, 'w')\n for i in range(data.shape[0]):\n color = g_label2color[label[i]]\n if label_color:\n fout.write('v %f %f %f %d %d %d\\n' % \\\n (data[i,0], data[i,1], data[i,2], color[0], color[1], color[2]))\n else:\n fout.write('v %f %f %f %d %d %d\\n' % \\\n (data[i,0], data[i,1], data[i,2], data[i,3], data[i,4], data[i,5]))\n fout.close()\n \n\n\n# -----------------------------------------------------------------------------\n# PREPARE BLOCK DATA FOR DEEPNETS TRAINING/TESTING\n# -----------------------------------------------------------------------------\n\ndef sample_data(data, num_sample):\n \"\"\" data is in N x ...\n we want to keep num_samplexC of them.\n if N > num_sample, we will randomly keep num_sample of them.\n if N < num_sample, we will randomly duplicate samples.\n \"\"\"\n empty = list()\n N = data.shape[0]\n if (N == 0):\n return data, empty\n elif (N == num_sample):\n return data, range(N)\n elif (N > num_sample):\n sample = np.random.choice(N, num_sample)\n return data[sample, ...], sample\n else:\n sample = np.random.choice(N, num_sample-N)\n dup_data = data[sample, ...]\n return np.concatenate([data, dup_data], 0), list(range(N))+list(sample)\n\ndef sample_data_label(data, 
label, num_sample):\n new_data, sample_indices = sample_data(data, num_sample)\n if len(sample_indices) != 0:\n new_label = label[sample_indices]\n else:\n new_label = np.array([])\n return new_data, new_label\n \ndef room2blocks(data, label, num_point, block_size=1.0, stride=1.0,\n random_sample=False, sample_num=None, sample_aug=1):\n \"\"\" Prepare block training data.\n Args:\n data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1]\n assumes the data is shifted (min point is origin) and aligned\n (aligned with XYZ axis)\n label: N size uint8 numpy array from 0-12\n num_point: int, how many points to sample in each block\n block_size: float, physical size of the block in meters\n stride: float, stride for block sweeping\n random_sample: bool, if True, we will randomly sample blocks in the room\n sample_num: int, if random sample, how many blocks to sample\n [default: room area]\n sample_aug: if random sample, how much aug\n Returns:\n block_datas: K x num_point x 6 np array of XYZRGB, RGB is in [0,1]\n block_labels: K x num_point x 1 np array of uint8 labels\n \n TODO: for this version, blocking is in fixed, non-overlapping pattern.\n \"\"\"\n assert(stride<=block_size)\n\n limit = np.amax(data, 0)[0:3]\n\n if block_size == 0.1:\n lessthan = 25\n if block_size == 0.2:\n lessthan = 100\n #print(lessthan)\n # Get the corner location for our sampling blocks \n xbeg_list = []\n ybeg_list = []\n if not random_sample:\n num_block_x = int(np.ceil((limit[0] - block_size) / stride)) + 1\n num_block_y = int(np.ceil((limit[1] - block_size) / stride)) + 1\n for i in range(num_block_x):\n for j in range(num_block_y):\n xbeg_list.append(i*stride)\n ybeg_list.append(j*stride)\n else:\n num_block_x = int(np.ceil(limit[0] / block_size))\n num_block_y = int(np.ceil(limit[1] / block_size))\n if sample_num is None:\n sample_num = num_block_x * num_block_y * sample_aug\n for _ in range(sample_num):\n xbeg = np.random.uniform(-block_size, limit[0]) \n ybeg = 
np.random.uniform(-block_size, limit[1]) \n xbeg_list.append(xbeg)\n ybeg_list.append(ybeg)\n\n # Collect blocks\n block_data_list = []\n block_label_list = []\n idx = 0\n for idx in range(len(xbeg_list)): \n xbeg = xbeg_list[idx]\n ybeg = ybeg_list[idx]\n xcond = (data[:,0]<=xbeg+block_size) & (data[:,0]>=xbeg)\n ycond = (data[:,1]<=ybeg+block_size) & (data[:,1]>=ybeg)\n cond = xcond & ycond\n if np.sum(cond) < 0: # lessthan: # 0 // discard block if there are less than lessthan pts.\n print(\"DISCARTED BLOCK\")\n continue\n \n block_data = data[cond, :]\n block_label = label[cond]\n \n # randomly subsample data\n block_data_sampled, block_label_sampled = \\\n sample_data_label(block_data, block_label, num_point)\n if block_label_sampled.size == 0:\n continue\n block_data_list.append(np.expand_dims(block_data_sampled, 0))\n block_label_list.append(np.expand_dims(block_label_sampled, 0))\n \n\n if len(block_data_list) > 0:\n data_batch = np.concatenate(block_data_list, 0)\n label_batch = np.concatenate(block_label_list, 0)\n return data_batch, label_batch\n else:\n return np.array([]), np.array([])\n\n\n\ndef room2blocks_plus(data_label, num_point, block_size, stride,\n random_sample, sample_num, sample_aug):\n \"\"\" room2block with input filename and RGB preprocessing.\n \"\"\"\n data = data_label[:,0:6]\n data[:,3:6] /= 255.0\n label = data_label[:,-1].astype(np.uint8)\n \n return room2blocks(data, label, num_point, block_size, stride,\n random_sample, sample_num, sample_aug)\n \ndef room2blocks_wrapper(data_label_filename, num_point, block_size=1.0, stride=1.0,\n random_sample=False, sample_num=None, sample_aug=1):\n if data_label_filename[-3:] == 'txt':\n data_label = np.loadtxt(data_label_filename)\n elif data_label_filename[-3:] == 'npy':\n data_label = np.load(data_label_filename)\n else:\n print('Unknown file type! 
exiting.')\n exit()\n return room2blocks_plus(data_label, num_point, block_size, stride,\n random_sample, sample_num, sample_aug)\n\ndef room2blocks_plus_normalized(data_label, num_point, block_size, stride,\n random_sample, sample_num, sample_aug):\n \"\"\" room2block, with input filename and RGB preprocessing.\n for each block centralize XYZ, add normalized XYZ as 678 channels\n \"\"\"\n data = data_label[:,0:6]\n data[:,3:6] /= 255.0\n label = data_label[:,-1].astype(np.uint8)\n max_room_x = max(data[:,0])\n max_room_y = max(data[:,1])\n max_room_z = max(data[:,2])\n \n data_batch, label_batch = room2blocks(data, label, num_point, block_size, stride,\n random_sample, sample_num, sample_aug)\n new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))\n for b in range(data_batch.shape[0]):\n new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x\n new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y\n new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z\n minx = min(data_batch[b, :, 0])\n miny = min(data_batch[b, :, 1])\n data_batch[b, :, 0] -= (minx+block_size/2)\n data_batch[b, :, 1] -= (miny+block_size/2)\n new_data_batch[:, :, 0:6] = data_batch\n return new_data_batch, label_batch\n\ndef room2blocks_plus_normalized_parsed(data_label, max_data, num_point, block_size, stride,\n random_sample, sample_num, sample_aug):\n \"\"\" room2block, with input filename and RGB preprocessing.\n for each block centralize XYZ, add normalized XYZ as 678 channels\n \"\"\"\n data = data_label[:,0:6]\n data[:,3:6] /= 255.0\n label = data_label[:,-1].astype(np.uint8)\n \n data_batch, label_batch = room2blocks(data, label, num_point, block_size, stride,\n random_sample, sample_num, sample_aug)\n\n if data_batch.size != 0:\n new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))\n for b in range(data_batch.shape[0]):\n new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_data[0]\n new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_data[1]\n new_data_batch[b, :, 8] 
= data_batch[b, :, 2]/max_data[2]\n minx = min(data_batch[b, :, 0])\n miny = min(data_batch[b, :, 1])\n data_batch[b, :, 0] -= (minx+block_size/2)\n data_batch[b, :, 1] -= (miny+block_size/2)\n new_data_batch[:, :, 0:6] = data_batch\n return new_data_batch, label_batch\n else:\n return np.array([]), np.array([])\n\n\ndef room2blocks_wrapper_normalized(data_label_filename, num_point, block_size=1.0, stride=1.0,\n random_sample=False, sample_num=None, sample_aug=1):\n if data_label_filename[-3:] == 'txt':\n data_label = np.loadtxt(data_label_filename)\n elif data_label_filename[-3:] == 'npy':\n data_label = np.load(data_label_filename)\n else:\n print('Unknown file type! exiting.')\n exit()\n return room2blocks_plus_normalized(data_label, num_point, block_size, stride,\n random_sample, sample_num, sample_aug)\n\ndef room2samples(data, label, sample_num_point):\n \"\"\" Prepare whole room samples.\n\n Args:\n data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1]\n assumes the data is shifted (min point is origin) and\n aligned (aligned with XYZ axis)\n label: N size uint8 numpy array from 0-12\n sample_num_point: int, how many points to sample in each sample\n Returns:\n sample_datas: K x sample_num_point x 9\n numpy array of XYZRGBX'Y'Z', RGB is in [0,1]\n sample_labels: K x sample_num_point x 1 np array of uint8 labels\n \"\"\"\n N = data.shape[0]\n order = np.arange(N)\n np.random.shuffle(order) \n data = data[order, :]\n label = label[order]\n\n batch_num = int(np.ceil(N / float(sample_num_point)))\n sample_datas = np.zeros((batch_num, sample_num_point, 6))\n sample_labels = np.zeros((batch_num, sample_num_point, 1))\n\n for i in range(batch_num):\n beg_idx = i*sample_num_point\n end_idx = min((i+1)*sample_num_point, N)\n num = end_idx - beg_idx\n sample_datas[i,0:num,:] = data[beg_idx:end_idx, :]\n sample_labels[i,0:num,0] = label[beg_idx:end_idx]\n if num < sample_num_point:\n makeup_indices = np.random.choice(N, sample_num_point - num)\n 
sample_datas[i,num:,:] = data[makeup_indices, :]\n sample_labels[i,num:,0] = label[makeup_indices]\n return sample_datas, sample_labels\n\ndef room2samples_plus_normalized(data_label, num_point):\n \"\"\" room2sample, with input filename and RGB preprocessing.\n for each block centralize XYZ, add normalized XYZ as 678 channels\n \"\"\"\n data = data_label[:,0:6]\n data[:,3:6] /= 255.0\n label = data_label[:,-1].astype(np.uint8)\n max_room_x = max(data[:,0])\n max_room_y = max(data[:,1])\n max_room_z = max(data[:,2])\n #print(max_room_x, max_room_y, max_room_z)\n \n data_batch, label_batch = room2samples(data, label, num_point)\n new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))\n for b in range(data_batch.shape[0]):\n new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x\n new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y\n new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z\n #minx = min(data_batch[b, :, 0])\n #miny = min(data_batch[b, :, 1])\n #data_batch[b, :, 0] -= (minx+block_size/2)\n #data_batch[b, :, 1] -= (miny+block_size/2)\n new_data_batch[:, :, 0:6] = data_batch\n return new_data_batch, label_batch\n\n\ndef room2samples_wrapper_normalized(data_label_filename, num_point):\n if data_label_filename[-3:] == 'txt':\n data_label = np.loadtxt(data_label_filename)\n elif data_label_filename[-3:] == 'npy':\n data_label = np.load(data_label_filename)\n else:\n print('Unknown file type! exiting.')\n exit()\n return room2samples_plus_normalized(data_label, num_point)\n\n\n# -----------------------------------------------------------------------------\n# EXTRACT INSTANCE BBOX FROM ORIGINAL DATA (for detection evaluation)\n# -----------------------------------------------------------------------------\n\ndef collect_bounding_box(anno_path, out_filename, cls_path):\n \"\"\" Compute bounding boxes from each instance in original dataset files on\n one room. 
**We assume the bbox is aligned with XYZ coordinate.**\n \n Args:\n anno_path: path to annotations. e.g. Area_1/office_2/Annotations/\n out_filename: path to save instance bounding boxes for that room.\n each line is x1 y1 z1 x2 y2 z2 label,\n where (x1,y1,z1) is the point on the diagonal closer to origin\n Returns:\n None\n Note:\n room points are shifted, the most negative point is now at origin.\n \"\"\"\n bbox_label_list = []\n\n g_classes, g_class2label, g_label2color = get_info_classes(cls_path)\n\n for f in glob.glob(os.path.join(anno_path, '*.txt')):\n name = os.path.basename(f).split('.')[0]\n bits = name.split('_')\n cls = \"\"\n for i in range(len(bits)-1):\n cls = cls + bits[i]\n if i == len(bits)-2:\n break\n cls = cls + \"_\"\n\n if cls not in g_classes: # note: in some room there is 'staris' class..\n cls = 'clutter'\n points = np.loadtxt(f)\n label = g_class2label[cls]\n # Compute tightest axis aligned bounding box\n xyz_min = np.amin(points[:, 0:3], axis=0)\n xyz_max = np.amax(points[:, 0:3], axis=0)\n ins_bbox_label = np.expand_dims(\n np.concatenate([xyz_min, xyz_max, np.array([label])], 0), 0)\n bbox_label_list.append(ins_bbox_label)\n\n bbox_label = np.concatenate(bbox_label_list, 0)\n room_xyz_min = np.amin(bbox_label[:, 0:3], axis=0)\n bbox_label[:, 0:3] -= room_xyz_min \n bbox_label[:, 3:6] -= room_xyz_min \n\n fout = open(out_filename, 'w')\n for i in range(bbox_label.shape[0]):\n fout.write('%f %f %f %f %f %f %d\\n' % \\\n (bbox_label[i,0], bbox_label[i,1], bbox_label[i,2],\n bbox_label[i,3], bbox_label[i,4], bbox_label[i,5],\n bbox_label[i,6]))\n fout.close()\n\ndef bbox_label_to_obj(input_filename, out_filename_prefix, cls_path):\n \"\"\" Visualization of bounding boxes.\n \n Args:\n input_filename: each line is x1 y1 z1 x2 y2 z2 label\n out_filename_prefix: OBJ filename prefix,\n visualize object by g_label2color\n easy_view: if True, only visualize furniture and floor\n Returns:\n output a list of OBJ file and MTL files with the same 
prefix\n \"\"\"\n bbox_label = np.loadtxt(input_filename)\n bbox = bbox_label[:, 0:6]\n label = bbox_label[:, -1].astype(int)\n v_cnt = 0 # count vertex\n ins_cnt = 0 # count instance\n\n g_classes, g_class2label, g_label2color = get_info_classes(cls_path)\n\n for i in range(bbox.shape[0]):\n obj_filename = out_filename_prefix+'_'+g_classes[label[i]]+'_'+str(ins_cnt)+'.obj'\n mtl_filename = out_filename_prefix+'_'+g_classes[label[i]]+'_'+str(ins_cnt)+'.mtl'\n fout_obj = open(obj_filename, 'w')\n fout_mtl = open(mtl_filename, 'w')\n fout_obj.write('mtllib %s\\n' % (os.path.basename(mtl_filename)))\n\n length = bbox[i, 3:6] - bbox[i, 0:3]\n a = length[0]\n b = length[1]\n c = length[2]\n x = bbox[i, 0]\n y = bbox[i, 1]\n z = bbox[i, 2]\n color = np.array(g_label2color[label[i]], dtype=float) / 255.0\n\n material = 'material%d' % (ins_cnt)\n fout_obj.write('usemtl %s\\n' % (material))\n fout_obj.write('v %f %f %f\\n' % (x,y,z+c))\n fout_obj.write('v %f %f %f\\n' % (x,y+b,z+c))\n fout_obj.write('v %f %f %f\\n' % (x+a,y+b,z+c))\n fout_obj.write('v %f %f %f\\n' % (x+a,y,z+c))\n fout_obj.write('v %f %f %f\\n' % (x,y,z))\n fout_obj.write('v %f %f %f\\n' % (x,y+b,z))\n fout_obj.write('v %f %f %f\\n' % (x+a,y+b,z))\n fout_obj.write('v %f %f %f\\n' % (x+a,y,z))\n fout_obj.write('g default\\n')\n v_cnt = 0 # for individual box\n fout_obj.write('f %d %d %d %d\\n' % (4+v_cnt, 3+v_cnt, 2+v_cnt, 1+v_cnt))\n fout_obj.write('f %d %d %d %d\\n' % (1+v_cnt, 2+v_cnt, 6+v_cnt, 5+v_cnt))\n fout_obj.write('f %d %d %d %d\\n' % (7+v_cnt, 6+v_cnt, 2+v_cnt, 3+v_cnt))\n fout_obj.write('f %d %d %d %d\\n' % (4+v_cnt, 8+v_cnt, 7+v_cnt, 3+v_cnt))\n fout_obj.write('f %d %d %d %d\\n' % (5+v_cnt, 8+v_cnt, 4+v_cnt, 1+v_cnt))\n fout_obj.write('f %d %d %d %d\\n' % (5+v_cnt, 6+v_cnt, 7+v_cnt, 8+v_cnt))\n fout_obj.write('\\n')\n\n fout_mtl.write('newmtl %s\\n' % (material))\n fout_mtl.write('Kd %f %f %f\\n' % (color[0], color[1], color[2]))\n fout_mtl.write('\\n')\n fout_obj.close()\n fout_mtl.close() \n\n 
v_cnt += 8\n ins_cnt += 1\n\ndef bbox_label_to_obj_room(input_filename, out_filename_prefix, cls_path, permute=None, center=False):\n \"\"\" Visualization of bounding boxes.\n \n Args:\n input_filename: each line is x1 y1 z1 x2 y2 z2 label\n out_filename_prefix: OBJ filename prefix,\n visualize object by g_label2color\n easy_view: if True, only visualize furniture and floor\n permute: if not None, permute XYZ for rendering, e.g. [0 2 1]\n center: if True, move obj to have zero origin\n Returns:\n output a list of OBJ file and MTL files with the same prefix\n \"\"\"\n bbox_label = np.loadtxt(input_filename)\n bbox = bbox_label[:, 0:6]\n\n g_classes, g_class2label, g_label2color = get_info_classes(cls_path)\n\n if permute is not None:\n assert(len(permute)==3)\n permute = np.array(permute)\n bbox[:,0:3] = bbox[:,permute]\n bbox[:,3:6] = bbox[:,permute+3]\n if center:\n xyz_max = np.amax(bbox[:,3:6], 0)\n bbox[:,0:3] -= (xyz_max/2.0)\n bbox[:,3:6] -= (xyz_max/2.0)\n bbox /= np.max(xyz_max/2.0)\n label = bbox_label[:, -1].astype(int)\n obj_filename = out_filename_prefix+'.obj' \n mtl_filename = out_filename_prefix+'.mtl'\n\n fout_obj = open(obj_filename, 'w')\n fout_mtl = open(mtl_filename, 'w')\n fout_obj.write('mtllib %s\\n' % (os.path.basename(mtl_filename)))\n v_cnt = 0 # count vertex\n ins_cnt = 0 # count instance\n for i in range(bbox.shape[0]):\n\n length = bbox[i, 3:6] - bbox[i, 0:3]\n a = length[0]\n b = length[1]\n c = length[2]\n x = bbox[i, 0]\n y = bbox[i, 1]\n z = bbox[i, 2]\n color = np.array(g_label2color[label[i]], dtype=float) / 255.0\n\n material = 'material%d' % (ins_cnt)\n fout_obj.write('usemtl %s\\n' % (material))\n fout_obj.write('v %f %f %f\\n' % (x,y,z+c))\n fout_obj.write('v %f %f %f\\n' % (x,y+b,z+c))\n fout_obj.write('v %f %f %f\\n' % (x+a,y+b,z+c))\n fout_obj.write('v %f %f %f\\n' % (x+a,y,z+c))\n fout_obj.write('v %f %f %f\\n' % (x,y,z))\n fout_obj.write('v %f %f %f\\n' % (x,y+b,z))\n fout_obj.write('v %f %f %f\\n' % (x+a,y+b,z))\n 
fout_obj.write('v %f %f %f\\n' % (x+a,y,z))\n fout_obj.write('g default\\n')\n fout_obj.write('f %d %d %d %d\\n' % (4+v_cnt, 3+v_cnt, 2+v_cnt, 1+v_cnt))\n fout_obj.write('f %d %d %d %d\\n' % (1+v_cnt, 2+v_cnt, 6+v_cnt, 5+v_cnt))\n fout_obj.write('f %d %d %d %d\\n' % (7+v_cnt, 6+v_cnt, 2+v_cnt, 3+v_cnt))\n fout_obj.write('f %d %d %d %d\\n' % (4+v_cnt, 8+v_cnt, 7+v_cnt, 3+v_cnt))\n fout_obj.write('f %d %d %d %d\\n' % (5+v_cnt, 8+v_cnt, 4+v_cnt, 1+v_cnt))\n fout_obj.write('f %d %d %d %d\\n' % (5+v_cnt, 6+v_cnt, 7+v_cnt, 8+v_cnt))\n fout_obj.write('\\n')\n\n fout_mtl.write('newmtl %s\\n' % (material))\n fout_mtl.write('Kd %f %f %f\\n' % (color[0], color[1], color[2]))\n fout_mtl.write('\\n')\n\n v_cnt += 8\n ins_cnt += 1\n\n fout_obj.close()\n fout_mtl.close() \n\n\ndef collect_point_bounding_box(anno_path, out_filename, file_format, cls_path):\n \"\"\" Compute bounding boxes from each instance in original dataset files on\n one room. **We assume the bbox is aligned with XYZ coordinate.**\n Save both the point XYZRGB and the bounding box for the point's\n parent element.\n \n Args:\n anno_path: path to annotations. e.g. Area_1/office_2/Annotations/\n out_filename: path to save instance bounding boxes for each point,\n plus the point's XYZRGBL\n each line is XYZRGBL offsetX offsetY offsetZ a b c,\n where cx = X+offsetX, cy=X+offsetY, cz=Z+offsetZ\n where (cx,cy,cz) is center of the box, a,b,c are distances from center\n to the surfaces of the box, i.e. 
x1 = cx-a, x2 = cx+a, y1=cy-b etc.\n file_format: output file format, txt or numpy\n Returns:\n None\n\n Note:\n room points are shifted, the most negative point is now at origin.\n \"\"\"\n point_bbox_list = []\n\n g_classes, g_class2label, g_label2color = get_info_classes(cls_path)\n\n for f in glob.glob(os.path.join(anno_path, '*.txt')):\n cls = os.path.basename(f).split('_')[0]\n if cls not in g_classes: # note: in some room there is 'staris' class..\n cls = 'clutter'\n points = np.loadtxt(f) # Nx6\n label = g_class2label[cls] # N,\n # Compute tightest axis aligned bounding box\n xyz_min = np.amin(points[:, 0:3], axis=0) # 3,\n xyz_max = np.amax(points[:, 0:3], axis=0) # 3,\n xyz_center = (xyz_min + xyz_max) / 2\n dimension = (xyz_max - xyz_min) / 2\n\n xyz_offsets = xyz_center - points[:,0:3] # Nx3\n dimensions = np.ones((points.shape[0],3)) * dimension # Nx3\n labels = np.ones((points.shape[0],1)) * label # N\n point_bbox_list.append(np.concatenate([points, labels,\n xyz_offsets, dimensions], 1)) # Nx13\n\n point_bbox = np.concatenate(point_bbox_list, 0) # KxNx13\n room_xyz_min = np.amin(point_bbox[:, 0:3], axis=0)\n point_bbox[:, 0:3] -= room_xyz_min \n\n if file_format == 'txt':\n fout = open(out_filename, 'w')\n for i in range(point_bbox.shape[0]):\n fout.write('%f %f %f %d %d %d %d %f %f %f %f %f %f\\n' % \\\n (point_bbox[i,0], point_bbox[i,1], point_bbox[i,2],\n point_bbox[i,3], point_bbox[i,4], point_bbox[i,5],\n point_bbox[i,6],\n point_bbox[i,7], point_bbox[i,8], point_bbox[i,9],\n point_bbox[i,10], point_bbox[i,11], point_bbox[i,12]))\n \n fout.close()\n elif file_format == 'numpy':\n np.save(out_filename, point_bbox)\n else:\n print('ERROR!! Unknown file format: %s, please use txt or numpy.' % \\\n (file_format))\n exit()\n\n\n" ]
[ [ "numpy.amax", "numpy.expand_dims", "numpy.random.choice", "numpy.amin", "numpy.arange", "numpy.load", "numpy.random.shuffle", "numpy.ones", "numpy.concatenate", "numpy.max", "numpy.save", "numpy.ceil", "numpy.random.uniform", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mirrorcoloured/slcypi
[ "c47975b3523f770d12a521c82e2dfca181e3f35b", "c47975b3523f770d12a521c82e2dfca181e3f35b" ]
[ "MA/ImageAnalysis.py", "MA/Robot_V002J.py" ]
[ "\n# Import statements\n#import pygame\n#import pygame.camera\n#from PIL import Image\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass ImageAnalysis():\n \"\"\"Class with methods for image analysis\"\"\"\n\n def __init__(self):\n \"\"\"Initialize method\"\"\"\n print(\"Initiate ImageAnalysis\")\n\n # Set starting values\n WITDH = 320\n HEIGHT = 240\n filterLower = np.array([5,0,0])\n filterUpper = np.array([75,255,255])\n blockAnalyseYstart = 0\n blockAnalyseYend = 100\n\n def faceDetection(self, bgr):\n gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n for (x,y,w,h) in faces:\n cv2.rectangle(bgr,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = img[y:y+h, x:x+w]\n\n eyes = eye_cascade.detectMultiScale(roi_gray)\n for (ex,ey,ew,eh) in eyes:\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n \n return(faces,bgr)\n\n def opticalFlow(self, current, previous, hsv):\n prvs = cv2.cvtColor(previous,cv2.COLOR_BGR2GRAY)\n next = cv2.cvtColor(current,cv2.COLOR_BGR2GRAY)\n flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)\n mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])\n hsv[...,0] = ang*180/np.pi/2\n hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)\n bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)\n return(bgr)\n \n def featureMatch(self,current,previous):\n orb = cv2.ORB_create()\n orb = cv2.ORB()\n cv2.ocl.setUseOpenCL(False)\n kp1, des1 = orb.detectAndCompute(current,None)\n kp2, des2 = orb.detectAndCompute(previous,None)\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n matches = bf.match(des1,des2)\n matches = sorted(matches, key = lambda x:x.distance)\n res = cv2.drawMatches(current,kp1,previous,kp2,matches[:],None, flags=2)\n res = cv2.resize(res, (320,240))\n return(res) \n \n def edgeDetection(self, bgr):\n laplacian = cv2.Laplacian(bgr,cv2.CV_64F)\n return(laplacian)\n #sobelx = 
cv2.Sobel(frame,cv2.CV_64F,1,0,ksize=5)\n #sobely = cv2.Sobel(frame,cv2.CV_64F,0,1,ksize=5)\n\n # Method to apply color filter\n def colorFilter(self, bgr, erode = False, dilate = False):\n hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, self.filterLower, self.filterUpper)\n if erode == True:\n kernel = np.ones((5,5),np.uint8)\n mask = cv2.erode(mask,kernel,iterations = 1)\n if dilate == True:\n kernel = np.ones((5,5),np.uint8)\n mask = cv2.dilate(mask,kernel,iterations = 1)\n res = cv2.bitwise_and(bgr, bgr, mask=mask)\n return(res, mask)\n \n def smooth(self,img):\n kernel = np.ones((15,15),np.float32)/225\n smoothed = cv2.filter2D(img,-1,kernel)\n return(smoothed)\n\n def blurring(self,img):\n blur = cv2.GaussianBlur(img,(15,15),0)\n return(blur)\n\n def medianBlurring(self,img):\n median = cv2.medianBlur(img,15)\n return(median)\n\n def bilateralBlur(self,img):\n bilateral = cv2.bilateralFilter(img,15,75,75)\n return(bilateral)\n\n def blockAnalyze(self,mask):\n # Assumes 320 width\n sum = 0\n count = 0\n for x in range(5):\n #self.blockAnalyseYstart:self.blockAnalyseYend\n blockCount = np.sum(mask[x*64:x*64+63,0:200]) / 255 \n sum = sum + blockCount * x\n count = count + blockCount\n\n if count > 0:\n overallMean = float(sum) / count \n direction = (overallMean - 2) / 2\n return direction, count\n else:\n return -999, count\n\n\n \n", "# Import statements\nimport sys\nsys.path.append(\"/home/pi/Documents/Robots/slcypi/MA\") ### ADD PATH\nsys.path.append(\"/home/pi/Documents/Robots/slcypi/HAT_Python3\") ### ADD PATH\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom Tank import Tank\nimport picamera\nimport picamera.array\nimport time\nimport pygame\nfrom scipy import ndimage\nfrom time import sleep\n\n# Settings\nWIDTH = 320\nHEIGHT = 240\n\n# Initialize Tank\nrobot = Tank()\nrobot.correctDirections(True,True,True)\n\n# Initialize Pygame\npygame.init()\npygame.display.set_caption('My Robot')\nscreen = 
pygame.display.set_mode((WIDTH,HEIGHT),0)\n\n# Image analysis\nlower = np.array([25,35,70])\nupper = np.array([75,255,205])\n\ndef blockAnalyze(mask):\n # Assume 320,240 image\n mask = np.transpose(mask)\n sum = 0\n count = 0\n for x in range(5):\n blockCount = np.sum(mask[x*64:x*64+63,0:100]) / 255 \n sum = sum + blockCount * x\n count = count + blockCount\n\n if count > 0:\n overallMean = float(sum) / count \n direction = (overallMean - 2) / 2\n return direction, count\n else:\n return -999, count\n\n \n# Analyze line function\ndef analyzeLine(mask, WIDTH, HEIGHT):\n\n startY = 0.4\n endY = 0.6\n sum = 0\n count = 0.1\n for x in range(0,WIDTH):\n for y in range(int(HEIGHT*startY),int(HEIGHT*endY)):\n if mask[y,x] == 255:\n sum = sum + x\n count = count + 1\n \n if count > 5:\n\n # Compute average\n average = sum / count\n \n # standardize\n direction = (average - (WIDTH / 2)) / (WIDTH /2) \n \n return direction, count\n else:\n return -999, count\n\n\n \n\nauto = False \ndone = False\nstartTime = time.time()\nprint(startTime)\nwith picamera.PiCamera() as camera:\n with picamera.array.PiRGBArray(camera) as stream:\n camera.resolution = (320, 240)\n\n while done == False:\n \n camera.capture(stream, 'bgr', use_video_port=True)\n # stream.array now contains the image data in BGR order\n \n # Image process\n frame = stream.array \n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n #hsv = cv2.transpose(hsv)\n mask = cv2.inRange(hsv, lower, upper)\n res = cv2.bitwise_and(frame, frame, mask=mask)\n res = cv2.transpose(res)\n sface = pygame.surfarray.make_surface(res) \n\n # Display image\n screen.blit(sface,(0,0))\n pygame.display.update() \n \n # User events\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if (event.key == pygame.K_ESCAPE):\n done = True\n if event.key == (pygame.K_UP):\n robot.driveSync(1)\n if event.key == (pygame.K_DOWN):\n robot.driveSync(-1)\n if (event.key == pygame.K_LEFT):\n robot.rotateSync(1,45)\n if (event.key == 
pygame.K_RIGHT):\n robot.rotateSync(-1,45) \n if (event.key == pygame.K_q):\n auto = True\n if (event.key == pygame.K_w):\n auto = False\n robot.driveSync(0)\n robot.rotateSync(0)\n if (event.key == pygame.K_7):\n upper[0] = upper[0] + 5\n print(upper)\n if (event.key == pygame.K_u):\n upper[0] = upper[0] - 5\n print(upper)\n if (event.key == pygame.K_j):\n lower[0] = lower[0] + 5\n print(lower)\n if (event.key == pygame.K_m):\n lower[0] = lower[0] - 5\n print(lower)\n\n if (event.key == pygame.K_8):\n upper[1] = upper[1] + 5\n print(upper)\n if (event.key == pygame.K_i):\n upper[1] = upper[1] - 5\n print(upper)\n if (event.key == pygame.K_k):\n lower[1] = lower[1] + 5\n print(lower)\n if (event.key == pygame.K_COMMA):\n lower[1] = lower[1] - 5\n print(lower)\n\n if (event.key == pygame.K_9):\n upper[2] = upper[2] + 5\n print(upper)\n if (event.key == pygame.K_o):\n upper[2] = upper[2] - 5\n print(upper)\n if (event.key == pygame.K_l):\n lower[2] = lower[2] + 5\n print(lower)\n if (event.key == pygame.K_PERIOD):\n lower[2] = lower[2] - 5\n print(lower)\n \n if event.type == pygame.KEYUP:\n if event.key == (pygame.K_UP):\n robot.driveSync(0)\n if event.key == (pygame.K_DOWN):\n robot.driveSync(0)\n if (event.key == pygame.K_LEFT):\n robot.rotateSync(0)\n if (event.key == pygame.K_RIGHT):\n robot.rotateSync(0)\n\n #aRes = blockAnalyze(mask)\n #print(aRes) \n \n # Autonomous\n if auto == True:\n \n # Analyze line\n #aRes = analyzeLine(mask, WIDTH, HEIGHT)\n aRes = blockAnalyze(mask)\n print(aRes) \n dir = aRes[0]\n count = aRes[1]\n \n # Drive \n if abs(dir) > 0.20:\n rotateSpeed = 50\n if abs(dir) > 0.5:\n rotateSpeed = 70\n if dir > 0:\n print(\"Rotate -1\")\n #robot.driveSync(0)\n robot.rotateSync(-1, rotateSpeed)\n sleep(0.05)\n robot.rotateSync(0)\n else:\n print(\"Rotate 1\")\n #robot.driveSync(0)\n robot.rotateSync(1, rotateSpeed)\n sleep(0.05)\n robot.rotateSync(0)\n #else: \n ##robot.rotateSync(0)\n #robot.driveSync(1)\n #sleep(0.1)\n #robot.driveSync(0)\n if 
dir > -999:\n relCount = (1 - abs(dir)) * count\n driveSpeed = int(relCount / 3000 * 50)\n if driveSpeed > 45 : \n robot.driveSync(1, driveSpeed)\n else:\n robot.driveSync(0)\n else:\n robot.driveSync(0)\n \n # Handle stream\n stream.seek(0)\n stream.truncate()\n\n # Compute fps\n lapseTime = (time.time() - startTime)\n startTime = time.time()\n if lapseTime > 0:\n fps = 1.0 / lapseTime\n print(\"fps: \" + str(fps))\n\n" ]
[ [ "numpy.array", "numpy.sum", "numpy.ones" ], [ "numpy.array", "numpy.sum", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
longhuang318/mobile_robot_rl
[ "8f1755be4856f1be8994dd5a156b4278960e6d46" ]
[ "mobile_robot_rl/networks/models.py" ]
[ "from copy import deepcopy\nfrom typing import Callable\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mobile_robot_rl.networks.heads import DeterministicPolicyHead\nfrom mobile_robot_rl.networks.heads import GaussianPolicyHead\nfrom mobile_robot_rl.networks.heads import ValueHead\n\n\nclass Critic(nn.Module):\n def __init__(self, phi: nn.Module):\n super(Critic, self).__init__()\n self._phi = deepcopy(phi)\n self._value = ValueHead(self._phi.output_dim)\n\n def forward(self,\n action: torch.Tensor,\n state: Tuple[torch.Tensor, ...]) -> torch.Tensor:\n if isinstance(state, tuple):\n return self._value(self._phi(action, *state))\n return self._value(self._phi(action, state))\n\n\nclass DistributionalCritic(nn.Module):\n def __init__(self,\n phi: nn.Module,\n distribution_type: str,\n support_dim: int):\n super(DistributionalCritic, self).__init__()\n assert distribution_type in ('categorical', 'quantile')\n\n self._phi = deepcopy(phi)\n self._dist = ValueHead(self._phi.output_dim, support_dim)\n self._support_dim = support_dim\n self._distribution_type = distribution_type\n\n def forward(self, *x):\n probs = self._dist(x).view(-1, 1, self._support_dim)\n if self._distribution_type == 'categorical':\n return F.softmax(probs, dim=-1)\n return probs\n\n\nclass DoubleCritic(nn.Module):\n def __init__(self, phi: Union[Tuple[nn.Module, nn.Module], nn.Module]):\n super(DoubleCritic, self).__init__()\n if isinstance(phi, tuple):\n self._critic_1 = Critic(phi[0])\n self._critic_2 = Critic(phi[1])\n else:\n self._critic_1 = Critic(phi)\n self._critic_2 = Critic(phi)\n\n def q1_parameters(self):\n return self._critic_1.parameters()\n\n def q2_parameters(self):\n return self._critic_2.parameters()\n\n def forward(self,\n action: torch.Tensor,\n state: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:\n return self._critic_1(action, state), 
self._critic_2(action, state)\n\n\nclass DeterministicActor(nn.Module):\n def __init__(self,\n phi: nn.Module,\n output_dim: int,\n fan_init: bool = False,\n activation_fn: Callable = torch.tanh):\n super(DeterministicActor, self).__init__()\n self._phi = deepcopy(phi)\n self._head = DeterministicPolicyHead(\n input_dim=self._phi.output_dim,\n output_dim=output_dim,\n fan_init=fan_init,\n activation_fn=activation_fn)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self._head(self._phi(x))\n\n\nclass GaussianActor(nn.Module):\n def __init__(self,\n phi: nn.Module,\n output_dim: int,\n std_limits: Tuple[float, float] = (-20.0, 2.0),\n independent_std: bool = False,\n squash: bool = True,\n reparameterize: bool = True,\n fan_init: bool = True):\n super(GaussianActor, self).__init__()\n self._phi = phi\n self._head = GaussianPolicyHead(\n input_dim=self._phi.output_dim,\n output_dim=output_dim,\n std_limits=std_limits,\n independent_std=independent_std,\n squash=squash,\n reparameterize=reparameterize,\n fan_init=fan_init)\n\n def forward(self,\n x: torch.Tensor,\n raw_action: Optional[torch.Tensor] = None,\n deterministic: bool = False) -> Tuple[torch.Tensor, ...]:\n if isinstance(x, tuple):\n return self._head.sample(self._phi(*x), raw_action, deterministic)\n return self._head.sample(self._phi(x), raw_action, deterministic)\n\n\nif __name__ == '__main__':\n import mobile_robot_rl.networks.bodies as b\n head = GaussianActor(b.FusionModel(4, (256, )), 2)\n print(head((torch.rand(1, 4, 14), torch.rand(1, 4, 32, 32))))\n" ]
[ [ "torch.nn.functional.softmax", "torch.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tlambert03/pycuda-affine
[ "b815aaa49e4ac55417f9b4916fb4effbad699ab8" ]
[ "pycuda_transforms/transform.py" ]
[ "import pycuda.autoinit # noqa\nfrom pycuda.compiler import SourceModule\nimport pycuda.driver as cuda\nfrom pycuda import gpuarray\nimport numpy as np\nimport os\nfrom functools import wraps\n\ncubic_dir = os.path.join(os.path.dirname(__file__), \"cubic\")\nwith open(__file__.replace(\".py\", \".cu\"), \"r\") as f:\n mod_affine = SourceModule(f.read(), no_extern_c=True, include_dirs=[cubic_dir])\n\n_affine2D = mod_affine.get_function(\"affine2D\")\n# _affine2D_RA = mod_affine.get_function(\"affine2D_RA\")\n_affine3D = mod_affine.get_function(\"affine3D\")\n# _affine3D_RA = mod_affine.get_function(\"affine3D_RA\")\ntexref2D = mod_affine.get_texref(\"texref2d\")\ntexref3D = mod_affine.get_texref(\"texref3d\")\ntexref2D.set_address_mode(0, cuda.address_mode.BORDER)\ntexref2D.set_address_mode(1, cuda.address_mode.BORDER)\ntexref3D.set_address_mode(0, cuda.address_mode.BORDER)\ntexref3D.set_address_mode(1, cuda.address_mode.BORDER)\ntexref3D.set_address_mode(2, cuda.address_mode.BORDER)\n\n\ns2c2dx = None\ns2c2dy = None\ns2c3dx = None\ns2c3dy = None\ns2c3dz = None\n\n\ndef import_prefilter_2D():\n global s2c2dx, s2c2dy\n with open(os.path.join(cubic_dir, \"cubicPrefilter2D.cu\"), \"r\") as f:\n modcubic2 = SourceModule(f.read(), no_extern_c=True, include_dirs=[cubic_dir])\n s2c2dx = modcubic2.get_function(\"SamplesToCoefficients2DX\")\n s2c2dy = modcubic2.get_function(\"SamplesToCoefficients2DY\")\n\n\ndef import_prefilter_3D():\n global s2c3dx, s2c3dy, s2c3dz\n with open(os.path.join(cubic_dir, \"cubicPrefilter3D.cu\"), \"r\") as f:\n modcubic3 = SourceModule(f.read(), no_extern_c=True, include_dirs=[cubic_dir])\n s2c3dx = modcubic3.get_function(\"SamplesToCoefficients3DX\")\n s2c3dy = modcubic3.get_function(\"SamplesToCoefficients3DY\")\n s2c3dz = modcubic3.get_function(\"SamplesToCoefficients3DZ\")\n\n\ndef _bind_tex(array):\n assert array.ndim in (2, 3), \"Texture binding only valid for 2 or 3D arrays\"\n if isinstance(array, np.ndarray):\n ary = 
cuda.np_to_array(array, \"F\" if np.isfortran(array) else \"C\")\n elif isinstance(array, gpuarray.GPUArray):\n ary = cuda.gpuarray_to_array(array, \"F\" if array.flags.f_contiguous else \"C\")\n else:\n raise ValueError(\"Can only bind numpy arrays or GPUarray\")\n if array.ndim == 2:\n texref2D.set_array(ary)\n elif array.ndim == 3:\n texref3D.set_array(ary)\n\n\ndef _set_tex_filter_mode(mode, ndim):\n assert mode in [\n \"linear\",\n \"point\", # aka nearest neighbor\n \"nearest\",\n \"cubic\",\n \"cubic-prefilter\",\n ], f\"unrecognized interpolation mode: {mode}\"\n if mode == \"linear\":\n # default is point\n if ndim == 3:\n texref3D.set_filter_mode(cuda.filter_mode.LINEAR)\n elif ndim == 2:\n texref2D.set_filter_mode(cuda.filter_mode.LINEAR)\n\n\ndef _with_bound_texture(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n args = list(args)\n array = args[0]\n # so far these all require float32\n if not array.dtype == np.float32:\n array = array.astype(np.float32)\n kmode = kwargs.get(\"mode\", \"\")\n if (\"pre\" in kmode and \"cub\" in kmode) or any(\n [(\"pre\" in x and \"cub\" in x) for x in args if isinstance(x, str)]\n ):\n array = spline_filter(array)\n # bind array to textureRef\n _bind_tex(array)\n args[0] = array\n _set_tex_filter_mode(kwargs.get(\"mode\", \"nearest\"), ndim=array.ndim)\n return func(*args, **kwargs)\n\n return wrapper\n\n\ndef make_translation_matrix(mag):\n if len(mag) == 3:\n tmat = np.eye(4)\n tmat[0, 3] = mag[2]\n tmat[1, 3] = mag[1]\n tmat[2, 3] = mag[0]\n elif len(mag) == 2:\n tmat = np.eye(3)\n tmat[0, 2] = mag[1]\n tmat[1, 2] = mag[0]\n return tmat\n\n\ndef make_scaling_matrix(scalar):\n if len(scalar) == 3:\n tmat = np.eye(4)\n tmat[0, 0] = 1 / scalar[2]\n tmat[1, 1] = 1 / scalar[1]\n tmat[2, 2] = 1 / scalar[0]\n elif len(scalar) == 2:\n tmat = np.eye(3)\n tmat[0, 0] = 1 / scalar[1]\n tmat[1, 1] = 1 / scalar[0]\n return tmat\n\n\ndef make_rotation_matrix(array, angle, axis=0):\n theta = angle * np.pi / 180\n _sin = 
np.sin(theta)\n _cos = np.cos(theta)\n if array.ndim == 3:\n nz, ny, nx = array.shape\n # first translate the middle of the image to the origin\n T1 = np.array(\n [[1, 0, 0, nx / 2], [0, 1, 0, ny / 2], [0, 0, 1, nz / 2], [0, 0, 0, 1]]\n )\n # then rotate theta degrees about the Y axis\n if axis in (0, \"z\", \"Z\"):\n R = np.array(\n [[_cos, _sin, 0, 0], [-_sin, _cos, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]\n )\n elif axis in (1, \"y\", \"Y\"):\n R = np.array(\n [[_cos, 0, -_sin, 0], [0, 1, 0, 0], [_sin, 0, _cos, 0], [0, 0, 0, 1]]\n )\n elif axis in (2, \"x\", \"X\"):\n R = np.array(\n [[1, 0, 0, 0], [0, _cos, _sin, 0], [0, -_sin, _cos, 0], [0, 0, 0, 1]]\n )\n else:\n raise ValueError(\"Unrecognized axis of rotation: {}\".format(axis))\n # then translate back to the original origin\n T2 = np.array(\n [[1, 0, 0, -nx / 2], [0, 1, 0, -ny / 2], [0, 0, 1, -nz / 2], [0, 0, 0, 1]]\n )\n return np.dot(np.dot(np.dot(np.eye(4), T1), R), T2)\n if array.ndim == 2:\n ny, nx = array.shape\n # first translate the middle of the image to the origin\n T1 = np.array([[1, 0, nx / 2], [0, 1, ny / 2], [0, 0, 1]])\n # then rotate theta degrees\n R = np.array([[_cos, -_sin, 0], [_sin, _cos, 0], [0, 0, 1]])\n # then translate back to the original origin\n T2 = np.array([[1, 0, -nx / 2], [0, 1, -ny / 2], [0, 0, 1]])\n return np.dot(np.dot(np.dot(np.eye(3), T1), R), T2)\n raise ValueError(\"Can only do 2D and 3D rotations\")\n\n\ndef _make_grid(shape, blocks):\n if len(shape) == 3:\n out_z, out_y, out_x = shape\n bx, by, bz = blocks\n return ((out_x + bx - 1) // bx, (out_y + by - 1) // by, (out_z + bz - 1) // bz)\n elif len(shape) == 2:\n out_y, out_x = shape\n bx, by = blocks[:2]\n return ((out_x + bx - 1) // bx, (out_y + by - 1) // by, 1)\n\n\ndef _do_affine(shape, tmat, mode, blocks):\n tmat = tmat.T\n if len(shape) == 3:\n if not tmat.shape == (4, 4):\n raise ValueError(f\"3D transformation matrix must be 4x4, saw {tmat.shape}\")\n _func = _affine3D\n _tref = texref3D\n elif len(shape) == 
2:\n if not tmat.shape == (3, 3):\n raise ValueError(f\"3D transformation matrix must be 3x3, saw {tmat.shape}\")\n _func = _affine2D\n _tref = texref2D\n\n output = gpuarray.empty(shape, dtype=np.float32)\n grid = _make_grid(shape, blocks)\n _func(\n output,\n *np.flip(np.int32(output.shape)),\n cuda.In(tmat.astype(np.float32).ravel()),\n np.int32(\"cubic\" in mode.lower()),\n texrefs=[_tref],\n block=blocks,\n grid=grid,\n )\n return output\n\n\n@_with_bound_texture\ndef zoom(input, zoom, mode=\"nearest\", blocks=(16, 16, 4), **kwargs):\n \"\"\"scale array with nearest neighbors or linear interpolation\n\n If a float, `zoom` is the same for each axis. If a sequence,\n `zoom` should contain one value for each axis.\n \"\"\"\n if isinstance(zoom, (int, float)):\n zoom = tuple([zoom] * input.ndim)\n assert (\n len(zoom) == input.ndim\n ), \"scalar must either be a scalar or a list with the same length as array.ndim\"\n\n # make scaling array\n tmat = make_scaling_matrix(zoom)\n outshape = tuple(int(x) for x in np.round(np.array(input.shape) * zoom))\n return _do_affine(outshape, tmat, mode, blocks)\n\n\n@_with_bound_texture\ndef rotate(input, angle, axis=0, mode=\"nearest\", blocks=(16, 16, 4), **kwargs):\n \"\"\"Rotate an array.\n\n axis can be either 0,1,2 or z,y,x\n \"\"\"\n tmat = make_rotation_matrix(input, angle, axis)\n return _do_affine(input.shape, tmat, mode, blocks)\n\n\n@_with_bound_texture\ndef shift(input, shift, mode=\"nearest\", blocks=(16, 16, 4), **kwargs):\n \"\"\"translate array\n\n mag is number of pixels to translate in (z,y,x)\n must be tuple with length array.ndim\n \"\"\"\n if isinstance(shift, (int, float)):\n shift = tuple([shift] * input.ndim)\n assert (\n len(shift) == input.ndim\n ), \"shift must either be a scalar or a list with the same length as array.ndim\"\n\n tmat = make_translation_matrix(shift)\n return _do_affine(input.shape, tmat, mode, blocks)\n\n\n@_with_bound_texture\ndef affine_transform(\n input, matrix, 
output_shape=None, mode=\"nearest\", blocks=(16, 16, 4), **kwargs\n):\n \"\"\"Apply an affine transformation.\n\n Args:\n input (pycuda.gpuarray): The input array.\n matrix (pycuda.gpuarray): The inverse coordinate transformation matrix,\n mapping output coordinates to input coordinates. If ``ndim`` is the\n number of dimensions of ``input``, the given matrix must be of shape\n ``(ndim + 1, ndim + 1)``: (assume that the transformation is\n specified using homogeneous coordinates).\n mode (str): type of interpolation ('nearest', 'linear', 'cubic', 'cubic-prefilter')\n Returns:\n pycuda.gpuarray\n .. seealso:: :func:`scipy.ndimage.affine_transform`\n \"\"\"\n shape = output_shape if output_shape is not None else input.shape\n return _do_affine(shape, matrix, mode, blocks)\n\n\ndef pow2divider(num):\n if num == 0:\n return 0\n divider = 1\n while (num & divider) == 0:\n divider <<= 1\n return divider\n\n\ndef _cubic_bspline_prefilter_3D(ary_gpu):\n if s2c3dx is None:\n import_prefilter_3D()\n depth, height, width = np.int32(ary_gpu.shape)\n pitch = np.int32(width * 4) # width of a row in the image in bytes\n dimX = np.int32(min(min(pow2divider(width), pow2divider(height)), 64))\n dimY = np.int32(min(min(pow2divider(depth), pow2divider(height)), 512 / dimX))\n blocks = (int(dimX), int(dimY), 1)\n gridX = (int(height // dimX), int(depth // dimY), 1)\n gridY = (int(width // dimX), int(depth // dimY), 1)\n gridZ = (int(width // dimX), int(height // dimY), 1)\n s2c3dx(ary_gpu, pitch, width, height, depth, block=blocks, grid=gridX)\n s2c3dy(ary_gpu, pitch, width, height, depth, block=blocks, grid=gridY)\n s2c3dz(ary_gpu, pitch, width, height, depth, block=blocks, grid=gridZ)\n return ary_gpu\n\n\ndef _cubic_bspline_prefilter_2D(ary_gpu):\n if s2c2dx is None:\n import_prefilter_2D()\n height, width = np.int32(ary_gpu.shape)\n pitch = np.int32(width * 4) # width of a row in the image in bytes\n blockx = (int(min(pow2divider(height), 64)), 1, 1)\n blocky = 
(int(min(pow2divider(width), 64)), 1, 1)\n gridX = (int(height // blockx[0]), 1, 1)\n gridY = (int(width // blocky[0]), 1, 1)\n s2c2dx(ary_gpu, pitch, width, height, block=blockx, grid=gridX)\n s2c2dy(ary_gpu, pitch, width, height, block=blocky, grid=gridY)\n return ary_gpu\n\n\ndef spline_filter(array):\n if not isinstance(array, gpuarray.GPUArray):\n ary_gpu = gpuarray.to_gpu(np.ascontiguousarray(array).astype(np.float32))\n if array.ndim == 2:\n return _cubic_bspline_prefilter_2D(ary_gpu)\n elif array.ndim == 3:\n return _cubic_bspline_prefilter_3D(ary_gpu)\n" ]
[ [ "numpy.ascontiguousarray", "numpy.eye", "numpy.int32", "numpy.isfortran", "numpy.cos", "numpy.sin", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
albarqounilab/MONAI
[ "d4d173362b71a9af6c5414db591994f799e4fd2c", "bb0b307d68021a243011a58fd82a1d275f00a51a", "bb0b307d68021a243011a58fd82a1d275f00a51a", "bb0b307d68021a243011a58fd82a1d275f00a51a" ]
[ "monai/networks/nets/resnet.py", "tests/test_compute_roc_auc.py", "monai/metrics/metric.py", "tests/test_divisible_pad.py" ]
[ "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import partial\nfrom typing import Any, Callable, List, Type, Union\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom monai.networks.layers.factories import Conv, Norm, Pool\n\n__all__ = [\"ResNet\", \"resnet10\", \"resnet18\", \"resnet34\", \"resnet50\", \"resnet101\", \"resnet152\", \"resnet200\"]\n\n\ndef get_inplanes():\n return [64, 128, 256, 512]\n\n\ndef get_avgpool():\n return [(0), (1), (1, 1), (1, 1, 1)]\n\n\ndef get_conv1(conv1_t_size: int, conv1_t_stride: int):\n return (\n [(0), (conv1_t_size), (conv1_t_size, 7), (conv1_t_size, 7, 7)],\n [(0), (conv1_t_stride), (conv1_t_stride, 2), (conv1_t_stride, 2, 2)],\n [(0), (conv1_t_size // 2), (conv1_t_size // 2, 3), (conv1_t_size // 2, 3, 3)],\n )\n\n\nclass ResNetBlock(nn.Module):\n expansion = 1\n\n def __init__(\n self,\n in_planes: int,\n planes: int,\n spatial_dims: int = 3,\n stride: int = 1,\n downsample: Union[nn.Module, partial, None] = None,\n ) -> None:\n \"\"\"\n Args:\n in_planes: number of input channels.\n planes: number of output channels.\n spatial_dims: number of spatial dimensions of the input image.\n stride: stride to use for first conv layer.\n downsample: which downsample layer to use.\n \"\"\"\n super(ResNetBlock, self).__init__()\n\n conv_type: Callable = Conv[Conv.CONV, spatial_dims]\n norm_type: Callable = Norm[Norm.BATCH, spatial_dims]\n\n self.conv1 = 
conv_type(in_planes, planes, kernel_size=3, padding=1, stride=stride, bias=False)\n self.bn1 = norm_type(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, bias=False)\n self.bn2 = norm_type(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n residual = x\n\n out: torch.Tensor = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNetBottleneck(nn.Module):\n expansion = 4\n\n def __init__(\n self,\n in_planes: int,\n planes: int,\n spatial_dims: int = 3,\n stride: int = 1,\n downsample: Union[nn.Module, partial, None] = None,\n ) -> None:\n \"\"\"\n Args:\n in_planes: number of input channels.\n planes: number of output channels (taking expansion into account).\n spatial_dims: number of spatial dimensions of the input image.\n stride: stride to use for second conv layer.\n downsample: which downsample layer to use.\n \"\"\"\n\n super(ResNetBottleneck, self).__init__()\n\n conv_type: Callable = Conv[Conv.CONV, spatial_dims]\n norm_type: Callable = Norm[Norm.BATCH, spatial_dims]\n\n self.conv1 = conv_type(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = norm_type(planes)\n self.conv2 = conv_type(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = norm_type(planes)\n self.conv3 = conv_type(planes, planes * self.expansion, kernel_size=1, bias=False)\n self.bn3 = norm_type(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n residual = x\n\n out: torch.Tensor = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out 
= self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n \"\"\"\n ResNet based on: `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`_\n and `Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet? <https://arxiv.org/pdf/1711.09577.pdf>`_.\n Adapted from `<https://github.com/kenshohara/3D-ResNets-PyTorch/tree/master/models>`_.\n Args:\n block: which ResNet block to use, either Basic or Bottleneck.\n layers: how many layers to use.\n block_inplanes: determine the size of planes at each step. Also tuneable with widen_factor.\n spatial_dims: number of spatial dimensions of the input image.\n n_input_channels: number of input channels for first convolutional layer.\n conv1_t_size: size of first convolution layer, determines kernel and padding.\n conv1_t_stride: stride of first convolution layer.\n no_max_pool: bool argument to determine if to use maxpool layer.\n shortcut_type: which downsample block to use.\n widen_factor: widen output for each layer.\n n_classes: number of output (classifications)\n \"\"\"\n\n def __init__(\n self,\n block: Type[Union[ResNetBlock, ResNetBottleneck]],\n layers: List[int],\n block_inplanes: List[int],\n spatial_dims: int = 3,\n n_input_channels: int = 3,\n conv1_t_size: int = 7,\n conv1_t_stride: int = 1,\n no_max_pool: bool = False,\n shortcut_type: str = \"B\",\n widen_factor: float = 1.0,\n n_classes: int = 400,\n feed_forward: bool = True,\n ) -> None:\n\n super(ResNet, self).__init__()\n\n conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims]\n norm_type: Type[Union[nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims]\n pool_type: Type[Union[nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims]\n avgp_type: Type[Union[nn.AdaptiveAvgPool1d, nn.AdaptiveAvgPool2d, 
nn.AdaptiveAvgPool3d]] = Pool[\n Pool.ADAPTIVEAVG, spatial_dims\n ]\n\n block_avgpool = get_avgpool()\n conv1_kernel, conv1_stride, con1_padding = get_conv1(conv1_t_size, conv1_t_stride)\n block_inplanes = [int(x * widen_factor) for x in block_inplanes]\n\n self.in_planes = block_inplanes[0]\n self.no_max_pool = no_max_pool\n\n self.conv1 = conv_type(\n n_input_channels,\n self.in_planes,\n kernel_size=conv1_kernel[spatial_dims],\n stride=conv1_stride[spatial_dims],\n padding=con1_padding[spatial_dims],\n bias=False,\n )\n self.bn1 = norm_type(self.in_planes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = pool_type(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, block_inplanes[0], layers[0], spatial_dims, shortcut_type)\n self.layer2 = self._make_layer(block, block_inplanes[1], layers[1], spatial_dims, shortcut_type, stride=2)\n self.layer3 = self._make_layer(block, block_inplanes[2], layers[2], spatial_dims, shortcut_type, stride=2)\n self.layer4 = self._make_layer(block, block_inplanes[3], layers[3], spatial_dims, shortcut_type, stride=2)\n self.avgpool = avgp_type(block_avgpool[spatial_dims])\n\n if feed_forward:\n self.fc = nn.Linear(block_inplanes[3] * block.expansion, n_classes)\n\n for m in self.modules():\n if isinstance(m, conv_type):\n nn.init.kaiming_normal_(torch.as_tensor(m.weight), mode=\"fan_out\", nonlinearity=\"relu\")\n elif isinstance(m, norm_type):\n nn.init.constant_(torch.as_tensor(m.weight), 1)\n nn.init.constant_(torch.as_tensor(m.bias), 0)\n elif isinstance(m, nn.Linear):\n nn.init.constant_(torch.as_tensor(m.bias), 0)\n\n def _downsample_basic_block(self, x: torch.Tensor, planes: int, stride: int, spatial_dims: int = 3) -> torch.Tensor:\n assert spatial_dims == 3\n out: torch.Tensor = F.avg_pool3d(x, kernel_size=1, stride=stride)\n zero_pads = torch.zeros(out.size(0), planes - out.size(1), out.size(2), out.size(3), out.size(4))\n if isinstance(out.data, torch.FloatTensor):\n zero_pads = 
zero_pads.cuda()\n\n out = torch.cat([out.data, zero_pads], dim=1)\n\n return out\n\n def _make_layer(\n self,\n block: Type[Union[ResNetBlock, ResNetBottleneck]],\n planes: int,\n blocks: int,\n spatial_dims: int,\n shortcut_type: str,\n stride: int = 1,\n ) -> nn.Sequential:\n\n conv_type: Callable = Conv[Conv.CONV, spatial_dims]\n norm_type: Callable = Norm[Norm.BATCH, spatial_dims]\n\n downsample: Union[nn.Module, partial, None] = None\n if stride != 1 or self.in_planes != planes * block.expansion:\n if shortcut_type == \"A\":\n downsample = partial(\n self._downsample_basic_block, planes=planes * block.expansion, kernel_size=1, stride=stride\n )\n else:\n downsample = nn.Sequential(\n conv_type(self.in_planes, planes * block.expansion, kernel_size=1, stride=stride),\n norm_type(planes * block.expansion),\n )\n\n layers = []\n layers.append(\n block(\n in_planes=self.in_planes, planes=planes, spatial_dims=spatial_dims, stride=stride, downsample=downsample\n )\n )\n self.in_planes = planes * block.expansion\n for _i in range(1, blocks):\n layers.append(block(self.in_planes, planes, spatial_dims=spatial_dims))\n\n return nn.Sequential(*layers)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n if not self.no_max_pool:\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef _resnet(\n arch: str,\n block: Type[Union[ResNetBlock, ResNetBottleneck]],\n layers: List[int],\n block_inplanes: List[int],\n pretrained: bool,\n progress: bool,\n **kwargs: Any,\n) -> ResNet:\n model = ResNet(block, layers, block_inplanes, **kwargs)\n if pretrained:\n # Author of paper zipped the state_dict on googledrive,\n # so would need to download, unzip and read (2.8gb file for a ~150mb state dict).\n # Would like to load dict from url but need somewhere to save the state dicts.\n 
raise NotImplementedError(\"Currently not implemented, see comments in source code\")\n return model\n\n\ndef resnet10(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n \"\"\"ResNet-10 with optional pretrained support when `spatial_dims` is 3.\n\n Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on 23 medical datasets\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet10\", ResNetBlock, [1, 1, 1, 1], get_inplanes(), pretrained, progress, **kwargs)\n\n\ndef resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n \"\"\"ResNet-18 with optional pretrained support when `spatial_dims` is 3.\n\n Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on 23 medical datasets\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet18\", ResNetBlock, [2, 2, 2, 2], get_inplanes(), pretrained, progress, **kwargs)\n\n\ndef resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n \"\"\"ResNet-34 with optional pretrained support when `spatial_dims` is 3.\n\n Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on 23 medical datasets\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet34\", ResNetBlock, [3, 4, 6, 3], get_inplanes(), pretrained, progress, **kwargs)\n\n\ndef resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n \"\"\"ResNet-50 with optional pretrained support when `spatial_dims` is 3.\n\n 
Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on 23 medical datasets\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet50\", ResNetBottleneck, [3, 4, 6, 3], get_inplanes(), pretrained, progress, **kwargs)\n\n\ndef resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n \"\"\"ResNet-101 with optional pretrained support when `spatial_dims` is 3.\n\n Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on 8 medical datasets\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet101\", ResNetBottleneck, [3, 4, 23, 3], get_inplanes(), pretrained, progress, **kwargs)\n\n\ndef resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n \"\"\"ResNet-152 with optional pretrained support when `spatial_dims` is 3.\n\n Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on 8 medical datasets\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet152\", ResNetBottleneck, [3, 8, 36, 3], get_inplanes(), pretrained, progress, **kwargs)\n\n\ndef resnet200(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n \"\"\"ResNet-200 with optional pretrained support when `spatial_dims` is 3.\n\n Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on 8 medical datasets\n progress (bool): If True, displays a progress bar 
of the download to stderr\n \"\"\"\n return _resnet(\"resnet200\", ResNetBottleneck, [3, 24, 36, 3], get_inplanes(), pretrained, progress, **kwargs)\n", "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.data import decollate_batch\nfrom monai.metrics import ROCAUCMetric, compute_roc_auc\nfrom monai.transforms import Activations, AsDiscrete, Compose, ToTensor\n\nTEST_CASE_1 = [\n torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]),\n torch.tensor([[0], [1], [0], [1]]),\n True,\n True,\n \"macro\",\n 0.75,\n]\n\nTEST_CASE_2 = [\n torch.tensor([[0.5], [0.5], [0.2], [8.3]]),\n torch.tensor([[0], [1], [0], [1]]),\n False,\n False,\n \"macro\",\n 0.875,\n]\n\nTEST_CASE_3 = [\n torch.tensor([[0.5], [0.5], [0.2], [8.3]]),\n torch.tensor([0, 1, 0, 1]),\n False,\n False,\n \"macro\",\n 0.875,\n]\n\nTEST_CASE_4 = [\n torch.tensor([0.5, 0.5, 0.2, 8.3]),\n torch.tensor([0, 1, 0, 1]),\n False,\n False,\n \"macro\",\n 0.875,\n]\n\nTEST_CASE_5 = [\n torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]),\n torch.tensor([[0], [1], [0], [1]]),\n True,\n True,\n \"none\",\n [0.75, 0.75],\n]\n\nTEST_CASE_6 = [\n torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]),\n torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]),\n True,\n False,\n \"weighted\",\n 0.56667,\n]\n\nTEST_CASE_7 = [\n 
torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]),\n torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]),\n True,\n False,\n \"micro\",\n 0.62,\n]\n\n\nclass TestComputeROCAUC(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7])\n def test_value(self, y_pred, y, softmax, to_onehot, average, expected_value):\n y_pred_trans = Compose([ToTensor(), Activations(softmax=softmax)])\n y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=to_onehot, n_classes=2)])\n y_pred = torch.stack([y_pred_trans(i) for i in decollate_batch(y_pred)], dim=0)\n y = torch.stack([y_trans(i) for i in decollate_batch(y)], dim=0)\n result = compute_roc_auc(y_pred=y_pred, y=y, average=average)\n np.testing.assert_allclose(expected_value, result, rtol=1e-5)\n\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7])\n def test_class_value(self, y_pred, y, softmax, to_onehot, average, expected_value):\n y_pred_trans = Compose([ToTensor(), Activations(softmax=softmax)])\n y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=to_onehot, n_classes=2)])\n y_pred = [y_pred_trans(i) for i in decollate_batch(y_pred)]\n y = [y_trans(i) for i in decollate_batch(y)]\n metric = ROCAUCMetric(average=average)\n metric(y_pred=y_pred, y=y)\n result = metric.aggregate()\n metric.reset()\n np.testing.assert_allclose(expected_value, result, rtol=1e-5)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom typing import Any, List, Optional\n\nimport torch\n\nfrom monai.config import TensorOrList\nfrom monai.utils import evenly_divisible_all_gather\n\n\nclass Metric(ABC):\n \"\"\"\n Base class of all Metrics interface.\n `__call__` is designed to execute metric computation.\n\n \"\"\"\n\n @abstractmethod\n def __call__(self, *args: Any, **kwds: Any):\n \"\"\"\n API to execute the metric computation.\n\n \"\"\"\n raise NotImplementedError(f\"Subclass {self.__class__.__name__} must implement this method.\")\n\n\nclass IterationMetric(Metric):\n \"\"\"\n Base class of Metrics interface for computation on a batch of tensors, usually the data of 1 iteration.\n `__call__` is supposed to compute independent logic for several samples of `y_pred` and `y`(optional).\n Ususally, subclass only needs to implement the `_compute_tensor` function for computation process.\n The input data shape should be `list of channel-first tensors` or a `batch-first tensor`.\n\n \"\"\"\n\n def __call__(self, y_pred: TensorOrList, y: Optional[TensorOrList] = None): # type: ignore\n \"\"\"\n Execute basic computation for model prediction and ground truth.\n It can support both `list of channel-first Tensor` and `batch-first Tensor`.\n And users can execute on every batch of data, then accumulate the results, or\n accumulate the original `y_pred` and `y`, then execute on the accumulated data.\n\n Args:\n y_pred: the model prediction data to compute, must be a list of `channel-first` Tensor\n or a `batch-first` Tensor.\n y: the ground truth to compute, must be a list of `channel-first` Tensor\n or a `batch-first` Tensor.\n\n \"\"\"\n ret: TensorOrList\n if isinstance(y_pred, (list, tuple)) or isinstance(y, (list, tuple)):\n # if y_pred or y is a list of channel-first data, add batch dim and compute metric\n ret = 
self._compute_list(y_pred, y)\n elif isinstance(y_pred, torch.Tensor):\n y_ = y.detach() if y is not None and isinstance(y, torch.Tensor) else None\n ret = self._compute_tensor(y_pred.detach(), y_)\n else:\n raise ValueError(\"y_pred or y must be a list of `channel-first` Tensors or a `batch-first` Tensor.\")\n\n return ret\n\n def _compute_list(self, y_pred: TensorOrList, y: Optional[TensorOrList] = None):\n \"\"\"\n Excute the computation for the y_pred and y items of a iteration, the data is in the list shape.\n Will concat the results to guarantee the output shape of ret is BCHW[D], otherwise it's list of batch-first,\n which is against our principle that data in metrics should be BCHW[D] or list of channel-first.\n Note: subclass may enhance the operation with multi-threads to accelerate.\n\n \"\"\"\n ret: TensorOrList\n if y is not None:\n ret = [self._compute_tensor(p.detach().unsqueeze(0), y_.detach().unsqueeze(0)) for p, y_ in zip(y_pred, y)]\n else:\n ret = [self._compute_tensor(p_.detach().unsqueeze(0), None) for p_ in y_pred]\n # concat the list of results\n if isinstance(ret[0], torch.Tensor):\n ret = torch.cat(ret, dim=0)\n elif isinstance(ret[0], (list, tuple)) and all([isinstance(i, torch.Tensor) for i in ret[0]]):\n # if _compute_tensor() returned not only 1 Tensor, concat them separately\n ret = [torch.cat([k[i] for k in ret], dim=0) for i in range(len(ret[0]))]\n\n return ret\n\n @abstractmethod\n def _compute_tensor(self, y_pred: torch.Tensor, y: Optional[torch.Tensor] = None):\n \"\"\"\n computation logic for the y_pred and y of a iteration, the data should be `batch-first` Tensors.\n Every subclass metric should implement its own computation logic according to its algorithm.\n\n \"\"\"\n raise NotImplementedError(f\"Subclass {self.__class__.__name__} must implement this method.\")\n\n\nclass Cumulative(ABC):\n \"\"\"\n Utility class for the typical cumulative computation process based on PyTorch Tensors.\n It cumulates tensors in the buffer, 
then sync across distributed ranks and aggregate.\n\n To speed up computation with multi-processing, PyTorch programs usually split data to distributed ranks\n by `DistributedSampler` before an epoch, every rank then computes only based on its own data part and\n `add` to the buffers in its process. Eventually, sync the values of all ranks to compute the final results.\n\n Note: the data list should have the same length every time calling `add()` in a round,\n it will automatically create buffers according to the length of data list.\n\n Typically, this class is expected to execute the steps referring to below examples::\n\n cum = Cumulative()\n cum.add(x, y)\n cum.add(a, b)\n cum.add(c, d)\n cum.agrregate()\n result = cum.get_buffer()\n cum.reset()\n\n \"\"\"\n\n def __init__(self):\n self.buffer_num: int = 0\n self._buffers: Optional[List[List[torch.Tensor]]] = None\n self._synced_tensors: Optional[List[Optional[torch.Tensor]]] = None\n self._synced: bool = False\n\n def reset(self):\n \"\"\"\n Reset the buffers for cumulative tensors and the synced results.\n\n \"\"\"\n self._buffers = None\n self._synced_tensors = None\n self._synced = False\n\n def add(self, *data: torch.Tensor):\n \"\"\"\n Add samples to the cumulative buffers.\n\n Args:\n data: list of input tensor, make sure the input data order is always the same in a round.\n every item of data will be added to the corresponding buffer.\n\n \"\"\"\n data_len = len(data)\n if self._buffers is None:\n self._buffers = [[] for _ in range(data_len)]\n elif len(self._buffers) != data_len:\n raise ValueError(f\"data length: {data_len} doesn't match buffers length: {len(self._buffers)}.\")\n if self._synced_tensors is None:\n self._synced_tensors = [None for _ in range(data_len)]\n\n for i, d in enumerate(data):\n if not isinstance(d, torch.Tensor):\n raise ValueError(f\"the data to cumulate in a buffer must be PyTorch Tensor, but got: {type(d)}.\")\n self._buffers[i].append(d)\n self._synced = False\n\n 
@abstractmethod\n def aggregate(self, *args: Any, **kwds: Any):\n \"\"\"\n Aggregate final results based on the buffers.\n\n \"\"\"\n raise NotImplementedError(f\"Subclass {self.__class__.__name__} must implement this method.\")\n\n def _sync(self):\n \"\"\"\n All gather the buffers across distributed ranks for aggregating.\n Every buffer will be concatenated as a PyTorch Tensor.\n\n \"\"\"\n self._synced_tensors = [evenly_divisible_all_gather(torch.cat(b, dim=0), concat=True) for b in self._buffers]\n self._synced = True\n\n def get_buffer(self):\n \"\"\"\n Get the synced buffers list.\n A typical usage is to generate the metrics report based on the raw metric details.\n\n \"\"\"\n if not self._synced:\n self._sync()\n return self._synced_tensors[0] if len(self._synced_tensors) == 1 else self._synced_tensors\n\n\nclass CumulativeIterationMetric(Cumulative, IterationMetric):\n \"\"\"\n Base class of cumulative metric which computes on batch data of every iteration and aggregate.\n Typically, it computes some intermediate results for every iteration, cumulates in buffers,\n then syncs across all the distributed ranks and aggregates for the final result when epoch completed.\n\n \"\"\"\n\n def __call__(self, y_pred: TensorOrList, y: Optional[TensorOrList] = None): # type: ignore\n \"\"\"\n Execute basic computation for model prediction and ground truth.\n It can support both `list of channel-first Tensor` and `batch-first Tensor`.\n Users call this API to execute computation on every batch of data, then accumulate the results,\n or accumulate the original `y_pred` and `y`, then execute on the accumulated data.\n\n Args:\n y_pred: the model prediction data to compute, must be a list of `channel-first` Tensor\n or a `batch-first` Tensor.\n y: the ground truth to compute, must be a list of `channel-first` Tensor\n or a `batch-first` Tensor.\n\n \"\"\"\n ret = super().__call__(y_pred=y_pred, y=y)\n if isinstance(ret, (tuple, list)):\n self.add(*ret)\n else:\n 
self.add(ret)\n\n return ret\n", "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.transforms import DivisiblePad\n\n# pad first dim to be divisible by 7, the second unchanged.\nTEST_CASE_1 = [\n {\"k\": (7, -1), \"mode\": \"constant\"},\n np.zeros((3, 8, 7)),\n np.zeros((3, 14, 7)),\n]\n\n# pad all dimensions to be divisible by 5\nTEST_CASE_2 = [\n {\"k\": 5, \"mode\": \"constant\"},\n np.zeros((3, 10, 5, 17)),\n np.zeros((3, 10, 5, 20)),\n]\n\n\nclass TestDivisiblePad(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2])\n def test_pad_shape(self, input_param, input_data, expected_val):\n padder = DivisiblePad(**input_param)\n result = padder(input_data)\n self.assertAlmostEqual(result.shape, expected_val.shape)\n result = padder(input_data, mode=input_param[\"mode\"])\n self.assertAlmostEqual(result.shape, expected_val.shape)\n\n def test_pad_kwargs(self):\n padder = DivisiblePad(k=5, mode=\"constant\", constant_values=((0, 0), (1, 1), (2, 2)))\n result = padder(np.zeros((3, 8, 4)))\n np.testing.assert_allclose(result[:, :1, :4], np.ones((3, 1, 4)))\n np.testing.assert_allclose(result[:, :, 4:5], np.ones((3, 10, 1)) + 1)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.nn.Sequential", "torch.cat", "torch.nn.Linear", "torch.nn.functional.avg_pool3d", "torch.nn.ReLU", "torch.as_tensor" ], [ "numpy.testing.assert_allclose", "torch.tensor" ], [ "torch.cat" ], [ "numpy.zeros", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mohsinkhn/tpu
[ "d90362f8b42432763f45d57f541390bc46cd703d", "d90362f8b42432763f45d57f541390bc46cd703d", "d90362f8b42432763f45d57f541390bc46cd703d" ]
[ "models/official/retinanet/retinanet_main.py", "models/experimental/show_and_tell/show_and_tell_tpu_test.py", "models/experimental/mask_rcnn/box_utils.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Training script for RetinaNet.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl import app\nfrom absl import flags\nimport numpy as np\nimport tensorflow as tf\n\nimport dataloader\nimport retinanet_model\n\n# Cloud TPU Cluster Resolvers\nflags.DEFINE_string(\n 'tpu',\n default=None,\n help='The Cloud TPU to use for training. This should be either the name '\n 'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '\n 'url.')\nflags.DEFINE_string(\n 'gcp_project',\n default=None,\n help='Project name for the Cloud TPU-enabled project. If not specified, we '\n 'will attempt to automatically detect the GCE project from metadata.')\nflags.DEFINE_string(\n 'tpu_zone',\n default=None,\n help='GCE zone where the Cloud TPU is located in. If not specified, we '\n 'will attempt to automatically detect the GCE project from metadata.')\n\n# Model specific paramenters\nflags.DEFINE_string(\n 'eval_master',\n default='',\n help='GRPC URL of the eval master. Set to an appropiate value when running '\n 'on CPU/GPU')\nflags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than CPUs')\nflags.DEFINE_bool(\n 'use_xla', False,\n 'Use XLA even if use_tpu is false. 
If use_tpu is true, we always use XLA, '\n 'and this flag has no effect.')\nflags.DEFINE_string('model_dir', None, 'Location of model_dir')\nflags.DEFINE_string(\n 'resnet_checkpoint', '',\n 'Location of the ResNet50 checkpoint to use for model '\n 'initialization.')\nflags.DEFINE_string('hparams', '',\n 'Comma separated k=v pairs of hyperparameters.')\nflags.DEFINE_integer(\n 'num_cores', default=8, help='Number of TPU cores for training')\nflags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')\nflags.DEFINE_integer(\n 'num_cores_per_replica',\n default=8,\n help='Number of TPU cores per'\n 'replica when using spatial partition.')\nflags.DEFINE_multi_integer(\n 'input_partition_dims', [1, 4, 2, 1],\n 'A list that describes the partition dims for all the tensors.')\nflags.DEFINE_integer('train_batch_size', 64, 'training batch size')\nflags.DEFINE_integer('eval_batch_size', 1, 'evaluation batch size')\nflags.DEFINE_integer('eval_samples', 5000, 'The number of samples for '\n 'evaluation.')\nflags.DEFINE_integer('iterations_per_loop', 100,\n 'Number of iterations per TPU training loop')\nflags.DEFINE_string(\n 'training_file_pattern', None,\n 'Glob for training data files (e.g., COCO train - minival set)')\nflags.DEFINE_string('validation_file_pattern', None,\n 'Glob for evaluation tfrecords (e.g., COCO val2017 set)')\nflags.DEFINE_string('val_json_file', None,\n 'COCO validation JSON containing golden bounding boxes.')\nflags.DEFINE_integer('num_examples_per_epoch', 120000,\n 'Number of examples in one epoch')\nflags.DEFINE_integer('num_epochs', 15, 'Number of epochs for training')\nflags.DEFINE_string('mode', 'train',\n 'Mode to run: train or eval (default: train)')\nflags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '\n 'training finishes.')\n\n# For Eval mode\nflags.DEFINE_integer('min_eval_interval', 180,\n 'Minimum seconds between evaluations.')\nflags.DEFINE_integer(\n 'eval_timeout', None,\n 'Maximum seconds between 
checkpoints before evaluation terminates.')\n\nFLAGS = flags.FLAGS\n\n\ndef serving_input_fn(image_size):\n \"\"\"Input function for SavedModels and TF serving.\"\"\"\n\n def _decode_and_crop(img_bytes):\n img = tf.image.decode_jpeg(img_bytes)\n img = tf.image.resize_image_with_crop_or_pad(img, image_size, image_size)\n img = tf.image.convert_image_dtype(img, tf.float32)\n return img\n\n image_bytes_list = tf.placeholder(shape=[None], dtype=tf.string)\n images = tf.map_fn(\n _decode_and_crop, image_bytes_list, back_prop=False, dtype=tf.float32)\n images = tf.reshape(images, [-1, image_size, image_size, 3])\n return tf.estimator.export.TensorServingInputReceiver(\n images, {'image_bytes': image_bytes_list})\n\n\ndef main(argv):\n del argv # Unused.\n\n if FLAGS.use_tpu:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n tpu_grpc_url = tpu_cluster_resolver.get_master()\n tf.Session.reset(tpu_grpc_url)\n else:\n tpu_cluster_resolver = None\n\n # Check data path\n if FLAGS.mode in ('train',\n 'train_and_eval') and FLAGS.training_file_pattern is None:\n raise RuntimeError('You must specify --training_file_pattern for training.')\n if FLAGS.mode in ('eval', 'train_and_eval'):\n if FLAGS.validation_file_pattern is None:\n raise RuntimeError('You must specify --validation_file_pattern '\n 'for evaluation.')\n if FLAGS.val_json_file is None:\n raise RuntimeError('You must specify --val_json_file for evaluation.')\n\n # Parse hparams\n hparams = retinanet_model.default_hparams()\n hparams.parse(FLAGS.hparams)\n\n # The following is for spatial partitioning. `features` has one tensor while\n # `labels` had 4 + (`max_level` - `min_level` + 1) * 2 tensors. 
The input\n # partition is performed on `features` and all partitionable tensors of\n # `labels`, see the partition logic below.\n # In the TPUEstimator context, the meaning of `shard` and `replica` is the\n # same; follwing the API, here has mixed use of both.\n if FLAGS.use_spatial_partition:\n # Checks input_partition_dims agrees with num_cores_per_replica.\n if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):\n raise RuntimeError('--num_cores_per_replica must be a product of array'\n 'elements in --input_partition_dims.')\n\n labels_partition_dims = {\n 'mean_num_positives': None,\n 'source_ids': None,\n 'groundtruth_data': None,\n 'image_scales': None,\n }\n # The Input Partition Logic: We partition only the partition-able tensors.\n # Spatial partition requires that the to-be-partitioned tensors must have a\n # dimension that is a multiple of `partition_dims`. Depending on the\n # `partition_dims` and the `image_size` and the `max_level` in hparams, some\n # high-level anchor labels (i.e., `cls_targets` and `box_targets`) cannot\n # be partitioned. For example, when `partition_dims` is [1, 4, 2, 1], image\n # size is 1536, `max_level` is 9, `cls_targets_8` has a shape of\n # [batch_size, 6, 6, 9], which cannot be partitioned (6 % 4 != 0). 
In this\n # case, the level-8 and level-9 target tensors are not partition-able, and\n # the highest partition-able level is 7.\n image_size = hparams.get('image_size')\n for level in range(hparams.get('min_level'), hparams.get('max_level') + 1):\n\n def _can_partition(spatial_dim):\n partitionable_index = np.where(\n spatial_dim % np.array(FLAGS.input_partition_dims) == 0)\n return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)\n\n spatial_dim = image_size // (2**level)\n if _can_partition(spatial_dim):\n labels_partition_dims['box_targets_%d' %\n level] = FLAGS.input_partition_dims\n labels_partition_dims['cls_targets_%d' %\n level] = FLAGS.input_partition_dims\n else:\n labels_partition_dims['box_targets_%d' % level] = None\n labels_partition_dims['cls_targets_%d' % level] = None\n\n num_cores_per_replica = FLAGS.num_cores_per_replica\n input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]\n num_shards = FLAGS.num_cores // num_cores_per_replica\n else:\n num_cores_per_replica = None\n input_partition_dims = None\n num_shards = FLAGS.num_cores\n\n params = dict(\n hparams.values(),\n num_shards=num_shards,\n num_examples_per_epoch=FLAGS.num_examples_per_epoch,\n use_tpu=FLAGS.use_tpu,\n resnet_checkpoint=FLAGS.resnet_checkpoint,\n val_json_file=FLAGS.val_json_file,\n mode=FLAGS.mode,\n )\n config_proto = tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=False)\n if FLAGS.use_xla and not FLAGS.use_tpu:\n config_proto.graph_options.optimizer_options.global_jit_level = (\n tf.OptimizerOptions.ON_1)\n\n tpu_config = tf.contrib.tpu.TPUConfig(\n FLAGS.iterations_per_loop,\n num_shards=num_shards,\n num_cores_per_replica=num_cores_per_replica,\n input_partition_dims=input_partition_dims,\n per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig\n .PER_HOST_V2)\n\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n evaluation_master=FLAGS.eval_master,\n model_dir=FLAGS.model_dir,\n 
log_step_count_steps=FLAGS.iterations_per_loop,\n session_config=config_proto,\n tpu_config=tpu_config,\n )\n\n # TPU Estimator\n if FLAGS.mode == 'train':\n tf.logging.info(params)\n train_estimator = tf.contrib.tpu.TPUEstimator(\n model_fn=retinanet_model.retinanet_model_fn,\n use_tpu=FLAGS.use_tpu,\n train_batch_size=FLAGS.train_batch_size,\n config=run_config,\n params=params)\n train_estimator.train(\n input_fn=dataloader.InputReader(\n FLAGS.training_file_pattern, is_training=True),\n max_steps=int((FLAGS.num_epochs * FLAGS.num_examples_per_epoch) /\n FLAGS.train_batch_size))\n\n # Run evaluation after training finishes.\n eval_params = dict(\n params,\n use_tpu=False,\n input_rand_hflip=False,\n resnet_checkpoint=None,\n is_training_bn=False,\n use_bfloat16=False,\n )\n eval_estimator = tf.contrib.tpu.TPUEstimator(\n model_fn=retinanet_model.retinanet_model_fn,\n use_tpu=False,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n config=run_config,\n params=eval_params)\n if FLAGS.eval_after_training:\n\n if FLAGS.val_json_file is None:\n raise RuntimeError('You must specify --val_json_file for evaluation.')\n\n eval_results = eval_estimator.evaluate(\n input_fn=dataloader.InputReader(\n FLAGS.validation_file_pattern, is_training=False),\n steps=FLAGS.eval_samples // FLAGS.eval_batch_size)\n tf.logging.info('Eval results: %s' % eval_results)\n if FLAGS.model_dir:\n eval_estimator.export_saved_model(\n export_dir_base=FLAGS.model_dir,\n serving_input_receiver_fn=lambda: serving_input_fn(hparams.image_size)\n )\n\n elif FLAGS.mode == 'eval':\n # Eval only runs on CPU or GPU host with batch_size = 1.\n # Override the default options: disable randomization in the input pipeline\n # and don't run on the TPU.\n # Also, disable use_bfloat16 for eval on CPU/GPU.\n eval_params = dict(\n params,\n use_tpu=False,\n input_rand_hflip=False,\n resnet_checkpoint=None,\n is_training_bn=False,\n use_bfloat16=False,\n )\n\n eval_estimator = 
tf.contrib.tpu.TPUEstimator(\n model_fn=retinanet_model.retinanet_model_fn,\n use_tpu=False,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n config=run_config,\n params=eval_params)\n\n def terminate_eval():\n tf.logging.info('Terminating eval after %d seconds of no checkpoints' %\n FLAGS.eval_timeout)\n return True\n\n # Run evaluation when there's a new checkpoint\n for ckpt in tf.contrib.training.checkpoints_iterator(\n FLAGS.model_dir,\n min_interval_secs=FLAGS.min_eval_interval,\n timeout=FLAGS.eval_timeout,\n timeout_fn=terminate_eval):\n\n tf.logging.info('Starting to evaluate.')\n try:\n eval_results = eval_estimator.evaluate(\n input_fn=dataloader.InputReader(\n FLAGS.validation_file_pattern, is_training=False),\n steps=FLAGS.eval_samples // FLAGS.eval_batch_size)\n tf.logging.info('Eval results: %s' % eval_results)\n\n # Terminate eval job when final checkpoint is reached\n current_step = int(os.path.basename(ckpt).split('-')[1])\n total_step = int((FLAGS.num_epochs * FLAGS.num_examples_per_epoch) /\n FLAGS.train_batch_size)\n if current_step >= total_step:\n tf.logging.info(\n 'Evaluation finished after training step %d' % current_step)\n break\n eval_estimator.export_saved_model(\n export_dir_base=FLAGS.model_dir,\n serving_input_receiver_fn=\n lambda: serving_input_fn(hparams.image_size))\n\n except tf.errors.NotFoundError:\n # Since the coordinator is on a different job than the TPU worker,\n # sometimes the TPU worker does not finish initializing until long after\n # the CPU job tells it to start evaluating. In this case, the checkpoint\n # file could have been deleted already.\n tf.logging.info(\n 'Checkpoint %s no longer exists, skipping checkpoint' % ckpt)\n\n elif FLAGS.mode == 'train_and_eval':\n for cycle in range(FLAGS.num_epochs):\n tf.logging.info('Starting training cycle, epoch: %d.' 
% cycle)\n train_estimator = tf.contrib.tpu.TPUEstimator(\n model_fn=retinanet_model.retinanet_model_fn,\n use_tpu=FLAGS.use_tpu,\n train_batch_size=FLAGS.train_batch_size,\n config=run_config,\n params=params)\n train_estimator.train(\n input_fn=dataloader.InputReader(\n FLAGS.training_file_pattern, is_training=True),\n steps=int(FLAGS.num_examples_per_epoch / FLAGS.train_batch_size))\n\n tf.logging.info('Starting evaluation cycle, epoch: %d.' % cycle)\n # Run evaluation after every epoch.\n eval_params = dict(\n params,\n use_tpu=False,\n input_rand_hflip=False,\n resnet_checkpoint=None,\n is_training_bn=False,\n )\n\n eval_estimator = tf.contrib.tpu.TPUEstimator(\n model_fn=retinanet_model.retinanet_model_fn,\n use_tpu=False,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n config=run_config,\n params=eval_params)\n eval_results = eval_estimator.evaluate(\n input_fn=dataloader.InputReader(\n FLAGS.validation_file_pattern, is_training=False),\n steps=FLAGS.eval_samples // FLAGS.eval_batch_size)\n tf.logging.info('Evaluation results: %s' % eval_results)\n eval_estimator.export_saved_model(\n export_dir_base=FLAGS.model_dir,\n serving_input_receiver_fn=lambda: serving_input_fn(hparams.image_size))\n\n else:\n tf.logging.info('Mode not found.')\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n app.run(main)\n", "# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Test show-and-tell model is TPU compatible.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\n\n# Standard Imports\nimport numpy as np\nimport tensorflow.google as tf\n\nimport configuration\nimport show_and_tell_model\n\ntpu = tf.contrib.tpu\n\n\[email protected]\ndef _reset_for_test():\n tf.reset_default_graph()\n yield tf.Session('')\n\n\nclass ShowAndTellTPUTest(tf.test.TestCase):\n\n def testCallModelFnWithPlaceholders(self):\n with _reset_for_test() as session:\n config = configuration.ModelConfig()\n model = show_and_tell_model.ShowAndTellModel(config, mode='train')\n\n def model_fn(images, input_seq, target_seq, input_mask):\n model.build_model_for_tpu(images, input_seq, target_seq, input_mask)\n return model.total_loss\n\n images = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))\n input_seq = tf.placeholder(tf.int32, shape=(1, 128))\n target_seq = tf.placeholder(tf.int32, shape=(1, 128))\n input_mask = tf.placeholder(tf.int32, shape=(1, 128))\n\n tpu_model_fn = tpu.rewrite(model_fn,\n [images, input_seq, target_seq, input_mask])\n caption = np.random.randint(low=0, high=1000, size=128).reshape((1, 128))\n session.run(tpu.initialize_system())\n session.run(tf.global_variables_initializer())\n inputs = {\n 
images: np.random.randn(1, 224, 224, 3),\n input_seq: caption,\n target_seq: caption,\n input_mask: np.random.random_integers(0, 1, size=128).reshape(1, 128),\n }\n session.run(tpu_model_fn, inputs)\n session.run(tpu.shutdown_system())\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.test.main()\n", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Util functions to manipulate boxes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Standard Imports\n\nimport numpy as np\nimport tensorflow as tf\n\n\nBBOX_XFORM_CLIP = np.log(1000. / 16.)\nNMS_TILE_SIZE = 512\n\n\ndef bbox_overlap(boxes, gt_boxes):\n \"\"\"Calculates the overlap between proposal and ground truth boxes.\n\n Some `gt_boxes` may have been padded. The returned `iou` tensor for these\n boxes will be -1.\n\n Args:\n boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of\n proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The\n last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form.\n gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. 
This\n tensor might have paddings with a negative value.\n Returns:\n iou: a tensor with as a shape of [batch_size, N, MAX_NUM_INSTANCES].\n \"\"\"\n with tf.name_scope('bbox_overlap'):\n bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(\n value=boxes, num_or_size_splits=4, axis=2)\n gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(\n value=gt_boxes, num_or_size_splits=4, axis=2)\n\n # Calculates the intersection area.\n i_xmin = tf.maximum(bb_x_min, tf.transpose(gt_x_min, [0, 2, 1]))\n i_xmax = tf.minimum(bb_x_max, tf.transpose(gt_x_max, [0, 2, 1]))\n i_ymin = tf.maximum(bb_y_min, tf.transpose(gt_y_min, [0, 2, 1]))\n i_ymax = tf.minimum(bb_y_max, tf.transpose(gt_y_max, [0, 2, 1]))\n i_area = tf.maximum((i_xmax - i_xmin), 0) * tf.maximum((i_ymax - i_ymin), 0)\n\n # Calculates the union area.\n bb_area = (bb_y_max - bb_y_min) * (bb_x_max - bb_x_min)\n gt_area = (gt_y_max - gt_y_min) * (gt_x_max - gt_x_min)\n # Adds a small epsilon to avoid divide-by-zero.\n u_area = bb_area + tf.transpose(gt_area, [0, 2, 1]) - i_area + 1e-8\n\n # Calculates IoU.\n iou = i_area / u_area\n\n # Fills -1 for padded ground truth boxes.\n padding_mask = tf.less(i_xmin, tf.zeros_like(i_xmin))\n iou = tf.where(padding_mask, -tf.ones_like(iou), iou)\n\n return iou\n\n\ndef top_k(scores, k, boxes_list):\n \"\"\"A wrapper that returns top-k scores and correponding boxes.\n\n This functions selects the top-k scores and boxes as follows.\n\n indices = argsort(scores)[:k]\n scores = scores[indices]\n outputs = []\n for boxes in boxes_list:\n outputs.append(boxes[indices, :])\n return scores, outputs\n\n Args:\n scores: a tensor with a shape of [batch_size, N]. N is the number of scores.\n k: an integer for selecting the top-k elements.\n boxes_list: a list containing at least one element. 
Each element has a shape\n of [batch_size, N, 4].\n Returns:\n scores: the selected top-k scores with a shape of [batch_size, k].\n outputs: the list containing the corresponding boxes in the order of the\n input `boxes_list`.\n \"\"\"\n assert isinstance(boxes_list, list)\n assert boxes_list\n\n with tf.name_scope('top_k_wrapper'):\n scores, top_k_indices = tf.nn.top_k(scores, k=k)\n batch_size, _ = scores.get_shape().as_list()\n outputs = []\n for boxes in boxes_list:\n boxes_index_offsets = tf.range(batch_size) * tf.shape(boxes)[1]\n boxes_indices = tf.reshape(top_k_indices +\n tf.expand_dims(boxes_index_offsets, 1), [-1])\n boxes = tf.reshape(\n tf.gather(tf.reshape(boxes, [-1, 4]), boxes_indices),\n [batch_size, -1, 4])\n outputs.append(boxes)\n return scores, outputs\n\n\ndef _self_suppression(iou, _, iou_sum):\n batch_size = tf.shape(iou)[0]\n can_suppress_others = tf.cast(\n tf.reshape(tf.reduce_max(iou, 1) <= 0.5, [batch_size, -1, 1]), iou.dtype)\n iou_suppressed = tf.reshape(\n tf.cast(tf.reduce_max(can_suppress_others * iou, 1) <= 0.5, iou.dtype),\n [batch_size, -1, 1]) * iou\n iou_sum_new = tf.reduce_sum(iou_suppressed, [1, 2])\n return [\n iou_suppressed,\n tf.reduce_any(iou_sum - iou_sum_new > 0.5), iou_sum_new\n ]\n\n\ndef _cross_suppression(boxes, box_slice, iou_threshold, inner_idx):\n batch_size = tf.shape(boxes)[0]\n new_slice = tf.slice(boxes, [0, inner_idx * NMS_TILE_SIZE, 0],\n [batch_size, NMS_TILE_SIZE, 4])\n iou = bbox_overlap(new_slice, box_slice)\n ret_slice = tf.expand_dims(\n tf.cast(tf.reduce_all(iou < iou_threshold, [1]), box_slice.dtype),\n 2) * box_slice\n return boxes, ret_slice, iou_threshold, inner_idx + 1\n\n\ndef _suppression_loop_body(boxes, iou_threshold, output_size, idx):\n \"\"\"Process boxes in the range [idx*NMS_TILE_SIZE, (idx+1)*NMS_TILE_SIZE).\n\n Args:\n boxes: a tensor with a shape of [batch_size, anchors, 4].\n iou_threshold: a float representing the threshold for deciding whether boxes\n overlap too much with 
respect to IOU.\n output_size: an int32 tensor of size [batch_size]. Representing the number\n of selected boxes for each batch.\n idx: an integer scalar representing induction variable.\n\n Returns:\n boxes: updated boxes.\n iou_threshold: pass down iou_threshold to the next iteration.\n output_size: the updated output_size.\n idx: the updated induction variable.\n \"\"\"\n num_tiles = tf.shape(boxes)[1] // NMS_TILE_SIZE\n batch_size = tf.shape(boxes)[0]\n\n # Iterates over tiles that can possibly suppress the current tile.\n box_slice = tf.slice(boxes, [0, idx * NMS_TILE_SIZE, 0],\n [batch_size, NMS_TILE_SIZE, 4])\n _, box_slice, _, _ = tf.while_loop(\n lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,\n _cross_suppression, [boxes, box_slice, iou_threshold,\n tf.constant(0)])\n\n # Iterates over the current tile to compute self-suppression.\n iou = bbox_overlap(box_slice, box_slice)\n mask = tf.expand_dims(\n tf.reshape(tf.range(NMS_TILE_SIZE), [1, -1]) > tf.reshape(\n tf.range(NMS_TILE_SIZE), [-1, 1]), 0)\n iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype)\n suppressed_iou, _, _ = tf.while_loop(\n lambda _iou, loop_condition, _iou_sum: loop_condition, _self_suppression,\n [iou, tf.constant(True),\n tf.reduce_sum(iou, [1, 2])])\n suppressed_box = tf.reduce_sum(suppressed_iou, 1) > 0\n box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2)\n\n # Uses box_slice to update the input boxes.\n mask = tf.reshape(\n tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1])\n boxes = tf.tile(tf.expand_dims(\n box_slice, [1]), [1, num_tiles, 1, 1]) * mask + tf.reshape(\n boxes, [batch_size, num_tiles, NMS_TILE_SIZE, 4]) * (1 - mask)\n boxes = tf.reshape(boxes, [batch_size, -1, 4])\n\n # Updates output_size.\n output_size += tf.reduce_sum(\n tf.cast(tf.reduce_any(box_slice > 0, [2]), tf.int32), [1])\n return boxes, iou_threshold, output_size, idx + 1\n\n\ndef 
sorted_non_max_suppression_padded(scores,\n boxes,\n max_output_size,\n iou_threshold):\n \"\"\"A wrapper that handles non-maximum suppression.\n\n Assumption:\n * The boxes are sorted by scores unless the box is a dot (all coordinates\n are zero).\n * Boxes with higher scores can be used to suppress boxes with lower scores.\n\n The overal design of the algorithm is to handle boxes tile-by-tile:\n\n boxes = boxes.pad_to_multiply_of(tile_size)\n num_tiles = len(boxes) // tile_size\n output_boxes = []\n for i in range(num_tiles):\n box_tile = boxes[i*tile_size : (i+1)*tile_size]\n for j in range(i - 1):\n suppressing_tile = boxes[j*tile_size : (j+1)*tile_size]\n iou = bbox_overlap(box_tile, suppressing_tile)\n # if the box is suppressed in iou, clear it to a dot\n box_tile *= _update_boxes(iou)\n # Iteratively handle the diagnal tile.\n iou = _box_overlap(box_tile, box_tile)\n iou_changed = True\n while iou_changed:\n # boxes that are not suppressed by anything else\n suppressing_boxes = _get_suppressing_boxes(iou)\n # boxes that are suppressed by suppressing_boxes\n suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes)\n # clear iou to 0 for boxes that are suppressed, as they cannot be used\n # to suppress other boxes any more\n new_iou = _clear_iou(iou, suppressed_boxes)\n iou_changed = (new_iou != iou)\n iou = new_iou\n # remaining boxes that can still suppress others, are selected boxes.\n output_boxes.append(_get_suppressing_boxes(iou))\n if len(output_boxes) >= max_output_size:\n break\n\n Args:\n scores: a tensor with a shape of [batch_size, anchors].\n boxes: a tensor with a shape of [batch_size, anchors, 4].\n max_output_size: a scalar integer `Tensor` representing the maximum number\n of boxes to be selected by non max suppression.\n iou_threshold: a float representing the threshold for deciding whether boxes\n overlap too much with respect to IOU.\n\n Returns:\n nms_scores: a tensor with a shape of [batch_size, anchors]. 
It has same\n dtype as input scores.\n nms_proposals: a tensor with a shape of [batch_size, anchors, 4]. It has\n same dtype as input boxes.\n \"\"\"\n batch_size = tf.shape(boxes)[0]\n num_boxes = tf.shape(boxes)[1]\n pad = tf.cast(\n tf.ceil(tf.cast(num_boxes, tf.float32) / NMS_TILE_SIZE),\n tf.int32) * NMS_TILE_SIZE - num_boxes\n boxes = tf.pad(tf.cast(boxes, tf.float32), [[0, 0], [0, pad], [0, 0]])\n scores = tf.pad(tf.cast(scores, tf.float32), [[0, 0], [0, pad]])\n num_boxes += pad\n\n def _loop_cond(unused_boxes, unused_threshold, output_size, idx):\n return tf.logical_and(\n tf.reduce_min(output_size) < max_output_size,\n idx < num_boxes // NMS_TILE_SIZE)\n\n selected_boxes, _, output_size, _ = tf.while_loop(\n _loop_cond, _suppression_loop_body, [\n boxes, iou_threshold,\n tf.zeros([batch_size], tf.int32),\n tf.constant(0)\n ])\n idx = num_boxes - tf.cast(\n tf.nn.top_k(\n tf.cast(tf.reduce_any(selected_boxes > 0, [2]), tf.int32) *\n tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0],\n tf.int32)\n idx = tf.minimum(idx, num_boxes - 1)\n idx = tf.reshape(\n idx + tf.reshape(tf.range(batch_size) * num_boxes, [-1, 1]), [-1])\n boxes = tf.reshape(\n tf.gather(tf.reshape(boxes, [-1, 4]), idx),\n [batch_size, max_output_size, 4])\n boxes = boxes * tf.cast(\n tf.reshape(tf.range(max_output_size), [1, -1, 1]) < tf.reshape(\n output_size, [-1, 1, 1]), boxes.dtype)\n scores = tf.reshape(\n tf.gather(tf.reshape(scores, [-1, 1]), idx),\n [batch_size, max_output_size])\n scores = scores * tf.cast(\n tf.reshape(tf.range(max_output_size), [1, -1]) < tf.reshape(\n output_size, [-1, 1]), scores.dtype)\n return scores, boxes\n\n\ndef encode_boxes(boxes, anchors, weights=None):\n \"\"\"Encode boxes to targets.\n\n Args:\n boxes: a tensor whose last dimension is 4 representing the coordinates\n of boxes in ymin, xmin, ymax, xmax order.\n anchors: a tensor whose shape is the same as `boxes` representing the\n coordinates of anchors in ymin, xmin, ymax, xmax 
order.\n weights: None or a list of four float numbers used to scale coordinates.\n\n Returns:\n encoded_boxes: a tensor whose shape is the same as `boxes` representing the\n encoded box targets.\n \"\"\"\n with tf.name_scope('encode_box'):\n boxes = tf.cast(boxes, dtype=anchors.dtype)\n ymin, xmin, ymax, xmax = tf.split(\n boxes, num_or_size_splits=4, axis=-1)\n box_h = ymax - ymin + 1.0\n box_w = xmax - xmin + 1.0\n box_yc = ymin + 0.5 * box_h\n box_xc = xmin + 0.5 * box_w\n\n anchor_ymin, anchor_xmin, anchor_ymax, anchor_xmax = (\n tf.split(anchors, num_or_size_splits=4, axis=-1))\n anchor_h = anchor_ymax - anchor_ymin + 1.0\n anchor_w = anchor_xmax - anchor_xmin + 1.0\n anchor_yc = anchor_ymin + 0.5 * anchor_h\n anchor_xc = anchor_xmin + 0.5 * anchor_w\n\n encoded_dy = (box_yc - anchor_yc) / anchor_h\n encoded_dx = (box_xc - anchor_xc) / anchor_w\n encoded_dh = tf.log(box_h / anchor_h)\n encoded_dw = tf.log(box_w / anchor_w)\n if weights:\n encoded_dy *= weights[0]\n encoded_dx *= weights[1]\n encoded_dh *= weights[2]\n encoded_dw *= weights[3]\n\n encoded_boxes = tf.concat(\n [encoded_dy, encoded_dx, encoded_dh, encoded_dw],\n axis=-1)\n return encoded_boxes\n\n\ndef decode_boxes(encoded_boxes, anchors, weights=None):\n \"\"\"Decode boxes.\n\n Args:\n encoded_boxes: a tensor whose last dimension is 4 representing the\n coordinates of encoded boxes in ymin, xmin, ymax, xmax order.\n anchors: a tensor whose shape is the same as `boxes` representing the\n coordinates of anchors in ymin, xmin, ymax, xmax order.\n weights: None or a list of four float numbers used to scale coordinates.\n\n Returns:\n encoded_boxes: a tensor whose shape is the same as `boxes` representing the\n decoded box targets.\n \"\"\"\n with tf.name_scope('decode_box'):\n encoded_boxes = tf.cast(encoded_boxes, dtype=anchors.dtype)\n dy, dx, dh, dw = tf.split(\n encoded_boxes, num_or_size_splits=4, axis=-1)\n if weights:\n dy /= weights[0]\n dx /= weights[1]\n dh /= weights[2]\n dw /= 
weights[3]\n dh = tf.minimum(dh, BBOX_XFORM_CLIP)\n dw = tf.minimum(dw, BBOX_XFORM_CLIP)\n\n anchor_ymin, anchor_xmin, anchor_ymax, anchor_xmax = tf.split(\n anchors, num_or_size_splits=4, axis=-1)\n\n anchor_h = anchor_ymax - anchor_ymin + 1.0\n anchor_w = anchor_xmax - anchor_xmin + 1.0\n anchor_yc = anchor_ymin + 0.5 * anchor_h\n anchor_xc = anchor_xmin + 0.5 * anchor_w\n\n decoded_boxes_yc = dy * anchor_h + anchor_yc\n decoded_boxes_xc = dx * anchor_w + anchor_xc\n decoded_boxes_h = tf.exp(dh) * anchor_h\n decoded_boxes_w = tf.exp(dw) * anchor_w\n\n decoded_boxes_ymin = decoded_boxes_yc - 0.5 * decoded_boxes_h\n decoded_boxes_xmin = decoded_boxes_xc - 0.5 * decoded_boxes_w\n decoded_boxes_ymax = decoded_boxes_ymin + decoded_boxes_h - 1.0\n decoded_boxes_xmax = decoded_boxes_xmin + decoded_boxes_w - 1.0\n\n decoded_boxes = tf.concat(\n [decoded_boxes_ymin, decoded_boxes_xmin,\n decoded_boxes_ymax, decoded_boxes_xmax],\n axis=-1)\n return decoded_boxes\n\n\ndef clip_boxes(boxes, height, width):\n \"\"\"Clip boxes.\n\n Args:\n boxes: a tensor whose last dimension is 4 representing the coordinates\n of boxes in ymin, xmin, ymax, xmax order.\n height: an integer, a scalar or a tensor such as all but the last dimensions\n are the same as `boxes`. The last dimension is 1. It represents the height\n of the image.\n width: an integer, a scalar or a tensor such as all but the last dimensions\n are the same as `boxes`. The last dimension is 1. 
It represents the width\n of the image.\n\n Returns:\n clipped_boxes: a tensor whose shape is the same as `boxes` representing the\n clipped boxes.\n \"\"\"\n with tf.name_scope('clip_box'):\n y_min, x_min, y_max, x_max = tf.split(\n boxes, num_or_size_splits=4, axis=-1)\n\n height = tf.cast(height, dtype=boxes.dtype)\n width = tf.cast(width, dtype=boxes.dtype)\n clipped_y_min = tf.maximum(tf.minimum(y_min, height - 1.0), 0.0)\n clipped_y_max = tf.maximum(tf.minimum(y_max, height - 1.0), 0.0)\n clipped_x_min = tf.maximum(tf.minimum(x_min, width - 1.0), 0.0)\n clipped_x_max = tf.maximum(tf.minimum(x_max, width - 1.0), 0.0)\n\n clipped_boxes = tf.concat(\n [clipped_y_min, clipped_x_min, clipped_y_max, clipped_x_max],\n axis=-1)\n return clipped_boxes\n\n\ndef filter_boxes(boxes, scores, min_size, height, width, scale):\n \"\"\"Filter out boxes that are too small.\n\n Args:\n boxes: a tensor whose last dimension is 4 representing the coordinates\n of boxes in ymin, xmin, ymax, xmax order.\n scores: a tensor such as all but the last dimensions are the same as\n `boxes`. The last dimension is 1. It represents the scores.\n min_size: an integer specifying the minimal size.\n height: an integer, a scalar or a tensor such as all but the last dimensions\n are the same as `boxes`. The last dimension is 1. It represents the height\n of the image.\n width: an integer, a scalar or a tensor such as all but the last dimensions\n are the same as `boxes`. The last dimension is 1. It represents the width\n of the image.\n scale: an integer, a scalar or a tensor such as all but the last dimensions\n are the same as `boxes`. The last dimension is 1. 
It represents the scale\n of the image.\n\n Returns:\n filtered_boxes: a tensor whose shape is the same as `boxes` representing the\n filtered boxes.\n filtered_scores: a tensor whose shape is the same as `scores` representing\n the filtered scores.\n \"\"\"\n with tf.name_scope('filter_box'):\n y_min, x_min, y_max, x_max = tf.split(\n boxes, num_or_size_splits=4, axis=-1)\n\n h = y_max - y_min + 1.0\n w = x_max - x_min + 1.0\n yc = y_min + h / 2.0\n xc = x_min + w / 2.0\n\n height = tf.cast(height, dtype=boxes.dtype)\n width = tf.cast(width, dtype=boxes.dtype)\n scale = tf.cast(scale, dtype=boxes.dtype)\n min_size = tf.cast(tf.maximum(min_size, 1), dtype=boxes.dtype)\n size_mask = tf.logical_and(\n tf.greater_equal(h, min_size * scale),\n tf.greater_equal(w, min_size * scale))\n center_mask = tf.logical_and(tf.less(yc, height), tf.less(xc, width))\n selected_mask = tf.logical_and(size_mask, center_mask)\n\n filtered_scores = tf.where(selected_mask, scores, tf.zeros_like(scores))\n filtered_boxes = tf.cast(selected_mask, dtype=boxes.dtype) * boxes\n return filtered_boxes, filtered_scores\n\n\ndef to_normalized_coordinates(boxes, height, width):\n \"\"\"Converted absolute box coordinates to normalized ones.\n\n Args:\n boxes: a tensor whose last dimension is 4 representing the coordinates\n of boxes in ymin, xmin, ymax, xmax order.\n height: an integer, a scalar or a tensor such as all but the last dimensions\n are the same as `boxes`. The last dimension is 1. It represents the height\n of the image.\n width: an integer, a scalar or a tensor such as all but the last dimensions\n are the same as `boxes`. The last dimension is 1. 
It represents the width\n of the image.\n\n Returns:\n normalized_boxes: a tensor whose shape is the same as `boxes` representing\n the boxes in normalized coordinates.\n \"\"\"\n with tf.name_scope('normalize_box'):\n height = tf.cast(height, dtype=boxes.dtype)\n width = tf.cast(width, dtype=boxes.dtype)\n\n y_min, x_min, y_max, x_max = tf.split(\n boxes, num_or_size_splits=4, axis=-1)\n\n y_min /= height\n y_max /= height\n x_min /= width\n x_max /= width\n\n normalized_boxes = tf.concat([y_min, x_min, y_max, x_max], axis=-1)\n return normalized_boxes\n\n\ndef to_absolute_coordinates(boxes, height, width):\n \"\"\"Converted normalized box coordinates to absolute ones.\n\n Args:\n boxes: a tensor whose last dimension is 4 representing the coordinates\n of boxes in ymin, xmin, ymax, xmax order.\n height: an integer, a scalar or a tensor such as all but the last dimensions\n are the same as `boxes`. The last dimension is 1. It represents the height\n of the image.\n width: an integer, a scalar or a tensor such as all but the last dimensions\n are the same as `boxes`. The last dimension is 1. It represents the width\n of the image.\n\n Returns:\n absolute_boxes: a tensor whose shape is the same as `boxes` representing the\n boxes in absolute coordinates.\n \"\"\"\n with tf.name_scope('denormalize_box'):\n height = tf.cast(height, dtype=boxes.dtype)\n width = tf.cast(width, dtype=boxes.dtype)\n\n y_min, x_min, y_max, x_max = tf.split(\n boxes, num_or_size_splits=4, axis=-1)\n y_min *= height\n y_max *= height\n x_min *= width\n x_max *= width\n\n absolute_boxes = tf.concat([y_min, x_min, y_max, x_max], axis=-1)\n return absolute_boxes\n" ]
[ [ "tensorflow.contrib.cluster_resolver.TPUClusterResolver", "tensorflow.contrib.tpu.TPUEstimator", "tensorflow.image.resize_image_with_crop_or_pad", "tensorflow.Session.reset", "tensorflow.reshape", "tensorflow.estimator.export.TensorServingInputReceiver", "tensorflow.placeholder", "tensorflow.contrib.tpu.RunConfig", "tensorflow.ConfigProto", "tensorflow.map_fn", "tensorflow.logging.info", "tensorflow.logging.set_verbosity", "tensorflow.contrib.tpu.TPUConfig", "tensorflow.image.convert_image_dtype", "numpy.prod", "numpy.array", "tensorflow.contrib.training.checkpoints_iterator", "tensorflow.image.decode_jpeg" ], [ "tensorflow.google.placeholder", "tensorflow.google.reset_default_graph", "tensorflow.google.Session", "tensorflow.google.global_variables_initializer", "numpy.random.randn", "numpy.random.random_integers", "tensorflow.google.logging.set_verbosity", "tensorflow.google.test.main", "numpy.random.randint" ], [ "tensorflow.concat", "tensorflow.zeros", "tensorflow.reduce_sum", "tensorflow.minimum", "tensorflow.cast", "tensorflow.nn.top_k", "tensorflow.name_scope", "numpy.log", "tensorflow.shape", "tensorflow.reduce_any", "tensorflow.less", "tensorflow.exp", "tensorflow.zeros_like", "tensorflow.split", "tensorflow.reduce_max", "tensorflow.transpose", "tensorflow.constant", "tensorflow.range", "tensorflow.slice", "tensorflow.maximum", "tensorflow.reshape", "tensorflow.ones_like", "tensorflow.expand_dims", "tensorflow.reduce_min", "tensorflow.log", "tensorflow.greater_equal", "tensorflow.reduce_all", "tensorflow.logical_and" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
WilliamJamieson/astropy-benchmarks
[ "dcff9e1d8a584f7933743a91682806647a4c1e86" ]
[ "benchmarks/units.py" ]
[ "import copy\n\nimport numpy as np\nfrom astropy import units as u\n\n\n# Unit tests\n\ndef time_unit_compose():\n u.Ry.compose()\n\n\ndef time_unit_to():\n u.m.to(u.pc)\n\n\ndef time_unit_parse():\n u.Unit('1e-07 kg m2 / s2')\n\n\ndef time_simple_unit_parse():\n u.Unit('1 d')\n\n\ndef time_very_simple_unit_parse():\n u.Unit('d')\n\n\ndef mem_unit():\n return u.erg\n\n\ndef time_compose_to_bases():\n x = copy.copy(u.Ry)\n x.cgs\n\n\ndef time_compose_complex():\n # Composing a complex unit can be very inefficient\n (u.kg / u.s ** 3 * u.au ** 2.5 / u.yr ** 0.5 / u.sr ** 2).compose()\n\n\n# Quantity tests\n\na = np.arange(100000.)\nb1 = [1., 2., 3.]\nb2 = np.asarray(b1)\nq0 = u.Quantity(1., u.s)\nq1 = u.Quantity(a, u.m)\nq2 = u.Quantity(a[:10000], u.deg)\n\n\ndef time_quantity_creation():\n u.Quantity(a, u.m)\n\n\ndef time_quantity_creation_nocopy():\n u.Quantity(a, u.m, copy=False)\n\n\ndef time_quantity_view():\n q1.view(u.Quantity)\n\n\ndef time_quantity_init_scalar():\n 3. * u.m / u.s\n\n\ndef time_quantity_init_array():\n a * u.m / u.s\n\n\ndef time_quantity_init_small_list():\n \"\"\"\n https://github.com/astropy/astropy/issues/7546 reported high overhead\n for small list.\n \"\"\"\n b1 * u.m / u.s\n\n\ndef time_quantity_init_small_array():\n \"\"\"\n https://github.com/astropy/astropy/issues/7546 reported high overhead\n for small array.\n \"\"\"\n b2 * u.m / u.s\n\n\ndef time_quantity_scalar_conversion():\n (3. 
* u.m / u.s).to(u.km / u.hour)\n\n\ndef time_quantity_array_conversion():\n (a * u.m / u.s).to(u.km / u.hour)\n\n\ndef time_quantity_times_unit():\n q1 * u.m\n\n\ndef time_quantity_times_quantity():\n q1 * q0\n\n\ndef time_quantity_ufunc_sin():\n np.sin(q2)\n\n\nclass TimeQuantityOpSmallArray:\n \"\"\"\n Operator benchmarks from https://github.com/astropy/astropy/issues/7546\n for a small Numpy array.\n \"\"\"\n def setup(self):\n data = np.array([1., 2., 3.])\n self.data = data * u.g\n self.out_sq = data * u.g ** 2\n self.out_sqrt = data * u.g ** 0.5\n\n def time_quantity_square(self):\n self.data ** 2\n\n def time_quantity_np_square(self):\n np.power(self.data, 2)\n\n def time_quantity_np_square_out(self):\n np.power(self.data, 2, out=self.out_sq)\n\n def time_quantity_sqrt(self):\n self.data ** 0.5\n\n def time_quantity_np_sqrt(self):\n np.sqrt(self.data)\n\n def time_quantity_np_sqrt_out(self):\n np.sqrt(self.data, out=self.out_sqrt)\n\n\nclass TimeQuantityOpLargeArray(TimeQuantityOpSmallArray):\n \"\"\"\n Like :class:`TimeQuantityOpSmallArray` but for a large Numpy array.\n \"\"\"\n def setup(self):\n data = np.arange(1e6) + 1\n self.data = data * u.g\n self.out_sq = data * u.g ** 2\n self.out_sqrt = data * u.g ** 0.5\n\n\nclass TimeQuantityOpSmallArrayDiffUnit:\n \"\"\"\n Operator benchmarks from https://github.com/astropy/astropy/issues/7546\n for small Numpy arrays with different units.\n \"\"\"\n def setup(self):\n data = np.array([1., 2., 3.])\n self.data = data * u.g\n\n # A different but dimensionally compatible unit\n self.data2 = 0.001 * data * u.kg\n\n def time_quantity_equal(self):\n # Same as operator.eq\n self.data == self.data2\n\n def time_quantity_np_equal(self):\n np.equal(self.data, self.data2)\n\n def time_quantity_truediv(self):\n # Since benchmark is PY3 only, this is always true divide.\n # Same as operator.truediv\n self.data / self.data2\n\n def time_quantity_np_truediv(self):\n np.true_divide(self.data, self.data2)\n\n def 
time_quantity_mul(self):\n # Same as operator.mul\n self.data * self.data2\n\n def time_quantity_np_multiply(self):\n np.multiply(self.data, self.data2)\n\n def time_quantity_sub(self):\n # Same as operator.sub\n self.data - self.data2\n\n def time_quantity_np_subtract(self):\n np.subtract(self.data, self.data2)\n\n def time_quantity_add(self):\n # Same as operator.add\n self.data + self.data2\n\n def time_quantity_np_add(self):\n np.add(self.data, self.data2)\n\n\nclass TimeQuantityOpSmallArraySameUnit(TimeQuantityOpSmallArrayDiffUnit):\n \"\"\"\n Operator benchmarks from https://github.com/astropy/astropy/issues/7546\n for small Numpy arrays with same units.\n \"\"\"\n def setup(self):\n data = np.array([1., 2., 3.])\n self.data = data * u.g\n self.data2 = self.data.copy()\n\n\nclass TimeQuantityOpLargeArrayDiffUnit(TimeQuantityOpSmallArrayDiffUnit):\n \"\"\"\n Like :class:`TimeQuantityOpSmallArrayDiffUnit` but for large Numpy arrays.\n \"\"\"\n def setup(self):\n data = np.arange(1e6) + 1\n self.data = data * u.g\n\n # A different but dimensionally compatible unit\n self.data2 = 0.001 * data * u.kg\n\n\nclass TimeQuantityOpLargeArraySameUnit(TimeQuantityOpSmallArrayDiffUnit):\n \"\"\"\n Like :class:`TimeQuantityOpSmallArraySameUnit` but for large Numpy arrays.\n \"\"\"\n def setup(self):\n data = np.arange(1e6) + 1\n self.data = data * u.g\n self.data2 = self.data.copy()\n" ]
[ [ "numpy.true_divide", "numpy.sqrt", "numpy.multiply", "numpy.power", "numpy.asarray", "numpy.arange", "numpy.subtract", "numpy.sin", "numpy.equal", "numpy.add", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lorenzocestaro/pandas
[ "a73e4518cf3d10fd239cdbd1be3bcda43443bf2a", "a73e4518cf3d10fd239cdbd1be3bcda43443bf2a", "a73e4518cf3d10fd239cdbd1be3bcda43443bf2a", "a73e4518cf3d10fd239cdbd1be3bcda43443bf2a", "a73e4518cf3d10fd239cdbd1be3bcda43443bf2a" ]
[ "pandas/tseries/period.py", "pandas/tests/tools/test_merge.py", "pandas/tests/indexes/period/test_indexing.py", "pandas/indexes/multi.py", "pandas/tests/tseries/test_timezones.py" ]
[ "# pylint: disable=E1101,E1103,W0232\nfrom datetime import datetime, timedelta\nimport numpy as np\nimport warnings\n\n\nfrom pandas.core import common as com\nfrom pandas.types.common import (is_integer,\n is_float,\n is_object_dtype,\n is_integer_dtype,\n is_float_dtype,\n is_scalar,\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_timedelta64_dtype,\n is_period_dtype,\n is_bool_dtype,\n pandas_dtype,\n _ensure_object)\nfrom pandas.types.dtypes import PeriodDtype\nfrom pandas.types.generic import ABCSeries\n\nimport pandas.tseries.frequencies as frequencies\nfrom pandas.tseries.frequencies import get_freq_code as _gfc\nfrom pandas.tseries.index import DatetimeIndex, Int64Index, Index\nfrom pandas.tseries.tdi import TimedeltaIndex\nfrom pandas.tseries.base import DatelikeOps, DatetimeIndexOpsMixin\nfrom pandas.tseries.tools import parse_time_string\nimport pandas.tseries.offsets as offsets\n\nfrom pandas._libs.lib import infer_dtype\nfrom pandas._libs import tslib, period\nfrom pandas._libs.period import (Period, IncompatibleFrequency,\n get_period_field_arr, _validate_end_alias,\n _quarter_to_myear)\n\nfrom pandas.core.base import _shared_docs\nfrom pandas.indexes.base import _index_shared_docs, _ensure_index\n\nfrom pandas import compat\nfrom pandas.util.decorators import (Appender, Substitution, cache_readonly,\n deprecate_kwarg)\nfrom pandas.compat import zip, u\n\nimport pandas.indexes.base as ibase\n_index_doc_kwargs = dict(ibase._index_doc_kwargs)\n_index_doc_kwargs.update(\n dict(target_klass='PeriodIndex or list of Periods'))\n\n\ndef _field_accessor(name, alias, docstring=None):\n def f(self):\n base, mult = _gfc(self.freq)\n return get_period_field_arr(alias, self._values, base)\n f.__name__ = name\n f.__doc__ = docstring\n return property(f)\n\n\ndef dt64arr_to_periodarr(data, freq, tz):\n if data.dtype != np.dtype('M8[ns]'):\n raise ValueError('Wrong dtype: %s' % data.dtype)\n\n freq = Period._maybe_convert_freq(freq)\n base, mult = _gfc(freq)\n 
return period.dt64arr_to_periodarr(data.view('i8'), base, tz)\n\n# --- Period index sketch\n\n\n_DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX\n\n\ndef _period_index_cmp(opname, nat_result=False):\n \"\"\"\n Wrap comparison operations to convert datetime-like to datetime64\n \"\"\"\n\n def wrapper(self, other):\n if isinstance(other, Period):\n func = getattr(self._values, opname)\n other_base, _ = _gfc(other.freq)\n if other.freq != self.freq:\n msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)\n raise IncompatibleFrequency(msg)\n\n result = func(other.ordinal)\n elif isinstance(other, PeriodIndex):\n if other.freq != self.freq:\n msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)\n raise IncompatibleFrequency(msg)\n\n result = getattr(self._values, opname)(other._values)\n\n mask = self._isnan | other._isnan\n if mask.any():\n result[mask] = nat_result\n\n return result\n elif other is tslib.NaT:\n result = np.empty(len(self._values), dtype=bool)\n result.fill(nat_result)\n else:\n other = Period(other, freq=self.freq)\n func = getattr(self._values, opname)\n result = func(other.ordinal)\n\n if self.hasnans:\n result[self._isnan] = nat_result\n\n return result\n return wrapper\n\n\ndef _new_PeriodIndex(cls, **d):\n # GH13277 for unpickling\n if d['data'].dtype == 'int64':\n values = d.pop('data')\n return cls._from_ordinals(values=values, **d)\n\n\nclass PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):\n \"\"\"\n Immutable ndarray holding ordinal values indicating regular periods in\n time such as particular years, quarters, months, etc. 
A value of 1 is the\n period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.\n This ordinal representation is from the scikits.timeseries project.\n\n For instance,\n # construct period for day 1/1/1 and get the first second\n i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')\n i.ordinal\n ===> 1\n\n Index keys are boxed to Period objects which carries the metadata (eg,\n frequency information).\n\n Parameters\n ----------\n data : array-like (1-dimensional), optional\n Optional period-like data to construct index with\n copy : bool\n Make a copy of input ndarray\n freq : string or period object, optional\n One of pandas period strings or corresponding objects\n start : starting value, period-like, optional\n If data is None, used as the start point in generating regular\n period data.\n periods : int, optional, > 0\n Number of periods to generate, if generating index. Takes precedence\n over end argument\n end : end value, period-like, optional\n If periods is none, generated index will extend to first conforming\n period on or just past end argument\n year : int, array, or Series, default None\n month : int, array, or Series, default None\n quarter : int, array, or Series, default None\n day : int, array, or Series, default None\n hour : int, array, or Series, default None\n minute : int, array, or Series, default None\n second : int, array, or Series, default None\n tz : object, default None\n Timezone for converting datetime64 data to Periods\n dtype : str or PeriodDtype, default None\n\n Examples\n --------\n >>> idx = PeriodIndex(year=year_arr, quarter=q_arr)\n\n >>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')\n \"\"\"\n _box_scalars = True\n _typ = 'periodindex'\n _attributes = ['name', 'freq']\n _datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',\n 'weekofyear', 'week', 'dayofweek', 'weekday',\n 'dayofyear', 'quarter', 'qyear', 'freq',\n 'days_in_month', 'daysinmonth',\n 'to_timestamp', 'asfreq', 
'start_time', 'end_time',\n 'is_leap_year']\n _is_numeric_dtype = False\n _infer_as_myclass = True\n\n freq = None\n\n __eq__ = _period_index_cmp('__eq__')\n __ne__ = _period_index_cmp('__ne__', nat_result=True)\n __lt__ = _period_index_cmp('__lt__')\n __gt__ = _period_index_cmp('__gt__')\n __le__ = _period_index_cmp('__le__')\n __ge__ = _period_index_cmp('__ge__')\n\n def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,\n periods=None, copy=False, name=None, tz=None, dtype=None,\n **kwargs):\n\n if periods is not None:\n if is_float(periods):\n periods = int(periods)\n elif not is_integer(periods):\n raise ValueError('Periods must be a number, got %s' %\n str(periods))\n\n if name is None and hasattr(data, 'name'):\n name = data.name\n\n if dtype is not None:\n dtype = pandas_dtype(dtype)\n if not is_period_dtype(dtype):\n raise ValueError('dtype must be PeriodDtype')\n if freq is None:\n freq = dtype.freq\n elif freq != dtype.freq:\n msg = 'specified freq and dtype are different'\n raise IncompatibleFrequency(msg)\n\n # coerce freq to freq object, otherwise it can be coerced elementwise\n # which is slow\n if freq:\n freq = Period._maybe_convert_freq(freq)\n\n if data is None:\n if ordinal is not None:\n data = np.asarray(ordinal, dtype=np.int64)\n else:\n data, freq = cls._generate_range(start, end, periods,\n freq, kwargs)\n return cls._from_ordinals(data, name=name, freq=freq)\n\n if isinstance(data, PeriodIndex):\n if freq is None or freq == data.freq: # no freq change\n freq = data.freq\n data = data._values\n else:\n base1, _ = _gfc(data.freq)\n base2, _ = _gfc(freq)\n data = period.period_asfreq_arr(data._values,\n base1, base2, 1)\n return cls._simple_new(data, name=name, freq=freq)\n\n # not array / index\n if not isinstance(data, (np.ndarray, PeriodIndex,\n DatetimeIndex, Int64Index)):\n if is_scalar(data) or isinstance(data, Period):\n cls._scalar_data_error(data)\n\n # other iterable of some kind\n if not isinstance(data, (list, 
tuple)):\n data = list(data)\n\n data = np.asarray(data)\n\n # datetime other than period\n if is_datetime64_dtype(data.dtype):\n data = dt64arr_to_periodarr(data, freq, tz)\n return cls._from_ordinals(data, name=name, freq=freq)\n\n # check not floats\n if infer_dtype(data) == 'floating' and len(data) > 0:\n raise TypeError(\"PeriodIndex does not allow \"\n \"floating point in construction\")\n\n # anything else, likely an array of strings or periods\n data = _ensure_object(data)\n freq = freq or period.extract_freq(data)\n data = period.extract_ordinals(data, freq)\n return cls._from_ordinals(data, name=name, freq=freq)\n\n @classmethod\n def _generate_range(cls, start, end, periods, freq, fields):\n if freq is not None:\n freq = Period._maybe_convert_freq(freq)\n\n field_count = len(fields)\n if com._count_not_none(start, end) > 0:\n if field_count > 0:\n raise ValueError('Can either instantiate from fields '\n 'or endpoints, but not both')\n subarr, freq = _get_ordinal_range(start, end, periods, freq)\n elif field_count > 0:\n subarr, freq = _range_from_fields(freq=freq, **fields)\n else:\n raise ValueError('Not enough parameters to construct '\n 'Period range')\n\n return subarr, freq\n\n @classmethod\n def _simple_new(cls, values, name=None, freq=None, **kwargs):\n \"\"\"\n Values can be any type that can be coerced to Periods.\n Ordinals in an ndarray are fastpath-ed to `_from_ordinals`\n \"\"\"\n if not is_integer_dtype(values):\n values = np.array(values, copy=False)\n if len(values) > 0 and is_float_dtype(values):\n raise TypeError(\"PeriodIndex can't take floats\")\n return cls(values, name=name, freq=freq, **kwargs)\n\n return cls._from_ordinals(values, name, freq, **kwargs)\n\n @classmethod\n def _from_ordinals(cls, values, name=None, freq=None, **kwargs):\n \"\"\"\n Values should be int ordinals\n `__new__` & `_simple_new` cooerce to ordinals and call this method\n \"\"\"\n\n values = np.array(values, dtype='int64', copy=False)\n\n result = 
object.__new__(cls)\n result._data = values\n result.name = name\n if freq is None:\n raise ValueError('freq is not specified and cannot be inferred')\n result.freq = Period._maybe_convert_freq(freq)\n result._reset_identity()\n return result\n\n def _shallow_copy_with_infer(self, values=None, **kwargs):\n \"\"\" we always want to return a PeriodIndex \"\"\"\n return self._shallow_copy(values=values, **kwargs)\n\n def _shallow_copy(self, values=None, freq=None, **kwargs):\n if freq is None:\n freq = self.freq\n if values is None:\n values = self._values\n return super(PeriodIndex, self)._shallow_copy(values=values,\n freq=freq, **kwargs)\n\n def _coerce_scalar_to_index(self, item):\n \"\"\"\n we need to coerce a scalar to a compat for our index type\n\n Parameters\n ----------\n item : scalar item to coerce\n \"\"\"\n return PeriodIndex([item], **self._get_attributes_dict())\n\n def __contains__(self, key):\n if isinstance(key, Period):\n if key.freq != self.freq:\n return False\n else:\n return key.ordinal in self._engine\n else:\n try:\n self.get_loc(key)\n return True\n except Exception:\n return False\n return False\n\n @property\n def asi8(self):\n return self._values.view('i8')\n\n @cache_readonly\n def _int64index(self):\n return Int64Index(self.asi8, name=self.name, fastpath=True)\n\n @property\n def values(self):\n return self.asobject.values\n\n @property\n def _values(self):\n return self._data\n\n def __array__(self, dtype=None):\n if is_integer_dtype(dtype):\n return self.asi8\n else:\n return self.asobject.values\n\n def __array_wrap__(self, result, context=None):\n \"\"\"\n Gets called after a ufunc. 
Needs additional handling as\n PeriodIndex stores internal data as int dtype\n\n Replace this to __numpy_ufunc__ in future version\n \"\"\"\n if isinstance(context, tuple) and len(context) > 0:\n func = context[0]\n if (func is np.add):\n pass\n elif (func is np.subtract):\n name = self.name\n left = context[1][0]\n right = context[1][1]\n if (isinstance(left, PeriodIndex) and\n isinstance(right, PeriodIndex)):\n name = left.name if left.name == right.name else None\n return Index(result, name=name)\n elif isinstance(left, Period) or isinstance(right, Period):\n return Index(result, name=name)\n elif isinstance(func, np.ufunc):\n if 'M->M' not in func.types:\n msg = \"ufunc '{0}' not supported for the PeriodIndex\"\n # This should be TypeError, but TypeError cannot be raised\n # from here because numpy catches.\n raise ValueError(msg.format(func.__name__))\n\n if is_bool_dtype(result):\n return result\n # the result is object dtype array of Period\n # cannot pass _simple_new as it is\n return self._shallow_copy(result, freq=self.freq, name=self.name)\n\n @property\n def _box_func(self):\n return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)\n\n def _to_embed(self, keep_tz=False):\n \"\"\"\n return an array repr of this object, potentially casting to object\n \"\"\"\n return self.asobject.values\n\n @property\n def _formatter_func(self):\n return lambda x: \"'%s'\" % x\n\n def asof_locs(self, where, mask):\n \"\"\"\n where : array of timestamps\n mask : array of booleans where data is not NA\n\n \"\"\"\n where_idx = where\n if isinstance(where_idx, DatetimeIndex):\n where_idx = PeriodIndex(where_idx.values, freq=self.freq)\n\n locs = self._values[mask].searchsorted(where_idx._values, side='right')\n\n locs = np.where(locs > 0, locs - 1, 0)\n result = np.arange(len(self))[mask].take(locs)\n\n first = mask.argmax()\n result[(locs == 0) & (where_idx._values < self._values[first])] = -1\n\n return result\n\n @Appender(_index_shared_docs['astype'])\n def 
astype(self, dtype, copy=True, how='start'):\n dtype = pandas_dtype(dtype)\n if is_object_dtype(dtype):\n return self.asobject\n elif is_integer_dtype(dtype):\n if copy:\n return self._int64index.copy()\n else:\n return self._int64index\n elif is_datetime64_dtype(dtype):\n return self.to_timestamp(how=how)\n elif is_datetime64tz_dtype(dtype):\n return self.to_timestamp(how=how).tz_localize(dtype.tz)\n elif is_period_dtype(dtype):\n return self.asfreq(freq=dtype.freq)\n raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype)\n\n @Substitution(klass='PeriodIndex')\n @Appender(_shared_docs['searchsorted'])\n @deprecate_kwarg(old_arg_name='key', new_arg_name='value')\n def searchsorted(self, value, side='left', sorter=None):\n if isinstance(value, Period):\n if value.freq != self.freq:\n msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, value.freqstr)\n raise IncompatibleFrequency(msg)\n value = value.ordinal\n elif isinstance(value, compat.string_types):\n value = Period(value, freq=self.freq).ordinal\n\n return self._values.searchsorted(value, side=side, sorter=sorter)\n\n @property\n def is_all_dates(self):\n return True\n\n @property\n def is_full(self):\n \"\"\"\n Returns True if there are any missing periods from start to end\n \"\"\"\n if len(self) == 0:\n return True\n if not self.is_monotonic:\n raise ValueError('Index is not monotonic')\n values = self.values\n return ((values[1:] - values[:-1]) < 2).all()\n\n def asfreq(self, freq=None, how='E'):\n \"\"\"\n Convert the PeriodIndex to the specified frequency `freq`.\n\n Parameters\n ----------\n\n freq : str\n a frequency\n how : str {'E', 'S'}\n 'E', 'END', or 'FINISH' for end,\n 'S', 'START', or 'BEGIN' for start.\n Whether the elements should be aligned to the end\n or start within pa period. 
January 31st ('END') vs.\n Janury 1st ('START') for example.\n\n Returns\n -------\n\n new : PeriodIndex with the new frequency\n\n Examples\n --------\n >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')\n >>> pidx\n <class 'pandas.tseries.period.PeriodIndex'>\n [2010, ..., 2015]\n Length: 6, Freq: A-DEC\n\n >>> pidx.asfreq('M')\n <class 'pandas.tseries.period.PeriodIndex'>\n [2010-12, ..., 2015-12]\n Length: 6, Freq: M\n\n >>> pidx.asfreq('M', how='S')\n <class 'pandas.tseries.period.PeriodIndex'>\n [2010-01, ..., 2015-01]\n Length: 6, Freq: M\n \"\"\"\n how = _validate_end_alias(how)\n\n freq = Period._maybe_convert_freq(freq)\n\n base1, mult1 = _gfc(self.freq)\n base2, mult2 = _gfc(freq)\n\n asi8 = self.asi8\n # mult1 can't be negative or 0\n end = how == 'E'\n if end:\n ordinal = asi8 + mult1 - 1\n else:\n ordinal = asi8\n\n new_data = period.period_asfreq_arr(ordinal, base1, base2, end)\n\n if self.hasnans:\n new_data[self._isnan] = tslib.iNaT\n\n return self._simple_new(new_data, self.name, freq=freq)\n\n def to_datetime(self, dayfirst=False):\n \"\"\"\n DEPRECATED: use :meth:`to_timestamp` instead.\n\n Cast to DatetimeIndex.\n \"\"\"\n warnings.warn(\"to_datetime is deprecated. 
Use self.to_timestamp(...)\",\n FutureWarning, stacklevel=2)\n return self.to_timestamp()\n\n year = _field_accessor('year', 0, \"The year of the period\")\n month = _field_accessor('month', 3, \"The month as January=1, December=12\")\n day = _field_accessor('day', 4, \"The days of the period\")\n hour = _field_accessor('hour', 5, \"The hour of the period\")\n minute = _field_accessor('minute', 6, \"The minute of the period\")\n second = _field_accessor('second', 7, \"The second of the period\")\n weekofyear = _field_accessor('week', 8, \"The week ordinal of the year\")\n week = weekofyear\n dayofweek = _field_accessor('dayofweek', 10,\n \"The day of the week with Monday=0, Sunday=6\")\n weekday = dayofweek\n dayofyear = day_of_year = _field_accessor('dayofyear', 9,\n \"The ordinal day of the year\")\n quarter = _field_accessor('quarter', 2, \"The quarter of the date\")\n qyear = _field_accessor('qyear', 1)\n days_in_month = _field_accessor('days_in_month', 11,\n \"The number of days in the month\")\n daysinmonth = days_in_month\n\n @property\n def is_leap_year(self):\n \"\"\" Logical indicating if the date belongs to a leap year \"\"\"\n return tslib._isleapyear_arr(self.year)\n\n @property\n def start_time(self):\n return self.to_timestamp(how='start')\n\n @property\n def end_time(self):\n return self.to_timestamp(how='end')\n\n def _mpl_repr(self):\n # how to represent ourselves to matplotlib\n return self.asobject.values\n\n def to_timestamp(self, freq=None, how='start'):\n \"\"\"\n Cast to DatetimeIndex\n\n Parameters\n ----------\n freq : string or DateOffset, default 'D' for week or longer, 'S'\n otherwise\n Target frequency\n how : {'s', 'e', 'start', 'end'}\n\n Returns\n -------\n DatetimeIndex\n \"\"\"\n how = _validate_end_alias(how)\n\n if freq is None:\n base, mult = _gfc(self.freq)\n freq = frequencies.get_to_timestamp_base(base)\n else:\n freq = Period._maybe_convert_freq(freq)\n\n base, mult = _gfc(freq)\n new_data = self.asfreq(freq, how)\n\n 
new_data = period.periodarr_to_dt64arr(new_data._values, base)\n return DatetimeIndex(new_data, freq='infer', name=self.name)\n\n def _maybe_convert_timedelta(self, other):\n if isinstance(other, (timedelta, np.timedelta64, offsets.Tick)):\n offset = frequencies.to_offset(self.freq.rule_code)\n if isinstance(offset, offsets.Tick):\n nanos = tslib._delta_to_nanoseconds(other)\n offset_nanos = tslib._delta_to_nanoseconds(offset)\n if nanos % offset_nanos == 0:\n return nanos // offset_nanos\n elif isinstance(other, offsets.DateOffset):\n freqstr = other.rule_code\n base = frequencies.get_base_alias(freqstr)\n if base == self.freq.rule_code:\n return other.n\n msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)\n raise IncompatibleFrequency(msg)\n elif isinstance(other, np.ndarray):\n if is_integer_dtype(other):\n return other\n elif is_timedelta64_dtype(other):\n offset = frequencies.to_offset(self.freq)\n if isinstance(offset, offsets.Tick):\n nanos = tslib._delta_to_nanoseconds(other)\n offset_nanos = tslib._delta_to_nanoseconds(offset)\n if (nanos % offset_nanos).all() == 0:\n return nanos // offset_nanos\n elif is_integer(other):\n # integer is passed to .shift via\n # _add_datetimelike_methods basically\n # but ufunc may pass integer to _add_delta\n return other\n # raise when input doesn't have freq\n msg = \"Input has different freq from PeriodIndex(freq={0})\"\n raise IncompatibleFrequency(msg.format(self.freqstr))\n\n def _add_delta(self, other):\n ordinal_delta = self._maybe_convert_timedelta(other)\n return self.shift(ordinal_delta)\n\n def _sub_datelike(self, other):\n if other is tslib.NaT:\n new_data = np.empty(len(self), dtype=np.int64)\n new_data.fill(tslib.iNaT)\n return TimedeltaIndex(new_data, name=self.name)\n return NotImplemented\n\n def _sub_period(self, other):\n if self.freq != other.freq:\n msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)\n raise IncompatibleFrequency(msg)\n\n asi8 = self.asi8\n new_data = asi8 - 
other.ordinal\n\n if self.hasnans:\n new_data = new_data.astype(np.float64)\n new_data[self._isnan] = np.nan\n # result must be Int64Index or Float64Index\n return Index(new_data, name=self.name)\n\n def shift(self, n):\n \"\"\"\n Specialized shift which produces an PeriodIndex\n\n Parameters\n ----------\n n : int\n Periods to shift by\n\n Returns\n -------\n shifted : PeriodIndex\n \"\"\"\n values = self._values + n * self.freq.n\n if self.hasnans:\n values[self._isnan] = tslib.iNaT\n return self._shallow_copy(values=values)\n\n @cache_readonly\n def dtype(self):\n return PeriodDtype.construct_from_string(self.freq)\n\n @property\n def inferred_type(self):\n # b/c data is represented as ints make sure we can't have ambiguous\n # indexing\n return 'period'\n\n def get_value(self, series, key):\n \"\"\"\n Fast lookup of value from 1-dimensional ndarray. Only use this if you\n know what you're doing\n \"\"\"\n s = com._values_from_object(series)\n try:\n return com._maybe_box(self,\n super(PeriodIndex, self).get_value(s, key),\n series, key)\n except (KeyError, IndexError):\n try:\n asdt, parsed, reso = parse_time_string(key, self.freq)\n grp = frequencies.Resolution.get_freq_group(reso)\n freqn = frequencies.get_freq_group(self.freq)\n\n vals = self._values\n\n # if our data is higher resolution than requested key, slice\n if grp < freqn:\n iv = Period(asdt, freq=(grp, 1))\n ord1 = iv.asfreq(self.freq, how='S').ordinal\n ord2 = iv.asfreq(self.freq, how='E').ordinal\n\n if ord2 < vals[0] or ord1 > vals[-1]:\n raise KeyError(key)\n\n pos = np.searchsorted(self._values, [ord1, ord2])\n key = slice(pos[0], pos[1] + 1)\n return series[key]\n elif grp == freqn:\n key = Period(asdt, freq=self.freq).ordinal\n return com._maybe_box(self, self._engine.get_value(s, key),\n series, key)\n else:\n raise KeyError(key)\n except TypeError:\n pass\n\n key = Period(key, self.freq).ordinal\n return com._maybe_box(self, self._engine.get_value(s, key),\n series, key)\n\n 
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)\n def get_indexer(self, target, method=None, limit=None, tolerance=None):\n target = _ensure_index(target)\n\n if hasattr(target, 'freq') and target.freq != self.freq:\n msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, target.freqstr)\n raise IncompatibleFrequency(msg)\n\n if isinstance(target, PeriodIndex):\n target = target.asi8\n\n if tolerance is not None:\n tolerance = self._convert_tolerance(tolerance)\n return Index.get_indexer(self._int64index, target, method,\n limit, tolerance)\n\n def _get_unique_index(self, dropna=False):\n \"\"\"\n wrap Index._get_unique_index to handle NaT\n \"\"\"\n res = super(PeriodIndex, self)._get_unique_index(dropna=dropna)\n if dropna:\n res = res.dropna()\n return res\n\n def get_loc(self, key, method=None, tolerance=None):\n \"\"\"\n Get integer location for requested label\n\n Returns\n -------\n loc : int\n \"\"\"\n try:\n return self._engine.get_loc(key)\n except KeyError:\n if is_integer(key):\n raise\n\n try:\n asdt, parsed, reso = parse_time_string(key, self.freq)\n key = asdt\n except TypeError:\n pass\n\n try:\n key = Period(key, freq=self.freq)\n except ValueError:\n # we cannot construct the Period\n # as we have an invalid type\n raise KeyError(key)\n\n try:\n ordinal = tslib.iNaT if key is tslib.NaT else key.ordinal\n if tolerance is not None:\n tolerance = self._convert_tolerance(tolerance)\n return self._int64index.get_loc(ordinal, method, tolerance)\n\n except KeyError:\n raise KeyError(key)\n\n def _maybe_cast_slice_bound(self, label, side, kind):\n \"\"\"\n If label is a string or a datetime, cast it to Period.ordinal according\n to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'ix', 'loc', 'getitem'}\n\n Returns\n -------\n bound : Period or object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n\n \"\"\"\n assert kind in ['ix', 'loc', 'getitem']\n\n if 
isinstance(label, datetime):\n return Period(label, freq=self.freq)\n elif isinstance(label, compat.string_types):\n try:\n _, parsed, reso = parse_time_string(label, self.freq)\n bounds = self._parsed_string_to_bounds(reso, parsed)\n return bounds[0 if side == 'left' else 1]\n except Exception:\n raise KeyError(label)\n elif is_integer(label) or is_float(label):\n self._invalid_indexer('slice', label)\n\n return label\n\n def _parsed_string_to_bounds(self, reso, parsed):\n if reso == 'year':\n t1 = Period(year=parsed.year, freq='A')\n elif reso == 'month':\n t1 = Period(year=parsed.year, month=parsed.month, freq='M')\n elif reso == 'quarter':\n q = (parsed.month - 1) // 3 + 1\n t1 = Period(year=parsed.year, quarter=q, freq='Q-DEC')\n elif reso == 'day':\n t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,\n freq='D')\n elif reso == 'hour':\n t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,\n hour=parsed.hour, freq='H')\n elif reso == 'minute':\n t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,\n hour=parsed.hour, minute=parsed.minute, freq='T')\n elif reso == 'second':\n t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,\n hour=parsed.hour, minute=parsed.minute,\n second=parsed.second, freq='S')\n else:\n raise KeyError(reso)\n return (t1.asfreq(self.freq, how='start'),\n t1.asfreq(self.freq, how='end'))\n\n def _get_string_slice(self, key):\n if not self.is_monotonic:\n raise ValueError('Partial indexing only valid for '\n 'ordered time series')\n\n key, parsed, reso = parse_time_string(key, self.freq)\n grp = frequencies.Resolution.get_freq_group(reso)\n freqn = frequencies.get_freq_group(self.freq)\n if reso in ['day', 'hour', 'minute', 'second'] and not grp < freqn:\n raise KeyError(key)\n\n t1, t2 = self._parsed_string_to_bounds(reso, parsed)\n return slice(self.searchsorted(t1.ordinal, side='left'),\n self.searchsorted(t2.ordinal, side='right'))\n\n def _convert_tolerance(self, tolerance):\n 
tolerance = DatetimeIndexOpsMixin._convert_tolerance(self, tolerance)\n return self._maybe_convert_timedelta(tolerance)\n\n def insert(self, loc, item):\n if not isinstance(item, Period) or self.freq != item.freq:\n return self.asobject.insert(loc, item)\n\n idx = np.concatenate((self[:loc].asi8, np.array([item.ordinal]),\n self[loc:].asi8))\n return self._shallow_copy(idx)\n\n def join(self, other, how='left', level=None, return_indexers=False):\n \"\"\"\n See Index.join\n \"\"\"\n self._assert_can_do_setop(other)\n\n result = Int64Index.join(self, other, how=how, level=level,\n return_indexers=return_indexers)\n\n if return_indexers:\n result, lidx, ridx = result\n return self._apply_meta(result), lidx, ridx\n return self._apply_meta(result)\n\n def _assert_can_do_setop(self, other):\n super(PeriodIndex, self)._assert_can_do_setop(other)\n\n if not isinstance(other, PeriodIndex):\n raise ValueError('can only call with other PeriodIndex-ed objects')\n\n if self.freq != other.freq:\n msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)\n raise IncompatibleFrequency(msg)\n\n def _wrap_union_result(self, other, result):\n name = self.name if self.name == other.name else None\n result = self._apply_meta(result)\n result.name = name\n return result\n\n def _apply_meta(self, rawarr):\n if not isinstance(rawarr, PeriodIndex):\n rawarr = PeriodIndex._from_ordinals(rawarr, freq=self.freq,\n name=self.name)\n return rawarr\n\n def _format_native_types(self, na_rep=u('NaT'), date_format=None,\n **kwargs):\n\n values = self.asobject.values\n\n if date_format:\n formatter = lambda dt: dt.strftime(date_format)\n else:\n formatter = lambda dt: u('%s') % dt\n\n if self.hasnans:\n mask = self._isnan\n values[mask] = na_rep\n imask = ~mask\n values[imask] = np.array([formatter(dt) for dt\n in values[imask]])\n else:\n values = np.array([formatter(dt) for dt in values])\n return values\n\n def __setstate__(self, state):\n \"\"\"Necessary for making this object 
picklable\"\"\"\n\n if isinstance(state, dict):\n super(PeriodIndex, self).__setstate__(state)\n\n elif isinstance(state, tuple):\n\n # < 0.15 compat\n if len(state) == 2:\n nd_state, own_state = state\n data = np.empty(nd_state[1], dtype=nd_state[2])\n np.ndarray.__setstate__(data, nd_state)\n\n # backcompat\n self.freq = Period._maybe_convert_freq(own_state[1])\n\n else: # pragma: no cover\n data = np.empty(state)\n np.ndarray.__setstate__(self, state)\n\n self._data = data\n\n else:\n raise Exception(\"invalid pickle state\")\n\n _unpickle_compat = __setstate__\n\n def tz_convert(self, tz):\n \"\"\"\n Convert tz-aware DatetimeIndex from one time zone to another (using\n pytz/dateutil)\n\n Parameters\n ----------\n tz : string, pytz.timezone, dateutil.tz.tzfile or None\n Time zone for time. Corresponding timestamps would be converted to\n time zone of the TimeSeries.\n None will remove timezone holding UTC time.\n\n Returns\n -------\n normalized : DatetimeIndex\n\n Note\n ----\n Not currently implemented for PeriodIndex\n \"\"\"\n raise NotImplementedError(\"Not yet implemented for PeriodIndex\")\n\n def tz_localize(self, tz, infer_dst=False):\n \"\"\"\n Localize tz-naive DatetimeIndex to given time zone (using\n pytz/dateutil), or remove timezone from tz-aware DatetimeIndex\n\n Parameters\n ----------\n tz : string, pytz.timezone, dateutil.tz.tzfile or None\n Time zone for time. 
Corresponding timestamps would be converted to\n time zone of the TimeSeries.\n None will remove timezone holding local time.\n infer_dst : boolean, default False\n Attempt to infer fall dst-transition hours based on order\n\n Returns\n -------\n localized : DatetimeIndex\n\n Note\n ----\n Not currently implemented for PeriodIndex\n \"\"\"\n raise NotImplementedError(\"Not yet implemented for PeriodIndex\")\n\n\nPeriodIndex._add_numeric_methods_disabled()\nPeriodIndex._add_logical_methods_disabled()\nPeriodIndex._add_datetimelike_methods()\n\n\ndef _get_ordinal_range(start, end, periods, freq, mult=1):\n if com._count_not_none(start, end, periods) < 2:\n raise ValueError('Must specify 2 of start, end, periods')\n\n if freq is not None:\n _, mult = _gfc(freq)\n\n if start is not None:\n start = Period(start, freq)\n if end is not None:\n end = Period(end, freq)\n\n is_start_per = isinstance(start, Period)\n is_end_per = isinstance(end, Period)\n\n if is_start_per and is_end_per and start.freq != end.freq:\n raise ValueError('Start and end must have same freq')\n if (start is tslib.NaT or end is tslib.NaT):\n raise ValueError('Start and end must not be NaT')\n\n if freq is None:\n if is_start_per:\n freq = start.freq\n elif is_end_per:\n freq = end.freq\n else: # pragma: no cover\n raise ValueError('Could not infer freq from start/end')\n\n if periods is not None:\n periods = periods * mult\n if start is None:\n data = np.arange(end.ordinal - periods + mult,\n end.ordinal + 1, mult,\n dtype=np.int64)\n else:\n data = np.arange(start.ordinal, start.ordinal + periods, mult,\n dtype=np.int64)\n else:\n data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64)\n\n return data, freq\n\n\ndef _range_from_fields(year=None, month=None, quarter=None, day=None,\n hour=None, minute=None, second=None, freq=None):\n if hour is None:\n hour = 0\n if minute is None:\n minute = 0\n if second is None:\n second = 0\n if day is None:\n day = 1\n\n ordinals = []\n\n if 
quarter is not None:\n if freq is None:\n freq = 'Q'\n base = frequencies.FreqGroup.FR_QTR\n else:\n base, mult = _gfc(freq)\n if base != frequencies.FreqGroup.FR_QTR:\n raise AssertionError(\"base must equal FR_QTR\")\n\n year, quarter = _make_field_arrays(year, quarter)\n for y, q in zip(year, quarter):\n y, m = _quarter_to_myear(y, q, freq)\n val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)\n ordinals.append(val)\n else:\n base, mult = _gfc(freq)\n arrays = _make_field_arrays(year, month, day, hour, minute, second)\n for y, mth, d, h, mn, s in zip(*arrays):\n ordinals.append(period.period_ordinal(\n y, mth, d, h, mn, s, 0, 0, base))\n\n return np.array(ordinals, dtype=np.int64), freq\n\n\ndef _make_field_arrays(*fields):\n length = None\n for x in fields:\n if isinstance(x, (list, np.ndarray, ABCSeries)):\n if length is not None and len(x) != length:\n raise ValueError('Mismatched Period array lengths')\n elif length is None:\n length = len(x)\n\n arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries))\n else np.repeat(x, length) for x in fields]\n\n return arrays\n\n\ndef pnow(freq=None):\n # deprecation, xref #13790\n import warnings\n\n warnings.warn(\"pd.pnow() and pandas.tseries.period.pnow() \"\n \"are deprecated. 
Please use Period.now()\",\n FutureWarning, stacklevel=2)\n return Period.now(freq=freq)\n\n\ndef period_range(start=None, end=None, periods=None, freq='D', name=None):\n \"\"\"\n Return a fixed frequency datetime index, with day (calendar) as the default\n frequency\n\n\n Parameters\n ----------\n start : starting value, period-like, optional\n end : ending value, period-like, optional\n periods : int, default None\n Number of periods in the index\n freq : str/DateOffset, default 'D'\n Frequency alias\n name : str, default None\n Name for the resulting PeriodIndex\n\n Returns\n -------\n prng : PeriodIndex\n \"\"\"\n return PeriodIndex(start=start, end=end, periods=periods,\n freq=freq, name=name)\n", "# pylint: disable=E1103\n\nimport pytest\nfrom datetime import datetime\nfrom numpy.random import randn\nfrom numpy import nan\nimport numpy as np\nimport random\n\nimport pandas as pd\nfrom pandas.compat import lrange, lzip\nfrom pandas.tools.concat import concat\nfrom pandas.tools.merge import merge, MergeError\nfrom pandas.util.testing import assert_frame_equal, assert_series_equal\nfrom pandas.types.dtypes import CategoricalDtype\nfrom pandas.types.common import is_categorical_dtype, is_object_dtype\nfrom pandas import DataFrame, Index, MultiIndex, Series, Categorical\nimport pandas.util.testing as tm\n\n\nN = 50\nNGROUPS = 8\n\n\ndef get_test_data(ngroups=NGROUPS, n=N):\n unique_groups = lrange(ngroups)\n arr = np.asarray(np.tile(unique_groups, n // ngroups))\n\n if len(arr) < n:\n arr = np.asarray(list(arr) + unique_groups[:n - len(arr)])\n\n random.shuffle(arr)\n return arr\n\n\nclass TestMerge(tm.TestCase):\n\n def setUp(self):\n # aggregate multiple columns\n self.df = DataFrame({'key1': get_test_data(),\n 'key2': get_test_data(),\n 'data1': np.random.randn(N),\n 'data2': np.random.randn(N)})\n\n # exclude a couple keys for fun\n self.df = self.df[self.df['key2'] > 1]\n\n self.df2 = DataFrame({'key1': get_test_data(n=N // 5),\n 'key2': 
get_test_data(ngroups=NGROUPS // 2,\n n=N // 5),\n 'value': np.random.randn(N // 5)})\n\n self.left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],\n 'v1': np.random.randn(7)})\n self.right = DataFrame({'v2': np.random.randn(4)},\n index=['d', 'b', 'c', 'a'])\n\n def test_merge_inner_join_empty(self):\n # GH 15328\n df_empty = pd.DataFrame()\n df_a = pd.DataFrame({'a': [1, 2]}, index=[0, 1], dtype='int64')\n result = pd.merge(df_empty, df_a, left_index=True, right_index=True)\n expected = pd.DataFrame({'a': []}, index=[], dtype='int64')\n assert_frame_equal(result, expected)\n\n def test_merge_common(self):\n joined = merge(self.df, self.df2)\n exp = merge(self.df, self.df2, on=['key1', 'key2'])\n tm.assert_frame_equal(joined, exp)\n\n def test_merge_index_singlekey_right_vs_left(self):\n left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],\n 'v1': np.random.randn(7)})\n right = DataFrame({'v2': np.random.randn(4)},\n index=['d', 'b', 'c', 'a'])\n\n merged1 = merge(left, right, left_on='key',\n right_index=True, how='left', sort=False)\n merged2 = merge(right, left, right_on='key',\n left_index=True, how='right', sort=False)\n assert_frame_equal(merged1, merged2.loc[:, merged1.columns])\n\n merged1 = merge(left, right, left_on='key',\n right_index=True, how='left', sort=True)\n merged2 = merge(right, left, right_on='key',\n left_index=True, how='right', sort=True)\n assert_frame_equal(merged1, merged2.loc[:, merged1.columns])\n\n def test_merge_index_singlekey_inner(self):\n left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],\n 'v1': np.random.randn(7)})\n right = DataFrame({'v2': np.random.randn(4)},\n index=['d', 'b', 'c', 'a'])\n\n # inner join\n result = merge(left, right, left_on='key', right_index=True,\n how='inner')\n expected = left.join(right, on='key').loc[result.index]\n assert_frame_equal(result, expected)\n\n result = merge(right, left, right_on='key', left_index=True,\n how='inner')\n expected = left.join(right, 
on='key').loc[result.index]\n assert_frame_equal(result, expected.loc[:, result.columns])\n\n def test_merge_misspecified(self):\n self.assertRaises(ValueError, merge, self.left, self.right,\n left_index=True)\n self.assertRaises(ValueError, merge, self.left, self.right,\n right_index=True)\n\n self.assertRaises(ValueError, merge, self.left, self.left,\n left_on='key', on='key')\n\n self.assertRaises(ValueError, merge, self.df, self.df2,\n left_on=['key1'], right_on=['key1', 'key2'])\n\n def test_index_and_on_parameters_confusion(self):\n self.assertRaises(ValueError, merge, self.df, self.df2, how='left',\n left_index=False, right_index=['key1', 'key2'])\n self.assertRaises(ValueError, merge, self.df, self.df2, how='left',\n left_index=['key1', 'key2'], right_index=False)\n self.assertRaises(ValueError, merge, self.df, self.df2, how='left',\n left_index=['key1', 'key2'],\n right_index=['key1', 'key2'])\n\n def test_merge_overlap(self):\n merged = merge(self.left, self.left, on='key')\n exp_len = (self.left['key'].value_counts() ** 2).sum()\n self.assertEqual(len(merged), exp_len)\n self.assertIn('v1_x', merged)\n self.assertIn('v1_y', merged)\n\n def test_merge_different_column_key_names(self):\n left = DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n 'value': [1, 2, 3, 4]})\n right = DataFrame({'rkey': ['foo', 'bar', 'qux', 'foo'],\n 'value': [5, 6, 7, 8]})\n\n merged = left.merge(right, left_on='lkey', right_on='rkey',\n how='outer', sort=True)\n\n exp = pd.Series(['bar', 'baz', 'foo', 'foo', 'foo', 'foo', np.nan],\n name='lkey')\n tm.assert_series_equal(merged['lkey'], exp)\n\n exp = pd.Series(['bar', np.nan, 'foo', 'foo', 'foo', 'foo', 'qux'],\n name='rkey')\n tm.assert_series_equal(merged['rkey'], exp)\n\n exp = pd.Series([2, 3, 1, 1, 4, 4, np.nan], name='value_x')\n tm.assert_series_equal(merged['value_x'], exp)\n\n exp = pd.Series([6, np.nan, 5, 8, 5, 8, 7], name='value_y')\n tm.assert_series_equal(merged['value_y'], exp)\n\n def test_merge_copy(self):\n 
left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))\n right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))\n\n merged = merge(left, right, left_index=True,\n right_index=True, copy=True)\n\n merged['a'] = 6\n self.assertTrue((left['a'] == 0).all())\n\n merged['d'] = 'peekaboo'\n self.assertTrue((right['d'] == 'bar').all())\n\n def test_merge_nocopy(self):\n left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))\n right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))\n\n merged = merge(left, right, left_index=True,\n right_index=True, copy=False)\n\n merged['a'] = 6\n self.assertTrue((left['a'] == 6).all())\n\n merged['d'] = 'peekaboo'\n self.assertTrue((right['d'] == 'peekaboo').all())\n\n def test_intelligently_handle_join_key(self):\n # #733, be a bit more 1337 about not returning unconsolidated DataFrame\n\n left = DataFrame({'key': [1, 1, 2, 2, 3],\n 'value': lrange(5)}, columns=['value', 'key'])\n right = DataFrame({'key': [1, 1, 2, 3, 4, 5],\n 'rvalue': lrange(6)})\n\n joined = merge(left, right, on='key', how='outer')\n expected = DataFrame({'key': [1, 1, 1, 1, 2, 2, 3, 4, 5],\n 'value': np.array([0, 0, 1, 1, 2, 3, 4,\n np.nan, np.nan]),\n 'rvalue': [0, 1, 0, 1, 2, 2, 3, 4, 5]},\n columns=['value', 'key', 'rvalue'])\n assert_frame_equal(joined, expected)\n\n def test_merge_join_key_dtype_cast(self):\n # #8596\n\n df1 = DataFrame({'key': [1], 'v1': [10]})\n df2 = DataFrame({'key': [2], 'v1': [20]})\n df = merge(df1, df2, how='outer')\n self.assertEqual(df['key'].dtype, 'int64')\n\n df1 = DataFrame({'key': [True], 'v1': [1]})\n df2 = DataFrame({'key': [False], 'v1': [0]})\n df = merge(df1, df2, how='outer')\n\n # GH13169\n # this really should be bool\n self.assertEqual(df['key'].dtype, 'object')\n\n df1 = DataFrame({'val': [1]})\n df2 = DataFrame({'val': [2]})\n lkey = np.array([1])\n rkey = np.array([2])\n df = merge(df1, df2, left_on=lkey, right_on=rkey, how='outer')\n self.assertEqual(df['key_0'].dtype, 'int64')\n\n def 
test_handle_join_key_pass_array(self):\n left = DataFrame({'key': [1, 1, 2, 2, 3],\n 'value': lrange(5)}, columns=['value', 'key'])\n right = DataFrame({'rvalue': lrange(6)})\n key = np.array([1, 1, 2, 3, 4, 5])\n\n merged = merge(left, right, left_on='key', right_on=key, how='outer')\n merged2 = merge(right, left, left_on=key, right_on='key', how='outer')\n\n assert_series_equal(merged['key'], merged2['key'])\n self.assertTrue(merged['key'].notnull().all())\n self.assertTrue(merged2['key'].notnull().all())\n\n left = DataFrame({'value': lrange(5)}, columns=['value'])\n right = DataFrame({'rvalue': lrange(6)})\n lkey = np.array([1, 1, 2, 2, 3])\n rkey = np.array([1, 1, 2, 3, 4, 5])\n\n merged = merge(left, right, left_on=lkey, right_on=rkey, how='outer')\n self.assert_series_equal(merged['key_0'],\n Series([1, 1, 1, 1, 2, 2, 3, 4, 5],\n name='key_0'))\n\n left = DataFrame({'value': lrange(3)})\n right = DataFrame({'rvalue': lrange(6)})\n\n key = np.array([0, 1, 1, 2, 2, 3], dtype=np.int64)\n merged = merge(left, right, left_index=True, right_on=key, how='outer')\n self.assert_series_equal(merged['key_0'], Series(key, name='key_0'))\n\n def test_no_overlap_more_informative_error(self):\n dt = datetime.now()\n df1 = DataFrame({'x': ['a']}, index=[dt])\n\n df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])\n self.assertRaises(MergeError, merge, df1, df2)\n\n def test_merge_non_unique_indexes(self):\n\n dt = datetime(2012, 5, 1)\n dt2 = datetime(2012, 5, 2)\n dt3 = datetime(2012, 5, 3)\n dt4 = datetime(2012, 5, 4)\n\n df1 = DataFrame({'x': ['a']}, index=[dt])\n df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])\n _check_merge(df1, df2)\n\n # Not monotonic\n df1 = DataFrame({'x': ['a', 'b', 'q']}, index=[dt2, dt, dt4])\n df2 = DataFrame({'y': ['c', 'd', 'e', 'f', 'g', 'h']},\n index=[dt3, dt3, dt2, dt2, dt, dt])\n _check_merge(df1, df2)\n\n df1 = DataFrame({'x': ['a', 'b']}, index=[dt, dt])\n df2 = DataFrame({'y': ['c', 'd']}, index=[dt, dt])\n _check_merge(df1, 
df2)\n\n def test_merge_non_unique_index_many_to_many(self):\n dt = datetime(2012, 5, 1)\n dt2 = datetime(2012, 5, 2)\n dt3 = datetime(2012, 5, 3)\n df1 = DataFrame({'x': ['a', 'b', 'c', 'd']},\n index=[dt2, dt2, dt, dt])\n df2 = DataFrame({'y': ['e', 'f', 'g', ' h', 'i']},\n index=[dt2, dt2, dt3, dt, dt])\n _check_merge(df1, df2)\n\n def test_left_merge_empty_dataframe(self):\n left = DataFrame({'key': [1], 'value': [2]})\n right = DataFrame({'key': []})\n\n result = merge(left, right, on='key', how='left')\n assert_frame_equal(result, left)\n\n result = merge(right, left, on='key', how='right')\n assert_frame_equal(result, left)\n\n def test_merge_left_empty_right_empty(self):\n # GH 10824\n left = pd.DataFrame([], columns=['a', 'b', 'c'])\n right = pd.DataFrame([], columns=['x', 'y', 'z'])\n\n exp_in = pd.DataFrame([], columns=['a', 'b', 'c', 'x', 'y', 'z'],\n index=pd.Index([], dtype=object),\n dtype=object)\n\n for kwarg in [dict(left_index=True, right_index=True),\n dict(left_index=True, right_on='x'),\n dict(left_on='a', right_index=True),\n dict(left_on='a', right_on='x')]:\n\n result = pd.merge(left, right, how='inner', **kwarg)\n tm.assert_frame_equal(result, exp_in)\n result = pd.merge(left, right, how='left', **kwarg)\n tm.assert_frame_equal(result, exp_in)\n result = pd.merge(left, right, how='right', **kwarg)\n tm.assert_frame_equal(result, exp_in)\n result = pd.merge(left, right, how='outer', **kwarg)\n tm.assert_frame_equal(result, exp_in)\n\n def test_merge_left_empty_right_notempty(self):\n # GH 10824\n left = pd.DataFrame([], columns=['a', 'b', 'c'])\n right = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=['x', 'y', 'z'])\n\n exp_out = pd.DataFrame({'a': np.array([np.nan] * 3, dtype=object),\n 'b': np.array([np.nan] * 3, dtype=object),\n 'c': np.array([np.nan] * 3, dtype=object),\n 'x': [1, 4, 7],\n 'y': [2, 5, 8],\n 'z': [3, 6, 9]},\n columns=['a', 'b', 'c', 'x', 'y', 'z'])\n exp_in = exp_out[0:0] # make empty DataFrame keeping 
dtype\n # result will have object dtype\n exp_in.index = exp_in.index.astype(object)\n\n def check1(exp, kwarg):\n result = pd.merge(left, right, how='inner', **kwarg)\n tm.assert_frame_equal(result, exp)\n result = pd.merge(left, right, how='left', **kwarg)\n tm.assert_frame_equal(result, exp)\n\n def check2(exp, kwarg):\n result = pd.merge(left, right, how='right', **kwarg)\n tm.assert_frame_equal(result, exp)\n result = pd.merge(left, right, how='outer', **kwarg)\n tm.assert_frame_equal(result, exp)\n\n for kwarg in [dict(left_index=True, right_index=True),\n dict(left_index=True, right_on='x')]:\n check1(exp_in, kwarg)\n check2(exp_out, kwarg)\n\n kwarg = dict(left_on='a', right_index=True)\n check1(exp_in, kwarg)\n exp_out['a'] = [0, 1, 2]\n check2(exp_out, kwarg)\n\n kwarg = dict(left_on='a', right_on='x')\n check1(exp_in, kwarg)\n exp_out['a'] = np.array([np.nan] * 3, dtype=object)\n check2(exp_out, kwarg)\n\n def test_merge_left_notempty_right_empty(self):\n # GH 10824\n left = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=['a', 'b', 'c'])\n right = pd.DataFrame([], columns=['x', 'y', 'z'])\n\n exp_out = pd.DataFrame({'a': [1, 4, 7],\n 'b': [2, 5, 8],\n 'c': [3, 6, 9],\n 'x': np.array([np.nan] * 3, dtype=object),\n 'y': np.array([np.nan] * 3, dtype=object),\n 'z': np.array([np.nan] * 3, dtype=object)},\n columns=['a', 'b', 'c', 'x', 'y', 'z'])\n exp_in = exp_out[0:0] # make empty DataFrame keeping dtype\n # result will have object dtype\n exp_in.index = exp_in.index.astype(object)\n\n def check1(exp, kwarg):\n result = pd.merge(left, right, how='inner', **kwarg)\n tm.assert_frame_equal(result, exp)\n result = pd.merge(left, right, how='right', **kwarg)\n tm.assert_frame_equal(result, exp)\n\n def check2(exp, kwarg):\n result = pd.merge(left, right, how='left', **kwarg)\n tm.assert_frame_equal(result, exp)\n result = pd.merge(left, right, how='outer', **kwarg)\n tm.assert_frame_equal(result, exp)\n\n for kwarg in [dict(left_index=True, 
right_index=True),\n dict(left_index=True, right_on='x'),\n dict(left_on='a', right_index=True),\n dict(left_on='a', right_on='x')]:\n check1(exp_in, kwarg)\n check2(exp_out, kwarg)\n\n def test_merge_nosort(self):\n # #2098, anything to do?\n\n from datetime import datetime\n\n d = {\"var1\": np.random.randint(0, 10, size=10),\n \"var2\": np.random.randint(0, 10, size=10),\n \"var3\": [datetime(2012, 1, 12), datetime(2011, 2, 4),\n datetime(\n 2010, 2, 3), datetime(2012, 1, 12),\n datetime(\n 2011, 2, 4), datetime(2012, 4, 3),\n datetime(\n 2012, 3, 4), datetime(2008, 5, 1),\n datetime(2010, 2, 3), datetime(2012, 2, 3)]}\n df = DataFrame.from_dict(d)\n var3 = df.var3.unique()\n var3.sort()\n new = DataFrame.from_dict({\"var3\": var3,\n \"var8\": np.random.random(7)})\n\n result = df.merge(new, on=\"var3\", sort=False)\n exp = merge(df, new, on='var3', sort=False)\n assert_frame_equal(result, exp)\n\n self.assertTrue((df.var3.unique() == result.var3.unique()).all())\n\n def test_merge_nan_right(self):\n df1 = DataFrame({\"i1\": [0, 1], \"i2\": [0, 1]})\n df2 = DataFrame({\"i1\": [0], \"i3\": [0]})\n result = df1.join(df2, on=\"i1\", rsuffix=\"_\")\n expected = (DataFrame({'i1': {0: 0.0, 1: 1}, 'i2': {0: 0, 1: 1},\n 'i1_': {0: 0, 1: np.nan},\n 'i3': {0: 0.0, 1: np.nan},\n None: {0: 0, 1: 0}})\n .set_index(None)\n .reset_index()[['i1', 'i2', 'i1_', 'i3']])\n assert_frame_equal(result, expected, check_dtype=False)\n\n df1 = DataFrame({\"i1\": [0, 1], \"i2\": [0.5, 1.5]})\n df2 = DataFrame({\"i1\": [0], \"i3\": [0.7]})\n result = df1.join(df2, rsuffix=\"_\", on='i1')\n expected = (DataFrame({'i1': {0: 0, 1: 1}, 'i1_': {0: 0.0, 1: nan},\n 'i2': {0: 0.5, 1: 1.5},\n 'i3': {0: 0.69999999999999996,\n 1: nan}})\n [['i1', 'i2', 'i1_', 'i3']])\n assert_frame_equal(result, expected)\n\n def test_merge_type(self):\n class NotADataFrame(DataFrame):\n\n @property\n def _constructor(self):\n return NotADataFrame\n\n nad = NotADataFrame(self.df)\n result = nad.merge(self.df2, 
on='key1')\n\n tm.assertIsInstance(result, NotADataFrame)\n\n def test_join_append_timedeltas(self):\n\n import datetime as dt\n from pandas import NaT\n\n # timedelta64 issues with join/merge\n # GH 5695\n\n d = {'d': dt.datetime(2013, 11, 5, 5, 56), 't': dt.timedelta(0, 22500)}\n df = DataFrame(columns=list('dt'))\n df = df.append(d, ignore_index=True)\n result = df.append(d, ignore_index=True)\n expected = DataFrame({'d': [dt.datetime(2013, 11, 5, 5, 56),\n dt.datetime(2013, 11, 5, 5, 56)],\n 't': [dt.timedelta(0, 22500),\n dt.timedelta(0, 22500)]})\n assert_frame_equal(result, expected)\n\n td = np.timedelta64(300000000)\n lhs = DataFrame(Series([td, td], index=[\"A\", \"B\"]))\n rhs = DataFrame(Series([td], index=[\"A\"]))\n\n result = lhs.join(rhs, rsuffix='r', how=\"left\")\n expected = DataFrame({'0': Series([td, td], index=list('AB')),\n '0r': Series([td, NaT], index=list('AB'))})\n assert_frame_equal(result, expected)\n\n def test_other_datetime_unit(self):\n # GH 13389\n df1 = pd.DataFrame({'entity_id': [101, 102]})\n s = pd.Series([None, None], index=[101, 102], name='days')\n\n for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',\n 'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',\n 'datetime64[ns]']:\n\n df2 = s.astype(dtype).to_frame('days')\n # coerces to datetime64[ns], thus sholuld not be affected\n self.assertEqual(df2['days'].dtype, 'datetime64[ns]')\n\n result = df1.merge(df2, left_on='entity_id', right_index=True)\n\n exp = pd.DataFrame({'entity_id': [101, 102],\n 'days': np.array(['nat', 'nat'],\n dtype='datetime64[ns]')},\n columns=['entity_id', 'days'])\n tm.assert_frame_equal(result, exp)\n\n def test_other_timedelta_unit(self):\n # GH 13389\n df1 = pd.DataFrame({'entity_id': [101, 102]})\n s = pd.Series([None, None], index=[101, 102], name='days')\n\n for dtype in ['timedelta64[D]', 'timedelta64[h]', 'timedelta64[m]',\n 'timedelta64[s]', 'timedelta64[ms]', 'timedelta64[us]',\n 'timedelta64[ns]']:\n\n df2 = 
s.astype(dtype).to_frame('days')\n self.assertEqual(df2['days'].dtype, dtype)\n\n result = df1.merge(df2, left_on='entity_id', right_index=True)\n\n exp = pd.DataFrame({'entity_id': [101, 102],\n 'days': np.array(['nat', 'nat'],\n dtype=dtype)},\n columns=['entity_id', 'days'])\n tm.assert_frame_equal(result, exp)\n\n def test_overlapping_columns_error_message(self):\n df = DataFrame({'key': [1, 2, 3],\n 'v1': [4, 5, 6],\n 'v2': [7, 8, 9]})\n df2 = DataFrame({'key': [1, 2, 3],\n 'v1': [4, 5, 6],\n 'v2': [7, 8, 9]})\n\n df.columns = ['key', 'foo', 'foo']\n df2.columns = ['key', 'bar', 'bar']\n expected = DataFrame({'key': [1, 2, 3],\n 'v1': [4, 5, 6],\n 'v2': [7, 8, 9],\n 'v3': [4, 5, 6],\n 'v4': [7, 8, 9]})\n expected.columns = ['key', 'foo', 'foo', 'bar', 'bar']\n assert_frame_equal(merge(df, df2), expected)\n\n # #2649, #10639\n df2.columns = ['key1', 'foo', 'foo']\n self.assertRaises(ValueError, merge, df, df2)\n\n def test_merge_on_datetime64tz(self):\n\n # GH11405\n left = pd.DataFrame({'key': pd.date_range('20151010', periods=2,\n tz='US/Eastern'),\n 'value': [1, 2]})\n right = pd.DataFrame({'key': pd.date_range('20151011', periods=3,\n tz='US/Eastern'),\n 'value': [1, 2, 3]})\n\n expected = DataFrame({'key': pd.date_range('20151010', periods=4,\n tz='US/Eastern'),\n 'value_x': [1, 2, np.nan, np.nan],\n 'value_y': [np.nan, 1, 2, 3]})\n result = pd.merge(left, right, on='key', how='outer')\n assert_frame_equal(result, expected)\n\n left = pd.DataFrame({'value': pd.date_range('20151010', periods=2,\n tz='US/Eastern'),\n 'key': [1, 2]})\n right = pd.DataFrame({'value': pd.date_range('20151011', periods=2,\n tz='US/Eastern'),\n 'key': [2, 3]})\n expected = DataFrame({\n 'value_x': list(pd.date_range('20151010', periods=2,\n tz='US/Eastern')) + [pd.NaT],\n 'value_y': [pd.NaT] + list(pd.date_range('20151011', periods=2,\n tz='US/Eastern')),\n 'key': [1, 2, 3]})\n result = pd.merge(left, right, on='key', how='outer')\n assert_frame_equal(result, expected)\n 
self.assertEqual(result['value_x'].dtype, 'datetime64[ns, US/Eastern]')\n self.assertEqual(result['value_y'].dtype, 'datetime64[ns, US/Eastern]')\n\n def test_merge_on_periods(self):\n left = pd.DataFrame({'key': pd.period_range('20151010', periods=2,\n freq='D'),\n 'value': [1, 2]})\n right = pd.DataFrame({'key': pd.period_range('20151011', periods=3,\n freq='D'),\n 'value': [1, 2, 3]})\n\n expected = DataFrame({'key': pd.period_range('20151010', periods=4,\n freq='D'),\n 'value_x': [1, 2, np.nan, np.nan],\n 'value_y': [np.nan, 1, 2, 3]})\n result = pd.merge(left, right, on='key', how='outer')\n assert_frame_equal(result, expected)\n\n left = pd.DataFrame({'value': pd.period_range('20151010', periods=2,\n freq='D'),\n 'key': [1, 2]})\n right = pd.DataFrame({'value': pd.period_range('20151011', periods=2,\n freq='D'),\n 'key': [2, 3]})\n\n exp_x = pd.period_range('20151010', periods=2, freq='D')\n exp_y = pd.period_range('20151011', periods=2, freq='D')\n expected = DataFrame({'value_x': list(exp_x) + [pd.NaT],\n 'value_y': [pd.NaT] + list(exp_y),\n 'key': [1, 2, 3]})\n result = pd.merge(left, right, on='key', how='outer')\n assert_frame_equal(result, expected)\n self.assertEqual(result['value_x'].dtype, 'object')\n self.assertEqual(result['value_y'].dtype, 'object')\n\n def test_indicator(self):\n # PR #10054. 
xref #7412 and closes #8790.\n df1 = DataFrame({'col1': [0, 1], 'col_left': [\n 'a', 'b'], 'col_conflict': [1, 2]})\n df1_copy = df1.copy()\n\n df2 = DataFrame({'col1': [1, 2, 3, 4, 5], 'col_right': [2, 2, 2, 2, 2],\n 'col_conflict': [1, 2, 3, 4, 5]})\n df2_copy = df2.copy()\n\n df_result = DataFrame({\n 'col1': [0, 1, 2, 3, 4, 5],\n 'col_conflict_x': [1, 2, np.nan, np.nan, np.nan, np.nan],\n 'col_left': ['a', 'b', np.nan, np.nan, np.nan, np.nan],\n 'col_conflict_y': [np.nan, 1, 2, 3, 4, 5],\n 'col_right': [np.nan, 2, 2, 2, 2, 2]})\n df_result['_merge'] = Categorical(\n ['left_only', 'both', 'right_only',\n 'right_only', 'right_only', 'right_only'],\n categories=['left_only', 'right_only', 'both'])\n\n df_result = df_result[['col1', 'col_conflict_x', 'col_left',\n 'col_conflict_y', 'col_right', '_merge']]\n\n test = merge(df1, df2, on='col1', how='outer', indicator=True)\n assert_frame_equal(test, df_result)\n test = df1.merge(df2, on='col1', how='outer', indicator=True)\n assert_frame_equal(test, df_result)\n\n # No side effects\n assert_frame_equal(df1, df1_copy)\n assert_frame_equal(df2, df2_copy)\n\n # Check with custom name\n df_result_custom_name = df_result\n df_result_custom_name = df_result_custom_name.rename(\n columns={'_merge': 'custom_name'})\n\n test_custom_name = merge(\n df1, df2, on='col1', how='outer', indicator='custom_name')\n assert_frame_equal(test_custom_name, df_result_custom_name)\n test_custom_name = df1.merge(\n df2, on='col1', how='outer', indicator='custom_name')\n assert_frame_equal(test_custom_name, df_result_custom_name)\n\n # Check only accepts strings and booleans\n with tm.assertRaises(ValueError):\n merge(df1, df2, on='col1', how='outer', indicator=5)\n with tm.assertRaises(ValueError):\n df1.merge(df2, on='col1', how='outer', indicator=5)\n\n # Check result integrity\n\n test2 = merge(df1, df2, on='col1', how='left', indicator=True)\n self.assertTrue((test2._merge != 'right_only').all())\n test2 = df1.merge(df2, on='col1', 
how='left', indicator=True)\n self.assertTrue((test2._merge != 'right_only').all())\n\n test3 = merge(df1, df2, on='col1', how='right', indicator=True)\n self.assertTrue((test3._merge != 'left_only').all())\n test3 = df1.merge(df2, on='col1', how='right', indicator=True)\n self.assertTrue((test3._merge != 'left_only').all())\n\n test4 = merge(df1, df2, on='col1', how='inner', indicator=True)\n self.assertTrue((test4._merge == 'both').all())\n test4 = df1.merge(df2, on='col1', how='inner', indicator=True)\n self.assertTrue((test4._merge == 'both').all())\n\n # Check if working name in df\n for i in ['_right_indicator', '_left_indicator', '_merge']:\n df_badcolumn = DataFrame({'col1': [1, 2], i: [2, 2]})\n\n with tm.assertRaises(ValueError):\n merge(df1, df_badcolumn, on='col1',\n how='outer', indicator=True)\n with tm.assertRaises(ValueError):\n df1.merge(df_badcolumn, on='col1', how='outer', indicator=True)\n\n # Check for name conflict with custom name\n df_badcolumn = DataFrame(\n {'col1': [1, 2], 'custom_column_name': [2, 2]})\n\n with tm.assertRaises(ValueError):\n merge(df1, df_badcolumn, on='col1', how='outer',\n indicator='custom_column_name')\n with tm.assertRaises(ValueError):\n df1.merge(df_badcolumn, on='col1', how='outer',\n indicator='custom_column_name')\n\n # Merge on multiple columns\n df3 = DataFrame({'col1': [0, 1], 'col2': ['a', 'b']})\n\n df4 = DataFrame({'col1': [1, 1, 3], 'col2': ['b', 'x', 'y']})\n\n hand_coded_result = DataFrame({'col1': [0, 1, 1, 3],\n 'col2': ['a', 'b', 'x', 'y']})\n hand_coded_result['_merge'] = Categorical(\n ['left_only', 'both', 'right_only', 'right_only'],\n categories=['left_only', 'right_only', 'both'])\n\n test5 = merge(df3, df4, on=['col1', 'col2'],\n how='outer', indicator=True)\n assert_frame_equal(test5, hand_coded_result)\n test5 = df3.merge(df4, on=['col1', 'col2'],\n how='outer', indicator=True)\n assert_frame_equal(test5, hand_coded_result)\n\n\ndef _check_merge(x, y):\n for how in ['inner', 'left', 
'outer']:\n result = x.join(y, how=how)\n\n expected = merge(x.reset_index(), y.reset_index(), how=how,\n sort=True)\n expected = expected.set_index('index')\n\n # TODO check_names on merge?\n assert_frame_equal(result, expected, check_names=False)\n\n\nclass TestMergeMulti(tm.TestCase):\n\n def setUp(self):\n self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],\n ['one', 'two', 'three']],\n labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],\n [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],\n names=['first', 'second'])\n self.to_join = DataFrame(np.random.randn(10, 3), index=self.index,\n columns=['j_one', 'j_two', 'j_three'])\n\n # a little relevant example with NAs\n key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',\n 'qux', 'snap']\n key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',\n 'three', 'one']\n\n data = np.random.randn(len(key1))\n self.data = DataFrame({'key1': key1, 'key2': key2,\n 'data': data})\n\n def test_merge_on_multikey(self):\n joined = self.data.join(self.to_join, on=['key1', 'key2'])\n\n join_key = Index(lzip(self.data['key1'], self.data['key2']))\n indexer = self.to_join.index.get_indexer(join_key)\n ex_values = self.to_join.values.take(indexer, axis=0)\n ex_values[indexer == -1] = np.nan\n expected = self.data.join(DataFrame(ex_values,\n columns=self.to_join.columns))\n\n # TODO: columns aren't in the same order yet\n assert_frame_equal(joined, expected.loc[:, joined.columns])\n\n left = self.data.join(self.to_join, on=['key1', 'key2'], sort=True)\n right = expected.loc[:, joined.columns].sort_values(['key1', 'key2'],\n kind='mergesort')\n assert_frame_equal(left, right)\n\n def test_left_join_multi_index(self):\n icols = ['1st', '2nd', '3rd']\n\n def bind_cols(df):\n iord = lambda a: 0 if a != a else ord(a)\n f = lambda ts: ts.map(iord) - ord('a')\n return (f(df['1st']) + f(df['3rd']) * 1e2 +\n df['2nd'].fillna(0) * 1e4)\n\n def run_asserts(left, right):\n for sort in [False, True]:\n res = left.join(right, on=icols, 
how='left', sort=sort)\n\n self.assertTrue(len(left) < len(res) + 1)\n self.assertFalse(res['4th'].isnull().any())\n self.assertFalse(res['5th'].isnull().any())\n\n tm.assert_series_equal(\n res['4th'], - res['5th'], check_names=False)\n result = bind_cols(res.iloc[:, :-2])\n tm.assert_series_equal(res['4th'], result, check_names=False)\n self.assertTrue(result.name is None)\n\n if sort:\n tm.assert_frame_equal(\n res, res.sort_values(icols, kind='mergesort'))\n\n out = merge(left, right.reset_index(), on=icols,\n sort=sort, how='left')\n\n res.index = np.arange(len(res))\n tm.assert_frame_equal(out, res)\n\n lc = list(map(chr, np.arange(ord('a'), ord('z') + 1)))\n left = DataFrame(np.random.choice(lc, (5000, 2)),\n columns=['1st', '3rd'])\n left.insert(1, '2nd', np.random.randint(0, 1000, len(left)))\n\n i = np.random.permutation(len(left))\n right = left.iloc[i].copy()\n\n left['4th'] = bind_cols(left)\n right['5th'] = - bind_cols(right)\n right.set_index(icols, inplace=True)\n\n run_asserts(left, right)\n\n # inject some nulls\n left.loc[1::23, '1st'] = np.nan\n left.loc[2::37, '2nd'] = np.nan\n left.loc[3::43, '3rd'] = np.nan\n left['4th'] = bind_cols(left)\n\n i = np.random.permutation(len(left))\n right = left.iloc[i, :-1]\n right['5th'] = - bind_cols(right)\n right.set_index(icols, inplace=True)\n\n run_asserts(left, right)\n\n def test_merge_right_vs_left(self):\n # compare left vs right merge with multikey\n for sort in [False, True]:\n merged1 = self.data.merge(self.to_join, left_on=['key1', 'key2'],\n right_index=True, how='left', sort=sort)\n\n merged2 = self.to_join.merge(self.data, right_on=['key1', 'key2'],\n left_index=True, how='right',\n sort=sort)\n\n merged2 = merged2.loc[:, merged1.columns]\n assert_frame_equal(merged1, merged2)\n\n def test_compress_group_combinations(self):\n\n # ~ 40000000 possible unique groups\n key1 = tm.rands_array(10, 10000)\n key1 = np.tile(key1, 2)\n key2 = key1[::-1]\n\n df = DataFrame({'key1': key1, 'key2': key2,\n 
'value1': np.random.randn(20000)})\n\n df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2],\n 'value2': np.random.randn(10000)})\n\n # just to hit the label compression code path\n merge(df, df2, how='outer')\n\n def test_left_join_index_preserve_order(self):\n\n left = DataFrame({'k1': [0, 1, 2] * 8,\n 'k2': ['foo', 'bar'] * 12,\n 'v': np.array(np.arange(24), dtype=np.int64)})\n\n index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])\n right = DataFrame({'v2': [5, 7]}, index=index)\n\n result = left.join(right, on=['k1', 'k2'])\n\n expected = left.copy()\n expected['v2'] = np.nan\n expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5\n expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7\n\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(\n result.sort_values(['k1', 'k2'], kind='mergesort'),\n left.join(right, on=['k1', 'k2'], sort=True))\n\n # test join with multi dtypes blocks\n left = DataFrame({'k1': [0, 1, 2] * 8,\n 'k2': ['foo', 'bar'] * 12,\n 'k3': np.array([0, 1, 2] * 8, dtype=np.float32),\n 'v': np.array(np.arange(24), dtype=np.int32)})\n\n index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])\n right = DataFrame({'v2': [5, 7]}, index=index)\n\n result = left.join(right, on=['k1', 'k2'])\n\n expected = left.copy()\n expected['v2'] = np.nan\n expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5\n expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7\n\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(\n result.sort_values(['k1', 'k2'], kind='mergesort'),\n left.join(right, on=['k1', 'k2'], sort=True))\n\n # do a right join for an extra test\n joined = merge(right, left, left_index=True,\n right_on=['k1', 'k2'], how='right')\n tm.assert_frame_equal(joined.loc[:, expected.columns], expected)\n\n def test_left_join_index_multi_match_multiindex(self):\n left = DataFrame([\n ['X', 'Y', 'C', 'a'],\n ['W', 'Y', 'C', 'e'],\n ['V', 'Q', 'A', 'h'],\n ['V', 'R', 'D', 
'i'],\n ['X', 'Y', 'D', 'b'],\n ['X', 'Y', 'A', 'c'],\n ['W', 'Q', 'B', 'f'],\n ['W', 'R', 'C', 'g'],\n ['V', 'Y', 'C', 'j'],\n ['X', 'Y', 'B', 'd']],\n columns=['cola', 'colb', 'colc', 'tag'],\n index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8])\n\n right = DataFrame([\n ['W', 'R', 'C', 0],\n ['W', 'Q', 'B', 3],\n ['W', 'Q', 'B', 8],\n ['X', 'Y', 'A', 1],\n ['X', 'Y', 'A', 4],\n ['X', 'Y', 'B', 5],\n ['X', 'Y', 'C', 6],\n ['X', 'Y', 'C', 9],\n ['X', 'Q', 'C', -6],\n ['X', 'R', 'C', -9],\n ['V', 'Y', 'C', 7],\n ['V', 'R', 'D', 2],\n ['V', 'R', 'D', -1],\n ['V', 'Q', 'A', -3]],\n columns=['col1', 'col2', 'col3', 'val'])\n\n right.set_index(['col1', 'col2', 'col3'], inplace=True)\n result = left.join(right, on=['cola', 'colb', 'colc'], how='left')\n\n expected = DataFrame([\n ['X', 'Y', 'C', 'a', 6],\n ['X', 'Y', 'C', 'a', 9],\n ['W', 'Y', 'C', 'e', nan],\n ['V', 'Q', 'A', 'h', -3],\n ['V', 'R', 'D', 'i', 2],\n ['V', 'R', 'D', 'i', -1],\n ['X', 'Y', 'D', 'b', nan],\n ['X', 'Y', 'A', 'c', 1],\n ['X', 'Y', 'A', 'c', 4],\n ['W', 'Q', 'B', 'f', 3],\n ['W', 'Q', 'B', 'f', 8],\n ['W', 'R', 'C', 'g', 0],\n ['V', 'Y', 'C', 'j', 7],\n ['X', 'Y', 'B', 'd', 5]],\n columns=['cola', 'colb', 'colc', 'tag', 'val'],\n index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8])\n\n tm.assert_frame_equal(result, expected)\n\n result = left.join(right, on=['cola', 'colb', 'colc'],\n how='left', sort=True)\n\n tm.assert_frame_equal(\n result,\n expected.sort_values(['cola', 'colb', 'colc'], kind='mergesort'))\n\n # GH7331 - maintain left frame order in left merge\n right.reset_index(inplace=True)\n right.columns = left.columns[:3].tolist() + right.columns[-1:].tolist()\n result = merge(left, right, how='left', on=left.columns[:-1].tolist())\n expected.index = np.arange(len(expected))\n tm.assert_frame_equal(result, expected)\n\n def test_left_join_index_multi_match(self):\n left = DataFrame([\n ['c', 0],\n ['b', 1],\n ['a', 2],\n ['b', 3]],\n columns=['tag', 'val'],\n index=[2, 0, 1, 3])\n\n right = 
DataFrame([\n ['a', 'v'],\n ['c', 'w'],\n ['c', 'x'],\n ['d', 'y'],\n ['a', 'z'],\n ['c', 'r'],\n ['e', 'q'],\n ['c', 's']],\n columns=['tag', 'char'])\n\n right.set_index('tag', inplace=True)\n result = left.join(right, on='tag', how='left')\n\n expected = DataFrame([\n ['c', 0, 'w'],\n ['c', 0, 'x'],\n ['c', 0, 'r'],\n ['c', 0, 's'],\n ['b', 1, nan],\n ['a', 2, 'v'],\n ['a', 2, 'z'],\n ['b', 3, nan]],\n columns=['tag', 'val', 'char'],\n index=[2, 2, 2, 2, 0, 1, 1, 3])\n\n tm.assert_frame_equal(result, expected)\n\n result = left.join(right, on='tag', how='left', sort=True)\n tm.assert_frame_equal(\n result, expected.sort_values('tag', kind='mergesort'))\n\n # GH7331 - maintain left frame order in left merge\n result = merge(left, right.reset_index(), how='left', on='tag')\n expected.index = np.arange(len(expected))\n tm.assert_frame_equal(result, expected)\n\n def test_left_merge_na_buglet(self):\n left = DataFrame({'id': list('abcde'), 'v1': randn(5),\n 'v2': randn(5), 'dummy': list('abcde'),\n 'v3': randn(5)},\n columns=['id', 'v1', 'v2', 'dummy', 'v3'])\n right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan],\n 'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]})\n\n merged = merge(left, right, on='id', how='left')\n\n rdf = right.drop(['id'], axis=1)\n expected = left.join(rdf)\n tm.assert_frame_equal(merged, expected)\n\n def test_merge_na_keys(self):\n data = [[1950, \"A\", 1.5],\n [1950, \"B\", 1.5],\n [1955, \"B\", 1.5],\n [1960, \"B\", np.nan],\n [1970, \"B\", 4.],\n [1950, \"C\", 4.],\n [1960, \"C\", np.nan],\n [1965, \"C\", 3.],\n [1970, \"C\", 4.]]\n\n frame = DataFrame(data, columns=[\"year\", \"panel\", \"data\"])\n\n other_data = [[1960, 'A', np.nan],\n [1970, 'A', np.nan],\n [1955, 'A', np.nan],\n [1965, 'A', np.nan],\n [1965, 'B', np.nan],\n [1955, 'C', np.nan]]\n other = DataFrame(other_data, columns=['year', 'panel', 'data'])\n\n result = frame.merge(other, how='outer')\n\n expected = frame.fillna(-999).merge(other.fillna(-999), how='outer')\n 
expected = expected.replace(-999, np.nan)\n\n tm.assert_frame_equal(result, expected)\n\n def test_join_multi_levels(self):\n\n # GH 3662\n # merge multi-levels\n household = (\n DataFrame(\n dict(household_id=[1, 2, 3],\n male=[0, 1, 0],\n wealth=[196087.3, 316478.7, 294750]),\n columns=['household_id', 'male', 'wealth'])\n .set_index('household_id'))\n portfolio = (\n DataFrame(\n dict(household_id=[1, 2, 2, 3, 3, 3, 4],\n asset_id=[\"nl0000301109\", \"nl0000289783\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"lu0197800237\", \"nl0000289965\",\n np.nan],\n name=[\"ABN Amro\", \"Robeco\", \"Royal Dutch Shell\",\n \"Royal Dutch Shell\",\n \"AAB Eastern Europe Equity Fund\",\n \"Postbank BioTech Fonds\", np.nan],\n share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]),\n columns=['household_id', 'asset_id', 'name', 'share'])\n .set_index(['household_id', 'asset_id']))\n result = household.join(portfolio, how='inner')\n expected = (\n DataFrame(\n dict(male=[0, 1, 1, 0, 0, 0],\n wealth=[196087.3, 316478.7, 316478.7,\n 294750.0, 294750.0, 294750.0],\n name=['ABN Amro', 'Robeco', 'Royal Dutch Shell',\n 'Royal Dutch Shell',\n 'AAB Eastern Europe Equity Fund',\n 'Postbank BioTech Fonds'],\n share=[1.00, 0.40, 0.60, 0.15, 0.60, 0.25],\n household_id=[1, 2, 2, 3, 3, 3],\n asset_id=['nl0000301109', 'nl0000289783', 'gb00b03mlx29',\n 'gb00b03mlx29', 'lu0197800237',\n 'nl0000289965']))\n .set_index(['household_id', 'asset_id'])\n .reindex(columns=['male', 'wealth', 'name', 'share']))\n assert_frame_equal(result, expected)\n\n assert_frame_equal(result, expected)\n\n # equivalency\n result2 = (merge(household.reset_index(), portfolio.reset_index(),\n on=['household_id'], how='inner')\n .set_index(['household_id', 'asset_id']))\n assert_frame_equal(result2, expected)\n\n result = household.join(portfolio, how='outer')\n expected = (concat([\n expected,\n (DataFrame(\n dict(share=[1.00]),\n index=MultiIndex.from_tuples(\n [(4, np.nan)],\n names=['household_id', 'asset_id'])))\n ], 
axis=0).reindex(columns=expected.columns))\n assert_frame_equal(result, expected)\n\n # invalid cases\n household.index.name = 'foo'\n\n def f():\n household.join(portfolio, how='inner')\n self.assertRaises(ValueError, f)\n\n portfolio2 = portfolio.copy()\n portfolio2.index.set_names(['household_id', 'foo'])\n\n def f():\n portfolio2.join(portfolio, how='inner')\n self.assertRaises(ValueError, f)\n\n def test_join_multi_levels2(self):\n\n # some more advanced merges\n # GH6360\n household = (\n DataFrame(\n dict(household_id=[1, 2, 2, 3, 3, 3, 4],\n asset_id=[\"nl0000301109\", \"nl0000301109\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"lu0197800237\", \"nl0000289965\",\n np.nan],\n share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]),\n columns=['household_id', 'asset_id', 'share'])\n .set_index(['household_id', 'asset_id']))\n\n log_return = DataFrame(dict(\n asset_id=[\"gb00b03mlx29\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"lu0197800237\", \"lu0197800237\"],\n t=[233, 234, 235, 180, 181],\n log_return=[.09604978, -.06524096, .03532373, .03025441, .036997]\n )).set_index([\"asset_id\", \"t\"])\n\n expected = (\n DataFrame(dict(\n household_id=[2, 2, 2, 3, 3, 3, 3, 3],\n asset_id=[\"gb00b03mlx29\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"gb00b03mlx29\",\n \"lu0197800237\", \"lu0197800237\"],\n t=[233, 234, 235, 233, 234, 235, 180, 181],\n share=[0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6],\n log_return=[.09604978, -.06524096, .03532373,\n .09604978, -.06524096, .03532373,\n .03025441, .036997]\n ))\n .set_index([\"household_id\", \"asset_id\", \"t\"])\n .reindex(columns=['share', 'log_return']))\n\n def f():\n household.join(log_return, how='inner')\n self.assertRaises(NotImplementedError, f)\n\n # this is the equivalency\n result = (merge(household.reset_index(), log_return.reset_index(),\n on=['asset_id'], how='inner')\n .set_index(['household_id', 'asset_id', 't']))\n assert_frame_equal(result, expected)\n\n expected = (\n 
DataFrame(dict(\n household_id=[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4],\n asset_id=[\"nl0000301109\", \"nl0000289783\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"gb00b03mlx29\",\n \"gb00b03mlx29\", \"gb00b03mlx29\", \"gb00b03mlx29\",\n \"lu0197800237\", \"lu0197800237\",\n \"nl0000289965\", None],\n t=[None, None, 233, 234, 235, 233, 234,\n 235, 180, 181, None, None],\n share=[1.0, 0.4, 0.6, 0.6, 0.6, 0.15,\n 0.15, 0.15, 0.6, 0.6, 0.25, 1.0],\n log_return=[None, None, .09604978, -.06524096, .03532373,\n .09604978, -.06524096, .03532373,\n .03025441, .036997, None, None]\n ))\n .set_index([\"household_id\", \"asset_id\", \"t\"]))\n\n def f():\n household.join(log_return, how='outer')\n self.assertRaises(NotImplementedError, f)\n\n\[email protected]\ndef df():\n return DataFrame(\n {'A': ['foo', 'bar'],\n 'B': Series(['foo', 'bar']).astype('category'),\n 'C': [1, 2],\n 'D': [1.0, 2.0],\n 'E': Series([1, 2], dtype='uint64'),\n 'F': Series([1, 2], dtype='int32')})\n\n\nclass TestMergeDtypes(object):\n\n def test_different(self, df):\n\n # we expect differences by kind\n # to be ok, while other differences should return object\n\n left = df\n for col in df.columns:\n right = DataFrame({'A': df[col]})\n result = pd.merge(left, right, on='A')\n assert is_object_dtype(result.A.dtype)\n\n @pytest.mark.parametrize('d1', [np.int64, np.int32,\n np.int16, np.int8, np.uint8])\n @pytest.mark.parametrize('d2', [np.int64, np.float64,\n np.float32, np.float16])\n def test_join_multi_dtypes(self, d1, d2):\n\n dtype1 = np.dtype(d1)\n dtype2 = np.dtype(d2)\n\n left = DataFrame({'k1': np.array([0, 1, 2] * 8, dtype=dtype1),\n 'k2': ['foo', 'bar'] * 12,\n 'v': np.array(np.arange(24), dtype=np.int64)})\n\n index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])\n right = DataFrame({'v2': np.array([5, 7], dtype=dtype2)}, index=index)\n\n result = left.join(right, on=['k1', 'k2'])\n\n expected = left.copy()\n\n if dtype2.kind == 'i':\n dtype2 = np.dtype('float64')\n expected['v2'] = 
np.array(np.nan, dtype=dtype2)\n expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5\n expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7\n\n tm.assert_frame_equal(result, expected)\n\n result = left.join(right, on=['k1', 'k2'], sort=True)\n expected.sort_values(['k1', 'k2'], kind='mergesort', inplace=True)\n tm.assert_frame_equal(result, expected)\n\n\[email protected]\ndef left():\n np.random.seed(1234)\n return DataFrame(\n {'X': Series(np.random.choice(\n ['foo', 'bar'],\n size=(10,))).astype('category', categories=['foo', 'bar']),\n 'Y': np.random.choice(['one', 'two', 'three'], size=(10,))})\n\n\[email protected]\ndef right():\n np.random.seed(1234)\n return DataFrame(\n {'X': Series(['foo', 'bar']).astype('category',\n categories=['foo', 'bar']),\n 'Z': [1, 2]})\n\n\nclass TestMergeCategorical(object):\n\n def test_identical(self, left):\n # merging on the same, should preserve dtypes\n merged = pd.merge(left, left, on='X')\n result = merged.dtypes.sort_index()\n expected = Series([CategoricalDtype(),\n np.dtype('O'),\n np.dtype('O')],\n index=['X', 'Y_x', 'Y_y'])\n assert_series_equal(result, expected)\n\n def test_basic(self, left, right):\n # we have matching Categorical dtypes in X\n # so should preserve the merged column\n merged = pd.merge(left, right, on='X')\n result = merged.dtypes.sort_index()\n expected = Series([CategoricalDtype(),\n np.dtype('O'),\n np.dtype('int64')],\n index=['X', 'Y', 'Z'])\n assert_series_equal(result, expected)\n\n def test_other_columns(self, left, right):\n # non-merge columns should preserve if possible\n right = right.assign(Z=right.Z.astype('category'))\n\n merged = pd.merge(left, right, on='X')\n result = merged.dtypes.sort_index()\n expected = Series([CategoricalDtype(),\n np.dtype('O'),\n CategoricalDtype()],\n index=['X', 'Y', 'Z'])\n assert_series_equal(result, expected)\n\n # categories are preserved\n assert left.X.values.is_dtype_equal(merged.X.values)\n assert 
right.Z.values.is_dtype_equal(merged.Z.values)\n\n @pytest.mark.parametrize(\n 'change', [lambda x: x,\n lambda x: x.astype('category',\n categories=['bar', 'foo']),\n lambda x: x.astype('category',\n categories=['foo', 'bar', 'bah']),\n lambda x: x.astype('category', ordered=True)])\n @pytest.mark.parametrize('how', ['inner', 'outer', 'left', 'right'])\n def test_dtype_on_merged_different(self, change, how, left, right):\n # our merging columns, X now has 2 different dtypes\n # so we must be object as a result\n\n X = change(right.X.astype('object'))\n right = right.assign(X=X)\n assert is_categorical_dtype(left.X.values)\n assert not left.X.values.is_dtype_equal(right.X.values)\n\n merged = pd.merge(left, right, on='X', how=how)\n\n result = merged.dtypes.sort_index()\n expected = Series([np.dtype('O'),\n np.dtype('O'),\n np.dtype('int64')],\n index=['X', 'Y', 'Z'])\n assert_series_equal(result, expected)\n", "from datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.util import testing as tm\nfrom pandas.compat import lrange\nfrom pandas._libs import tslib\nfrom pandas import (PeriodIndex, Series, DatetimeIndex,\n period_range, Period, _np_version_under1p9)\n\n\nclass TestGetItem(tm.TestCase):\n\n def setUp(self):\n pass\n\n def test_getitem(self):\n idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',\n name='idx')\n\n for idx in [idx1]:\n result = idx[0]\n self.assertEqual(result, pd.Period('2011-01-01', freq='D'))\n\n result = idx[-1]\n self.assertEqual(result, pd.Period('2011-01-31', freq='D'))\n\n result = idx[0:5]\n expected = pd.period_range('2011-01-01', '2011-01-05', freq='D',\n name='idx')\n self.assert_index_equal(result, expected)\n self.assertEqual(result.freq, expected.freq)\n self.assertEqual(result.freq, 'D')\n\n result = idx[0:10:2]\n expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',\n '2011-01-05',\n '2011-01-07', '2011-01-09'],\n freq='D', name='idx')\n self.assert_index_equal(result, expected)\n 
self.assertEqual(result.freq, expected.freq)\n self.assertEqual(result.freq, 'D')\n\n result = idx[-20:-5:3]\n expected = pd.PeriodIndex(['2011-01-12', '2011-01-15',\n '2011-01-18',\n '2011-01-21', '2011-01-24'],\n freq='D', name='idx')\n self.assert_index_equal(result, expected)\n self.assertEqual(result.freq, expected.freq)\n self.assertEqual(result.freq, 'D')\n\n result = idx[4::-1]\n expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03',\n '2011-01-02', '2011-01-01'],\n freq='D', name='idx')\n self.assert_index_equal(result, expected)\n self.assertEqual(result.freq, expected.freq)\n self.assertEqual(result.freq, 'D')\n\n def test_getitem_index(self):\n idx = period_range('2007-01', periods=10, freq='M', name='x')\n\n result = idx[[1, 3, 5]]\n exp = pd.PeriodIndex(['2007-02', '2007-04', '2007-06'],\n freq='M', name='x')\n tm.assert_index_equal(result, exp)\n\n result = idx[[True, True, False, False, False,\n True, True, False, False, False]]\n exp = pd.PeriodIndex(['2007-01', '2007-02', '2007-06', '2007-07'],\n freq='M', name='x')\n tm.assert_index_equal(result, exp)\n\n def test_getitem_partial(self):\n rng = period_range('2007-01', periods=50, freq='M')\n ts = Series(np.random.randn(len(rng)), rng)\n\n self.assertRaises(KeyError, ts.__getitem__, '2006')\n\n result = ts['2008']\n self.assertTrue((result.index.year == 2008).all())\n\n result = ts['2008':'2009']\n self.assertEqual(len(result), 24)\n\n result = ts['2008-1':'2009-12']\n self.assertEqual(len(result), 24)\n\n result = ts['2008Q1':'2009Q4']\n self.assertEqual(len(result), 24)\n\n result = ts[:'2009']\n self.assertEqual(len(result), 36)\n\n result = ts['2009':]\n self.assertEqual(len(result), 50 - 24)\n\n exp = result\n result = ts[24:]\n tm.assert_series_equal(exp, result)\n\n ts = ts[10:].append(ts[10:])\n self.assertRaisesRegexp(KeyError,\n \"left slice bound for non-unique \"\n \"label: '2008'\",\n ts.__getitem__, slice('2008', '2009'))\n\n def test_getitem_datetime(self):\n rng = 
period_range(start='2012-01-01', periods=10, freq='W-MON')\n ts = Series(lrange(len(rng)), index=rng)\n\n dt1 = datetime(2011, 10, 2)\n dt4 = datetime(2012, 4, 20)\n\n rs = ts[dt1:dt4]\n tm.assert_series_equal(rs, ts)\n\n def test_getitem_nat(self):\n idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')\n self.assertEqual(idx[0], pd.Period('2011-01', freq='M'))\n self.assertIs(idx[1], tslib.NaT)\n\n s = pd.Series([0, 1, 2], index=idx)\n self.assertEqual(s[pd.NaT], 1)\n\n s = pd.Series(idx, index=idx)\n self.assertEqual(s[pd.Period('2011-01', freq='M')],\n pd.Period('2011-01', freq='M'))\n self.assertIs(s[pd.NaT], tslib.NaT)\n\n def test_getitem_list_periods(self):\n # GH 7710\n rng = period_range(start='2012-01-01', periods=10, freq='D')\n ts = Series(lrange(len(rng)), index=rng)\n exp = ts.iloc[[1]]\n tm.assert_series_equal(ts[[Period('2012-01-02', freq='D')]], exp)\n\n def test_getitem_seconds(self):\n # GH 6716\n didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S',\n periods=4000)\n pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)\n\n for idx in [didx, pidx]:\n # getitem against index should raise ValueError\n values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',\n '2013/02/01 09:00']\n for v in values:\n if _np_version_under1p9:\n with tm.assertRaises(ValueError):\n idx[v]\n else:\n # GH7116\n # these show deprecations as we are trying\n # to slice with non-integer indexers\n # with tm.assertRaises(IndexError):\n # idx[v]\n continue\n\n s = Series(np.random.rand(len(idx)), index=idx)\n tm.assert_series_equal(s['2013/01/01 10:00'], s[3600:3660])\n tm.assert_series_equal(s['2013/01/01 9H'], s[:3600])\n for d in ['2013/01/01', '2013/01', '2013']:\n tm.assert_series_equal(s[d], s)\n\n def test_getitem_day(self):\n # GH 6716\n # Confirm DatetimeIndex and PeriodIndex works identically\n didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)\n pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)\n\n for 
idx in [didx, pidx]:\n # getitem against index should raise ValueError\n values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',\n '2013/02/01 09:00']\n for v in values:\n\n if _np_version_under1p9:\n with tm.assertRaises(ValueError):\n idx[v]\n else:\n # GH7116\n # these show deprecations as we are trying\n # to slice with non-integer indexers\n # with tm.assertRaises(IndexError):\n # idx[v]\n continue\n\n s = Series(np.random.rand(len(idx)), index=idx)\n tm.assert_series_equal(s['2013/01'], s[0:31])\n tm.assert_series_equal(s['2013/02'], s[31:59])\n tm.assert_series_equal(s['2014'], s[365:])\n\n invalid = ['2013/02/01 9H', '2013/02/01 09:00']\n for v in invalid:\n with tm.assertRaises(KeyError):\n s[v]\n\n\nclass TestIndexing(tm.TestCase):\n\n def test_get_loc_msg(self):\n idx = period_range('2000-1-1', freq='A', periods=10)\n bad_period = Period('2012', 'A')\n self.assertRaises(KeyError, idx.get_loc, bad_period)\n\n try:\n idx.get_loc(bad_period)\n except KeyError as inst:\n self.assertEqual(inst.args[0], bad_period)\n\n def test_get_loc_nat(self):\n didx = DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03'])\n pidx = PeriodIndex(['2011-01-01', 'NaT', '2011-01-03'], freq='M')\n\n # check DatetimeIndex compat\n for idx in [didx, pidx]:\n self.assertEqual(idx.get_loc(pd.NaT), 1)\n self.assertEqual(idx.get_loc(None), 1)\n self.assertEqual(idx.get_loc(float('nan')), 1)\n self.assertEqual(idx.get_loc(np.nan), 1)\n\n def test_take(self):\n # GH 10295\n idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',\n name='idx')\n\n for idx in [idx1]:\n result = idx.take([0])\n self.assertEqual(result, pd.Period('2011-01-01', freq='D'))\n\n result = idx.take([5])\n self.assertEqual(result, pd.Period('2011-01-06', freq='D'))\n\n result = idx.take([0, 1, 2])\n expected = pd.period_range('2011-01-01', '2011-01-03', freq='D',\n name='idx')\n self.assert_index_equal(result, expected)\n self.assertEqual(result.freq, 'D')\n self.assertEqual(result.freq, expected.freq)\n\n 
result = idx.take([0, 2, 4])\n expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',\n '2011-01-05'], freq='D', name='idx')\n self.assert_index_equal(result, expected)\n self.assertEqual(result.freq, expected.freq)\n self.assertEqual(result.freq, 'D')\n\n result = idx.take([7, 4, 1])\n expected = pd.PeriodIndex(['2011-01-08', '2011-01-05',\n '2011-01-02'],\n freq='D', name='idx')\n self.assert_index_equal(result, expected)\n self.assertEqual(result.freq, expected.freq)\n self.assertEqual(result.freq, 'D')\n\n result = idx.take([3, 2, 5])\n expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'],\n freq='D', name='idx')\n self.assert_index_equal(result, expected)\n self.assertEqual(result.freq, expected.freq)\n self.assertEqual(result.freq, 'D')\n\n result = idx.take([-3, 2, 5])\n expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'],\n freq='D', name='idx')\n self.assert_index_equal(result, expected)\n self.assertEqual(result.freq, expected.freq)\n self.assertEqual(result.freq, 'D')\n\n def test_take_misc(self):\n index = PeriodIndex(start='1/1/10', end='12/31/12', freq='D',\n name='idx')\n expected = PeriodIndex([datetime(2010, 1, 6), datetime(2010, 1, 7),\n datetime(2010, 1, 9), datetime(2010, 1, 13)],\n freq='D', name='idx')\n\n taken1 = index.take([5, 6, 8, 12])\n taken2 = index[[5, 6, 8, 12]]\n\n for taken in [taken1, taken2]:\n tm.assert_index_equal(taken, expected)\n tm.assertIsInstance(taken, PeriodIndex)\n self.assertEqual(taken.freq, index.freq)\n self.assertEqual(taken.name, expected.name)\n\n def test_take_fill_value(self):\n # GH 12631\n idx = pd.PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01'],\n name='xxx', freq='D')\n result = idx.take(np.array([1, 0, -1]))\n expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', '2011-03-01'],\n name='xxx', freq='D')\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = pd.PeriodIndex(['2011-02-01', 
'2011-01-01', 'NaT'],\n name='xxx', freq='D')\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False,\n fill_value=True)\n expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', '2011-03-01'],\n name='xxx', freq='D')\n tm.assert_index_equal(result, expected)\n\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n with tm.assertRaisesRegexp(ValueError, msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with tm.assertRaisesRegexp(ValueError, msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n with tm.assertRaises(IndexError):\n idx.take(np.array([1, -5]))\n", "\n# pylint: disable=E1101,E1103,W0232\nimport datetime\nimport warnings\nfrom functools import partial\nfrom sys import getsizeof\n\nimport numpy as np\nfrom pandas._libs import index as libindex, lib, Timestamp\n\nfrom pandas.compat import range, zip, lrange, lzip, map\nfrom pandas.compat.numpy import function as nv\nfrom pandas import compat\n\nfrom pandas.types.common import (_ensure_int64,\n _ensure_platform_int,\n is_object_dtype,\n is_iterator,\n is_list_like,\n is_scalar)\nfrom pandas.types.missing import isnull, array_equivalent\nfrom pandas.core.common import (_values_from_object,\n is_bool_indexer,\n is_null_slice,\n PerformanceWarning,\n UnsortedIndexError)\n\n\nimport pandas.core.base as base\nfrom pandas.util.decorators import (Appender, cache_readonly,\n deprecate, deprecate_kwarg)\nimport pandas.core.common as com\nimport pandas.core.missing as missing\nimport pandas.core.algorithms as algos\nfrom pandas.formats.printing import pprint_thing\n\nfrom pandas.core.config import get_option\n\nfrom pandas.indexes.base import (Index, _ensure_index,\n _get_na_value, InvalidIndexError,\n _index_shared_docs)\nfrom pandas.indexes.frozen import FrozenNDArray, FrozenList, _ensure_frozen\nimport pandas.indexes.base as ibase\n_index_doc_kwargs = 
dict(ibase._index_doc_kwargs)\n_index_doc_kwargs.update(\n dict(klass='MultiIndex',\n target_klass='MultiIndex or list of tuples'))\n\n\nclass MultiIndex(Index):\n \"\"\"\n A multi-level, or hierarchical, index object for pandas objects\n\n Parameters\n ----------\n levels : sequence of arrays\n The unique labels for each level\n labels : sequence of arrays\n Integers for each level designating which label at each location\n sortorder : optional int\n Level of sortedness (must be lexicographically sorted by that\n level)\n names : optional sequence of objects\n Names for each of the index levels. (name is accepted for compat)\n copy : boolean, default False\n Copy the meta-data\n verify_integrity : boolean, default True\n Check that the levels/labels are consistent and valid\n \"\"\"\n\n # initialize to zero-length tuples to make everything work\n _typ = 'multiindex'\n _names = FrozenList()\n _levels = FrozenList()\n _labels = FrozenList()\n _comparables = ['names']\n _engine_type = libindex.MultiIndexEngine\n rename = Index.set_names\n\n def __new__(cls, levels=None, labels=None, sortorder=None, names=None,\n copy=False, verify_integrity=True, _set_identity=True,\n name=None, **kwargs):\n\n # compat with Index\n if name is not None:\n names = name\n if levels is None or labels is None:\n raise TypeError(\"Must pass both levels and labels\")\n if len(levels) != len(labels):\n raise ValueError('Length of levels and labels must be the same.')\n if len(levels) == 0:\n raise ValueError('Must pass non-zero number of levels/labels')\n if len(levels) == 1:\n if names:\n name = names[0]\n else:\n name = None\n return Index(levels[0], name=name, copy=True).take(labels[0])\n\n result = object.__new__(MultiIndex)\n\n # we've already validated levels and labels, so shortcut here\n result._set_levels(levels, copy=copy, validate=False)\n result._set_labels(labels, copy=copy, validate=False)\n\n if names is not None:\n # handles name validation\n result._set_names(names)\n\n if 
sortorder is not None:\n result.sortorder = int(sortorder)\n else:\n result.sortorder = sortorder\n\n if verify_integrity:\n result._verify_integrity()\n if _set_identity:\n result._reset_identity()\n return result\n\n def _verify_integrity(self, labels=None, levels=None):\n \"\"\"\n\n Parameters\n ----------\n labels : optional list\n Labels to check for validity. Defaults to current labels.\n levels : optional list\n Levels to check for validity. Defaults to current levels.\n\n Raises\n ------\n ValueError\n * if length of levels and labels don't match or any label would\n exceed level bounds\n \"\"\"\n # NOTE: Currently does not check, among other things, that cached\n # nlevels matches nor that sortorder matches actually sortorder.\n labels = labels or self.labels\n levels = levels or self.levels\n\n if len(levels) != len(labels):\n raise ValueError(\"Length of levels and labels must match. NOTE:\"\n \" this index is in an inconsistent state.\")\n label_length = len(self.labels[0])\n for i, (level, label) in enumerate(zip(levels, labels)):\n if len(label) != label_length:\n raise ValueError(\"Unequal label lengths: %s\" %\n ([len(lab) for lab in labels]))\n if len(label) and label.max() >= len(level):\n raise ValueError(\"On level %d, label max (%d) >= length of\"\n \" level (%d). NOTE: this index is in an\"\n \" inconsistent state\" % (i, label.max(),\n len(level)))\n\n def _get_levels(self):\n return self._levels\n\n def _set_levels(self, levels, level=None, copy=False, validate=True,\n verify_integrity=False):\n # This is NOT part of the levels property because it should be\n # externally not allowed to set levels. 
User beware if you change\n # _levels directly\n if validate and len(levels) == 0:\n raise ValueError('Must set non-zero number of levels.')\n if validate and level is None and len(levels) != self.nlevels:\n raise ValueError('Length of levels must match number of levels.')\n if validate and level is not None and len(levels) != len(level):\n raise ValueError('Length of levels must match length of level.')\n\n if level is None:\n new_levels = FrozenList(\n _ensure_index(lev, copy=copy)._shallow_copy()\n for lev in levels)\n else:\n level = [self._get_level_number(l) for l in level]\n new_levels = list(self._levels)\n for l, v in zip(level, levels):\n new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy()\n new_levels = FrozenList(new_levels)\n\n if verify_integrity:\n self._verify_integrity(levels=new_levels)\n\n names = self.names\n self._levels = new_levels\n if any(names):\n self._set_names(names)\n\n self._tuples = None\n self._reset_cache()\n\n def set_levels(self, levels, level=None, inplace=False,\n verify_integrity=True):\n \"\"\"\n Set new levels on MultiIndex. 
Defaults to returning\n new index.\n\n Parameters\n ----------\n levels : sequence or list of sequence\n new level(s) to apply\n level : int, level name, or sequence of int/level names (default None)\n level(s) to set (None for all levels)\n inplace : bool\n if True, mutates in place\n verify_integrity : bool (default True)\n if True, checks that levels and labels are compatible\n\n Returns\n -------\n new index (of same type and class...etc)\n\n\n Examples\n --------\n >>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),\n (2, u'one'), (2, u'two')],\n names=['foo', 'bar'])\n >>> idx.set_levels([['a','b'], [1,2]])\n MultiIndex(levels=[[u'a', u'b'], [1, 2]],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1]],\n names=[u'foo', u'bar'])\n >>> idx.set_levels(['a','b'], level=0)\n MultiIndex(levels=[[u'a', u'b'], [u'one', u'two']],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1]],\n names=[u'foo', u'bar'])\n >>> idx.set_levels(['a','b'], level='bar')\n MultiIndex(levels=[[1, 2], [u'a', u'b']],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1]],\n names=[u'foo', u'bar'])\n >>> idx.set_levels([['a','b'], [1,2]], level=[0,1])\n MultiIndex(levels=[[u'a', u'b'], [1, 2]],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1]],\n names=[u'foo', u'bar'])\n \"\"\"\n if level is not None and not is_list_like(level):\n if not is_list_like(levels):\n raise TypeError(\"Levels must be list-like\")\n if is_list_like(levels[0]):\n raise TypeError(\"Levels must be list-like\")\n level = [level]\n levels = [levels]\n elif level is None or is_list_like(level):\n if not is_list_like(levels) or not is_list_like(levels[0]):\n raise TypeError(\"Levels must be list of lists-like\")\n\n if inplace:\n idx = self\n else:\n idx = self._shallow_copy()\n idx._reset_identity()\n idx._set_levels(levels, level=level, validate=True,\n verify_integrity=verify_integrity)\n if not inplace:\n return idx\n\n # remove me in 0.14 and change to read only property\n __set_levels = deprecate(\"setting `levels` directly\",\n partial(set_levels, inplace=True,\n 
verify_integrity=True),\n alt_name=\"set_levels\")\n levels = property(fget=_get_levels, fset=__set_levels)\n\n def _get_labels(self):\n return self._labels\n\n def _set_labels(self, labels, level=None, copy=False, validate=True,\n verify_integrity=False):\n\n if validate and level is None and len(labels) != self.nlevels:\n raise ValueError(\"Length of labels must match number of levels\")\n if validate and level is not None and len(labels) != len(level):\n raise ValueError('Length of labels must match length of levels.')\n\n if level is None:\n new_labels = FrozenList(\n _ensure_frozen(lab, lev, copy=copy)._shallow_copy()\n for lev, lab in zip(self.levels, labels))\n else:\n level = [self._get_level_number(l) for l in level]\n new_labels = list(self._labels)\n for l, lev, lab in zip(level, self.levels, labels):\n new_labels[l] = _ensure_frozen(\n lab, lev, copy=copy)._shallow_copy()\n new_labels = FrozenList(new_labels)\n\n if verify_integrity:\n self._verify_integrity(labels=new_labels)\n\n self._labels = new_labels\n self._tuples = None\n self._reset_cache()\n\n def set_labels(self, labels, level=None, inplace=False,\n verify_integrity=True):\n \"\"\"\n Set new labels on MultiIndex. 
Defaults to returning\n new index.\n\n Parameters\n ----------\n labels : sequence or list of sequence\n new labels to apply\n level : int, level name, or sequence of int/level names (default None)\n level(s) to set (None for all levels)\n inplace : bool\n if True, mutates in place\n verify_integrity : bool (default True)\n if True, checks that levels and labels are compatible\n\n Returns\n -------\n new index (of same type and class...etc)\n\n Examples\n --------\n >>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),\n (2, u'one'), (2, u'two')],\n names=['foo', 'bar'])\n >>> idx.set_labels([[1,0,1,0], [0,0,1,1]])\n MultiIndex(levels=[[1, 2], [u'one', u'two']],\n labels=[[1, 0, 1, 0], [0, 0, 1, 1]],\n names=[u'foo', u'bar'])\n >>> idx.set_labels([1,0,1,0], level=0)\n MultiIndex(levels=[[1, 2], [u'one', u'two']],\n labels=[[1, 0, 1, 0], [0, 1, 0, 1]],\n names=[u'foo', u'bar'])\n >>> idx.set_labels([0,0,1,1], level='bar')\n MultiIndex(levels=[[1, 2], [u'one', u'two']],\n labels=[[0, 0, 1, 1], [0, 0, 1, 1]],\n names=[u'foo', u'bar'])\n >>> idx.set_labels([[1,0,1,0], [0,0,1,1]], level=[0,1])\n MultiIndex(levels=[[1, 2], [u'one', u'two']],\n labels=[[1, 0, 1, 0], [0, 0, 1, 1]],\n names=[u'foo', u'bar'])\n \"\"\"\n if level is not None and not is_list_like(level):\n if not is_list_like(labels):\n raise TypeError(\"Labels must be list-like\")\n if is_list_like(labels[0]):\n raise TypeError(\"Labels must be list-like\")\n level = [level]\n labels = [labels]\n elif level is None or is_list_like(level):\n if not is_list_like(labels) or not is_list_like(labels[0]):\n raise TypeError(\"Labels must be list of lists-like\")\n\n if inplace:\n idx = self\n else:\n idx = self._shallow_copy()\n idx._reset_identity()\n idx._set_labels(labels, level=level, verify_integrity=verify_integrity)\n if not inplace:\n return idx\n\n # remove me in 0.14 and change to readonly property\n __set_labels = deprecate(\"setting labels directly\",\n partial(set_labels, inplace=True,\n 
verify_integrity=True),\n alt_name=\"set_labels\")\n labels = property(fget=_get_labels, fset=__set_labels)\n\n def copy(self, names=None, dtype=None, levels=None, labels=None,\n deep=False, _set_identity=False, **kwargs):\n \"\"\"\n Make a copy of this object. Names, dtype, levels and labels can be\n passed and will be set on new copy.\n\n Parameters\n ----------\n names : sequence, optional\n dtype : numpy dtype or pandas type, optional\n levels : sequence, optional\n labels : sequence, optional\n\n Returns\n -------\n copy : MultiIndex\n\n Notes\n -----\n In most cases, there should be no functional difference from using\n ``deep``, but if ``deep`` is passed it will attempt to deepcopy.\n This could be potentially expensive on large MultiIndex objects.\n \"\"\"\n name = kwargs.get('name')\n names = self._validate_names(name=name, names=names, deep=deep)\n\n if deep:\n from copy import deepcopy\n if levels is None:\n levels = deepcopy(self.levels)\n if labels is None:\n labels = deepcopy(self.labels)\n else:\n if levels is None:\n levels = self.levels\n if labels is None:\n labels = self.labels\n return MultiIndex(levels=levels, labels=labels, names=names,\n sortorder=self.sortorder, verify_integrity=False,\n _set_identity=_set_identity)\n\n def __array__(self, dtype=None):\n \"\"\" the array interface, return my values \"\"\"\n return self.values\n\n def view(self, cls=None):\n \"\"\" this is defined as a copy with the same identity \"\"\"\n result = self.copy()\n result._id = self._id\n return result\n\n def _shallow_copy_with_infer(self, values=None, **kwargs):\n return self._shallow_copy(values, **kwargs)\n\n @Appender(_index_shared_docs['_shallow_copy'])\n def _shallow_copy(self, values=None, **kwargs):\n if values is not None:\n if 'name' in kwargs:\n kwargs['names'] = kwargs.pop('name', None)\n # discards freq\n kwargs.pop('freq', None)\n return MultiIndex.from_tuples(values, **kwargs)\n return self.view()\n\n @cache_readonly\n def dtype(self):\n return 
np.dtype('O')\n\n def _is_memory_usage_qualified(self):\n \"\"\" return a boolean if we need a qualified .info display \"\"\"\n def f(l):\n return 'mixed' in l or 'string' in l or 'unicode' in l\n return any([f(l) for l in self._inferred_type_levels])\n\n @Appender(Index.memory_usage.__doc__)\n def memory_usage(self, deep=False):\n # we are overwriting our base class to avoid\n # computing .values here which could materialize\n # a tuple representation uncessarily\n return self._nbytes(deep)\n\n @cache_readonly\n def nbytes(self):\n \"\"\" return the number of bytes in the underlying data \"\"\"\n return self._nbytes(False)\n\n def _nbytes(self, deep=False):\n \"\"\"\n return the number of bytes in the underlying data\n deeply introspect the level data if deep=True\n\n include the engine hashtable\n\n *this is in internal routine*\n\n \"\"\"\n level_nbytes = sum((i.memory_usage(deep=deep) for i in self.levels))\n label_nbytes = sum((i.nbytes for i in self.labels))\n names_nbytes = sum((getsizeof(i) for i in self.names))\n result = level_nbytes + label_nbytes + names_nbytes\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result\n\n def _format_attrs(self):\n \"\"\"\n Return a list of tuples of the (attr,formatted_value)\n \"\"\"\n attrs = [\n ('levels', ibase.default_pprint(self._levels,\n max_seq_items=False)),\n ('labels', ibase.default_pprint(self._labels,\n max_seq_items=False))]\n if not all(name is None for name in self.names):\n attrs.append(('names', ibase.default_pprint(self.names)))\n if self.sortorder is not None:\n attrs.append(('sortorder', ibase.default_pprint(self.sortorder)))\n return attrs\n\n def _format_space(self):\n return \"\\n%s\" % (' ' * (len(self.__class__.__name__) + 1))\n\n def _format_data(self):\n # we are formatting thru the attributes\n return None\n\n def __len__(self):\n return len(self.labels[0])\n\n def _get_names(self):\n return FrozenList(level.name for level in self.levels)\n\n def 
_set_names(self, names, level=None, validate=True):\n \"\"\"\n sets names on levels. WARNING: mutates!\n\n Note that you generally want to set this *after* changing levels, so\n that it only acts on copies\n \"\"\"\n\n # GH 15110\n # Don't allow a single string for names in a MultiIndex\n if names is not None and not is_list_like(names):\n raise ValueError('Names should be list-like for a MultiIndex')\n names = list(names)\n\n if validate and level is not None and len(names) != len(level):\n raise ValueError('Length of names must match length of level.')\n if validate and level is None and len(names) != self.nlevels:\n raise ValueError('Length of names must match number of levels in '\n 'MultiIndex.')\n\n if level is None:\n level = range(self.nlevels)\n else:\n level = [self._get_level_number(l) for l in level]\n\n # set the name\n for l, name in zip(level, names):\n self.levels[l].rename(name, inplace=True)\n\n names = property(fset=_set_names, fget=_get_names,\n doc=\"Names of levels in MultiIndex\")\n\n def _reference_duplicate_name(self, name):\n \"\"\"\n Returns True if the name refered to in self.names is duplicated.\n \"\"\"\n # count the times name equals an element in self.names.\n return sum(name == n for n in self.names) > 1\n\n def _format_native_types(self, na_rep='nan', **kwargs):\n new_levels = []\n new_labels = []\n\n # go through the levels and format them\n for level, label in zip(self.levels, self.labels):\n level = level._format_native_types(na_rep=na_rep, **kwargs)\n # add nan values, if there are any\n mask = (label == -1)\n if mask.any():\n nan_index = len(level)\n level = np.append(level, na_rep)\n label = label.values()\n label[mask] = nan_index\n new_levels.append(level)\n new_labels.append(label)\n\n # reconstruct the multi-index\n mi = MultiIndex(levels=new_levels, labels=new_labels, names=self.names,\n sortorder=self.sortorder, verify_integrity=False)\n\n return mi.values\n\n @Appender(_index_shared_docs['_get_grouper_for_level'])\n 
def _get_grouper_for_level(self, mapper, level):\n indexer = self.labels[level]\n level_index = self.levels[level]\n\n if mapper is not None:\n # Handle group mapping function and return\n level_values = self.levels[level].take(indexer)\n grouper = level_values.map(mapper)\n return grouper, None, None\n\n labels, uniques = algos.factorize(indexer, sort=True)\n\n if len(uniques) > 0 and uniques[0] == -1:\n # Handle NAs\n mask = indexer != -1\n ok_labels, uniques = algos.factorize(indexer[mask],\n sort=True)\n\n labels = np.empty(len(indexer), dtype=indexer.dtype)\n labels[mask] = ok_labels\n labels[~mask] = -1\n\n if len(uniques) < len(level_index):\n # Remove unobserved levels from level_index\n level_index = level_index.take(uniques)\n\n grouper = level_index.take(labels)\n\n return grouper, labels, level_index\n\n @property\n def _constructor(self):\n return MultiIndex.from_tuples\n\n @cache_readonly\n def inferred_type(self):\n return 'mixed'\n\n @staticmethod\n def _from_elements(values, labels=None, levels=None, names=None,\n sortorder=None):\n return MultiIndex(levels, labels, names, sortorder=sortorder)\n\n def _get_level_number(self, level):\n try:\n count = self.names.count(level)\n if count > 1:\n raise ValueError('The name %s occurs multiple times, use a '\n 'level number' % level)\n level = self.names.index(level)\n except ValueError:\n if not isinstance(level, int):\n raise KeyError('Level %s not found' % str(level))\n elif level < 0:\n level += self.nlevels\n if level < 0:\n orig_level = level - self.nlevels\n raise IndexError('Too many levels: Index has only %d '\n 'levels, %d is not a valid level number' %\n (self.nlevels, orig_level))\n # Note: levels are zero-based\n elif level >= self.nlevels:\n raise IndexError('Too many levels: Index has only %d levels, '\n 'not %d' % (self.nlevels, level + 1))\n return level\n\n _tuples = None\n\n @cache_readonly\n def _engine(self):\n return self._engine_type(lambda: self, len(self))\n\n @property\n def 
values(self):\n if self._tuples is not None:\n return self._tuples\n\n values = []\n for lev, lab in zip(self.levels, self.labels):\n # Need to box timestamps, etc.\n box = hasattr(lev, '_box_values')\n # Try to minimize boxing.\n if box and len(lev) > len(lab):\n taken = lev._box_values(algos.take_1d(lev._values, lab))\n elif box:\n taken = algos.take_1d(lev._box_values(lev._values), lab,\n fill_value=_get_na_value(lev.dtype.type))\n else:\n taken = algos.take_1d(np.asarray(lev._values), lab)\n values.append(taken)\n\n self._tuples = lib.fast_zip(values)\n return self._tuples\n\n # fml\n @property\n def _is_v1(self):\n return False\n\n @property\n def _is_v2(self):\n return False\n\n @property\n def _has_complex_internals(self):\n # to disable groupby tricks\n return True\n\n @cache_readonly\n def is_monotonic(self):\n \"\"\"\n return if the index is monotonic increasing (only equal or\n increasing) values.\n \"\"\"\n return self.is_monotonic_increasing\n\n @cache_readonly\n def is_monotonic_increasing(self):\n \"\"\"\n return if the index is monotonic increasing (only equal or\n increasing) values.\n \"\"\"\n\n # reversed() because lexsort() wants the most significant key last.\n values = [self._get_level_values(i).values\n for i in reversed(range(len(self.levels)))]\n try:\n sort_order = np.lexsort(values)\n return Index(sort_order).is_monotonic\n except TypeError:\n\n # we have mixed types and np.lexsort is not happy\n return Index(self.values).is_monotonic\n\n @property\n def is_monotonic_decreasing(self):\n \"\"\"\n return if the index is monotonic decreasing (only equal or\n decreasing) values.\n \"\"\"\n return False\n\n @cache_readonly\n def is_unique(self):\n return not self.duplicated().any()\n\n @cache_readonly\n def _have_mixed_levels(self):\n \"\"\" return a boolean list indicated if we have mixed levels \"\"\"\n return ['mixed' in l for l in self._inferred_type_levels]\n\n @cache_readonly\n def _inferred_type_levels(self):\n \"\"\" return a list of 
the inferred types, one for each level \"\"\"\n return [i.inferred_type for i in self.levels]\n\n @cache_readonly\n def _hashed_values(self):\n \"\"\" return a uint64 ndarray of my hashed values \"\"\"\n from pandas.tools.hashing import hash_tuples\n return hash_tuples(self)\n\n def _hashed_indexing_key(self, key):\n \"\"\"\n validate and return the hash for the provided key\n\n *this is internal for use for the cython routines*\n\n Paramters\n ---------\n key : string or tuple\n\n Returns\n -------\n np.uint64\n\n Notes\n -----\n we need to stringify if we have mixed levels\n\n \"\"\"\n from pandas.tools.hashing import hash_tuples\n\n if not isinstance(key, tuple):\n return hash_tuples(key)\n\n if not len(key) == self.nlevels:\n raise KeyError\n\n def f(k, stringify):\n if stringify and not isinstance(k, compat.string_types):\n k = str(k)\n return k\n key = tuple([f(k, stringify)\n for k, stringify in zip(key, self._have_mixed_levels)])\n return hash_tuples(key)\n\n @deprecate_kwarg('take_last', 'keep', mapping={True: 'last',\n False: 'first'})\n @Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)\n def duplicated(self, keep='first'):\n from pandas.core.sorting import get_group_index\n from pandas._libs.hashtable import duplicated_int64\n\n shape = map(len, self.levels)\n ids = get_group_index(self.labels, shape, sort=False, xnull=False)\n\n return duplicated_int64(ids, keep)\n\n @Appender(ibase._index_shared_docs['fillna'])\n def fillna(self, value=None, downcast=None):\n # isnull is not implemented for MultiIndex\n raise NotImplementedError('isnull is not defined for MultiIndex')\n\n @Appender(_index_shared_docs['dropna'])\n def dropna(self, how='any'):\n nans = [label == -1 for label in self.labels]\n if how == 'any':\n indexer = np.any(nans, axis=0)\n elif how == 'all':\n indexer = np.all(nans, axis=0)\n else:\n raise ValueError(\"invalid how option: {0}\".format(how))\n\n new_labels = [label[~indexer] for label in self.labels]\n return 
self.copy(labels=new_labels, deep=True)\n\n def get_value(self, series, key):\n # somewhat broken encapsulation\n from pandas.core.indexing import maybe_droplevels\n\n # Label-based\n s = _values_from_object(series)\n k = _values_from_object(key)\n\n def _try_mi(k):\n # TODO: what if a level contains tuples??\n loc = self.get_loc(k)\n new_values = series._values[loc]\n new_index = self[loc]\n new_index = maybe_droplevels(new_index, k)\n return series._constructor(new_values, index=new_index,\n name=series.name).__finalize__(self)\n\n try:\n return self._engine.get_value(s, k)\n except KeyError as e1:\n try:\n return _try_mi(key)\n except KeyError:\n pass\n\n try:\n return libindex.get_value_at(s, k)\n except IndexError:\n raise\n except TypeError:\n # generator/iterator-like\n if is_iterator(key):\n raise InvalidIndexError(key)\n else:\n raise e1\n except Exception: # pragma: no cover\n raise e1\n except TypeError:\n\n # a Timestamp will raise a TypeError in a multi-index\n # rather than a KeyError, try it here\n # note that a string that 'looks' like a Timestamp will raise\n # a KeyError! 
(GH5725)\n if (isinstance(key, (datetime.datetime, np.datetime64)) or\n (compat.PY3 and isinstance(key, compat.string_types))):\n try:\n return _try_mi(key)\n except (KeyError):\n raise\n except:\n pass\n\n try:\n return _try_mi(Timestamp(key))\n except:\n pass\n\n raise InvalidIndexError(key)\n\n def _get_level_values(self, level):\n \"\"\"\n Return vector of label values for requested level,\n equal to the length of the index\n\n **this is an internal method**\n\n Parameters\n ----------\n level : int level\n\n Returns\n -------\n values : ndarray\n \"\"\"\n\n unique = self.levels[level]\n labels = self.labels[level]\n filled = algos.take_1d(unique._values, labels,\n fill_value=unique._na_value)\n values = unique._shallow_copy(filled)\n return values\n\n def get_level_values(self, level):\n \"\"\"\n Return vector of label values for requested level,\n equal to the length of the index\n\n Parameters\n ----------\n level : int or level name\n\n Returns\n -------\n values : Index\n \"\"\"\n level = self._get_level_number(level)\n values = self._get_level_values(level)\n return values\n\n def format(self, space=2, sparsify=None, adjoin=True, names=False,\n na_rep=None, formatter=None):\n if len(self) == 0:\n return []\n\n stringified_levels = []\n for lev, lab in zip(self.levels, self.labels):\n na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)\n\n if len(lev) > 0:\n\n formatted = lev.take(lab).format(formatter=formatter)\n\n # we have some NA\n mask = lab == -1\n if mask.any():\n formatted = np.array(formatted, dtype=object)\n formatted[mask] = na\n formatted = formatted.tolist()\n\n else:\n # weird all NA case\n formatted = [pprint_thing(na if isnull(x) else x,\n escape_chars=('\\t', '\\r', '\\n'))\n for x in algos.take_1d(lev._values, lab)]\n stringified_levels.append(formatted)\n\n result_levels = []\n for lev, name in zip(stringified_levels, self.names):\n level = []\n\n if names:\n level.append(pprint_thing(name,\n escape_chars=('\\t', '\\r', 
'\\n'))\n if name is not None else '')\n\n level.extend(np.array(lev, dtype=object))\n result_levels.append(level)\n\n if sparsify is None:\n sparsify = get_option(\"display.multi_sparse\")\n\n if sparsify:\n sentinel = ''\n # GH3547\n # use value of sparsify as sentinel, unless it's an obvious\n # \"Truthey\" value\n if sparsify not in [True, 1]:\n sentinel = sparsify\n # little bit of a kludge job for #1217\n result_levels = _sparsify(result_levels, start=int(names),\n sentinel=sentinel)\n\n if adjoin:\n from pandas.formats.format import _get_adjustment\n adj = _get_adjustment()\n return adj.adjoin(space, *result_levels).split('\\n')\n else:\n return result_levels\n\n def _to_safe_for_reshape(self):\n \"\"\" convert to object if we are a categorical \"\"\"\n return self.set_levels([i._to_safe_for_reshape() for i in self.levels])\n\n def to_frame(self, index=True):\n \"\"\"\n Create a DataFrame with the columns the levels of the MultiIndex\n\n .. versionadded:: 0.20.0\n\n Parameters\n ----------\n index : boolean, default True\n return this MultiIndex as the index\n\n Returns\n -------\n DataFrame\n \"\"\"\n\n from pandas import DataFrame\n result = DataFrame({(name or level):\n self._get_level_values(level)\n for name, level in\n zip(self.names, range(len(self.levels)))},\n copy=False)\n if index:\n result.index = self\n return result\n\n def to_hierarchical(self, n_repeat, n_shuffle=1):\n \"\"\"\n Return a MultiIndex reshaped to conform to the\n shapes given by n_repeat and n_shuffle.\n\n Useful to replicate and rearrange a MultiIndex for combination\n with another Index with n_repeat items.\n\n Parameters\n ----------\n n_repeat : int\n Number of times to repeat the labels on self\n n_shuffle : int\n Controls the reordering of the labels. If the result is going\n to be an inner level in a MultiIndex, n_shuffle will need to be\n greater than one. 
The size of each label must divisible by\n n_shuffle.\n\n Returns\n -------\n MultiIndex\n\n Examples\n --------\n >>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),\n (2, u'one'), (2, u'two')])\n >>> idx.to_hierarchical(3)\n MultiIndex(levels=[[1, 2], [u'one', u'two']],\n labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])\n \"\"\"\n levels = self.levels\n labels = [np.repeat(x, n_repeat) for x in self.labels]\n # Assumes that each label is divisible by n_shuffle\n labels = [x.reshape(n_shuffle, -1).ravel(order='F') for x in labels]\n names = self.names\n return MultiIndex(levels=levels, labels=labels, names=names)\n\n @property\n def is_all_dates(self):\n return False\n\n def is_lexsorted(self):\n \"\"\"\n Return True if the labels are lexicographically sorted\n \"\"\"\n return self.lexsort_depth == self.nlevels\n\n def is_lexsorted_for_tuple(self, tup):\n \"\"\"\n Return True if we are correctly lexsorted given the passed tuple\n \"\"\"\n return len(tup) <= self.lexsort_depth\n\n @cache_readonly\n def lexsort_depth(self):\n if self.sortorder is not None:\n if self.sortorder == 0:\n return self.nlevels\n else:\n return 0\n\n int64_labels = [_ensure_int64(lab) for lab in self.labels]\n for k in range(self.nlevels, 0, -1):\n if lib.is_lexsorted(int64_labels[:k]):\n return k\n\n return 0\n\n @classmethod\n def from_arrays(cls, arrays, sortorder=None, names=None):\n \"\"\"\n Convert arrays to MultiIndex\n\n Parameters\n ----------\n arrays : list / sequence of array-likes\n Each array-like gives one level's value for each data point.\n len(arrays) is the number of levels.\n sortorder : int or None\n Level of sortedness (must be lexicographically sorted by that\n level)\n\n Returns\n -------\n index : MultiIndex\n\n Examples\n --------\n >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]\n >>> MultiIndex.from_arrays(arrays, names=('number', 'color'))\n\n See Also\n --------\n MultiIndex.from_tuples : Convert list of 
tuples to MultiIndex\n MultiIndex.from_product : Make a MultiIndex from cartesian product\n of iterables\n \"\"\"\n if len(arrays) == 1:\n name = None if names is None else names[0]\n return Index(arrays[0], name=name)\n\n # Check if lengths of all arrays are equal or not,\n # raise ValueError, if not\n for i in range(1, len(arrays)):\n if len(arrays[i]) != len(arrays[i - 1]):\n raise ValueError('all arrays must be same length')\n\n from pandas.core.categorical import _factorize_from_iterables\n\n labels, levels = _factorize_from_iterables(arrays)\n if names is None:\n names = [getattr(arr, \"name\", None) for arr in arrays]\n\n return MultiIndex(levels=levels, labels=labels, sortorder=sortorder,\n names=names, verify_integrity=False)\n\n @classmethod\n def from_tuples(cls, tuples, sortorder=None, names=None):\n \"\"\"\n Convert list of tuples to MultiIndex\n\n Parameters\n ----------\n tuples : list / sequence of tuple-likes\n Each tuple is the index of one row/column.\n sortorder : int or None\n Level of sortedness (must be lexicographically sorted by that\n level)\n\n Returns\n -------\n index : MultiIndex\n\n Examples\n --------\n >>> tuples = [(1, u'red'), (1, u'blue'),\n (2, u'red'), (2, u'blue')]\n >>> MultiIndex.from_tuples(tuples, names=('number', 'color'))\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex\n MultiIndex.from_product : Make a MultiIndex from cartesian product\n of iterables\n \"\"\"\n if len(tuples) == 0:\n # I think this is right? 
Not quite sure...\n raise TypeError('Cannot infer number of levels from empty list')\n\n if isinstance(tuples, (np.ndarray, Index)):\n if isinstance(tuples, Index):\n tuples = tuples._values\n\n arrays = list(lib.tuples_to_object_array(tuples).T)\n elif isinstance(tuples, list):\n arrays = list(lib.to_object_array_tuples(tuples).T)\n else:\n arrays = lzip(*tuples)\n\n return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)\n\n @classmethod\n def from_product(cls, iterables, sortorder=None, names=None):\n \"\"\"\n Make a MultiIndex from the cartesian product of multiple iterables\n\n Parameters\n ----------\n iterables : list / sequence of iterables\n Each iterable has unique labels for each level of the index.\n sortorder : int or None\n Level of sortedness (must be lexicographically sorted by that\n level).\n names : list / sequence of strings or None\n Names for the levels in the index.\n\n Returns\n -------\n index : MultiIndex\n\n Examples\n --------\n >>> numbers = [0, 1, 2]\n >>> colors = [u'green', u'purple']\n >>> MultiIndex.from_product([numbers, colors],\n names=['number', 'color'])\n MultiIndex(levels=[[0, 1, 2], [u'green', u'purple']],\n labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],\n names=[u'number', u'color'])\n\n See Also\n --------\n MultiIndex.from_arrays : Convert list of arrays to MultiIndex\n MultiIndex.from_tuples : Convert list of tuples to MultiIndex\n \"\"\"\n from pandas.core.categorical import _factorize_from_iterables\n from pandas.tools.util import cartesian_product\n\n labels, levels = _factorize_from_iterables(iterables)\n labels = cartesian_product(labels)\n\n return MultiIndex(levels=levels, labels=labels, sortorder=sortorder,\n names=names)\n\n @property\n def nlevels(self):\n return len(self.levels)\n\n @property\n def levshape(self):\n return tuple(len(x) for x in self.levels)\n\n def __contains__(self, key):\n hash(key)\n # work around some kind of odd cython bug\n try:\n self.get_loc(key)\n return True\n 
    def _assert_take_fillable(self, values, indices, allow_fill=True,
                              fill_value=None, na_value=None):
        """ Internal method to handle NA filling of take """
        # NOTE(review): ``values`` is accepted for signature compatibility
        # with the base-class hook but is not read here -- the label codes
        # are taken directly from ``self.labels``.
        # only fill if we are passing a non-None fill_value
        if allow_fill and fill_value is not None:
            # -1 is the sentinel for "missing"; anything below -1 is an
            # invalid position once fill semantics are requested.
            if (indices < -1).any():
                msg = ('When allow_fill=True and fill_value is not None, '
                       'all indices must be >= -1')
                raise ValueError(msg)
            taken = [lab.take(indices) for lab in self.labels]
            mask = indices == -1
            if mask.any():
                masked = []
                for new_label in taken:
                    # presumably FrozenNDArray.values() hands back a mutable
                    # ndarray over data owned by this fresh take() result --
                    # confirm, since the frozen container itself forbids
                    # item assignment.
                    label_values = new_label.values()
                    label_values[mask] = na_value
                    masked.append(FrozenNDArray(label_values))
                taken = masked
        else:
            taken = [lab.take(indices) for lab in self.labels]
        return taken
    def drop(self, labels, level=None, errors='raise'):
        """
        Make new MultiIndex with passed list of labels deleted

        Parameters
        ----------
        labels : array-like
            Must be a list of tuples
        level : int or level name, default None
        errors : {'raise', 'ignore'}, default 'raise'
            'ignore' suppresses errors for labels that are not found.

        Returns
        -------
        dropped : MultiIndex
        """
        # Dropping within a single level is a separate, simpler operation.
        if level is not None:
            return self._drop_from_level(labels, level)

        try:
            if not isinstance(labels, (np.ndarray, Index)):
                labels = com._index_labels_to_array(labels)
            indexer = self.get_indexer(labels)
            mask = indexer == -1
            if mask.any():
                if errors != 'ignore':
                    raise ValueError('labels %s not contained in axis' %
                                     labels[mask])
                indexer = indexer[~mask]
        except Exception:
            # NOTE(review): any failure above is silently ignored and the
            # ``indexer`` computed here is never used below -- the block
            # appears to exist only to raise early for labels missing from
            # the axis.  TODO confirm before simplifying.
            pass

        inds = []
        for label in labels:
            try:
                loc = self.get_loc(label)
                # get_loc returns either an integer, a slice, or a boolean
                # mask
                if isinstance(loc, int):
                    inds.append(loc)
                elif isinstance(loc, slice):
                    inds.extend(lrange(loc.start, loc.stop))
                elif is_bool_indexer(loc):
                    if self.lexsort_depth == 0:
                        warnings.warn('dropping on a non-lexsorted multi-index'
                                      ' without a level parameter may impact '
                                      'performance.',
                                      PerformanceWarning,
                                      stacklevel=3)
                    loc = loc.nonzero()[0]
                    inds.extend(loc)
                else:
                    msg = 'unsupported indexer of type {}'.format(type(loc))
                    raise AssertionError(msg)
            except KeyError:
                # Missing labels only raise when errors='raise'.
                if errors != 'ignore':
                    raise

        return self.delete(inds)
If MultiIndex has only 2\n levels, the result will be of Index type not MultiIndex.\n\n Parameters\n ----------\n level : int/level name or list thereof\n\n Notes\n -----\n Does not check if result index is unique or not\n\n Returns\n -------\n index : Index or MultiIndex\n \"\"\"\n levels = level\n if not isinstance(levels, (tuple, list)):\n levels = [level]\n\n new_levels = list(self.levels)\n new_labels = list(self.labels)\n new_names = list(self.names)\n\n levnums = sorted(self._get_level_number(lev) for lev in levels)[::-1]\n\n for i in levnums:\n new_levels.pop(i)\n new_labels.pop(i)\n new_names.pop(i)\n\n if len(new_levels) == 1:\n\n # set nan if needed\n mask = new_labels[0] == -1\n result = new_levels[0].take(new_labels[0])\n if mask.any():\n result = result.putmask(mask, np.nan)\n\n result.name = new_names[0]\n return result\n else:\n return MultiIndex(levels=new_levels, labels=new_labels,\n names=new_names, verify_integrity=False)\n\n def swaplevel(self, i=-2, j=-1):\n \"\"\"\n Swap level i with level j. Do not change the ordering of anything\n\n Parameters\n ----------\n i, j : int, string (can be mixed)\n Level of index to be swapped. Can pass level name as string.\n\n Returns\n -------\n swapped : MultiIndex\n\n .. versionchanged:: 0.18.1\n\n The indexes ``i`` and ``j`` are now optional, and default to\n the two innermost levels of the index.\n\n \"\"\"\n new_levels = list(self.levels)\n new_labels = list(self.labels)\n new_names = list(self.names)\n\n i = self._get_level_number(i)\n j = self._get_level_number(j)\n\n new_levels[i], new_levels[j] = new_levels[j], new_levels[i]\n new_labels[i], new_labels[j] = new_labels[j], new_labels[i]\n new_names[i], new_names[j] = new_names[j], new_names[i]\n\n return MultiIndex(levels=new_levels, labels=new_labels,\n names=new_names, verify_integrity=False)\n\n def reorder_levels(self, order):\n \"\"\"\n Rearrange levels using input order. 
    def sortlevel(self, level=0, ascending=True, sort_remaining=True):
        """
        Sort MultiIndex at the requested level. The result will respect the
        original ordering of the associated factor at that level.

        Parameters
        ----------
        level : list-like, int or str, default 0
            If a string is given, must be a name of the level
            If list-like must be names or ints of levels.
        ascending : boolean, default True
            False to sort in descending order
            Can also be a list to specify a directed ordering
        sort_remaining : sort by the remaining levels after level.

        Returns
        -------
        sorted_index : pd.MultiIndex
            Resulting index
        indexer : np.ndarray
            Indices of output values in original index

        """
        from pandas.core.sorting import indexer_from_factorized

        # Normalize ``level`` to a list of integer level positions.
        if isinstance(level, (compat.string_types, int)):
            level = [level]
        level = [self._get_level_number(lev) for lev in level]
        sortorder = None

        # we have a directed ordering via ascending
        if isinstance(ascending, list):
            if not len(level) == len(ascending):
                raise ValueError("level must have same length as ascending")

            from pandas.core.sorting import lexsort_indexer
            indexer = lexsort_indexer(self.labels, orders=ascending)

        # level ordering
        else:

            labels = list(self.labels)
            shape = list(self.levshape)

            # partition labels and shape
            # ``lev - i`` compensates for items already popped; this appears
            # to assume the requested levels come in ascending order --
            # TODO confirm.
            primary = tuple(labels.pop(lev - i) for i, lev in enumerate(level))
            primshp = tuple(shape.pop(lev - i) for i, lev in enumerate(level))

            if sort_remaining:
                # NOTE(review): ``primary`` is concatenated with itself before
                # the remaining labels are appended, so the requested levels
                # are used as sort keys twice.  The resulting order is the
                # same (duplicate keys cannot change a lexsort) but it looks
                # accidental -- confirm before changing.
                primary += primary + tuple(labels)
                primshp += primshp + tuple(shape)
            else:
                sortorder = level[0]

            indexer = indexer_from_factorized(primary, primshp,
                                              compress=False)

        # NOTE(review): a non-empty list for ``ascending`` is truthy, so this
        # reversal only fires when ``ascending`` is a scalar False.
        if not ascending:
            indexer = indexer[::-1]

        indexer = _ensure_platform_int(indexer)
        new_labels = [lab.take(indexer) for lab in self.labels]

        new_index = MultiIndex(labels=new_labels, levels=self.levels,
                               names=self.names, sortorder=sortorder,
                               verify_integrity=False)

        return new_index, indexer
    def reindex(self, target, method=None, level=None, limit=None,
                tolerance=None):
        """
        Create index with target's values (move/add/delete values as necessary)

        Parameters
        ----------
        target : an iterable of labels, or an Index
        method : fill method; not supported when ``level`` is passed
        level : int or level name; reindex against a single level
        limit, tolerance : passed through to ``get_indexer``

        Returns
        -------
        new_index : pd.MultiIndex
            Resulting index
        indexer : np.ndarray or None
            Indices of output values in original index

        """
        # GH6552: preserve names when reindexing to non-named target
        # (i.e. neither Index nor Series).
        preserve_names = not hasattr(target, 'names')

        if level is not None:
            if method is not None:
                raise TypeError('Fill method not supported if level passed')

            # GH7774: preserve dtype/tz if target is empty and not an Index.
            # target may be an iterator
            target = ibase._ensure_has_len(target)
            if len(target) == 0 and not isinstance(target, Index):
                # Rebuild an empty index of the level's own type/attributes.
                idx = self.levels[level]
                attrs = idx._get_attributes_dict()
                attrs.pop('freq', None)  # don't preserve freq
                target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype),
                                               **attrs)
            else:
                target = _ensure_index(target)
            target, indexer, _ = self._join_level(target, level, how='right',
                                                  return_indexers=True,
                                                  keep_order=False)
        else:
            target = _ensure_index(target)
            if self.equals(target):
                # Identical labeling: no repositioning needed.
                indexer = None
            else:
                if self.is_unique:
                    indexer = self.get_indexer(target, method=method,
                                               limit=limit,
                                               tolerance=tolerance)
                else:
                    raise Exception("cannot handle a non-unique multi-index!")

        # Coerce non-MultiIndex results back into a MultiIndex where possible.
        if not isinstance(target, MultiIndex):
            if indexer is None:
                target = self
            elif (indexer >= 0).all():
                target = self.take(indexer)
            else:
                # hopefully?
                target = MultiIndex.from_tuples(target)

        if (preserve_names and target.nlevels == self.nlevels and
                target.names != self.names):
            target = target.copy(deep=False)
            target.names = self.names

        return target, indexer
    def get_loc(self, key, method=None):
        """
        Get integer location, slice or boolean mask for requested label or
        tuple.  If the key is past the lexsort depth, the return may be a
        boolean mask array, otherwise it is always a slice or int.

        Parameters
        ----------
        key : label or tuple
        method : None

        Returns
        -------
        loc : int, slice object or boolean mask

        Raises
        ------
        KeyError
            If the key is not found, or its length exceeds the index depth.
        NotImplementedError
            If any ``method`` other than None is requested.
        """
        if method is not None:
            raise NotImplementedError('only the default get_loc method is '
                                      'currently supported for MultiIndex')

        def _maybe_to_slice(loc):
            """convert integer indexer to boolean mask or slice if possible"""
            if not isinstance(loc, np.ndarray) or loc.dtype != 'int64':
                return loc

            loc = lib.maybe_indices_to_slice(loc, len(self))
            if isinstance(loc, slice):
                return loc

            # Positions are not contiguous: fall back to a boolean mask.
            mask = np.empty(len(self), dtype='bool')
            mask.fill(False)
            mask[loc] = True
            return mask

        # A non-tuple key addresses the first level only.
        if not isinstance(key, tuple):
            loc = self._get_level_indexer(key, level=0)
            return _maybe_to_slice(loc)

        keylen = len(key)
        if self.nlevels < keylen:
            raise KeyError('Key length ({0}) exceeds index depth ({1})'
                           ''.format(keylen, self.nlevels))

        if keylen == self.nlevels and self.is_unique:

            def _maybe_str_to_time_stamp(key, lev):
                # Coerce strings to Timestamps against datetime levels so
                # the engine lookup matches; failures fall through untouched.
                if lev.is_all_dates and not isinstance(key, Timestamp):
                    try:
                        return Timestamp(key, tz=getattr(lev, 'tz', None))
                    except Exception:
                        pass
                return key

            key = _values_from_object(key)
            key = tuple(map(_maybe_str_to_time_stamp, key, self.levels))
            return self._engine.get_loc(key)

        # -- partial selection or non-unique index
        # break the key into 2 parts based on the lexsort_depth of the index;
        # the first part returns a continuous slice of the index; the 2nd part
        # needs linear search within the slice
        i = self.lexsort_depth
        lead_key, follow_key = key[:i], key[i:]
        start, stop = (self.slice_locs(lead_key, lead_key)
                       if lead_key else (0, len(self)))

        if start == stop:
            raise KeyError(key)

        if not follow_key:
            return slice(start, stop)

        warnings.warn('indexing past lexsort depth may impact performance.',
                      PerformanceWarning, stacklevel=10)

        loc = np.arange(start, stop, dtype='int64')

        # Narrow the candidate positions level by level past the lexsort
        # depth; enumerate starts at the first unsorted level's position.
        for i, k in enumerate(follow_key, len(lead_key)):
            mask = self.labels[i][loc] == self.levels[i].get_loc(k)
            if not mask.all():
                loc = loc[mask]
            if not len(loc):
                raise KeyError(key)

        return (_maybe_to_slice(loc) if len(loc) != stop - start else
                slice(start, stop))
    def _get_level_indexer(self, key, level=0, indexer=None):
        """
        Locate ``key`` within a single level of the index.

        Returns an integer indexer, a boolean array, or a slice showing
        where the key is in the totality of values.  If ``indexer`` is
        provided, it restricts the positions considered.
        """
        # return an indexer, boolean array or a slice showing where the key is
        # in the totality of values
        # if the indexer is provided, then use this

        level_index = self.levels[level]
        labels = self.labels[level]

        def convert_indexer(start, stop, step, indexer=indexer, labels=labels):
            # given the inputs and the labels/indexer, compute an indexer set
            # if we have a provided indexer, then this need not consider
            # the entire labels set

            r = np.arange(start, stop, step)
            if indexer is not None and len(indexer) != len(labels):

                # we have an indexer which maps the locations in the labels
                # that we have already selected (and is not an indexer for the
                # entire set) otherwise this is wasteful so we only need to
                # examine locations that are in this set the only magic here is
                # that the result are the mappings to the set that we have
                # selected
                from pandas import Series
                mapper = Series(indexer)
                indexer = labels.take(_ensure_platform_int(indexer))
                result = Series(Index(indexer).isin(r).nonzero()[0])
                m = result.map(mapper)._values

            else:
                m = np.zeros(len(labels), dtype=bool)
                m[np.in1d(labels, r,
                          assume_unique=Index(labels).is_unique)] = True

            return m

        if isinstance(key, slice):
            # handle a slice, returning a slice if we can
            # otherwise a boolean indexer

            try:
                if key.start is not None:
                    start = level_index.get_loc(key.start)
                else:
                    start = 0
                if key.stop is not None:
                    stop = level_index.get_loc(key.stop)
                else:
                    stop = len(level_index) - 1
                step = key.step
            except KeyError:

                # we have a partial slice (like looking up a partial date
                # string)
                start = stop = level_index.slice_indexer(key.start, key.stop,
                                                         key.step, kind='loc')
                step = start.step

            if isinstance(start, slice) or isinstance(stop, slice):
                # we have a slice for start and/or stop
                # a partial date slicer on a DatetimeIndex generates a slice
                # note that the stop ALREADY includes the stopped point (if
                # it was a string sliced)
                return convert_indexer(start.start, stop.stop, step)

            elif level > 0 or self.lexsort_depth == 0 or step is not None:
                # need to have like semantics here to right
                # searching as when we are using a slice
                # so include the stop+1 (so we include stop)
                return convert_indexer(start, stop + 1, step)
            else:
                # sorted, so can return slice object -> view
                i = labels.searchsorted(start, side='left')
                j = labels.searchsorted(stop, side='right')
                return slice(i, j, step)

        else:

            loc = level_index.get_loc(key)
            if level > 0 or self.lexsort_depth == 0:
                # Not positionally sorted at this level: answer with a mask.
                return np.array(labels == loc, dtype=bool)
            else:
                # sorted, so can return slice object -> view
                try:
                    loc = labels.dtype.type(loc)
                except TypeError:
                    # this occurs when loc is a slice (partial string indexing)
                    # but the TypeError raised by searchsorted in this case
                    # is caught in Index._has_valid_type()
                    pass
                i = labels.searchsorted(loc, side='left')
                j = labels.searchsorted(loc, side='right')
                return slice(i, j)
is None:\n indexer = Index(np.arange(n))\n if idxr is None:\n return indexer\n return indexer & idxr\n\n for i, k in enumerate(tup):\n\n if is_bool_indexer(k):\n # a boolean indexer, must be the same length!\n k = np.asarray(k)\n indexer = _update_indexer(_convert_to_indexer(k),\n indexer=indexer)\n\n elif is_list_like(k):\n # a collection of labels to include from this level (these\n # are or'd)\n indexers = None\n for x in k:\n try:\n idxrs = _convert_to_indexer(\n self._get_level_indexer(x, level=i,\n indexer=indexer))\n indexers = (idxrs if indexers is None\n else indexers | idxrs)\n except KeyError:\n\n # ignore not founds\n continue\n\n if indexers is not None:\n indexer = _update_indexer(indexers, indexer=indexer)\n else:\n from .numeric import Int64Index\n # no matches we are done\n return Int64Index([])._values\n\n elif is_null_slice(k):\n # empty slice\n indexer = _update_indexer(None, indexer=indexer)\n\n elif isinstance(k, slice):\n\n # a slice, include BOTH of the labels\n indexer = _update_indexer(_convert_to_indexer(\n self._get_level_indexer(k, level=i, indexer=indexer)),\n indexer=indexer)\n else:\n # a single label\n indexer = _update_indexer(_convert_to_indexer(\n self.get_loc_level(k, level=i, drop_level=False)[0]),\n indexer=indexer)\n\n # empty indexer\n if indexer is None:\n return Int64Index([])._values\n return indexer._values\n\n def truncate(self, before=None, after=None):\n \"\"\"\n Slice index between two labels / tuples, return new MultiIndex\n\n Parameters\n ----------\n before : label or tuple, can be partial. Default None\n None defaults to start\n after : label or tuple, can be partial. 
Default None\n None defaults to end\n\n Returns\n -------\n truncated : MultiIndex\n \"\"\"\n if after and before and after < before:\n raise ValueError('after < before')\n\n i, j = self.levels[0].slice_locs(before, after)\n left, right = self.slice_locs(before, after)\n\n new_levels = list(self.levels)\n new_levels[0] = new_levels[0][i:j]\n\n new_labels = [lab[left:right] for lab in self.labels]\n new_labels[0] = new_labels[0] - i\n\n return MultiIndex(levels=new_levels, labels=new_labels,\n verify_integrity=False)\n\n def equals(self, other):\n \"\"\"\n Determines if two MultiIndex objects have the same labeling information\n (the levels themselves do not necessarily have to be the same)\n\n See also\n --------\n equal_levels\n \"\"\"\n if self.is_(other):\n return True\n\n if not isinstance(other, Index):\n return False\n\n if not isinstance(other, MultiIndex):\n return array_equivalent(self._values,\n _values_from_object(_ensure_index(other)))\n\n if self.nlevels != other.nlevels:\n return False\n\n if len(self) != len(other):\n return False\n\n for i in range(self.nlevels):\n slabels = self.labels[i]\n slabels = slabels[slabels != -1]\n svalues = algos.take_nd(np.asarray(self.levels[i]._values),\n slabels, allow_fill=False)\n\n olabels = other.labels[i]\n olabels = olabels[olabels != -1]\n ovalues = algos.take_nd(np.asarray(other.levels[i]._values),\n olabels, allow_fill=False)\n\n # since we use NaT both datetime64 and timedelta64\n # we can have a situation where a level is typed say\n # timedelta64 in self (IOW it has other values than NaT)\n # but types datetime64 in other (where its all NaT)\n # but these are equivalent\n if len(svalues) == 0 and len(ovalues) == 0:\n continue\n\n if not array_equivalent(svalues, ovalues):\n return False\n\n return True\n\n def equal_levels(self, other):\n \"\"\"\n Return True if the levels of both MultiIndex objects are the same\n\n \"\"\"\n if self.nlevels != other.nlevels:\n return False\n\n for i in 
range(self.nlevels):\n if not self.levels[i].equals(other.levels[i]):\n return False\n return True\n\n def union(self, other):\n \"\"\"\n Form the union of two MultiIndex objects, sorting if possible\n\n Parameters\n ----------\n other : MultiIndex or array / Index of tuples\n\n Returns\n -------\n Index\n\n >>> index.union(index2)\n \"\"\"\n self._assert_can_do_setop(other)\n other, result_names = self._convert_can_do_setop(other)\n\n if len(other) == 0 or self.equals(other):\n return self\n\n uniq_tuples = lib.fast_unique_multiple([self._values, other._values])\n return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,\n names=result_names)\n\n def intersection(self, other):\n \"\"\"\n Form the intersection of two MultiIndex objects, sorting if possible\n\n Parameters\n ----------\n other : MultiIndex or array / Index of tuples\n\n Returns\n -------\n Index\n \"\"\"\n self._assert_can_do_setop(other)\n other, result_names = self._convert_can_do_setop(other)\n\n if self.equals(other):\n return self\n\n self_tuples = self._values\n other_tuples = other._values\n uniq_tuples = sorted(set(self_tuples) & set(other_tuples))\n if len(uniq_tuples) == 0:\n return MultiIndex(levels=[[]] * self.nlevels,\n labels=[[]] * self.nlevels,\n names=result_names, verify_integrity=False)\n else:\n return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,\n names=result_names)\n\n def difference(self, other):\n \"\"\"\n Compute sorted set difference of two MultiIndex objects\n\n Returns\n -------\n diff : MultiIndex\n \"\"\"\n self._assert_can_do_setop(other)\n other, result_names = self._convert_can_do_setop(other)\n\n if len(other) == 0:\n return self\n\n if self.equals(other):\n return MultiIndex(levels=[[]] * self.nlevels,\n labels=[[]] * self.nlevels,\n names=result_names, verify_integrity=False)\n\n difference = sorted(set(self._values) - set(other._values))\n\n if len(difference) == 0:\n return MultiIndex(levels=[[]] * self.nlevels,\n labels=[[]] * self.nlevels,\n 
names=result_names, verify_integrity=False)\n else:\n return MultiIndex.from_tuples(difference, sortorder=0,\n names=result_names)\n\n @Appender(_index_shared_docs['astype'])\n def astype(self, dtype, copy=True):\n if not is_object_dtype(np.dtype(dtype)):\n raise TypeError('Setting %s dtype to anything other than object '\n 'is not supported' % self.__class__)\n elif copy is True:\n return self._shallow_copy()\n return self\n\n def _convert_can_do_setop(self, other):\n result_names = self.names\n\n if not hasattr(other, 'names'):\n if len(other) == 0:\n other = MultiIndex(levels=[[]] * self.nlevels,\n labels=[[]] * self.nlevels,\n verify_integrity=False)\n else:\n msg = 'other must be a MultiIndex or a list of tuples'\n try:\n other = MultiIndex.from_tuples(other)\n except:\n raise TypeError(msg)\n else:\n result_names = self.names if self.names == other.names else None\n return other, result_names\n\n def insert(self, loc, item):\n \"\"\"\n Make new MultiIndex inserting new item at location\n\n Parameters\n ----------\n loc : int\n item : tuple\n Must be same length as number of levels in the MultiIndex\n\n Returns\n -------\n new_index : Index\n \"\"\"\n # Pad the key with empty strings if lower levels of the key\n # aren't specified:\n if not isinstance(item, tuple):\n item = (item, ) + ('', ) * (self.nlevels - 1)\n elif len(item) != self.nlevels:\n raise ValueError('Item must have length equal to number of '\n 'levels.')\n\n new_levels = []\n new_labels = []\n for k, level, labels in zip(item, self.levels, self.labels):\n if k not in level:\n # have to insert into level\n # must insert at end otherwise you have to recompute all the\n # other labels\n lev_loc = len(level)\n level = level.insert(lev_loc, k)\n else:\n lev_loc = level.get_loc(k)\n\n new_levels.append(level)\n new_labels.append(np.insert(_ensure_int64(labels), loc, lev_loc))\n\n return MultiIndex(levels=new_levels, labels=new_labels,\n names=self.names, verify_integrity=False)\n\n def delete(self, 
loc):\n \"\"\"\n Make new index with passed location deleted\n\n Returns\n -------\n new_index : MultiIndex\n \"\"\"\n new_labels = [np.delete(lab, loc) for lab in self.labels]\n return MultiIndex(levels=self.levels, labels=new_labels,\n names=self.names, verify_integrity=False)\n\n get_major_bounds = slice_locs\n\n __bounds = None\n\n @property\n def _bounds(self):\n \"\"\"\n Return or compute and return slice points for level 0, assuming\n sortedness\n \"\"\"\n if self.__bounds is None:\n inds = np.arange(len(self.levels[0]))\n self.__bounds = self.labels[0].searchsorted(inds)\n\n return self.__bounds\n\n def _wrap_joined_index(self, joined, other):\n names = self.names if self.names == other.names else None\n return MultiIndex.from_tuples(joined, names=names)\n\n @Appender(Index.isin.__doc__)\n def isin(self, values, level=None):\n if level is None:\n return lib.ismember(np.array(self), set(values))\n else:\n num = self._get_level_number(level)\n levs = self.levels[num]\n labs = self.labels[num]\n\n sought_labels = levs.isin(values).nonzero()[0]\n if levs.size == 0:\n return np.zeros(len(labs), dtype=np.bool_)\n else:\n return np.lib.arraysetops.in1d(labs, sought_labels)\n\n\nMultiIndex._add_numeric_methods_disabled()\nMultiIndex._add_numeric_methods_add_sub_disabled()\nMultiIndex._add_logical_methods_disabled()\n\n\ndef _sparsify(label_list, start=0, sentinel=''):\n pivoted = lzip(*label_list)\n k = len(label_list)\n\n result = pivoted[:start + 1]\n prev = pivoted[start]\n\n for cur in pivoted[start + 1:]:\n sparse_cur = []\n\n for i, (p, t) in enumerate(zip(prev, cur)):\n if i == k - 1:\n sparse_cur.append(t)\n result.append(sparse_cur)\n break\n\n if p == t:\n sparse_cur.append(sentinel)\n else:\n sparse_cur.extend(cur[i:])\n result.append(sparse_cur)\n break\n\n prev = cur\n\n return lzip(*result)\n\n\ndef _get_na_rep(dtype):\n return {np.datetime64: 'NaT', np.timedelta64: 'NaT'}.get(dtype, 'NaN')\n", "# pylint: disable-msg=E1101,W0612\nimport pytz\nimport 
numpy as np\nfrom distutils.version import LooseVersion\nfrom datetime import datetime, timedelta, tzinfo, date\nfrom pytz import NonExistentTimeError\n\nimport pandas.util.testing as tm\nimport pandas.tseries.tools as tools\nimport pandas.tseries.offsets as offsets\nfrom pandas.compat import lrange, zip\nfrom pandas.tseries.index import bdate_range, date_range\nfrom pandas.types.dtypes import DatetimeTZDtype\nfrom pandas._libs import tslib\nfrom pandas import (Index, Series, DataFrame, isnull, Timestamp, NaT,\n DatetimeIndex, to_datetime)\nfrom pandas.util.testing import (assert_frame_equal, assert_series_equal,\n set_timezone)\n\ntry:\n import pytz # noqa\nexcept ImportError:\n pass\n\ntry:\n import dateutil\nexcept ImportError:\n pass\n\n\nclass FixedOffset(tzinfo):\n \"\"\"Fixed offset in minutes east from UTC.\"\"\"\n\n def __init__(self, offset, name):\n self.__offset = timedelta(minutes=offset)\n self.__name = name\n\n def utcoffset(self, dt):\n return self.__offset\n\n def tzname(self, dt):\n return self.__name\n\n def dst(self, dt):\n return timedelta(0)\n\n\nfixed_off = FixedOffset(-420, '-07:00')\nfixed_off_no_name = FixedOffset(-330, None)\n\n\nclass TestTimeZoneSupportPytz(tm.TestCase):\n\n def setUp(self):\n tm._skip_if_no_pytz()\n\n def tz(self, tz):\n # Construct a timezone object from a string. Overridden in subclass to\n # parameterize tests.\n return pytz.timezone(tz)\n\n def tzstr(self, tz):\n # Construct a timezone string from a string. Overridden in subclass to\n # parameterize tests.\n return tz\n\n def localize(self, tz, x):\n return tz.localize(x)\n\n def cmptz(self, tz1, tz2):\n # Compare two timezones. 
Overridden in subclass to parameterize\n # tests.\n return tz1.zone == tz2.zone\n\n def test_utc_to_local_no_modify(self):\n rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')\n rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))\n\n # Values are unmodified\n self.assertTrue(np.array_equal(rng.asi8, rng_eastern.asi8))\n\n self.assertTrue(self.cmptz(rng_eastern.tz, self.tz('US/Eastern')))\n\n def test_utc_to_local_no_modify_explicit(self):\n rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')\n rng_eastern = rng.tz_convert(self.tz('US/Eastern'))\n\n # Values are unmodified\n self.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)\n\n self.assertEqual(rng_eastern.tz, self.tz('US/Eastern'))\n\n def test_localize_utc_conversion(self):\n # Localizing to time zone should:\n # 1) check for DST ambiguities\n # 2) convert to UTC\n\n rng = date_range('3/10/2012', '3/11/2012', freq='30T')\n\n converted = rng.tz_localize(self.tzstr('US/Eastern'))\n expected_naive = rng + offsets.Hour(5)\n self.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)\n\n # DST ambiguity, this should fail\n rng = date_range('3/11/2012', '3/12/2012', freq='30T')\n # Is this really how it should fail??\n self.assertRaises(NonExistentTimeError, rng.tz_localize,\n self.tzstr('US/Eastern'))\n\n def test_localize_utc_conversion_explicit(self):\n # Localizing to time zone should:\n # 1) check for DST ambiguities\n # 2) convert to UTC\n\n rng = date_range('3/10/2012', '3/11/2012', freq='30T')\n converted = rng.tz_localize(self.tz('US/Eastern'))\n expected_naive = rng + offsets.Hour(5)\n self.assertTrue(np.array_equal(converted.asi8, expected_naive.asi8))\n\n # DST ambiguity, this should fail\n rng = date_range('3/11/2012', '3/12/2012', freq='30T')\n # Is this really how it should fail??\n self.assertRaises(NonExistentTimeError, rng.tz_localize,\n self.tz('US/Eastern'))\n\n def test_timestamp_tz_localize(self):\n stamp = Timestamp('3/11/2012 04:00')\n\n result = 
stamp.tz_localize(self.tzstr('US/Eastern'))\n expected = Timestamp('3/11/2012 04:00', tz=self.tzstr('US/Eastern'))\n self.assertEqual(result.hour, expected.hour)\n self.assertEqual(result, expected)\n\n def test_timestamp_tz_localize_explicit(self):\n stamp = Timestamp('3/11/2012 04:00')\n\n result = stamp.tz_localize(self.tz('US/Eastern'))\n expected = Timestamp('3/11/2012 04:00', tz=self.tz('US/Eastern'))\n self.assertEqual(result.hour, expected.hour)\n self.assertEqual(result, expected)\n\n def test_timestamp_constructed_by_date_and_tz(self):\n # Fix Issue 2993, Timestamp cannot be constructed by datetime.date\n # and tz correctly\n\n result = Timestamp(date(2012, 3, 11), tz=self.tzstr('US/Eastern'))\n\n expected = Timestamp('3/11/2012', tz=self.tzstr('US/Eastern'))\n self.assertEqual(result.hour, expected.hour)\n self.assertEqual(result, expected)\n\n def test_timestamp_constructed_by_date_and_tz_explicit(self):\n # Fix Issue 2993, Timestamp cannot be constructed by datetime.date\n # and tz correctly\n\n result = Timestamp(date(2012, 3, 11), tz=self.tz('US/Eastern'))\n\n expected = Timestamp('3/11/2012', tz=self.tz('US/Eastern'))\n self.assertEqual(result.hour, expected.hour)\n self.assertEqual(result, expected)\n\n def test_timestamp_to_datetime_tzoffset(self):\n # tzoffset\n from dateutil.tz import tzoffset\n tzinfo = tzoffset(None, 7200)\n expected = Timestamp('3/11/2012 04:00', tz=tzinfo)\n result = Timestamp(expected.to_pydatetime())\n self.assertEqual(expected, result)\n\n def test_timedelta_push_over_dst_boundary(self):\n # #1389\n\n # 4 hours before DST transition\n stamp = Timestamp('3/10/2012 22:00', tz=self.tzstr('US/Eastern'))\n\n result = stamp + timedelta(hours=6)\n\n # spring forward, + \"7\" hours\n expected = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern'))\n\n self.assertEqual(result, expected)\n\n def test_timedelta_push_over_dst_boundary_explicit(self):\n # #1389\n\n # 4 hours before DST transition\n stamp = Timestamp('3/10/2012 
22:00', tz=self.tz('US/Eastern'))\n\n result = stamp + timedelta(hours=6)\n\n # spring forward, + \"7\" hours\n expected = Timestamp('3/11/2012 05:00', tz=self.tz('US/Eastern'))\n\n self.assertEqual(result, expected)\n\n def test_tz_localize_dti(self):\n dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256',\n freq='L')\n dti2 = dti.tz_localize(self.tzstr('US/Eastern'))\n\n dti_utc = DatetimeIndex(start='1/1/2005 05:00',\n end='1/1/2005 5:00:30.256', freq='L', tz='utc')\n\n self.assert_numpy_array_equal(dti2.values, dti_utc.values)\n\n dti3 = dti2.tz_convert(self.tzstr('US/Pacific'))\n self.assert_numpy_array_equal(dti3.values, dti_utc.values)\n\n dti = DatetimeIndex(start='11/6/2011 1:59', end='11/6/2011 2:00',\n freq='L')\n self.assertRaises(pytz.AmbiguousTimeError, dti.tz_localize,\n self.tzstr('US/Eastern'))\n\n dti = DatetimeIndex(start='3/13/2011 1:59', end='3/13/2011 2:00',\n freq='L')\n self.assertRaises(pytz.NonExistentTimeError, dti.tz_localize,\n self.tzstr('US/Eastern'))\n\n def test_tz_localize_empty_series(self):\n # #2248\n\n ts = Series()\n\n ts2 = ts.tz_localize('utc')\n self.assertTrue(ts2.index.tz == pytz.utc)\n\n ts2 = ts.tz_localize(self.tzstr('US/Eastern'))\n self.assertTrue(self.cmptz(ts2.index.tz, self.tz('US/Eastern')))\n\n def test_astimezone(self):\n utc = Timestamp('3/11/2012 22:00', tz='UTC')\n expected = utc.tz_convert(self.tzstr('US/Eastern'))\n result = utc.astimezone(self.tzstr('US/Eastern'))\n self.assertEqual(expected, result)\n tm.assertIsInstance(result, Timestamp)\n\n def test_create_with_tz(self):\n stamp = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern'))\n self.assertEqual(stamp.hour, 5)\n\n rng = date_range('3/11/2012 04:00', periods=10, freq='H',\n tz=self.tzstr('US/Eastern'))\n\n self.assertEqual(stamp, rng[1])\n\n utc_stamp = Timestamp('3/11/2012 05:00', tz='utc')\n self.assertIs(utc_stamp.tzinfo, pytz.utc)\n self.assertEqual(utc_stamp.hour, 5)\n\n stamp = Timestamp('3/11/2012 
05:00').tz_localize('utc')\n self.assertEqual(utc_stamp.hour, 5)\n\n def test_create_with_fixed_tz(self):\n off = FixedOffset(420, '+07:00')\n start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)\n end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)\n rng = date_range(start=start, end=end)\n self.assertEqual(off, rng.tz)\n\n rng2 = date_range(start, periods=len(rng), tz=off)\n self.assert_index_equal(rng, rng2)\n\n rng3 = date_range('3/11/2012 05:00:00+07:00',\n '6/11/2012 05:00:00+07:00')\n self.assertTrue((rng.values == rng3.values).all())\n\n def test_create_with_fixedoffset_noname(self):\n off = fixed_off_no_name\n start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)\n end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)\n rng = date_range(start=start, end=end)\n self.assertEqual(off, rng.tz)\n\n idx = Index([start, end])\n self.assertEqual(off, idx.tz)\n\n def test_date_range_localize(self):\n rng = date_range('3/11/2012 03:00', periods=15, freq='H',\n tz='US/Eastern')\n rng2 = DatetimeIndex(['3/11/2012 03:00', '3/11/2012 04:00'],\n tz='US/Eastern')\n rng3 = date_range('3/11/2012 03:00', periods=15, freq='H')\n rng3 = rng3.tz_localize('US/Eastern')\n\n self.assert_index_equal(rng, rng3)\n\n # DST transition time\n val = rng[0]\n exp = Timestamp('3/11/2012 03:00', tz='US/Eastern')\n\n self.assertEqual(val.hour, 3)\n self.assertEqual(exp.hour, 3)\n self.assertEqual(val, exp) # same UTC value\n self.assert_index_equal(rng[:2], rng2)\n\n # Right before the DST transition\n rng = date_range('3/11/2012 00:00', periods=2, freq='H',\n tz='US/Eastern')\n rng2 = DatetimeIndex(['3/11/2012 00:00', '3/11/2012 01:00'],\n tz='US/Eastern')\n self.assert_index_equal(rng, rng2)\n exp = Timestamp('3/11/2012 00:00', tz='US/Eastern')\n self.assertEqual(exp.hour, 0)\n self.assertEqual(rng[0], exp)\n exp = Timestamp('3/11/2012 01:00', tz='US/Eastern')\n self.assertEqual(exp.hour, 1)\n self.assertEqual(rng[1], exp)\n\n rng = date_range('3/11/2012 00:00', periods=10, freq='H',\n 
tz='US/Eastern')\n self.assertEqual(rng[2].hour, 3)\n\n def test_utc_box_timestamp_and_localize(self):\n rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')\n rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))\n\n tz = self.tz('US/Eastern')\n expected = rng[-1].astimezone(tz)\n\n stamp = rng_eastern[-1]\n self.assertEqual(stamp, expected)\n self.assertEqual(stamp.tzinfo, expected.tzinfo)\n\n # right tzinfo\n rng = date_range('3/13/2012', '3/14/2012', freq='H', tz='utc')\n rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))\n # test not valid for dateutil timezones.\n # self.assertIn('EDT', repr(rng_eastern[0].tzinfo))\n self.assertTrue('EDT' in repr(rng_eastern[0].tzinfo) or 'tzfile' in\n repr(rng_eastern[0].tzinfo))\n\n def test_timestamp_tz_convert(self):\n strdates = ['1/1/2012', '3/1/2012', '4/1/2012']\n idx = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern'))\n\n conv = idx[0].tz_convert(self.tzstr('US/Pacific'))\n expected = idx.tz_convert(self.tzstr('US/Pacific'))[0]\n\n self.assertEqual(conv, expected)\n\n def test_pass_dates_localize_to_utc(self):\n strdates = ['1/1/2012', '3/1/2012', '4/1/2012']\n\n idx = DatetimeIndex(strdates)\n conv = idx.tz_localize(self.tzstr('US/Eastern'))\n\n fromdates = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern'))\n\n self.assertEqual(conv.tz, fromdates.tz)\n self.assert_numpy_array_equal(conv.values, fromdates.values)\n\n def test_field_access_localize(self):\n strdates = ['1/1/2012', '3/1/2012', '4/1/2012']\n rng = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern'))\n self.assertTrue((rng.hour == 0).all())\n\n # a more unusual time zone, #1946\n dr = date_range('2011-10-02 00:00', freq='h', periods=10,\n tz=self.tzstr('America/Atikokan'))\n\n expected = np.arange(10, dtype=np.int32)\n self.assert_numpy_array_equal(dr.hour, expected)\n\n def test_with_tz(self):\n tz = self.tz('US/Central')\n\n # just want it to work\n start = datetime(2011, 3, 12, tzinfo=pytz.utc)\n dr = bdate_range(start, periods=50, 
freq=offsets.Hour())\n self.assertIs(dr.tz, pytz.utc)\n\n # DateRange with naive datetimes\n dr = bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc)\n dr = bdate_range('1/1/2005', '1/1/2009', tz=tz)\n\n # normalized\n central = dr.tz_convert(tz)\n self.assertIs(central.tz, tz)\n comp = self.localize(tz, central[0].to_pydatetime().replace(\n tzinfo=None)).tzinfo\n self.assertIs(central[0].tz, comp)\n\n # compare vs a localized tz\n comp = self.localize(tz,\n dr[0].to_pydatetime().replace(tzinfo=None)).tzinfo\n self.assertIs(central[0].tz, comp)\n\n # datetimes with tzinfo set\n dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc),\n '1/1/2009', tz=pytz.utc)\n\n self.assertRaises(Exception, bdate_range,\n datetime(2005, 1, 1, tzinfo=pytz.utc), '1/1/2009',\n tz=tz)\n\n def test_tz_localize(self):\n dr = bdate_range('1/1/2009', '1/1/2010')\n dr_utc = bdate_range('1/1/2009', '1/1/2010', tz=pytz.utc)\n localized = dr.tz_localize(pytz.utc)\n self.assert_index_equal(dr_utc, localized)\n\n def test_with_tz_ambiguous_times(self):\n tz = self.tz('US/Eastern')\n\n # March 13, 2011, spring forward, skip from 2 AM to 3 AM\n dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3,\n freq=offsets.Hour())\n self.assertRaises(pytz.NonExistentTimeError, dr.tz_localize, tz)\n\n # after dst transition, it works\n dr = date_range(datetime(2011, 3, 13, 3, 30), periods=3,\n freq=offsets.Hour(), tz=tz)\n\n # November 6, 2011, fall back, repeat 2 AM hour\n dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3,\n freq=offsets.Hour())\n self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize, tz)\n\n # UTC is OK\n dr = date_range(datetime(2011, 3, 13), periods=48,\n freq=offsets.Minute(30), tz=pytz.utc)\n\n def test_ambiguous_infer(self):\n # November 6, 2011, fall back, repeat 2 AM hour\n # With no repeated hours, we cannot infer the transition\n tz = self.tz('US/Eastern')\n dr = date_range(datetime(2011, 11, 6, 0), periods=5,\n freq=offsets.Hour())\n 
self.assertRaises(pytz.AmbiguousTimeError, dr.tz_localize, tz)\n\n # With repeated hours, we can infer the transition\n dr = date_range(datetime(2011, 11, 6, 0), periods=5,\n freq=offsets.Hour(), tz=tz)\n times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',\n '11/06/2011 02:00', '11/06/2011 03:00']\n di = DatetimeIndex(times)\n localized = di.tz_localize(tz, ambiguous='infer')\n self.assert_index_equal(dr, localized)\n with tm.assert_produces_warning(FutureWarning):\n localized_old = di.tz_localize(tz, infer_dst=True)\n self.assert_index_equal(dr, localized_old)\n self.assert_index_equal(dr, DatetimeIndex(times, tz=tz,\n ambiguous='infer'))\n\n # When there is no dst transition, nothing special happens\n dr = date_range(datetime(2011, 6, 1, 0), periods=10,\n freq=offsets.Hour())\n localized = dr.tz_localize(tz)\n localized_infer = dr.tz_localize(tz, ambiguous='infer')\n self.assert_index_equal(localized, localized_infer)\n with tm.assert_produces_warning(FutureWarning):\n localized_infer_old = dr.tz_localize(tz, infer_dst=True)\n self.assert_index_equal(localized, localized_infer_old)\n\n def test_ambiguous_flags(self):\n # November 6, 2011, fall back, repeat 2 AM hour\n tz = self.tz('US/Eastern')\n\n # Pass in flags to determine right dst transition\n dr = date_range(datetime(2011, 11, 6, 0), periods=5,\n freq=offsets.Hour(), tz=tz)\n times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',\n '11/06/2011 02:00', '11/06/2011 03:00']\n\n # Test tz_localize\n di = DatetimeIndex(times)\n is_dst = [1, 1, 0, 0, 0]\n localized = di.tz_localize(tz, ambiguous=is_dst)\n self.assert_index_equal(dr, localized)\n self.assert_index_equal(dr, DatetimeIndex(times, tz=tz,\n ambiguous=is_dst))\n\n localized = di.tz_localize(tz, ambiguous=np.array(is_dst))\n self.assert_index_equal(dr, localized)\n\n localized = di.tz_localize(tz,\n ambiguous=np.array(is_dst).astype('bool'))\n self.assert_index_equal(dr, localized)\n\n # Test constructor\n localized = 
DatetimeIndex(times, tz=tz, ambiguous=is_dst)\n self.assert_index_equal(dr, localized)\n\n # Test duplicate times where infer_dst fails\n times += times\n di = DatetimeIndex(times)\n\n # When the sizes are incompatible, make sure error is raised\n self.assertRaises(Exception, di.tz_localize, tz, ambiguous=is_dst)\n\n # When sizes are compatible and there are repeats ('infer' won't work)\n is_dst = np.hstack((is_dst, is_dst))\n localized = di.tz_localize(tz, ambiguous=is_dst)\n dr = dr.append(dr)\n self.assert_index_equal(dr, localized)\n\n # When there is no dst transition, nothing special happens\n dr = date_range(datetime(2011, 6, 1, 0), periods=10,\n freq=offsets.Hour())\n is_dst = np.array([1] * 10)\n localized = dr.tz_localize(tz)\n localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)\n self.assert_index_equal(localized, localized_is_dst)\n\n # construction with an ambiguous end-point\n # GH 11626\n tz = self.tzstr(\"Europe/London\")\n\n def f():\n date_range(\"2013-10-26 23:00\", \"2013-10-27 01:00\",\n tz=\"Europe/London\", freq=\"H\")\n self.assertRaises(pytz.AmbiguousTimeError, f)\n\n times = date_range(\"2013-10-26 23:00\", \"2013-10-27 01:00\", freq=\"H\",\n tz=tz, ambiguous='infer')\n self.assertEqual(times[0], Timestamp('2013-10-26 23:00', tz=tz,\n freq=\"H\"))\n if dateutil.__version__ != LooseVersion('2.6.0'):\n # GH 14621\n self.assertEqual(times[-1], Timestamp('2013-10-27 01:00', tz=tz,\n freq=\"H\"))\n\n def test_ambiguous_nat(self):\n tz = self.tz('US/Eastern')\n times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',\n '11/06/2011 02:00', '11/06/2011 03:00']\n di = DatetimeIndex(times)\n localized = di.tz_localize(tz, ambiguous='NaT')\n\n times = ['11/06/2011 00:00', np.NaN, np.NaN, '11/06/2011 02:00',\n '11/06/2011 03:00']\n di_test = DatetimeIndex(times, tz='US/Eastern')\n\n # left dtype is datetime64[ns, US/Eastern]\n # right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]\n 
self.assert_numpy_array_equal(di_test.values, localized.values)\n\n def test_ambiguous_bool(self):\n # make sure that we are correctly accepting bool values as ambiguous\n\n # gh-14402\n t = Timestamp('2015-11-01 01:00:03')\n expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central')\n expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central')\n\n def f():\n t.tz_localize('US/Central')\n self.assertRaises(pytz.AmbiguousTimeError, f)\n\n result = t.tz_localize('US/Central', ambiguous=True)\n self.assertEqual(result, expected0)\n\n result = t.tz_localize('US/Central', ambiguous=False)\n self.assertEqual(result, expected1)\n\n s = Series([t])\n expected0 = Series([expected0])\n expected1 = Series([expected1])\n\n def f():\n s.dt.tz_localize('US/Central')\n self.assertRaises(pytz.AmbiguousTimeError, f)\n\n result = s.dt.tz_localize('US/Central', ambiguous=True)\n assert_series_equal(result, expected0)\n\n result = s.dt.tz_localize('US/Central', ambiguous=[True])\n assert_series_equal(result, expected0)\n\n result = s.dt.tz_localize('US/Central', ambiguous=False)\n assert_series_equal(result, expected1)\n\n result = s.dt.tz_localize('US/Central', ambiguous=[False])\n assert_series_equal(result, expected1)\n\n def test_nonexistent_raise_coerce(self):\n # See issue 13057\n from pytz.exceptions import NonExistentTimeError\n times = ['2015-03-08 01:00', '2015-03-08 02:00', '2015-03-08 03:00']\n index = DatetimeIndex(times)\n tz = 'US/Eastern'\n self.assertRaises(NonExistentTimeError,\n index.tz_localize, tz=tz)\n self.assertRaises(NonExistentTimeError,\n index.tz_localize, tz=tz, errors='raise')\n result = index.tz_localize(tz=tz, errors='coerce')\n test_times = ['2015-03-08 01:00-05:00', 'NaT',\n '2015-03-08 03:00-04:00']\n expected = DatetimeIndex(test_times)\\\n .tz_localize('UTC').tz_convert('US/Eastern')\n tm.assert_index_equal(result, expected)\n\n # test utility methods\n def test_infer_tz(self):\n eastern = self.tz('US/Eastern')\n utc = pytz.utc\n\n 
_start = datetime(2001, 1, 1)\n _end = datetime(2009, 1, 1)\n\n start = self.localize(eastern, _start)\n end = self.localize(eastern, _end)\n assert (tools._infer_tzinfo(start, end) is self.localize(\n eastern, _start).tzinfo)\n assert (tools._infer_tzinfo(start, None) is self.localize(\n eastern, _start).tzinfo)\n assert (tools._infer_tzinfo(None, end) is self.localize(eastern,\n _end).tzinfo)\n\n start = utc.localize(_start)\n end = utc.localize(_end)\n assert (tools._infer_tzinfo(start, end) is utc)\n\n end = self.localize(eastern, _end)\n self.assertRaises(Exception, tools._infer_tzinfo, start, end)\n self.assertRaises(Exception, tools._infer_tzinfo, end, start)\n\n def test_tz_string(self):\n result = date_range('1/1/2000', periods=10,\n tz=self.tzstr('US/Eastern'))\n expected = date_range('1/1/2000', periods=10, tz=self.tz('US/Eastern'))\n\n self.assert_index_equal(result, expected)\n\n def test_take_dont_lose_meta(self):\n tm._skip_if_no_pytz()\n rng = date_range('1/1/2000', periods=20, tz=self.tzstr('US/Eastern'))\n\n result = rng.take(lrange(5))\n self.assertEqual(result.tz, rng.tz)\n self.assertEqual(result.freq, rng.freq)\n\n def test_index_with_timezone_repr(self):\n rng = date_range('4/13/2010', '5/6/2010')\n\n rng_eastern = rng.tz_localize(self.tzstr('US/Eastern'))\n\n rng_repr = repr(rng_eastern)\n self.assertIn('2010-04-13 00:00:00', rng_repr)\n\n def test_index_astype_asobject_tzinfos(self):\n # #1345\n\n # dates around a dst transition\n rng = date_range('2/13/2010', '5/6/2010', tz=self.tzstr('US/Eastern'))\n\n objs = rng.asobject\n for i, x in enumerate(objs):\n exval = rng[i]\n self.assertEqual(x, exval)\n self.assertEqual(x.tzinfo, exval.tzinfo)\n\n objs = rng.astype(object)\n for i, x in enumerate(objs):\n exval = rng[i]\n self.assertEqual(x, exval)\n self.assertEqual(x.tzinfo, exval.tzinfo)\n\n def test_localized_at_time_between_time(self):\n from datetime import time\n\n rng = date_range('4/16/2012', '5/1/2012', freq='H')\n ts = 
Series(np.random.randn(len(rng)), index=rng)\n\n ts_local = ts.tz_localize(self.tzstr('US/Eastern'))\n\n result = ts_local.at_time(time(10, 0))\n expected = ts.at_time(time(10, 0)).tz_localize(self.tzstr(\n 'US/Eastern'))\n assert_series_equal(result, expected)\n self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern')))\n\n t1, t2 = time(10, 0), time(11, 0)\n result = ts_local.between_time(t1, t2)\n expected = ts.between_time(t1,\n t2).tz_localize(self.tzstr('US/Eastern'))\n assert_series_equal(result, expected)\n self.assertTrue(self.cmptz(result.index.tz, self.tz('US/Eastern')))\n\n def test_string_index_alias_tz_aware(self):\n rng = date_range('1/1/2000', periods=10, tz=self.tzstr('US/Eastern'))\n ts = Series(np.random.randn(len(rng)), index=rng)\n\n result = ts['1/3/2000']\n self.assertAlmostEqual(result, ts[2])\n\n def test_fixed_offset(self):\n dates = [datetime(2000, 1, 1, tzinfo=fixed_off),\n datetime(2000, 1, 2, tzinfo=fixed_off),\n datetime(2000, 1, 3, tzinfo=fixed_off)]\n result = to_datetime(dates)\n self.assertEqual(result.tz, fixed_off)\n\n def test_fixedtz_topydatetime(self):\n dates = np.array([datetime(2000, 1, 1, tzinfo=fixed_off),\n datetime(2000, 1, 2, tzinfo=fixed_off),\n datetime(2000, 1, 3, tzinfo=fixed_off)])\n result = to_datetime(dates).to_pydatetime()\n self.assert_numpy_array_equal(dates, result)\n result = to_datetime(dates)._mpl_repr()\n self.assert_numpy_array_equal(dates, result)\n\n def test_convert_tz_aware_datetime_datetime(self):\n # #1581\n\n tz = self.tz('US/Eastern')\n\n dates = [datetime(2000, 1, 1), datetime(2000, 1, 2),\n datetime(2000, 1, 3)]\n\n dates_aware = [self.localize(tz, x) for x in dates]\n result = to_datetime(dates_aware)\n self.assertTrue(self.cmptz(result.tz, self.tz('US/Eastern')))\n\n converted = to_datetime(dates_aware, utc=True)\n ex_vals = np.array([Timestamp(x).value for x in dates_aware])\n self.assert_numpy_array_equal(converted.asi8, ex_vals)\n self.assertIs(converted.tz, pytz.utc)\n\n def 
test_to_datetime_utc(self):\n from dateutil.parser import parse\n arr = np.array([parse('2012-06-13T01:39:00Z')], dtype=object)\n\n result = to_datetime(arr, utc=True)\n self.assertIs(result.tz, pytz.utc)\n\n def test_to_datetime_tzlocal(self):\n from dateutil.parser import parse\n from dateutil.tz import tzlocal\n dt = parse('2012-06-13T01:39:00Z')\n dt = dt.replace(tzinfo=tzlocal())\n\n arr = np.array([dt], dtype=object)\n\n result = to_datetime(arr, utc=True)\n self.assertIs(result.tz, pytz.utc)\n\n rng = date_range('2012-11-03 03:00', '2012-11-05 03:00', tz=tzlocal())\n arr = rng.to_pydatetime()\n result = to_datetime(arr, utc=True)\n self.assertIs(result.tz, pytz.utc)\n\n def test_frame_no_datetime64_dtype(self):\n\n # after 7822\n # these retain the timezones on dict construction\n\n dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')\n dr_tz = dr.tz_localize(self.tzstr('US/Eastern'))\n e = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr)\n tz_expected = DatetimeTZDtype('ns', dr_tz.tzinfo)\n self.assertEqual(e['B'].dtype, tz_expected)\n\n # GH 2810 (with timezones)\n datetimes_naive = [ts.to_pydatetime() for ts in dr]\n datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz]\n df = DataFrame({'dr': dr,\n 'dr_tz': dr_tz,\n 'datetimes_naive': datetimes_naive,\n 'datetimes_with_tz': datetimes_with_tz})\n result = df.get_dtype_counts().sort_index()\n expected = Series({'datetime64[ns]': 2,\n str(tz_expected): 2}).sort_index()\n assert_series_equal(result, expected)\n\n def test_hongkong_tz_convert(self):\n # #1673\n dr = date_range('2012-01-01', '2012-01-10', freq='D', tz='Hongkong')\n\n # it works!\n dr.hour\n\n def test_tz_convert_unsorted(self):\n dr = date_range('2012-03-09', freq='H', periods=100, tz='utc')\n dr = dr.tz_convert(self.tzstr('US/Eastern'))\n\n result = dr[::-1].hour\n exp = dr.hour[::-1]\n tm.assert_almost_equal(result, exp)\n\n def test_shift_localized(self):\n dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')\n dr_tz = 
dr.tz_localize(self.tzstr('US/Eastern'))\n\n result = dr_tz.shift(1, '10T')\n self.assertEqual(result.tz, dr_tz.tz)\n\n def test_tz_aware_asfreq(self):\n dr = date_range('2011-12-01', '2012-07-20', freq='D',\n tz=self.tzstr('US/Eastern'))\n\n s = Series(np.random.randn(len(dr)), index=dr)\n\n # it works!\n s.asfreq('T')\n\n def test_static_tzinfo(self):\n # it works!\n index = DatetimeIndex([datetime(2012, 1, 1)], tz=self.tzstr('EST'))\n index.hour\n index[0]\n\n def test_tzaware_datetime_to_index(self):\n d = [datetime(2012, 8, 19, tzinfo=self.tz('US/Eastern'))]\n\n index = DatetimeIndex(d)\n self.assertTrue(self.cmptz(index.tz, self.tz('US/Eastern')))\n\n def test_date_range_span_dst_transition(self):\n # #1778\n\n # Standard -> Daylight Savings Time\n dr = date_range('03/06/2012 00:00', periods=200, freq='W-FRI',\n tz='US/Eastern')\n\n self.assertTrue((dr.hour == 0).all())\n\n dr = date_range('2012-11-02', periods=10, tz=self.tzstr('US/Eastern'))\n self.assertTrue((dr.hour == 0).all())\n\n def test_convert_datetime_list(self):\n dr = date_range('2012-06-02', periods=10,\n tz=self.tzstr('US/Eastern'), name='foo')\n dr2 = DatetimeIndex(list(dr), name='foo')\n self.assert_index_equal(dr, dr2)\n self.assertEqual(dr.tz, dr2.tz)\n self.assertEqual(dr2.name, 'foo')\n\n def test_frame_from_records_utc(self):\n rec = {'datum': 1.5,\n 'begin_time': datetime(2006, 4, 27, tzinfo=pytz.utc)}\n\n # it works\n DataFrame.from_records([rec], index='begin_time')\n\n def test_frame_reset_index(self):\n dr = date_range('2012-06-02', periods=10, tz=self.tzstr('US/Eastern'))\n df = DataFrame(np.random.randn(len(dr)), dr)\n roundtripped = df.reset_index().set_index('index')\n xp = df.index.tz\n rs = roundtripped.index.tz\n self.assertEqual(xp, rs)\n\n def test_dateutil_tzoffset_support(self):\n from dateutil.tz import tzoffset\n values = [188.5, 328.25]\n tzinfo = tzoffset(None, 7200)\n index = [datetime(2012, 5, 11, 11, tzinfo=tzinfo),\n datetime(2012, 5, 11, 12, tzinfo=tzinfo)]\n 
series = Series(data=values, index=index)\n\n self.assertEqual(series.index.tz, tzinfo)\n\n # it works! #2443\n repr(series.index[0])\n\n def test_getitem_pydatetime_tz(self):\n index = date_range(start='2012-12-24 16:00', end='2012-12-24 18:00',\n freq='H', tz=self.tzstr('Europe/Berlin'))\n ts = Series(index=index, data=index.hour)\n time_pandas = Timestamp('2012-12-24 17:00',\n tz=self.tzstr('Europe/Berlin'))\n time_datetime = self.localize(\n self.tz('Europe/Berlin'), datetime(2012, 12, 24, 17, 0))\n self.assertEqual(ts[time_pandas], ts[time_datetime])\n\n def test_index_drop_dont_lose_tz(self):\n # #2621\n ind = date_range(\"2012-12-01\", periods=10, tz=\"utc\")\n ind = ind.drop(ind[-1])\n\n self.assertTrue(ind.tz is not None)\n\n def test_datetimeindex_tz(self):\n \"\"\" Test different DatetimeIndex constructions with timezone\n Follow-up of #4229\n \"\"\"\n\n arr = ['11/10/2005 08:00:00', '11/10/2005 09:00:00']\n\n idx1 = to_datetime(arr).tz_localize(self.tzstr('US/Eastern'))\n idx2 = DatetimeIndex(start=\"2005-11-10 08:00:00\", freq='H', periods=2,\n tz=self.tzstr('US/Eastern'))\n idx3 = DatetimeIndex(arr, tz=self.tzstr('US/Eastern'))\n idx4 = DatetimeIndex(np.array(arr), tz=self.tzstr('US/Eastern'))\n\n for other in [idx2, idx3, idx4]:\n self.assert_index_equal(idx1, other)\n\n def test_datetimeindex_tz_nat(self):\n idx = to_datetime([Timestamp(\"2013-1-1\", tz=self.tzstr('US/Eastern')),\n NaT])\n\n self.assertTrue(isnull(idx[1]))\n self.assertTrue(idx[0].tzinfo is not None)\n\n\nclass TestTimeZoneSupportDateutil(TestTimeZoneSupportPytz):\n\n def setUp(self):\n tm._skip_if_no_dateutil()\n\n def tz(self, tz):\n \"\"\"\n Construct a dateutil timezone.\n Use tslib.maybe_get_tz so that we get the filename on the tz right\n on windows. See #7337.\n \"\"\"\n return tslib.maybe_get_tz('dateutil/' + tz)\n\n def tzstr(self, tz):\n \"\"\" Construct a timezone string from a string. Overridden in subclass\n to parameterize tests. 
\"\"\"\n return 'dateutil/' + tz\n\n def cmptz(self, tz1, tz2):\n \"\"\" Compare two timezones. Overridden in subclass to parameterize\n tests. \"\"\"\n return tz1 == tz2\n\n def localize(self, tz, x):\n return x.replace(tzinfo=tz)\n\n def test_utc_with_system_utc(self):\n # Skipped on win32 due to dateutil bug\n tm._skip_if_windows()\n\n from pandas._libs.tslib import maybe_get_tz\n\n # from system utc to real utc\n ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))\n # check that the time hasn't changed.\n self.assertEqual(ts, ts.tz_convert(dateutil.tz.tzutc()))\n\n # from system utc to real utc\n ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))\n # check that the time hasn't changed.\n self.assertEqual(ts, ts.tz_convert(dateutil.tz.tzutc()))\n\n def test_tz_convert_hour_overflow_dst(self):\n # Regression test for:\n # https://github.com/pandas-dev/pandas/issues/13306\n\n # sorted case US/Eastern -> UTC\n ts = ['2008-05-12 09:50:00',\n '2008-12-12 09:50:35',\n '2009-05-12 09:50:32']\n tt = to_datetime(ts).tz_localize('US/Eastern')\n ut = tt.tz_convert('UTC')\n expected = np.array([13, 14, 13], dtype=np.int32)\n self.assert_numpy_array_equal(ut.hour, expected)\n\n # sorted case UTC -> US/Eastern\n ts = ['2008-05-12 13:50:00',\n '2008-12-12 14:50:35',\n '2009-05-12 13:50:32']\n tt = to_datetime(ts).tz_localize('UTC')\n ut = tt.tz_convert('US/Eastern')\n expected = np.array([9, 9, 9], dtype=np.int32)\n self.assert_numpy_array_equal(ut.hour, expected)\n\n # unsorted case US/Eastern -> UTC\n ts = ['2008-05-12 09:50:00',\n '2008-12-12 09:50:35',\n '2008-05-12 09:50:32']\n tt = to_datetime(ts).tz_localize('US/Eastern')\n ut = tt.tz_convert('UTC')\n expected = np.array([13, 14, 13], dtype=np.int32)\n self.assert_numpy_array_equal(ut.hour, expected)\n\n # unsorted case UTC -> US/Eastern\n ts = ['2008-05-12 13:50:00',\n '2008-12-12 14:50:35',\n '2008-05-12 13:50:32']\n tt = to_datetime(ts).tz_localize('UTC')\n ut = 
tt.tz_convert('US/Eastern')\n expected = np.array([9, 9, 9], dtype=np.int32)\n self.assert_numpy_array_equal(ut.hour, expected)\n\n def test_tz_convert_hour_overflow_dst_timestamps(self):\n # Regression test for:\n # https://github.com/pandas-dev/pandas/issues/13306\n\n tz = self.tzstr('US/Eastern')\n\n # sorted case US/Eastern -> UTC\n ts = [Timestamp('2008-05-12 09:50:00', tz=tz),\n Timestamp('2008-12-12 09:50:35', tz=tz),\n Timestamp('2009-05-12 09:50:32', tz=tz)]\n tt = to_datetime(ts)\n ut = tt.tz_convert('UTC')\n expected = np.array([13, 14, 13], dtype=np.int32)\n self.assert_numpy_array_equal(ut.hour, expected)\n\n # sorted case UTC -> US/Eastern\n ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'),\n Timestamp('2008-12-12 14:50:35', tz='UTC'),\n Timestamp('2009-05-12 13:50:32', tz='UTC')]\n tt = to_datetime(ts)\n ut = tt.tz_convert('US/Eastern')\n expected = np.array([9, 9, 9], dtype=np.int32)\n self.assert_numpy_array_equal(ut.hour, expected)\n\n # unsorted case US/Eastern -> UTC\n ts = [Timestamp('2008-05-12 09:50:00', tz=tz),\n Timestamp('2008-12-12 09:50:35', tz=tz),\n Timestamp('2008-05-12 09:50:32', tz=tz)]\n tt = to_datetime(ts)\n ut = tt.tz_convert('UTC')\n expected = np.array([13, 14, 13], dtype=np.int32)\n self.assert_numpy_array_equal(ut.hour, expected)\n\n # unsorted case UTC -> US/Eastern\n ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'),\n Timestamp('2008-12-12 14:50:35', tz='UTC'),\n Timestamp('2008-05-12 13:50:32', tz='UTC')]\n tt = to_datetime(ts)\n ut = tt.tz_convert('US/Eastern')\n expected = np.array([9, 9, 9], dtype=np.int32)\n self.assert_numpy_array_equal(ut.hour, expected)\n\n def test_tslib_tz_convert_trans_pos_plus_1__bug(self):\n # Regression test for tslib.tz_convert(vals, tz1, tz2).\n # See https://github.com/pandas-dev/pandas/issues/4496 for details.\n for freq, n in [('H', 1), ('T', 60), ('S', 3600)]:\n idx = date_range(datetime(2011, 3, 26, 23),\n datetime(2011, 3, 27, 1), freq=freq)\n idx = idx.tz_localize('UTC')\n idx = 
idx.tz_convert('Europe/Moscow')\n\n expected = np.repeat(np.array([3, 4, 5], dtype=np.int32),\n np.array([n, n, 1]))\n self.assert_numpy_array_equal(idx.hour, expected)\n\n def test_tslib_tz_convert_dst(self):\n for freq, n in [('H', 1), ('T', 60), ('S', 3600)]:\n # Start DST\n idx = date_range('2014-03-08 23:00', '2014-03-09 09:00', freq=freq,\n tz='UTC')\n idx = idx.tz_convert('US/Eastern')\n expected = np.repeat(np.array([18, 19, 20, 21, 22, 23,\n 0, 1, 3, 4, 5], dtype=np.int32),\n np.array([n, n, n, n, n, n, n, n, n, n, 1]))\n self.assert_numpy_array_equal(idx.hour, expected)\n\n idx = date_range('2014-03-08 18:00', '2014-03-09 05:00', freq=freq,\n tz='US/Eastern')\n idx = idx.tz_convert('UTC')\n expected = np.repeat(np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n dtype=np.int32),\n np.array([n, n, n, n, n, n, n, n, n, n, 1]))\n self.assert_numpy_array_equal(idx.hour, expected)\n\n # End DST\n idx = date_range('2014-11-01 23:00', '2014-11-02 09:00', freq=freq,\n tz='UTC')\n idx = idx.tz_convert('US/Eastern')\n expected = np.repeat(np.array([19, 20, 21, 22, 23,\n 0, 1, 1, 2, 3, 4], dtype=np.int32),\n np.array([n, n, n, n, n, n, n, n, n, n, 1]))\n self.assert_numpy_array_equal(idx.hour, expected)\n\n idx = date_range('2014-11-01 18:00', '2014-11-02 05:00', freq=freq,\n tz='US/Eastern')\n idx = idx.tz_convert('UTC')\n expected = np.repeat(np.array([22, 23, 0, 1, 2, 3, 4, 5, 6,\n 7, 8, 9, 10], dtype=np.int32),\n np.array([n, n, n, n, n, n, n, n, n,\n n, n, n, 1]))\n self.assert_numpy_array_equal(idx.hour, expected)\n\n # daily\n # Start DST\n idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D',\n tz='UTC')\n idx = idx.tz_convert('US/Eastern')\n self.assert_numpy_array_equal(idx.hour,\n np.array([19, 19], dtype=np.int32))\n\n idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D',\n tz='US/Eastern')\n idx = idx.tz_convert('UTC')\n self.assert_numpy_array_equal(idx.hour,\n np.array([5, 5], dtype=np.int32))\n\n # End DST\n idx = 
date_range('2014-11-01 00:00', '2014-11-02 00:00', freq='D',\n tz='UTC')\n idx = idx.tz_convert('US/Eastern')\n self.assert_numpy_array_equal(idx.hour,\n np.array([20, 20], dtype=np.int32))\n\n idx = date_range('2014-11-01 00:00', '2014-11-02 000:00', freq='D',\n tz='US/Eastern')\n idx = idx.tz_convert('UTC')\n self.assert_numpy_array_equal(idx.hour,\n np.array([4, 4], dtype=np.int32))\n\n def test_tzlocal(self):\n # GH 13583\n ts = Timestamp('2011-01-01', tz=dateutil.tz.tzlocal())\n self.assertEqual(ts.tz, dateutil.tz.tzlocal())\n self.assertTrue(\"tz='tzlocal()')\" in repr(ts))\n\n tz = tslib.maybe_get_tz('tzlocal()')\n self.assertEqual(tz, dateutil.tz.tzlocal())\n\n # get offset using normal datetime for test\n offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))\n offset = offset.total_seconds() * 1000000000\n self.assertEqual(ts.value + offset, Timestamp('2011-01-01').value)\n\n def test_tz_localize_tzlocal(self):\n # GH 13583\n offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))\n offset = int(offset.total_seconds() * 1000000000)\n\n dti = date_range(start='2001-01-01', end='2001-03-01')\n dti2 = dti.tz_localize(dateutil.tz.tzlocal())\n tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8)\n\n dti = date_range(start='2001-01-01', end='2001-03-01',\n tz=dateutil.tz.tzlocal())\n dti2 = dti.tz_localize(None)\n tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8)\n\n def test_tz_convert_tzlocal(self):\n # GH 13583\n # tz_convert doesn't affect to internal\n dti = date_range(start='2001-01-01', end='2001-03-01', tz='UTC')\n dti2 = dti.tz_convert(dateutil.tz.tzlocal())\n tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)\n\n dti = date_range(start='2001-01-01', end='2001-03-01',\n tz=dateutil.tz.tzlocal())\n dti2 = dti.tz_convert(None)\n tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)\n\n\nclass TestTimeZoneCacheKey(tm.TestCase):\n\n def test_cache_keys_are_distinct_for_pytz_vs_dateutil(self):\n tzs = pytz.common_timezones\n for tz_name 
in tzs:\n if tz_name == 'UTC':\n # skip utc as it's a special case in dateutil\n continue\n tz_p = tslib.maybe_get_tz(tz_name)\n tz_d = tslib.maybe_get_tz('dateutil/' + tz_name)\n if tz_d is None:\n # skip timezones that dateutil doesn't know about.\n continue\n self.assertNotEqual(tslib._p_tz_cache_key(\n tz_p), tslib._p_tz_cache_key(tz_d))\n\n\nclass TestTimeZones(tm.TestCase):\n timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']\n\n def setUp(self):\n tm._skip_if_no_pytz()\n\n def test_replace(self):\n # GH 14621\n # GH 7825\n # replacing datetime components with and w/o presence of a timezone\n dt = Timestamp('2016-01-01 09:00:00')\n result = dt.replace(hour=0)\n expected = Timestamp('2016-01-01 00:00:00')\n self.assertEqual(result, expected)\n\n for tz in self.timezones:\n dt = Timestamp('2016-01-01 09:00:00', tz=tz)\n result = dt.replace(hour=0)\n expected = Timestamp('2016-01-01 00:00:00', tz=tz)\n self.assertEqual(result, expected)\n\n # we preserve nanoseconds\n dt = Timestamp('2016-01-01 09:00:00.000000123', tz=tz)\n result = dt.replace(hour=0)\n expected = Timestamp('2016-01-01 00:00:00.000000123', tz=tz)\n self.assertEqual(result, expected)\n\n # test all\n dt = Timestamp('2016-01-01 09:00:00.000000123', tz=tz)\n result = dt.replace(year=2015, month=2, day=2, hour=0, minute=5,\n second=5, microsecond=5, nanosecond=5)\n expected = Timestamp('2015-02-02 00:05:05.000005005', tz=tz)\n self.assertEqual(result, expected)\n\n # error\n def f():\n dt.replace(foo=5)\n self.assertRaises(TypeError, f)\n\n def f():\n dt.replace(hour=0.1)\n self.assertRaises(ValueError, f)\n\n # assert conversion to naive is the same as replacing tzinfo with None\n dt = Timestamp('2013-11-03 01:59:59.999999-0400', tz='US/Eastern')\n self.assertEqual(dt.tz_localize(None), dt.replace(tzinfo=None))\n\n def test_ambiguous_compat(self):\n # validate that pytz and dateutil are compat for dst\n # when the transition happens\n tm._skip_if_no_dateutil()\n 
tm._skip_if_no_pytz()\n\n pytz_zone = 'Europe/London'\n dateutil_zone = 'dateutil/Europe/London'\n result_pytz = (Timestamp('2013-10-27 01:00:00')\n .tz_localize(pytz_zone, ambiguous=0))\n result_dateutil = (Timestamp('2013-10-27 01:00:00')\n .tz_localize(dateutil_zone, ambiguous=0))\n self.assertEqual(result_pytz.value, result_dateutil.value)\n self.assertEqual(result_pytz.value, 1382835600000000000)\n\n # dateutil 2.6 buggy w.r.t. ambiguous=0\n if dateutil.__version__ != LooseVersion('2.6.0'):\n # GH 14621\n # https://github.com/dateutil/dateutil/issues/321\n self.assertEqual(result_pytz.to_pydatetime().tzname(),\n result_dateutil.to_pydatetime().tzname())\n self.assertEqual(str(result_pytz), str(result_dateutil))\n\n # 1 hour difference\n result_pytz = (Timestamp('2013-10-27 01:00:00')\n .tz_localize(pytz_zone, ambiguous=1))\n result_dateutil = (Timestamp('2013-10-27 01:00:00')\n .tz_localize(dateutil_zone, ambiguous=1))\n self.assertEqual(result_pytz.value, result_dateutil.value)\n self.assertEqual(result_pytz.value, 1382832000000000000)\n\n # dateutil < 2.6 is buggy w.r.t. 
ambiguous timezones\n if dateutil.__version__ > LooseVersion('2.5.3'):\n # GH 14621\n self.assertEqual(str(result_pytz), str(result_dateutil))\n self.assertEqual(result_pytz.to_pydatetime().tzname(),\n result_dateutil.to_pydatetime().tzname())\n\n def test_index_equals_with_tz(self):\n left = date_range('1/1/2011', periods=100, freq='H', tz='utc')\n right = date_range('1/1/2011', periods=100, freq='H', tz='US/Eastern')\n\n self.assertFalse(left.equals(right))\n\n def test_tz_localize_naive(self):\n rng = date_range('1/1/2011', periods=100, freq='H')\n\n conv = rng.tz_localize('US/Pacific')\n exp = date_range('1/1/2011', periods=100, freq='H', tz='US/Pacific')\n\n self.assert_index_equal(conv, exp)\n\n def test_tz_localize_roundtrip(self):\n for tz in self.timezones:\n idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M')\n idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D')\n idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H')\n idx4 = date_range(start='2014-08-01', end='2014-10-31', freq='T')\n for idx in [idx1, idx2, idx3, idx4]:\n localized = idx.tz_localize(tz)\n expected = date_range(start=idx[0], end=idx[-1], freq=idx.freq,\n tz=tz)\n tm.assert_index_equal(localized, expected)\n\n with tm.assertRaises(TypeError):\n localized.tz_localize(tz)\n\n reset = localized.tz_localize(None)\n tm.assert_index_equal(reset, idx)\n self.assertTrue(reset.tzinfo is None)\n\n def test_series_frame_tz_localize(self):\n\n rng = date_range('1/1/2011', periods=100, freq='H')\n ts = Series(1, index=rng)\n\n result = ts.tz_localize('utc')\n self.assertEqual(result.index.tz.zone, 'UTC')\n\n df = DataFrame({'a': 1}, index=rng)\n result = df.tz_localize('utc')\n expected = DataFrame({'a': 1}, rng.tz_localize('UTC'))\n self.assertEqual(result.index.tz.zone, 'UTC')\n assert_frame_equal(result, expected)\n\n df = df.T\n result = df.tz_localize('utc', axis=1)\n self.assertEqual(result.columns.tz.zone, 'UTC')\n assert_frame_equal(result, 
expected.T)\n\n # Can't localize if already tz-aware\n rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')\n ts = Series(1, index=rng)\n tm.assertRaisesRegexp(TypeError, 'Already tz-aware', ts.tz_localize,\n 'US/Eastern')\n\n def test_series_frame_tz_convert(self):\n rng = date_range('1/1/2011', periods=200, freq='D', tz='US/Eastern')\n ts = Series(1, index=rng)\n\n result = ts.tz_convert('Europe/Berlin')\n self.assertEqual(result.index.tz.zone, 'Europe/Berlin')\n\n df = DataFrame({'a': 1}, index=rng)\n result = df.tz_convert('Europe/Berlin')\n expected = DataFrame({'a': 1}, rng.tz_convert('Europe/Berlin'))\n self.assertEqual(result.index.tz.zone, 'Europe/Berlin')\n assert_frame_equal(result, expected)\n\n df = df.T\n result = df.tz_convert('Europe/Berlin', axis=1)\n self.assertEqual(result.columns.tz.zone, 'Europe/Berlin')\n assert_frame_equal(result, expected.T)\n\n # can't convert tz-naive\n rng = date_range('1/1/2011', periods=200, freq='D')\n ts = Series(1, index=rng)\n tm.assertRaisesRegexp(TypeError, \"Cannot convert tz-naive\",\n ts.tz_convert, 'US/Eastern')\n\n def test_tz_convert_roundtrip(self):\n for tz in self.timezones:\n idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M',\n tz='UTC')\n exp1 = date_range(start='2014-01-01', end='2014-12-31', freq='M')\n\n idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D',\n tz='UTC')\n exp2 = date_range(start='2014-01-01', end='2014-12-31', freq='D')\n\n idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H',\n tz='UTC')\n exp3 = date_range(start='2014-01-01', end='2014-03-01', freq='H')\n\n idx4 = date_range(start='2014-08-01', end='2014-10-31', freq='T',\n tz='UTC')\n exp4 = date_range(start='2014-08-01', end='2014-10-31', freq='T')\n\n for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3),\n (idx4, exp4)]:\n converted = idx.tz_convert(tz)\n reset = converted.tz_convert(None)\n tm.assert_index_equal(reset, expected)\n self.assertTrue(reset.tzinfo is None)\n 
tm.assert_index_equal(reset, converted.tz_convert(\n 'UTC').tz_localize(None))\n\n def test_join_utc_convert(self):\n rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')\n\n left = rng.tz_convert('US/Eastern')\n right = rng.tz_convert('Europe/Berlin')\n\n for how in ['inner', 'outer', 'left', 'right']:\n result = left.join(left[:-5], how=how)\n tm.assertIsInstance(result, DatetimeIndex)\n self.assertEqual(result.tz, left.tz)\n\n result = left.join(right[:-5], how=how)\n tm.assertIsInstance(result, DatetimeIndex)\n self.assertEqual(result.tz.zone, 'UTC')\n\n def test_join_aware(self):\n rng = date_range('1/1/2011', periods=10, freq='H')\n ts = Series(np.random.randn(len(rng)), index=rng)\n\n ts_utc = ts.tz_localize('utc')\n\n self.assertRaises(Exception, ts.__add__, ts_utc)\n self.assertRaises(Exception, ts_utc.__add__, ts)\n\n test1 = DataFrame(np.zeros((6, 3)),\n index=date_range(\"2012-11-15 00:00:00\", periods=6,\n freq=\"100L\", tz=\"US/Central\"))\n test2 = DataFrame(np.zeros((3, 3)),\n index=date_range(\"2012-11-15 00:00:00\", periods=3,\n freq=\"250L\", tz=\"US/Central\"),\n columns=lrange(3, 6))\n\n result = test1.join(test2, how='outer')\n ex_index = test1.index.union(test2.index)\n\n self.assert_index_equal(result.index, ex_index)\n self.assertTrue(result.index.tz.zone == 'US/Central')\n\n # non-overlapping\n rng = date_range(\"2012-11-15 00:00:00\", periods=6, freq=\"H\",\n tz=\"US/Central\")\n\n rng2 = date_range(\"2012-11-15 12:00:00\", periods=6, freq=\"H\",\n tz=\"US/Eastern\")\n\n result = rng.union(rng2)\n self.assertTrue(result.tz.zone == 'UTC')\n\n def test_align_aware(self):\n idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern')\n idx2 = date_range('2001', periods=5, freq='2H', tz='US/Eastern')\n df1 = DataFrame(np.random.randn(len(idx1), 3), idx1)\n df2 = DataFrame(np.random.randn(len(idx2), 3), idx2)\n new1, new2 = df1.align(df2)\n self.assertEqual(df1.index.tz, new1.index.tz)\n self.assertEqual(df2.index.tz, 
new2.index.tz)\n\n # # different timezones convert to UTC\n\n # frame\n df1_central = df1.tz_convert('US/Central')\n new1, new2 = df1.align(df1_central)\n self.assertEqual(new1.index.tz, pytz.UTC)\n self.assertEqual(new2.index.tz, pytz.UTC)\n\n # series\n new1, new2 = df1[0].align(df1_central[0])\n self.assertEqual(new1.index.tz, pytz.UTC)\n self.assertEqual(new2.index.tz, pytz.UTC)\n\n # combination\n new1, new2 = df1.align(df1_central[0], axis=0)\n self.assertEqual(new1.index.tz, pytz.UTC)\n self.assertEqual(new2.index.tz, pytz.UTC)\n\n df1[0].align(df1_central, axis=0)\n self.assertEqual(new1.index.tz, pytz.UTC)\n self.assertEqual(new2.index.tz, pytz.UTC)\n\n def test_append_aware(self):\n rng1 = date_range('1/1/2011 01:00', periods=1, freq='H',\n tz='US/Eastern')\n rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',\n tz='US/Eastern')\n ts1 = Series([1], index=rng1)\n ts2 = Series([2], index=rng2)\n ts_result = ts1.append(ts2)\n\n exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],\n tz='US/Eastern')\n exp = Series([1, 2], index=exp_index)\n assert_series_equal(ts_result, exp)\n self.assertEqual(ts_result.index.tz, rng1.tz)\n\n rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', tz='UTC')\n rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', tz='UTC')\n ts1 = Series([1], index=rng1)\n ts2 = Series([2], index=rng2)\n ts_result = ts1.append(ts2)\n\n exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],\n tz='UTC')\n exp = Series([1, 2], index=exp_index)\n assert_series_equal(ts_result, exp)\n utc = rng1.tz\n self.assertEqual(utc, ts_result.index.tz)\n\n # GH 7795\n # different tz coerces to object dtype, not UTC\n rng1 = date_range('1/1/2011 01:00', periods=1, freq='H',\n tz='US/Eastern')\n rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',\n tz='US/Central')\n ts1 = Series([1], index=rng1)\n ts2 = Series([2], index=rng2)\n ts_result = ts1.append(ts2)\n exp_index = Index([Timestamp('1/1/2011 01:00', 
tz='US/Eastern'),\n Timestamp('1/1/2011 02:00', tz='US/Central')])\n exp = Series([1, 2], index=exp_index)\n assert_series_equal(ts_result, exp)\n\n def test_append_dst(self):\n rng1 = date_range('1/1/2016 01:00', periods=3, freq='H',\n tz='US/Eastern')\n rng2 = date_range('8/1/2016 01:00', periods=3, freq='H',\n tz='US/Eastern')\n ts1 = Series([1, 2, 3], index=rng1)\n ts2 = Series([10, 11, 12], index=rng2)\n ts_result = ts1.append(ts2)\n\n exp_index = DatetimeIndex(['2016-01-01 01:00', '2016-01-01 02:00',\n '2016-01-01 03:00', '2016-08-01 01:00',\n '2016-08-01 02:00', '2016-08-01 03:00'],\n tz='US/Eastern')\n exp = Series([1, 2, 3, 10, 11, 12], index=exp_index)\n assert_series_equal(ts_result, exp)\n self.assertEqual(ts_result.index.tz, rng1.tz)\n\n def test_append_aware_naive(self):\n rng1 = date_range('1/1/2011 01:00', periods=1, freq='H')\n rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',\n tz='US/Eastern')\n ts1 = Series(np.random.randn(len(rng1)), index=rng1)\n ts2 = Series(np.random.randn(len(rng2)), index=rng2)\n ts_result = ts1.append(ts2)\n\n self.assertTrue(ts_result.index.equals(ts1.index.asobject.append(\n ts2.index.asobject)))\n\n # mixed\n rng1 = date_range('1/1/2011 01:00', periods=1, freq='H')\n rng2 = lrange(100)\n ts1 = Series(np.random.randn(len(rng1)), index=rng1)\n ts2 = Series(np.random.randn(len(rng2)), index=rng2)\n ts_result = ts1.append(ts2)\n self.assertTrue(ts_result.index.equals(ts1.index.asobject.append(\n ts2.index)))\n\n def test_equal_join_ensure_utc(self):\n rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern')\n ts = Series(np.random.randn(len(rng)), index=rng)\n\n ts_moscow = ts.tz_convert('Europe/Moscow')\n\n result = ts + ts_moscow\n self.assertIs(result.index.tz, pytz.utc)\n\n result = ts_moscow + ts\n self.assertIs(result.index.tz, pytz.utc)\n\n df = DataFrame({'a': ts})\n df_moscow = df.tz_convert('Europe/Moscow')\n result = df + df_moscow\n self.assertIs(result.index.tz, pytz.utc)\n\n result = 
df_moscow + df\n self.assertIs(result.index.tz, pytz.utc)\n\n def test_arith_utc_convert(self):\n rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')\n\n perm = np.random.permutation(100)[:90]\n ts1 = Series(np.random.randn(90),\n index=rng.take(perm).tz_convert('US/Eastern'))\n\n perm = np.random.permutation(100)[:90]\n ts2 = Series(np.random.randn(90),\n index=rng.take(perm).tz_convert('Europe/Berlin'))\n\n result = ts1 + ts2\n\n uts1 = ts1.tz_convert('utc')\n uts2 = ts2.tz_convert('utc')\n expected = uts1 + uts2\n\n self.assertEqual(result.index.tz, pytz.UTC)\n assert_series_equal(result, expected)\n\n def test_intersection(self):\n rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')\n\n left = rng[10:90][::-1]\n right = rng[20:80][::-1]\n\n self.assertEqual(left.tz, rng.tz)\n result = left.intersection(right)\n self.assertEqual(result.tz, left.tz)\n\n def test_timestamp_equality_different_timezones(self):\n utc_range = date_range('1/1/2000', periods=20, tz='UTC')\n eastern_range = utc_range.tz_convert('US/Eastern')\n berlin_range = utc_range.tz_convert('Europe/Berlin')\n\n for a, b, c in zip(utc_range, eastern_range, berlin_range):\n self.assertEqual(a, b)\n self.assertEqual(b, c)\n self.assertEqual(a, c)\n\n self.assertTrue((utc_range == eastern_range).all())\n self.assertTrue((utc_range == berlin_range).all())\n self.assertTrue((berlin_range == eastern_range).all())\n\n def test_datetimeindex_tz(self):\n rng = date_range('03/12/2012 00:00', periods=10, freq='W-FRI',\n tz='US/Eastern')\n rng2 = DatetimeIndex(data=rng, tz='US/Eastern')\n self.assert_index_equal(rng, rng2)\n\n def test_normalize_tz(self):\n rng = date_range('1/1/2000 9:30', periods=10, freq='D',\n tz='US/Eastern')\n\n result = rng.normalize()\n expected = date_range('1/1/2000', periods=10, freq='D',\n tz='US/Eastern')\n self.assert_index_equal(result, expected)\n\n self.assertTrue(result.is_normalized)\n self.assertFalse(rng.is_normalized)\n\n rng = date_range('1/1/2000 
9:30', periods=10, freq='D', tz='UTC')\n\n result = rng.normalize()\n expected = date_range('1/1/2000', periods=10, freq='D', tz='UTC')\n self.assert_index_equal(result, expected)\n\n self.assertTrue(result.is_normalized)\n self.assertFalse(rng.is_normalized)\n\n from dateutil.tz import tzlocal\n rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz=tzlocal())\n result = rng.normalize()\n expected = date_range('1/1/2000', periods=10, freq='D', tz=tzlocal())\n self.assert_index_equal(result, expected)\n\n self.assertTrue(result.is_normalized)\n self.assertFalse(rng.is_normalized)\n\n def test_normalize_tz_local(self):\n # GH 13459\n from dateutil.tz import tzlocal\n\n timezones = ['US/Pacific', 'US/Eastern', 'UTC', 'Asia/Kolkata',\n 'Asia/Shanghai', 'Australia/Canberra']\n\n for timezone in timezones:\n with set_timezone(timezone):\n rng = date_range('1/1/2000 9:30', periods=10, freq='D',\n tz=tzlocal())\n\n result = rng.normalize()\n expected = date_range('1/1/2000', periods=10, freq='D',\n tz=tzlocal())\n self.assert_index_equal(result, expected)\n\n self.assertTrue(result.is_normalized)\n self.assertFalse(rng.is_normalized)\n\n def test_tzaware_offset(self):\n dates = date_range('2012-11-01', periods=3, tz='US/Pacific')\n offset = dates + offsets.Hour(5)\n self.assertEqual(dates[0] + offsets.Hour(5), offset[0])\n\n # GH 6818\n for tz in ['UTC', 'US/Pacific', 'Asia/Tokyo']:\n dates = date_range('2010-11-01 00:00', periods=3, tz=tz, freq='H')\n expected = DatetimeIndex(['2010-11-01 05:00', '2010-11-01 06:00',\n '2010-11-01 07:00'], freq='H', tz=tz)\n\n offset = dates + offsets.Hour(5)\n self.assert_index_equal(offset, expected)\n offset = dates + np.timedelta64(5, 'h')\n self.assert_index_equal(offset, expected)\n offset = dates + timedelta(hours=5)\n self.assert_index_equal(offset, expected)\n\n def test_nat(self):\n # GH 5546\n dates = [NaT]\n idx = DatetimeIndex(dates)\n idx = idx.tz_localize('US/Pacific')\n self.assert_index_equal(idx, DatetimeIndex(dates, 
tz='US/Pacific'))\n idx = idx.tz_convert('US/Eastern')\n self.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Eastern'))\n idx = idx.tz_convert('UTC')\n self.assert_index_equal(idx, DatetimeIndex(dates, tz='UTC'))\n\n dates = ['2010-12-01 00:00', '2010-12-02 00:00', NaT]\n idx = DatetimeIndex(dates)\n idx = idx.tz_localize('US/Pacific')\n self.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Pacific'))\n idx = idx.tz_convert('US/Eastern')\n expected = ['2010-12-01 03:00', '2010-12-02 03:00', NaT]\n self.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))\n\n idx = idx + offsets.Hour(5)\n expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT]\n self.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))\n idx = idx.tz_convert('US/Pacific')\n expected = ['2010-12-01 05:00', '2010-12-02 05:00', NaT]\n self.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific'))\n\n idx = idx + np.timedelta64(3, 'h')\n expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT]\n self.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific'))\n\n idx = idx.tz_convert('US/Eastern')\n expected = ['2010-12-01 11:00', '2010-12-02 11:00', NaT]\n self.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))\n\n\nclass TestTslib(tm.TestCase):\n\n def test_tslib_tz_convert(self):\n def compare_utc_to_local(tz_didx, utc_didx):\n f = lambda x: tslib.tz_convert_single(x, 'UTC', tz_didx.tz)\n result = tslib.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz)\n result_single = np.vectorize(f)(tz_didx.asi8)\n self.assert_numpy_array_equal(result, result_single)\n\n def compare_local_to_utc(tz_didx, utc_didx):\n f = lambda x: tslib.tz_convert_single(x, tz_didx.tz, 'UTC')\n result = tslib.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC')\n result_single = np.vectorize(f)(utc_didx.asi8)\n self.assert_numpy_array_equal(result, result_single)\n\n for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'Europe/Moscow']:\n # US: 2014-03-09 - 2014-11-11\n # MOSCOW: 
2014-10-26 / 2014-12-31\n tz_didx = date_range('2014-03-01', '2015-01-10', freq='H', tz=tz)\n utc_didx = date_range('2014-03-01', '2015-01-10', freq='H')\n compare_utc_to_local(tz_didx, utc_didx)\n # local tz to UTC can be differ in hourly (or higher) freqs because\n # of DST\n compare_local_to_utc(tz_didx, utc_didx)\n\n tz_didx = date_range('2000-01-01', '2020-01-01', freq='D', tz=tz)\n utc_didx = date_range('2000-01-01', '2020-01-01', freq='D')\n compare_utc_to_local(tz_didx, utc_didx)\n compare_local_to_utc(tz_didx, utc_didx)\n\n tz_didx = date_range('2000-01-01', '2100-01-01', freq='A', tz=tz)\n utc_didx = date_range('2000-01-01', '2100-01-01', freq='A')\n compare_utc_to_local(tz_didx, utc_didx)\n compare_local_to_utc(tz_didx, utc_didx)\n\n # Check empty array\n result = tslib.tz_convert(np.array([], dtype=np.int64),\n tslib.maybe_get_tz('US/Eastern'),\n tslib.maybe_get_tz('Asia/Tokyo'))\n self.assert_numpy_array_equal(result, np.array([], dtype=np.int64))\n\n # Check all-NaT array\n result = tslib.tz_convert(np.array([tslib.iNaT], dtype=np.int64),\n tslib.maybe_get_tz('US/Eastern'),\n tslib.maybe_get_tz('Asia/Tokyo'))\n self.assert_numpy_array_equal(result, np.array(\n [tslib.iNaT], dtype=np.int64))\n" ]
[ [ "pandas._libs.period.Period._maybe_convert_freq", "pandas.util.decorators.deprecate_kwarg", "pandas.tseries.frequencies.to_offset", "pandas._libs.period.period_ordinal", "pandas._libs.period.Period.now", "numpy.asarray", "pandas.types.common.is_float", "pandas.types.common.is_float_dtype", "pandas.tseries.frequencies.get_to_timestamp_base", "pandas.tseries.frequencies.get_freq_code", "numpy.dtype", "pandas.types.common.is_period_dtype", "numpy.searchsorted", "pandas.types.dtypes.PeriodDtype.construct_from_string", "pandas.types.common.is_bool_dtype", "pandas.types.common.is_timedelta64_dtype", "numpy.where", "pandas._libs.period.Period._from_ordinal", "pandas.types.common.pandas_dtype", "numpy.arange", "pandas.types.common.is_integer_dtype", "pandas.indexes.base._ensure_index", "pandas._libs.tslib._isleapyear_arr", "pandas._libs.period.periodarr_to_dt64arr", "pandas._libs.period._quarter_to_myear", "pandas._libs.lib.infer_dtype", "pandas._libs.tslib._delta_to_nanoseconds", "pandas._libs.period.IncompatibleFrequency", "pandas.tseries.frequencies.get_freq_group", "numpy.repeat", "pandas.types.common.is_integer", "pandas.util.decorators.Appender", "pandas.tseries.index.Index", "pandas.types.common.is_scalar", "pandas._libs.period.period_asfreq_arr", "pandas.compat.u", "pandas.core.common._count_not_none", "pandas.tseries.index.Int64Index", "pandas._libs.period.extract_freq", "pandas.tseries.index.DatetimeIndex", "pandas.tseries.frequencies.Resolution.get_freq_group", "pandas._libs.period.extract_ordinals", "pandas._libs.period.get_period_field_arr", "pandas.tseries.tools.parse_time_string", "numpy.array", "pandas.tseries.frequencies.get_base_alias", "pandas.tseries.index.Int64Index.join", "pandas.types.common.is_object_dtype", "pandas.tseries.index.Index.get_indexer", "pandas._libs.period._validate_end_alias", "pandas.util.decorators.Substitution", "pandas.tseries.base.DatetimeIndexOpsMixin._convert_tolerance", "pandas.types.common._ensure_object", 
"pandas.core.common._values_from_object", "pandas.compat.zip", "pandas.tseries.tdi.TimedeltaIndex", "pandas.types.common.is_datetime64tz_dtype", "pandas.types.common.is_datetime64_dtype", "numpy.ndarray.__setstate__", "pandas._libs.period.Period", "numpy.empty" ], [ "pandas.merge", "pandas.util.testing.assertIsInstance", "pandas.Series", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.dtype", "pandas.util.testing.assert_frame_equal", "numpy.random.randn", "pandas.compat.lzip", "numpy.random.randint", "numpy.arange", "pandas.util.testing.assert_series_equal", "pandas.Index", "pandas.util.testing.rands_array", "pandas.MultiIndex", "pandas.types.common.is_categorical_dtype", "pandas.Categorical", "numpy.random.choice", "numpy.timedelta64", "pandas.tools.merge.merge", "pandas.DataFrame.from_dict", "pandas.date_range", "numpy.array", "pandas.types.common.is_object_dtype", "numpy.random.random", "numpy.random.seed", "pandas.period_range", "numpy.tile", "pandas.util.testing.assertRaises", "pandas.types.dtypes.CategoricalDtype", "pandas.compat.lrange" ], [ "pandas.util.testing.assertIsInstance", "pandas.PeriodIndex", "pandas.Series", "pandas.period_range", "pandas.util.testing.assertRaisesRegexp", "pandas.util.testing.assert_series_equal", "pandas.DatetimeIndex", "pandas.util.testing.assertRaises", "pandas.util.testing.assert_index_equal", "pandas.Period", "numpy.array" ], [ "pandas.util.decorators.deprecate_kwarg", "pandas.Series", "pandas.types.missing.isnull", "numpy.asarray", "pandas._libs.lib.is_lexsorted", "pandas._libs.lib.tuples_to_object_array", "numpy.dtype", "pandas.indexes.frozen.FrozenList", "numpy.concatenate", "numpy.all", "pandas.compat.map", "numpy.any", "pandas.core.sorting.get_group_index", "pandas.core.missing.clean_reindex_fill_method", "pandas.indexes.base.InvalidIndexError", "pandas.formats.format._get_adjustment", "pandas.compat.lzip", "pandas._libs.index.get_value_at", "pandas.core.config.get_option", "pandas.types.common.is_iterator", 
"pandas._libs.lib.fast_unique_multiple", "numpy.arange", "pandas.indexes.frozen.FrozenNDArray", "pandas.indexes.base._ensure_index", "pandas.core.algorithms.factorize", "numpy.lexsort", "pandas.indexes.base._get_na_value", "pandas.types.common._ensure_int64", "pandas._libs.lib.fast_zip", "pandas.types.common.is_list_like", "numpy.repeat", "pandas.core.sorting.lexsort_indexer", "numpy.zeros", "pandas.indexes.frozen._ensure_frozen", "pandas.types.common.is_scalar", "pandas.util.decorators.Appender", "pandas.types.missing.array_equivalent", "pandas.core.categorical._factorize_from_iterables", "numpy.lib.arraysetops.in1d", "pandas.indexes.base.Index", "pandas.indexes.base.default_pprint", "pandas._libs.Timestamp", "numpy.delete", "pandas.core.sorting.indexer_from_factorized", "pandas._libs.hashtable.duplicated_int64", "numpy.append", "pandas.tools.util.cartesian_product", "pandas.types.common._ensure_platform_int", "numpy.array", "pandas.indexes.base._ensure_has_len", "pandas.core.common._index_labels_to_array", "pandas._libs.lib.to_object_array_tuples", "pandas.core.common.is_bool_indexer", "pandas.core.algorithms.take_1d", "pandas.core.common.is_null_slice", "pandas.formats.printing.pprint_thing", "pandas.tools.hashing.hash_tuples", "pandas.core.common._values_from_object", "pandas.compat.zip", "pandas.compat.lrange", "pandas._libs.lib.infer_dtype", "pandas.compat.numpy.function.validate_repeat", "pandas.core.indexing.maybe_droplevels", "numpy.empty", "pandas.compat.range" ], [ "pandas.tseries.offsets.Hour", "pandas.to_datetime", "pandas.util.testing.assertIsInstance", "pandas.Series", "pandas._libs.tslib._p_tz_cache_key", "pandas._libs.tslib.tz_convert_single", "pandas.util.testing._skip_if_no_pytz", "pandas.util.testing.assert_produces_warning", "pandas.DataFrame", "pandas.util.testing._skip_if_windows", "pandas.util.testing.assert_frame_equal", "pandas.util.testing.assert_index_equal", "numpy.random.randn", "pandas.DataFrame.from_records", 
"pandas.types.dtypes.DatetimeTZDtype", "pandas._libs.tslib.tz_convert", "numpy.hstack", "pandas._libs.tslib.maybe_get_tz", "pandas.util.testing.assert_numpy_array_equal", "numpy.arange", "pandas.Index", "pandas.DatetimeIndex", "pandas.util.testing.assert_series_equal", "pandas.tseries.index.bdate_range", "pandas.util.testing.set_timezone", "numpy.zeros", "pandas.util.testing.assert_almost_equal", "numpy.timedelta64", "numpy.array", "pandas.util.testing._skip_if_no_dateutil", "numpy.array_equal", "pandas.util.testing.assertRaisesRegexp", "pandas.isnull", "pandas.tseries.tools._infer_tzinfo", "pandas.tseries.index.date_range", "pandas.util.testing.assertRaises", "numpy.random.permutation", "numpy.vectorize", "pandas.compat.zip", "pandas.tseries.offsets.Minute", "pandas.Timestamp", "pandas.compat.lrange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.19" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20" ], "scipy": [], "tensorflow": [] } ]
salt-die/nurses
[ "68fc12bfed7af7fedb0e45d5215ff922ee981f6b" ]
[ "nurses/widgets/array_win.py" ]
[ "import curses\n\nimport numpy as np\n\nfrom .widget import Widget, BORDER_STYLES\n\n\nclass ArrayWin(Widget):\n \"\"\"\n A Widget whose state is stored an a numpy array and whose __getitem__ / __setitem__ use numpy indexing to update text.\n\n Other Parameters\n ----------------\n border: optional\n The border type; one of `nurses.widget.BORDER_STYLES`. (by default a widget has no border)\n border_color: optional\n A curses color_pair. If a border is given, border_color will be the color of the border. (the default is `color`)\n\n Notes\n -----\n __getitem__ and __setitem__ call the respective buffer functions directly, so one can slice\n and write to a Widget as if it was a numpy array.\n \"\"\"\n\n default_character = \" \"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._buffer = None\n self._colors = None\n\n def update_geometry(self):\n if self.root is None:\n return\n\n super().update_geometry()\n\n if self._buffer is None:\n h, w = self.height, self.width\n self._buffer = np.full((h, w), self.default_character)\n self._colors = np.full((h, w), self.color)\n\n if self.has_border:\n self.border(self.border_style, self.border_color)\n\n @property\n def colors(self):\n return self._colors[1: -1, 1: -1] if self.has_border else self._colors\n\n @colors.setter\n def colors(self, array):\n if self.has_border:\n self._colors[1: -1, 1: -1] = array\n else:\n self._colors = array\n\n @property\n def buffer(self):\n return self._buffer[1: -1, 1: -1] if self.has_border else self._buffer\n\n @buffer.setter\n def buffer(self, array):\n if self.has_border:\n self._buffer[1: -1, 1: -1] = array\n else:\n self._buffer = array\n\n def _resize(self):\n if self.window is None:\n return\n\n if self.has_border:\n self._buffer[:, -1] = self._buffer[-1] = self.default_character # Erase the right-most/bottom-most border in case widget expands\n\n height, width = self.height, self.width\n old_h, old_w = self._buffer.shape\n min_h, min_w = 
min(height, old_h), min(width, old_w)\n\n new_buffer = np.full((height, width), self.default_character)\n new_buffer[:min_h, :min_w] = self._buffer[:min_h, :min_w]\n\n new_colors = np.full((height, width), self.color)\n new_colors[:min_h, :min_w] = self._colors[:min_h, :min_w]\n\n self._buffer = new_buffer\n self._colors = new_colors\n\n super()._resize()\n\n if self.has_border:\n self.border(self.border_style, self.border_color)\n\n def push(self):\n \"\"\"Write the buffers to the window.\n \"\"\"\n it = np.nditer((self._buffer, self._colors), [\"multi_index\"])\n for char, color in it:\n\n # Newline character on the last corner of a window will advance the cursor out-of-bounds causing an error\n # TODO: Implement a more perfomant solution, either check the index, catch the error, or add an extra row to the windows\n if (ch := str(char)) == \"\\n\":\n ch = \" \"\n\n y, x = it.multi_index\n self.window.addstr(y, x, ch, color)\n\n def refresh(self):\n self.push()\n super().refresh()\n\n def __getitem__(self, key):\n \"\"\"\n `buffer.__getitem__` except offset if `self.has_border` is true\n (i.e., `buffer[1: -1, 1: -1].__getitem__` if `self.has_border`).\n \"\"\"\n return self.buffer[key]\n\n def __setitem__(self, key, text):\n \"\"\"\n Coerce `text` into a ndarray then call `buffer.__setitem__(key, text)`.\n\n Notes\n -----\n If `len(text) > 1`, `text` is coerced into an array or an array of arrays (depending on the presence of newlines).\n If the array's shape can't be cast to `self.buffer` it will be rotated and tried again (setting the text vertically).\n\n If `self.has_border` is true then indices will be offset automatically.\n (i.e., `buffer[1: -1, 1: -1].__setitem__` will be called instead)\n\n Examples\n --------\n >>> my_widget[2:4, :13] = \"Hello, World!\\\\nI'm a widget!\"\n \"\"\"\n if \"\\n\" in text:\n text = np.array(tuple(map(tuple, text.rstrip(\"\\n\").splitlines())))\n elif len(text) > 1:\n text = np.array(tuple(text))\n\n try:\n self.buffer[key] = 
text\n except ValueError:\n self.buffer[key] = np.rot90(text if len(text.shape) == 2 else text[None, ], -1) # Try to fit the text vertically\n\n def border(self, style=\"light\", color=None):\n \"\"\"\n Draw a border on the edges of the widget.\n\n Parameters\n ----------\n style: optional\n The style of the border, can be one of `nurses.widget.BORDER_STYLES`. (the default is \"light\")\n\n color: optional\n The color of the border. (the default is the widget's `color`)\n\n Notes\n -----\n Methods such as `__setitem__`, `roll`, `scroll`, `_resize` will take care to preserve the border\n as long as `has_border` is truth-y. To disable this behavior set `has_border` to False or call\n this method with `read_only=False`.\n \"\"\"\n if self._buffer is None:\n return\n\n self.border_style = style\n self.border_color = color\n\n ul, ur, v, h, ll, lr = BORDER_STYLES[style]\n\n b = self._buffer\n b[(0, -1), :] = h\n b[:, (0, -1)] = v\n b[ 0, 0] = ul\n b[ 0, -1] = ur\n b[-1, 0] = ll\n b[-1, -1] = lr\n\n c = self._colors\n c[0] = c[-1] = c[:, 0] = c[:, -1] = color or self.color\n\n def roll(self, shift=1, vertical=False):\n \"\"\"\n Roll the contents of the widget. Items that roll beyond the last position are re-introduced at the first.\n\n Parameters\n ----------\n shift: optional\n Number of places to shift the contents. `shift` may be negative. (the default is 1)\n\n vertical: optional\n Whether to roll vertically. (the default is `False`, i.e., rolls are horizontal by default)\n \"\"\"\n axis = (-shift, 0) if vertical else (0, -shift)\n self.buffer = np.roll(self.buffer, axis, (0, 1))\n self.colors = np.roll(self.colors, axis, (0, 1))\n\n def scroll(self, lines=1):\n \"\"\"\n Scroll the contents of the buffer upwards or downwards, erasing the last/first lines.\n\n Parameters\n ----------\n lines: optional\n Number of lines to scroll. To scroll down, lines should be negative. 
(the default is 1)\n \"\"\"\n self.roll(lines, vertical=True)\n slice_ = slice(-lines, None) if lines > 0 else slice(None, -lines)\n self.buffer[slice_] = self.default_character\n self.colors[slice_] = self.color\n" ]
[ [ "numpy.roll", "numpy.nditer", "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
salkaevruslan/sosed
[ "fa948fd339ff3ff08eea1ca3afb5884e63c6e5f3" ]
[ "sosed_test/test_data_processing.py" ]
[ "import unittest\nimport os\nimport numpy as np\n\nfrom pathlib import Path\nfrom collections import Counter\nfrom unittest.mock import patch\n\nfrom sosed.data_processing import *\n\n\nclass ProcessedDataTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.actual_index = [0, 1]\n cls.actual_docword = {\n 0: {\n 'project1': Counter({1: 1, 2: 2, 3: 3, 5: 4}),\n 'project2': Counter({1: 2, 3: 10, 4: 100})\n },\n 1: {\n 'project5': Counter({10: 2, 30: 10, 40: 100}),\n 'project4': Counter({1: 2, 3: 4, 5: 6, 7: 8, 9: 10}),\n 'project3': Counter({10: 10, 20: 220, 33: 333, 5: 1}),\n }\n }\n cls.actual_vocab = {\n 0: {\n 'a': 1,\n 'bb': 2,\n 'ccc': 3,\n 'dddd': 4,\n 'eeeee': 5\n },\n 1: {\n 'on': 1,\n 'going': 3,\n 'is': 5,\n 'weird': 7,\n 'something': 9,\n 'thirtythree': 33,\n 'a': 10,\n 'bb': 20,\n 'ccc': 30,\n 'dddd': 40,\n 'eeeee': 50\n }\n }\n\n cls.folder = Path('test_data', 'test_output')\n cls.processed_data = ProcessedData(cls.folder)\n\n def test_indices(self):\n self.assertEqual(sorted(self.actual_index), sorted(self.processed_data.indices()))\n\n def test_docword(self):\n self.assertEqual(self.actual_docword[0], self.processed_data.load_docword(0))\n self.assertEqual(self.actual_docword[1], self.processed_data.load_docword(1))\n\n def test_vocab(self):\n self.assertEqual(self.actual_vocab[0], self.processed_data.load_tokens_vocab(0))\n self.assertEqual(self.actual_vocab[1], self.processed_data.load_tokens_vocab(1))\n\n def test_repo_names(self):\n self.assertFalse(self.processed_data.has_stored_repo_names())\n repo_names = ['project1', 'project2', 'project5', 'project4', 'project3']\n self.processed_data.store_repo_names(repo_names)\n self.assertTrue(self.processed_data.has_stored_repo_names())\n self.assertEqual(repo_names, self.processed_data.load_repo_names())\n\n def test_repo_vectors(self):\n self.assertFalse(self.processed_data.has_stored_repo_vectors())\n repo_vectors = np.random.randn(10, 20)\n 
self.processed_data.store_repo_vectors(repo_vectors)\n self.assertTrue(self.processed_data.has_stored_repo_vectors())\n self.assertTrue(np.all(repo_vectors == self.processed_data.load_repo_vectors()))\n\n @classmethod\n def tearDownClass(cls):\n names_file = cls.folder / 'repo_names.txt'\n if names_file.exists():\n os.remove(names_file)\n\n vectors_file = cls.folder / 'repo_vectors.npy'\n if vectors_file.exists():\n os.remove(vectors_file)\n\n\nclass DataProcessingTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.folder = Path('test_data', 'test_data')\n cls.tokens_file = cls.folder / 'tokens.txt'\n cls.clusters_file = cls.folder / 'clusters.npy'\n cls.real_tokens = ['we', 'are', 'the', 'champions', 'my', 'friends', 'and', 'keep', 'on', 'fighting', 'till', 'end']\n cls.real_clusters = np.arange(len(cls.real_tokens))\n cls.short_tokens = cls.real_tokens[::2]\n cls.short_clusters = cls.real_clusters[::2]\n np.save(cls.folder / 'clusters', cls.short_clusters, allow_pickle=True)\n\n @patch('sosed.data_processing.get_clusters_file')\n @patch('sosed.data_processing.get_tokens_file')\n def test_assign_clusters(self, mock_get_tokens_file, mock_get_clusters_file):\n mock_get_clusters_file.return_value = self.clusters_file\n mock_get_tokens_file.return_value = self.tokens_file\n\n tokens_vocab = {token: i for i, token in enumerate(self.real_tokens)}\n proper_assignment = {ind: None for ind in tokens_vocab.values()}\n proper_assignment.update({\n tokens_vocab[token]: cluster\n for token, cluster in zip(self.short_tokens, self.short_clusters) if token in tokens_vocab\n })\n self.assertEqual(proper_assignment, assign_clusters(tokens_vocab))\n\n @patch('sosed.data_processing.embedding_dim')\n def test_compute_vectors(self, mock_embedding_dim):\n n_projects = 3\n dim = 8\n\n mock_embedding_dim.return_value = dim\n\n actual_repo_names = [f'project_{i}' for i in range(1, n_projects + 1)]\n tokens_to_clusters = {i: i % dim for i in range(dim * dim)}\n docword = 
{\n project: Counter({token: i + 1 for token in tokens_to_clusters})\n for i, project in enumerate(actual_repo_names)\n }\n actual_vectors = np.array([[i * dim for _ in range(dim)] for i in range(1, n_projects + 1)], dtype=np.float32)\n\n repo_names, vectors = compute_vectors(docword, tokens_to_clusters)\n\n self.assertEqual(actual_repo_names, repo_names)\n self.assertEqual((n_projects, dim), vectors.shape)\n self.assertTrue(np.all(actual_vectors == vectors))\n\n def test_normalize_vectors(self):\n n_projects = 3\n dim = 8\n\n vectors = np.random.randn(n_projects, dim)\n normalized_vectors = normalize_vectors(vectors)\n\n self.assertEqual((n_projects, dim), normalized_vectors.shape)\n for vec, norm_vec in zip(vectors, normalized_vectors):\n actual_norm_vec = vec / np.linalg.norm(vec)\n for i in range(dim):\n self.assertAlmostEqual(actual_norm_vec[i], norm_vec[i])\n\n @patch('sosed.data_processing.embedding_dim')\n def test_similarity_index(self, mock_embedding_dim):\n n_projects = 10\n dim = 16\n\n mock_embedding_dim.return_value = dim\n\n embedding = np.random.random((n_projects, dim)).astype('float32')\n embedding[:, 0] += np.arange(n_projects) / 1000.\n embedding = normalize_vectors(embedding)\n\n index = build_similarity_index(embedding)\n dist, idx = index.search(embedding, 3)\n\n for i, inds in enumerate(idx):\n self.assertEqual(i, inds[0])\n\n @classmethod\n def tearDownClass(cls):\n clusters_file = cls.clusters_file\n if clusters_file.exists():\n os.remove(clusters_file)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.random.random", "numpy.arange", "numpy.linalg.norm", "numpy.save", "numpy.all", "numpy.random.randn" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jdtuck/scikit-fda
[ "28259dffbc45dfc8dbf3c12839b928f9df200351", "28259dffbc45dfc8dbf3c12839b928f9df200351", "28259dffbc45dfc8dbf3c12839b928f9df200351", "28259dffbc45dfc8dbf3c12839b928f9df200351", "28259dffbc45dfc8dbf3c12839b928f9df200351", "28259dffbc45dfc8dbf3c12839b928f9df200351", "28259dffbc45dfc8dbf3c12839b928f9df200351" ]
[ "skfda/representation/basis/_basis.py", "examples/plot_explore.py", "skfda/representation/interpolation.py", "examples/plot_boxplot.py", "skfda/preprocessing/registration/elastic.py", "skfda/preprocessing/dim_reduction/variable_selection/maxima_hunting.py", "skfda/exploratory/visualization/_boxplot.py" ]
[ "\"\"\"Module for functional data manipulation in a basis system.\n\nDefines functional data object in a basis function system representation and\nthe corresponding basis classes.\n\n\"\"\"\nfrom abc import ABC, abstractmethod\nimport copy\nimport warnings\n\nimport numpy as np\n\nfrom ..._utils import (_domain_range, _same_domain,\n _reshape_eval_points)\n\n\n__author__ = \"Miguel Carbajo Berrocal\"\n__email__ = \"[email protected]\"\n\n# aux functions\n\n\ndef _check_domain(domain_range):\n for domain in domain_range:\n if len(domain) != 2 or domain[0] >= domain[1]:\n raise ValueError(f\"The interval {domain} is not well-defined.\")\n\n\nclass Basis(ABC):\n \"\"\"Defines the structure of a basis function system.\n\n Attributes:\n domain_range (tuple): a tuple of length 2 containing the initial and\n end values of the interval over which the basis can be evaluated.\n n_basis (int): number of functions in the basis.\n\n \"\"\"\n\n def __init__(self, domain_range=None, n_basis=1):\n \"\"\"Basis constructor.\n\n Args:\n domain_range (tuple or list of tuples, optional): Definition of the\n interval where the basis defines a space. Defaults to (0,1).\n n_basis: Number of functions that form the basis. 
Defaults to 1.\n \"\"\"\n\n if domain_range is not None:\n\n domain_range = _domain_range(domain_range)\n\n # Some checks\n _check_domain(domain_range)\n\n if n_basis < 1:\n raise ValueError(\"The number of basis has to be strictly \"\n \"possitive.\")\n\n self._domain_range = domain_range\n self._n_basis = n_basis\n\n super().__init__()\n\n @property\n def dim_domain(self):\n return 1\n\n @property\n def dim_codomain(self):\n return 1\n\n @property\n def domain_range(self):\n if self._domain_range is None:\n return ((0, 1),) * self.dim_domain\n else:\n return self._domain_range\n\n @property\n def n_basis(self):\n return self._n_basis\n\n @abstractmethod\n def _evaluate(self, eval_points):\n \"\"\"Subclasses must override this to provide basis evaluation.\"\"\"\n pass\n\n def evaluate(self, eval_points, *, derivative=0):\n \"\"\"Evaluate Basis objects and its derivatives.\n\n Evaluates the basis function system or its derivatives at a list of\n given values.\n\n Args:\n eval_points (array_like): List of points where the basis is\n evaluated.\n\n Returns:\n (numpy.darray): Matrix whose rows are the values of the each\n basis function or its derivatives at the values specified in\n eval_points.\n\n \"\"\"\n if derivative < 0:\n raise ValueError(\"derivative only takes non-negative values.\")\n elif derivative != 0:\n warnings.warn(\"Parameter derivative is deprecated. 
Use the \"\n \"derivative function instead.\", DeprecationWarning)\n return self.derivative(order=derivative)(eval_points)\n\n eval_points = _reshape_eval_points(eval_points,\n aligned=True,\n n_samples=self.n_basis,\n dim_domain=self.dim_domain)\n\n return self._evaluate(eval_points).reshape(\n (self.n_basis, len(eval_points), self.dim_codomain))\n\n def __call__(self, *args, **kwargs):\n return self.evaluate(*args, **kwargs)\n\n def __len__(self):\n return self.n_basis\n\n def derivative(self, *, order=1):\n \"\"\"Construct a FDataBasis object containing the derivative.\n\n Args:\n order (int, optional): Order of the derivative. Defaults to 1.\n\n Returns:\n (FDataBasis): Derivative object.\n\n \"\"\"\n\n return self.to_basis().derivative(order=order)\n\n def _derivative_basis_and_coefs(self, coefs, order=1):\n \"\"\"\n Subclasses can override this to provide derivative construction.\n\n A basis can provide derivative evaluation at given points\n without providing a basis representation for its derivatives,\n although is recommended to provide both if possible.\n\n \"\"\"\n raise NotImplementedError(f\"{type(self)} basis does not support \"\n \"the construction of a basis of the \"\n \"derivatives.\")\n\n def plot(self, chart=None, **kwargs):\n \"\"\"Plot the basis object or its derivatives.\n\n Args:\n chart (figure object, axe or list of axes, optional): figure over\n with the graphs are plotted or axis over where the graphs are\n plotted.\n **kwargs: keyword arguments to be passed to the\n fdata.plot function.\n\n Returns:\n fig (figure): figure object in which the graphs are plotted.\n\n \"\"\"\n self.to_basis().plot(chart=chart, **kwargs)\n\n def _coordinate_nonfull(self, fdatabasis, key):\n \"\"\"\n Returns a fdatagrid for the coordinate functions indexed by key.\n\n Subclasses can override this to provide coordinate indexing.\n\n The key parameter has been already validated and is an integer or\n slice in the range [0, self.dim_codomain.\n\n \"\"\"\n raise 
NotImplementedError(\"Coordinate indexing not implemented\")\n\n def _coordinate(self, fdatabasis, key):\n \"\"\"Returns a fdatagrid for the coordinate functions indexed by key.\"\"\"\n\n # Raises error if not in range and normalize key\n r_key = range(self.dim_codomain)[key]\n\n if isinstance(r_key, range) and len(r_key) == 0:\n raise IndexError(\"Empty number of coordinates selected\")\n\n # Full fdatabasis case\n if (self.dim_codomain == 1 and r_key == 0) or (\n isinstance(r_key, range) and len(r_key) == self.dim_codomain):\n\n return fdatabasis.copy()\n\n else:\n\n return self._coordinate_nonfull(fdatabasis=fdatabasis, key=r_key)\n\n def rescale(self, domain_range=None):\n r\"\"\"Return a copy of the basis with a new domain range, with the\n corresponding values rescaled to the new bounds.\n\n Args:\n domain_range (tuple, optional): Definition of the interval\n where the basis defines a space. Defaults uses the same as\n the original basis.\n \"\"\"\n\n return self.copy(domain_range=domain_range)\n\n def copy(self, domain_range=None):\n \"\"\"Basis copy\"\"\"\n\n new_copy = copy.deepcopy(self)\n\n if domain_range is not None:\n domain_range = _domain_range(domain_range)\n\n # Some checks\n _check_domain(domain_range)\n\n new_copy._domain_range = domain_range\n\n return new_copy\n\n def to_basis(self):\n from . import FDataBasis\n return FDataBasis(self.copy(), np.identity(self.n_basis))\n\n def _list_to_R(self, knots):\n retstring = \"c(\"\n for i in range(0, len(knots)):\n retstring = retstring + str(knots[i]) + \", \"\n return retstring[0:len(retstring) - 2] + \")\"\n\n def _to_R(self):\n raise NotImplementedError\n\n def inner_product_matrix(self, other=None):\n r\"\"\"Return the Inner Product Matrix of a pair of basis.\n\n The Inner Product Matrix is defined as\n\n .. 
math::\n IP_{ij} = \\langle\\phi_i, \\theta_j\\rangle\n\n where :math:`\\phi_i` is the ith element of the basi and\n :math:`\\theta_j` is the jth element of the second basis.\n This matrix helps on the calculation of the inner product\n between objects on two basis and for the change of basis.\n\n Args:\n other (:class:`Basis`): Basis to compute the inner product\n matrix. If not basis is given, it computes the matrix with\n itself returning the Gram Matrix\n\n Returns:\n numpy.array: Inner Product Matrix of two basis\n\n \"\"\"\n from ...misc import inner_product_matrix\n\n if other is None or self == other:\n return self.gram_matrix()\n\n return inner_product_matrix(self, other)\n\n def _gram_matrix_numerical(self):\n \"\"\"\n Compute the Gram matrix numerically.\n\n \"\"\"\n from ...misc import inner_product_matrix\n\n return inner_product_matrix(self, force_numerical=True)\n\n def _gram_matrix(self):\n \"\"\"\n Compute the Gram matrix.\n\n Subclasses may override this method for improving computation\n of the Gram matrix.\n\n \"\"\"\n return self._gram_matrix_numerical()\n\n def gram_matrix(self):\n r\"\"\"Return the Gram Matrix of a basis\n\n The Gram Matrix is defined as\n\n .. math::\n G_{ij} = \\langle\\phi_i, \\phi_j\\rangle\n\n where :math:`\\phi_i` is the ith element of the basis. 
This is a\n symmetric matrix and positive-semidefinite.\n\n Returns:\n numpy.array: Gram Matrix of the basis.\n\n \"\"\"\n\n gram = getattr(self, \"_gram_matrix_cached\", None)\n\n if gram is None:\n gram = self._gram_matrix()\n self._gram_matrix_cached = gram\n\n return gram\n\n def _add_same_basis(self, coefs1, coefs2):\n return self.copy(), coefs1 + coefs2\n\n def _add_constant(self, coefs, constant):\n coefs = coefs.copy()\n constant = np.array(constant)\n coefs[:, 0] = coefs[:, 0] + constant\n\n return self.copy(), coefs\n\n def _sub_same_basis(self, coefs1, coefs2):\n return self.copy(), coefs1 - coefs2\n\n def _sub_constant(self, coefs, other):\n coefs = coefs.copy()\n other = np.array(other)\n coefs[:, 0] = coefs[:, 0] - other\n\n return self.copy(), coefs\n\n def _mul_constant(self, coefs, other):\n coefs = coefs.copy()\n other = np.atleast_2d(other).reshape(-1, 1)\n coefs = coefs * other\n\n return self.copy(), coefs\n\n def __repr__(self):\n \"\"\"Representation of a Basis object.\"\"\"\n return (f\"{self.__class__.__name__}(domain_range={self.domain_range}, \"\n f\"n_basis={self.n_basis})\")\n\n def __eq__(self, other):\n \"\"\"Equality of Basis\"\"\"\n return (type(self) == type(other)\n and _same_domain(self, other)\n and self.n_basis == other.n_basis)\n\n def __hash__(self):\n \"\"\"Hash of Basis\"\"\"\n return hash((self.domain_range, self.n_basis))\n", "\"\"\"\nExploring data\n==============\n\nExplores the Tecator data set by plotting the functional data and calculating\nmeans and derivatives.\n\"\"\"\n\n# Author: Miguel Carbajo Berrocal\n# License: MIT\n\nimport skfda\n\nimport numpy as np\n\n\n##############################################################################\n# In this example we are going to explore the functional properties of the\n# :func:`Tecator <skfda.datasets.fetch_tecator>` dataset. This dataset\n# measures the infrared absorbance spectrum of meat samples. 
The objective is\n# to predict the fat, water, and protein content of the samples.\n#\n# In this example we only want to discriminate between meat with less than 20%\n# of fat, and meat with a higher fat content.\ndataset = skfda.datasets.fetch_tecator()\nfd = dataset['data']\ny = dataset['target']\ntarget_feature_names = dataset['target_feature_names']\nfat = y[:, np.asarray(target_feature_names) == 'Fat'].ravel()\n\n##############################################################################\n# We will now plot in red samples containing less than 20% of fat and in blue\n# the rest.\n\nlow_fat = fat < 20\nlabels = np.full(fd.n_samples, 'high fat')\nlabels[low_fat] = 'low fat'\ncolors = {'high fat': 'red',\n 'low fat': 'blue'}\n\nfig = fd.plot(group=labels, group_colors=colors,\n linewidth=0.5, alpha=0.7, legend=True)\n\n##############################################################################\n# The means of each group are the following ones.\n\nmean_low = skfda.exploratory.stats.mean(fd[low_fat])\nmean_high = skfda.exploratory.stats.mean(fd[~low_fat])\n\nmeans = mean_high.concatenate(mean_low)\n\nmeans.dataset_name = fd.dataset_name + ' - means'\nmeans.plot(group=['high fat', 'low fat'], group_colors=colors,\n linewidth=0.5, legend=True)\n\n##############################################################################\n# In this dataset, the vertical shift in the original trajectories is not\n# very significative for predicting the fat content. However, the shape of the\n# curve is very relevant. 
We can observe that looking at the first and second\n# derivatives.\n#\n# The first derivative is shown below:\n\nfdd = fd.derivative()\nfig = fdd.plot(group=labels, group_colors=colors,\n linewidth=0.5, alpha=0.7, legend=True)\n\n##############################################################################\n# We now show the second derivative:\nfdd = fd.derivative(order=2)\nfig = fdd.plot(group=labels, group_colors=colors,\n linewidth=0.5, alpha=0.7, legend=True)\n", "\"\"\"\nModule to interpolate functional data objects.\n\"\"\"\n\n\nimport abc\n\nfrom scipy.interpolate import (PchipInterpolator, UnivariateSpline,\n RectBivariateSpline, RegularGridInterpolator)\n\nimport numpy as np\n\nfrom .._utils import _to_array_maybe_ragged\nfrom .evaluator import Evaluator\n\n\nclass _SplineList(abc.ABC):\n r\"\"\"ABC for list of interpolations.\"\"\"\n\n def __init__(self, fdatagrid,\n interpolation_order=1,\n smoothness_parameter=0.):\n\n super().__init__()\n\n self.fdatagrid = fdatagrid\n self.interpolation_order = interpolation_order\n self.smoothness_parameter = smoothness_parameter\n\n @abc.abstractmethod\n def _evaluate_one(self, spl, t, derivative=0):\n \"\"\"Evaluates one spline of the list.\"\"\"\n pass\n\n def _evaluate_codomain(self, spl_m, t, derivative=0):\n \"\"\"Evaluator of multidimensional sample\"\"\"\n return np.array([self._evaluate_one(spl, t, derivative)\n for spl in spl_m]).T\n\n def evaluate(self, fdata, eval_points, *, derivative=0, aligned=True):\n\n if aligned:\n # Points evaluated inside the domain\n res = np.apply_along_axis(\n self._evaluate_codomain, 1,\n self.splines, eval_points, derivative)\n res = res.reshape(fdata.n_samples, eval_points.shape[0],\n fdata.dim_codomain)\n\n else:\n res = _to_array_maybe_ragged([self._evaluate_codomain(\n s, e, derivative=derivative)\n for s, e in zip(self.splines, eval_points)])\n\n return res\n\n\nclass _SplineList1D(_SplineList):\n r\"\"\"List of interpolations for curves.\n\n List of interpolations for 
objects with domain\n dimension = 1. Calling internally during the creation of the\n evaluator.\n\n Uses internally the scipy interpolation UnivariateSpline or\n PchipInterpolator.\n\n Args:\n fdatagrid (FDatagrid): Fdatagrid to interpolate.\n interpolation_order (int, optional): Order of the interpolation, 1\n for linear interpolation, 2 for cuadratic, 3 for cubic and so\n on. In case of curves and surfaces there is available\n interpolation up to degree 5. For higher dimensional objects\n only linear or nearest interpolation is available. Default\n lineal interpolation.\n smoothness_parameter (float, optional): Penalisation to perform\n smoothness interpolation. Option only available for curves and\n surfaces. If 0 the residuals of the interpolation will be 0.\n Defaults 0.\n monotone (boolean, optional): Performs monotone interpolation in\n curves using a PCHIP interpolator. Only valid for curves (domain\n dimension equal to 1) and interpolation order equal to 1 or 3.\n Defaults false.\n\n Returns:\n (np.ndarray): Array of size n_samples x dim_codomain with the\n corresponding interpolation of the sample i, and image dimension j\n in the entry (i,j) of the array.\n\n Raises:\n ValueError: If the value of the interpolation k is not valid.\n\n \"\"\"\n\n def __init__(self, fdatagrid,\n interpolation_order=1,\n smoothness_parameter=0.,\n monotone=False):\n\n super().__init__(\n fdatagrid=fdatagrid,\n interpolation_order=interpolation_order,\n smoothness_parameter=smoothness_parameter)\n\n self.monotone = monotone\n\n if self.interpolation_order > 5 or self.interpolation_order < 1:\n raise ValueError(f\"Invalid degree of interpolation \"\n f\"({self.interpolation_order}). 
Must be \"\n f\"an integer greater than 0 and lower or \"\n f\"equal than 5.\")\n\n if self.monotone and self.smoothness_parameter != 0:\n raise ValueError(\"Smoothing interpolation is not supported with \"\n \"monotone interpolation\")\n\n if self.monotone and (self.interpolation_order == 2\n or self.interpolation_order == 4):\n raise ValueError(f\"monotone interpolation of degree \"\n f\"{self.interpolation_order}\"\n f\"not supported.\")\n\n # Monotone interpolation of degree 1 is performed with linear spline\n monotone = self.monotone\n if self.monotone and self.interpolation_order == 1:\n monotone = False\n\n grid_points = fdatagrid.grid_points[0]\n\n if monotone:\n def constructor(data):\n \"\"\"Constructs an unidimensional cubic monotone interpolation\"\"\"\n return PchipInterpolator(grid_points, data)\n\n else:\n\n def constructor(data):\n \"\"\"Constructs an unidimensional interpolation\"\"\"\n return UnivariateSpline(\n grid_points, data,\n s=self.smoothness_parameter,\n k=self.interpolation_order)\n\n self.splines = np.apply_along_axis(\n constructor, 1, fdatagrid.data_matrix)\n\n def _evaluate_one(self, spl, t, derivative=0):\n try:\n return spl(t, derivative)[:, 0]\n except ValueError:\n return np.zeros_like(t)\n\n\nclass _SplineList2D(_SplineList):\n r\"\"\"List of interpolations for surfaces.\n\n List of interpolations for objects with domain\n dimension = 2. Calling internally during the creationg of the\n evaluator.\n\n Uses internally the scipy interpolation RectBivariateSpline.\n\n Args:\n fdatagrid (FDatagrid): Fdatagrid to interpolate.\n interpolation_order (int, optional): Order of the interpolation, 1\n for linear interpolation, 2 for cuadratic, 3 for cubic and so\n on. In case of curves and surfaces there is available\n interpolation up to degree 5. For higher dimensional objects\n only linear or nearest interpolation is available. 
Default\n lineal interpolation.\n smoothness_parameter (float, optional): Penalisation to perform\n smoothness interpolation. Option only available for curves and\n surfaces. If 0 the residuals of the interpolation will be 0.\n Defaults 0.\n monotone (boolean, optional): Performs monotone interpolation in\n curves using a PCHIP interpolator. Only valid for curves (domain\n dimension equal to 1) and interpolation order equal to 1 or 3.\n Defaults false.\n\n Returns:\n (np.ndarray): Array of size n_samples x dim_codomain with the\n corresponding interpolation of the sample i, and image dimension j\n in the entry (i,j) of the array.\n\n Raises:\n ValueError: If the value of the interpolation k is not valid.\n\n \"\"\"\n\n def __init__(self, fdatagrid,\n interpolation_order=1,\n smoothness_parameter=0.):\n\n super().__init__(\n fdatagrid=fdatagrid,\n interpolation_order=interpolation_order,\n smoothness_parameter=smoothness_parameter)\n\n if np.isscalar(self.interpolation_order):\n kx = ky = self.interpolation_order\n elif len(self.interpolation_order) != 2:\n raise ValueError(\"k should be numeric or a tuple of length 2.\")\n else:\n kx = self.interpolation_order[0]\n ky = self.interpolation_order[1]\n\n if kx > 5 or kx <= 0 or ky > 5 or ky <= 0:\n raise ValueError(f\"Invalid degree of interpolation ({kx},{ky}). 
\"\n f\"Must be an integer greater than 0 and lower or \"\n f\"equal than 5.\")\n\n # Matrix of splines\n self.splines = np.empty(\n (fdatagrid.n_samples, fdatagrid.dim_codomain), dtype=object)\n\n for i in range(fdatagrid.n_samples):\n for j in range(fdatagrid.dim_codomain):\n self.splines[i, j] = RectBivariateSpline(\n fdatagrid.grid_points[0],\n fdatagrid.grid_points[1],\n fdatagrid.data_matrix[i, :, :, j],\n kx=kx, ky=ky,\n s=self.smoothness_parameter)\n\n def _evaluate_one(self, spl, t, derivative=0):\n if np.isscalar(derivative):\n derivative = 2 * [derivative]\n elif len(derivative) != 2:\n raise ValueError(\"derivative should be a numeric value \"\n \"or a tuple of length 2 with (dx,dy).\")\n\n return spl(t[:, 0], t[:, 1], dx=derivative[0], dy=derivative[1],\n grid=False)\n\n\nclass _SplineListND(_SplineList):\n r\"\"\"List of interpolations.\n\n List of interpolations for objects with domain\n dimension > 2. Calling internally during the creationg of the\n evaluator.\n\n Only linear and nearest interpolations are available for objects with\n domain dimension >= 3. 
Uses internally the scipy interpolation\n RegularGridInterpolator.\n\n Args:\n grid_points (np.ndarray): Sample points of the fdatagrid.\n data_matrix (np.ndarray): Data matrix of the fdatagrid.\n k (integer): Order of the spline interpolations.\n\n Returns:\n (np.ndarray): Array of size n_samples x dim_codomain with the\n corresponding interpolation of the sample i, and image dimension j\n in the entry (i,j) of the array.\n\n Raises:\n ValueError: If the value of the interpolation k is not valid.\n\n \"\"\"\n\n def __init__(self, fdatagrid,\n interpolation_order=1,\n smoothness_parameter=0.):\n\n super().__init__(\n fdatagrid=fdatagrid,\n interpolation_order=interpolation_order,\n smoothness_parameter=smoothness_parameter)\n\n if self.smoothness_parameter != 0:\n raise ValueError(\"Smoothing interpolation is only supported with \"\n \"domain dimension up to 2, s should be 0.\")\n\n # Parses method of interpolation\n if self.interpolation_order == 0:\n method = 'nearest'\n elif self.interpolation_order == 1:\n method = 'linear'\n else:\n raise ValueError(\"interpolation order should be 0 (nearest) or 1 \"\n \"(linear).\")\n\n self.splines = np.empty(\n (fdatagrid.n_samples, fdatagrid.dim_codomain), dtype=object)\n\n for i in range(fdatagrid.n_samples):\n for j in range(fdatagrid.dim_codomain):\n self.splines[i, j] = RegularGridInterpolator(\n fdatagrid.grid_points, fdatagrid.data_matrix[i, ..., j],\n method, False)\n\n def _evaluate_one(self, spl, t, derivative=0):\n\n if derivative != 0:\n raise ValueError(\"derivates not suported for functional data \"\n \" with domain dimension greater than 2.\")\n\n return spl(t)\n\n\nclass SplineInterpolation(Evaluator):\n r\"\"\"Spline interpolation of :class:`FDataGrid`.\n\n Spline interpolation of discretized functional objects. 
Implements\n different interpolation methods based in splines, using the sample\n points of the grid as nodes to interpolate.\n\n See the interpolation example to a detailled explanation.\n\n Attributes:\n interpolation_order (int, optional): Order of the interpolation, 1\n for linear interpolation, 2 for cuadratic, 3 for cubic and so\n on. In case of curves and surfaces there is available\n interpolation up to degree 5. For higher dimensional objects\n only linear or nearest interpolation is available. Default\n lineal interpolation.\n smoothness_parameter (float, optional): Penalisation to perform\n smoothness interpolation. Option only available for curves and\n surfaces. If 0 the residuals of the interpolation will be 0.\n Defaults 0.\n monotone (boolean, optional): Performs monotone interpolation in\n curves using a PCHIP interpolator. Only valid for curves (domain\n dimension equal to 1) and interpolation order equal to 1 or 3.\n Defaults false.\n\n \"\"\"\n\n def __init__(self, interpolation_order=1, *, smoothness_parameter=0.,\n monotone=False):\n r\"\"\"Constructor of the SplineInterpolation.\n\n Args:\n interpolation_order (int, optional): Order of the interpolation, 1\n for linear interpolation, 2 for cuadratic, 3 for cubic and so\n on. In case of curves and surfaces there is available\n interpolation up to degree 5. For higher dimensional objects\n only linear or nearest interpolation is available. Default\n lineal interpolation.\n smoothness_parameter (float, optional): Penalisation to perform\n smoothness interpolation. Option only available for curves and\n surfaces. If 0 the residuals of the interpolation will be 0.\n Defaults 0.\n monotone (boolean, optional): Performs monotone interpolation in\n curves using a PCHIP interpolation. 
Only valid for curves\n (domain dimension equal to 1) and interpolation order equal\n to 1 or 3.\n Defaults false.\n\n \"\"\"\n self._interpolation_order = interpolation_order\n self._smoothness_parameter = smoothness_parameter\n self._monotone = monotone\n\n @property\n def interpolation_order(self):\n \"Returns the interpolation order\"\n return self._interpolation_order\n\n @property\n def smoothness_parameter(self):\n \"Returns the smoothness parameter\"\n return self._smoothness_parameter\n\n @property\n def monotone(self):\n \"Returns flag to perform monotone interpolation\"\n return self._monotone\n\n def _build_interpolator(self, fdatagrid):\n\n if fdatagrid.dim_domain == 1:\n return _SplineList1D(\n fdatagrid=fdatagrid,\n interpolation_order=self.interpolation_order,\n smoothness_parameter=self.smoothness_parameter,\n monotone=self.monotone)\n\n elif self.monotone:\n raise ValueError(\"Monotone interpolation is only supported with \"\n \"domain dimension equal to 1.\")\n\n elif fdatagrid.dim_domain == 2:\n return _SplineList2D(\n fdatagrid=fdatagrid,\n interpolation_order=self.interpolation_order,\n smoothness_parameter=self.smoothness_parameter)\n\n else:\n return _SplineListND(\n fdatagrid=fdatagrid,\n interpolation_order=self.interpolation_order,\n smoothness_parameter=self.smoothness_parameter)\n\n def evaluate(self, fdata, eval_points, *, aligned=True):\n\n spline_list = self._build_interpolator(fdata)\n\n return spline_list.evaluate(fdata, eval_points, aligned=aligned)\n\n def __repr__(self):\n \"\"\"repr method of the interpolation\"\"\"\n return (f\"{type(self).__name__}(\"\n f\"interpolation_order={self.interpolation_order}, \"\n f\"smoothness_parameter={self.smoothness_parameter}, \"\n f\"monotone={self.monotone})\")\n\n def __eq__(self, other):\n \"\"\"Equality operator between SplineInterpolation\"\"\"\n return (super().__eq__(other) and\n self.interpolation_order == other.interpolation_order and\n self.smoothness_parameter == 
other.smoothness_parameter and\n self.monotone == other.monotone)\n", "\"\"\"\nBoxplot\n=======\n\nShows the use of the functional Boxplot applied to the Canadian Weather\ndataset.\n\"\"\"\n\n# Author: Amanda Hernando Bernabé\n# License: MIT\n\n# sphinx_gallery_thumbnail_number = 2\n\nfrom skfda import datasets\nfrom skfda.exploratory.depth import ModifiedBandDepth, IntegratedDepth\nfrom skfda.exploratory.visualization import Boxplot\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n##############################################################################\n# First, the Canadian Weather dataset is downloaded from the package 'fda' in\n# CRAN. It contains a FDataGrid with daily temperatures and precipitations,\n# that is, it has a 2-dimensional image. We are interested only in the daily\n# average temperatures, so we will use the first coordinate.\ndataset = datasets.fetch_weather()\nfd = dataset[\"data\"]\nfd_temperatures = fd.coordinates[0]\n\n##############################################################################\n# The data is plotted to show the curves we are working with. They are divided\n# according to the target. In this case, it includes the different climates to\n# which the weather stations belong to.\n\n# Each climate is assigned a color. Defaults to grey.\ncolormap = plt.cm.get_cmap('seismic')\nlabel_names = dataset[\"target_names\"]\nnlabels = len(label_names)\nlabel_colors = colormap(np.arange(nlabels) / (nlabels - 1))\n\nfd_temperatures.plot(group=dataset[\"target\"],\n group_colors=label_colors,\n group_names=label_names)\n\n\n##############################################################################\n# We instantiate a :class:`~skfda.exploratory.visualization.Boxplot`\n# object with the data, and we call its\n# :func:`~skfda.exploratory.visualization.Boxplot.plot` function to show the\n# graph.\n#\n# By default, only the part of the outlier curves which falls out of the\n# central regions is plotted. 
We want the entire curve to be shown, that is\n# why the ``show_full_outliers`` parameter is set to True.\n\nfdBoxplot = Boxplot(fd_temperatures)\nfdBoxplot.show_full_outliers = True\n\nfdBoxplot.plot()\n\n##############################################################################\n# We can observe in the boxplot the median in black, the central region (where\n# the 50% of the most centered samples reside) in pink and the envelopes and\n# vertical lines in blue. The outliers detected, those samples with at least a\n# point outside the outlying envelope, are represented with a red dashed line.\n# The colors can be customized.\n#\n# The outliers are shown below with respect to the other samples.\n\ncolor = 0.3\noutliercol = 0.7\n\nfd_temperatures.plot(group=fdBoxplot.outliers.astype(int),\n group_colors=colormap([color, outliercol]),\n group_names=[\"nonoutliers\", \"outliers\"])\n\n##############################################################################\n# The curves pointed as outliers are are those curves with significantly lower\n# values than the rest. This is the expected result due to the depth measure\n# used, :func:`~skfda.exploratory.depth.IntegratedDepth`, which ranks\n# the samples according to their magnitude.\n#\n# The :class:`~skfda.exploratory.visualization.Boxplot` object admits any\n# :ref:`depth measure <depth-measures>` defined or customized by the user. Now\n# the call is done with the :class:`~skfda.exploratory.depth.ModifiedBandDepth`\n# and the factor is reduced in order to designate some samples as outliers\n# (otherwise, with this measure and the default factor, none of the curves are\n# pointed out as outliers). We can see that the outliers detected belong to\n# the Pacific and Arctic climates which are less common to find in Canada. 
As\n# a consequence, this measure detects better shape outliers compared to the\n# previous one.\n\nfdBoxplot = Boxplot(\n fd_temperatures, depth_method=ModifiedBandDepth(), factor=0.4)\nfdBoxplot.show_full_outliers = True\n\nfdBoxplot.plot()\n\n##############################################################################\n# Another functionality implemented in this object is the enhanced functional\n# boxplot, which can include other central regions, apart from the central or\n# 50% one.\n#\n# In the following instantiation, the\n# :func:`~skfda.exploratory.depth.IntegratedDepth` is used and the 25% and\n# 75% central regions are specified.\n\nfdBoxplot = Boxplot(fd_temperatures, depth_method=IntegratedDepth(),\n prob=[0.75, 0.5, 0.25])\nfdBoxplot.plot()\n\n##############################################################################\n# The above two lines could be replaced just by fdBoxplot inside a notebook\n# since the default representation of the\n# :class:`~skfda.exploratory.visualization.Boxplot` is the image of the plot.\n", "\nfrom fdasrsf.utility_functions import optimum_reparam\nimport scipy.integrate\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.utils.validation import check_is_fitted\n\nimport numpy as np\n\nfrom . import invert_warping\nfrom ... import FDataGrid\nfrom ..._utils import check_is_univariate\nfrom ...representation.interpolation import SplineInterpolation\nfrom ._warping import _normalize_scale\nfrom .base import RegistrationTransformer\n\n\n__author__ = \"Pablo Marcos Manchón\"\n__email__ = \"[email protected]\"\n\n###############################################################################\n# Based on the original implementation of J. Derek Tucker in #\n# *fdasrsf_python* (https://github.com/jdtuck/fdasrsf_python) #\n# and *ElasticFDA.jl* (https://github.com/jdtuck/ElasticFDA.jl). 
#\n###############################################################################\n\n\nclass SRSF(BaseEstimator, TransformerMixin):\n r\"\"\"Square-Root Slope Function (SRSF) transform.\n\n Let :math:`f : [a,b] \\rightarrow \\mathbb{R}` be an absolutely continuous\n function, the SRSF transform is defined as\n\n .. math::\n SRSF(f(t)) = sgn(f(t)) \\sqrt{|\\dot f(t)|} = q(t)\n\n This representation it is used to compute the extended non-parametric\n Fisher-Rao distance between functions, wich under the SRSF representation\n becomes the usual :math:`\\mathbb{L}^2` distance between functions.\n See [SK16-4-6]_ .\n\n The inverse SRSF transform is defined as\n\n .. math::\n f(t) = f(a) + \\int_{a}^t q(t)|q(t)|dt .\n\n This transformation is a mapping up to constant. Given the SRSF and the\n initial value :math:`f(a)` the original function can be obtained, for this\n reason it is necessary to store the value :math:`f(a)` during the fit,\n which is dropped due to derivation. If it is applied the inverse\n transformation without fit the estimator it is assumed that :math:`f(a)=0`.\n\n Attributes:\n eval_points (array_like, optional): Set of points where the\n functions are evaluated, by default uses the sample points of the\n fdatagrid.\n initial_value (float, optional): Initial value to apply in the\n inverse transformation. If `None` there are stored the initial\n values of the functions during the transformation to apply\n during the inverse transformation. Defaults None.\n\n Note:\n Due to the use of derivatives it is recommended that the samples are\n sufficiently smooth, or have passed a smoothing preprocessing before,\n in order to achieve good results.\n\n References:\n .. [SK16-4-6] Srivastava, Anuj & Klassen, Eric P. (2016). Functional\n and shape data analysis. In *Square-Root Slope Function\n Representation* (pp. 91-93). 
Springer.\n\n Examples:\n\n Create a toy dataset and apply the transformation and its inverse.\n\n >>> from skfda.datasets import make_sinusoidal_process\n >>> from skfda.preprocessing.registration.elastic import SRSF\n >>> fd = make_sinusoidal_process(error_std=0, random_state=0)\n >>> srsf = SRSF()\n >>> srsf\n SRSF(...)\n\n Fits the estimator (to apply the inverse transform) and apply the SRSF\n\n >>> q = srsf.fit_transform(fd)\n\n Apply the inverse transform.\n\n >>> fd_pull_back = srsf.inverse_transform(q)\n\n The original and the pull back `fd` are almost equal\n\n >>> zero = fd - fd_pull_back\n >>> zero.data_matrix.flatten().round(3)\n array([ 0., 0., 0., ..., -0., -0., -0.])\n\n \"\"\"\n\n def __init__(self, output_points=None, initial_value=None):\n \"\"\"Initializes the transformer.\n\n Args:\n eval_points: (array_like, optional): Set of points where the\n functions are evaluated, by default uses the sample points of\n the :class:`FDataGrid <skfda.FDataGrid>` transformed.\n initial_value (float, optional): Initial value to apply in the\n inverse transformation. If `None` there are stored the initial\n values of the functions during the transformation to apply\n during the inverse transformation. Defaults None.\n\n \"\"\"\n self.output_points = output_points\n self.initial_value = initial_value\n\n def fit(self, X=None, y=None):\n \"\"\"This transformer do not need to be fitted.\n\n Args:\n X (Ignored): Present for API conventions.\n y (Ignored): Present for API conventions.\n\n Returns:\n (Estimator): self\n\n \"\"\"\n return self\n\n def transform(self, X: FDataGrid, y=None):\n r\"\"\"Computes the square-root slope function (SRSF) transform.\n\n Let :math:`f : [a,b] \\rightarrow \\mathbb{R}` be an absolutely continuous\n function, the SRSF transform is defined as [SK16-4-6-1]_:\n\n .. 
math::\n\n SRSF(f(t)) = sgn(f(t)) \\sqrt{\\dot f(t)|} = q(t)\n\n Args:\n X (:class:`FDataGrid`): Functions to be transformed.\n y (Ignored): Present for API conventions.\n\n Returns:\n :class:`FDataGrid`: SRSF functions.\n\n Raises:\n ValueError: If functions are not univariate.\n\n References:\n .. [SK16-4-6-1] Srivastava, Anuj & Klassen, Eric P. (2016).\n Functional and shape data analysis. In *Square-Root Slope\n Function Representation* (pp. 91-93). Springer.\n\n \"\"\"\n check_is_univariate(X)\n\n if self.output_points is None:\n output_points = X.grid_points[0]\n else:\n output_points = self.output_points\n\n g = X.derivative()\n\n # Evaluation with the corresponding interpolation\n data_matrix = g(output_points)[..., 0]\n\n # SRSF(f) = sign(f) * sqrt|Df| (avoiding multiple allocation)\n sign_g = np.sign(data_matrix)\n data_matrix = np.abs(data_matrix, out=data_matrix)\n data_matrix = np.sqrt(data_matrix, out=data_matrix)\n data_matrix *= sign_g\n\n # Store the values of the transformation\n if self.initial_value is None:\n a = X.domain_range[0][0]\n self.initial_value_ = X(a).reshape(X.n_samples, 1, X.dim_codomain)\n\n return X.copy(data_matrix=data_matrix, grid_points=output_points)\n\n def inverse_transform(self, X: FDataGrid, y=None):\n r\"\"\"Computes the inverse SRSF transform.\n\n Given the srsf and the initial value the original function can be\n obtained as [SK16-4-6-2]_ :\n\n .. math::\n f(t) = f(a) + \\int_{a}^t q(t)|q(t)|dt\n\n where :math:`q(t)=SRSF(f(t))`.\n\n If it is applied this inverse transformation without fitting the\n estimator it is assumed that :math:`f(a)=0`.\n\n Args:\n X (:class:`FDataGrid`): SRSF to be transformed.\n y (Ignored): Present for API conventions.\n\n Returns:\n :class:`FDataGrid`: Functions in the original space.\n\n Raises:\n ValueError: If functions are multidimensional.\n\n References:\n .. [SK16-4-6-2] Srivastava, Anuj & Klassen, Eric P. (2016).\n Functional and shape data analysis. 
In *Square-Root Slope\n Function Representation* (pp. 91-93). Springer.\n\n \"\"\"\n check_is_univariate(X)\n\n if self.initial_value is None and not hasattr(self, 'initial_value_'):\n raise AttributeError(\"When initial_value=None is expected a \"\n \"previous transformation of the data to \"\n \"store the initial values to apply in the \"\n \"inverse transformation. Also it is possible \"\n \"to fix these values setting the attribute\"\n \"initial value without a previous \"\n \"transformation.\")\n\n if self.output_points is None:\n output_points = X.grid_points[0]\n else:\n output_points = self.output_points\n\n data_matrix = X(output_points)\n\n data_matrix *= np.abs(data_matrix)\n\n f_data_matrix = scipy.integrate.cumtrapz(data_matrix, x=output_points,\n axis=1, initial=0)\n\n # If the transformer was fitted, sum the initial value\n if self.initial_value is None:\n f_data_matrix += self.initial_value_\n else:\n f_data_matrix += self.initial_value\n\n return X.copy(data_matrix=f_data_matrix, grid_points=output_points)\n\n\ndef _elastic_alignment_array(template_data, q_data,\n eval_points, penalty, grid_dim):\n r\"\"\"Wrapper between the cython interface and python.\n\n Selects the corresponding routine depending on the dimensions of the\n arrays.\n\n Args:\n template_data (numpy.ndarray): Array with the srsf of the template.\n q_data (numpy.ndarray): Array with the srsf of the curves\n to be aligned.\n eval_points (numpy.ndarray): Discretisation points of the functions.\n penalty (float): Penalisation term.\n grid_dim (int): Dimension of the grid used in the alignment algorithm.\n\n Return:\n (numpy.ndarray): Array with the same shape than q_data with the srsf of\n the functions aligned to the template(s).\n \"\"\"\n\n return optimum_reparam(np.ascontiguousarray(template_data.T),\n np.ascontiguousarray(eval_points),\n np.ascontiguousarray(q_data.T),\n method=\"DP2\",\n lam=penalty, grid_dim=grid_dim).T\n\n\nclass ElasticRegistration(RegistrationTransformer):\n 
r\"\"\"Align a FDatagrid using the SRSF framework.\n\n Let :math:`f` be a function of the functional data object wich will be\n aligned to the template :math:`g`. Calculates the warping wich minimises\n the Fisher-Rao distance between :math:`g` and the registered function\n :math:`f^*(t)=f(\\gamma^*(t))=f \\circ \\gamma^*`.\n\n .. math::\n \\gamma^* = argmin_{\\gamma \\in \\Gamma} d_{\\lambda}(f \\circ\n \\gamma, g)\n\n Where :math:`d_{\\lambda}` denotes the extended Fisher-Rao distance with a\n penalty term, used to control the amount of warping.\n\n .. math::\n d_{\\lambda}^2(f \\circ \\gamma, g) = \\| SRSF(f \\circ \\gamma)\n \\sqrt{\\dot{\\gamma}} - SRSF(g)\\|_{\\mathbb{L}^2}^2 + \\lambda\n \\mathcal{R}(\\gamma)\n\n In the implementation it is used as penalty term\n\n .. math::\n \\mathcal{R}(\\gamma) = \\|\\sqrt{\\dot{\\gamma}}- 1 \\|_{\\mathbb{L}^2}^2\n\n Wich restrict the amount of elasticity employed in the alignment.\n\n The registered function :math:`f^*(t)` can be calculated using the\n composition :math:`f^*(t)=f(\\gamma^*(t))`.\n\n If the template is not specified it is used the Karcher mean of the set of\n functions under the elastic metric to perform the alignment, also known as\n `elastic mean`, wich is the local minimum of the sum of squares of elastic\n distances. See :func:`~elastic_mean`.\n\n In [SK16-4-2]_ are described extensively the algorithms employed and\n the SRSF framework.\n\n Args:\n template (str, :class:`FDataGrid` or callable, optional): Template to\n align the curves. Can contain 1 sample to align all the curves to\n it or the same number of samples than the fdatagrid. 
By default\n `elastic mean`, in which case :func:`elastic_mean` is called.\n penalty_term (float, optional): Controls the amount of elasticity.\n Defaults to 0.\n output_points (array_like, optional): Set of points where the\n functions are evaluated, by default uses the sample points of the\n fdatagrid which will be transformed.\n grid_dim (int, optional): Dimension of the grid used in the DP\n alignment algorithm. Defaults 7.\n\n Attributes:\n template_ (:class:`FDataGrid`): Template learned during fitting,\n used for alignment in :meth:`transform`.\n warping_ (:class:`FDataGrid`): Warping applied during the last\n transformation.\n\n References:\n .. [SK16-4-2] Srivastava, Anuj & Klassen, Eric P. (2016). Functional\n and shape data analysis. In *Functional Data and Elastic\n Registration* (pp. 73-122). Springer.\n\n Examples:\n\n Elastic registration of with train/test sets.\n\n >>> from skfda.preprocessing.registration import \\\n ... ElasticRegistration\n >>> from skfda.datasets import make_multimodal_samples\n >>> X_train = make_multimodal_samples(n_samples=15, random_state=0)\n >>> X_test = make_multimodal_samples(n_samples=3, random_state=1)\n\n Fit the transformer, which learns the elastic mean of the train\n set as template.\n\n >>> elastic_registration = ElasticRegistration()\n >>> elastic_registration.fit(X_train)\n ElasticRegistration(...)\n\n Registration of the test set.\n\n >>> elastic_registration.transform(X_test)\n FDataGrid(...)\n\n \"\"\"\n\n def __init__(self, template=\"elastic mean\", penalty=0., output_points=None,\n grid_dim=7):\n \"\"\"Initializes the registration transformer\"\"\"\n\n self.template = template\n self.penalty = penalty\n self.output_points = output_points\n self.grid_dim = grid_dim\n\n def fit(self, X: FDataGrid=None, y=None):\n \"\"\"Fit the transformer.\n\n Learns the template used during the transformation.\n\n Args:\n X (FDataGrid, optionl): Functional samples used as training\n samples. 
If the template provided it is an FDataGrid this\n samples are it is not need to construct the template from the\n samples and this argument is ignored.\n y (Ignored): Present for API conventions.\n\n Returns:\n RegistrationTransformer: self.\n\n \"\"\"\n if isinstance(self.template, FDataGrid):\n self.template_ = self.template # Template already constructed\n elif X is None:\n raise ValueError(\"Must be provided a dataset X to construct the \"\n \"template.\")\n elif self.template == \"elastic mean\":\n self.template_ = elastic_mean(X)\n else:\n self.template_ = self.template(X)\n\n # Constructs the SRSF of the template\n srsf = SRSF(output_points=self.output_points, initial_value=0)\n self._template_srsf = srsf.fit_transform(self.template_)\n\n return self\n\n def transform(self, X: FDataGrid, y=None):\n \"\"\"Apply elastic registration to the data.\n\n Args:\n X (:class:`FDataGrid`): Functional data to be registered.\n y (ignored): Present for API conventions.\n\n Returns:\n :class:`FDataGrid`: Registered samples.\n\n \"\"\"\n check_is_fitted(self, '_template_srsf')\n check_is_univariate(X)\n\n if (len(self._template_srsf) != 1 and\n len(X) != len(self._template_srsf)):\n\n raise ValueError(\"The template should contain one sample to align \"\n \"all the curves to the same function or the \"\n \"same number of samples than X.\")\n\n srsf = SRSF(output_points=self.output_points, initial_value=0)\n fdatagrid_srsf = srsf.fit_transform(X)\n\n # Points of discretization\n if self.output_points is None:\n output_points = fdatagrid_srsf.grid_points[0]\n else:\n output_points = self.output_points\n\n # Discretizacion in evaluation points\n q_data = fdatagrid_srsf(output_points)[..., 0]\n template_data = self._template_srsf(output_points)[..., 0]\n\n if q_data.shape[0] == 1:\n q_data = q_data[0]\n\n if template_data.shape[0] == 1:\n template_data = template_data[0]\n\n # Values of the warping\n gamma = _elastic_alignment_array(template_data, q_data,\n 
_normalize_scale(output_points),\n self.penalty, self.grid_dim)\n\n # Normalize warping to original interval\n gamma = _normalize_scale(\n gamma, a=output_points[0], b=output_points[-1])\n\n # Interpolation\n interpolation = SplineInterpolation(\n interpolation_order=3, monotone=True)\n\n self.warping_ = FDataGrid(gamma, output_points,\n interpolation=interpolation)\n\n return X.compose(self.warping_, eval_points=output_points)\n\n def inverse_transform(self, X: FDataGrid, y=None):\n r\"\"\"Reverse the registration procedure previosly applied.\n\n Let :math:`gamma(t)` the warping applied to construct a registered\n functional datum :math:`f^*(t)=f(\\gamma(t))`.\n\n Given a functional datum :math:`f^*(t) it is computed\n :math:`\\gamma^{-1}(t)` to reverse the registration procedure\n :math:`f(t)=f^*(\\gamma^{-1}(t))`.\n\n Args:\n X (:class:`FDataGrid`): Functional data to apply the reverse\n transform.\n y (Ignored): Present for API conventions.\n\n Returns:\n :class:`FDataGrid`: Functional data compose by the inverse warping.\n\n Raises:\n ValueError: If the warpings :math:`\\gamma` were not build via\n :meth:`transform` or if the number of samples of `X` is different\n than the number of samples of the dataset previosly transformed.\n\n Examples:\n\n Center the datasets taking into account the misalignment.\n\n >>> from skfda.preprocessing.registration import \\\n ... 
ElasticRegistration\n >>> from skfda.datasets import make_multimodal_samples\n >>> X = make_multimodal_samples(random_state=0)\n\n Registration of the dataset.\n\n >>> elastic_registration = ElasticRegistration()\n >>> X = elastic_registration.fit_transform(X)\n\n Substract the elastic mean build as template during the\n registration and reverse the transformation.\n\n >>> X = X - elastic_registration.template_\n >>> X_center = elastic_registration.inverse_transform(X)\n >>> X_center\n FDataGrid(...)\n\n\n See also:\n :func:`invert_warping`\n\n \"\"\"\n if not hasattr(self, 'warping_'):\n raise ValueError(\"Data must be previosly transformed to apply the \"\n \"inverse transform\")\n elif len(X) != len(self.warping_):\n raise ValueError(\"Data must contain the same number of samples \"\n \"than the dataset previously transformed\")\n\n inverse_warping = invert_warping(self.warping_)\n\n return X.compose(inverse_warping, eval_points=self.output_points)\n\n\ndef warping_mean(warping, *, max_iter=100, tol=1e-6, step_size=.3):\n r\"\"\"Compute the karcher mean of a set of warpings.\n\n Let :math:`\\gamma_i i=1...n` be a set of warping functions\n :math:`\\gamma_i:[a,b] \\rightarrow [a,b]` in :math:`\\Gamma`, i.e.,\n monotone increasing and with the restriction :math:`\\gamma_i(a)=a \\,\n \\gamma_i(b)=b`.\n\n The karcher mean :math:`\\bar \\gamma` is defined as the warping that\n minimises locally the sum of Fisher-Rao squared distances.\n [SK16-8-3-2]_.\n\n .. math::\n \\bar \\gamma = argmin_{\\gamma \\in \\Gamma} \\sum_{i=1}^{n}\n d_{FR}^2(\\gamma, \\gamma_i)\n\n The computation is performed using the structure of Hilbert Sphere obtained\n after a transformation of the warpings, see [S11-3-3]_.\n\n Args:\n warping (:class:`~skfda.FDataGrid`): Set of warpings.\n max_iter (int): Maximum number of interations. 
Defaults to 100.\n tol (float): Convergence criterion, if the norm of the mean of the\n shooting vectors, :math:`| \\bar v |<tol`, the algorithm will stop.\n Defaults to 1e-5.\n step_size (float): Step size :math:`\\epsilon` used to update the mean.\n Default to 1.\n\n Return:\n (:class:`~skfda.FDataGrid`) Fdatagrid with the mean of the warpings. If\n shooting is True the shooting vectors will be returned in a tuple with\n the mean.\n\n References:\n .. [SK16-8-3-2] Srivastava, Anuj & Klassen, Eric P. (2016). Functional\n and shape data analysis. In *Template: Center of the Mean Orbit*\n (pp. 274-277). Springer.\n\n .. [S11-3-3] Srivastava, Anuj et. al. Registration of Functional Data\n Using Fisher-Rao Metric (2011). In *Center of an Orbit* (pp. 9-10).\n arXiv:1103.3817v2.\n \"\"\"\n\n eval_points = warping.grid_points[0]\n original_eval_points = eval_points\n\n # Rescale warping to (0, 1)\n if warping.grid_points[0][0] != 0 or warping.grid_points[0][-1] != 1:\n\n eval_points = _normalize_scale(eval_points)\n warping = FDataGrid(_normalize_scale(warping.data_matrix[..., 0]),\n _normalize_scale(warping.grid_points[0]))\n\n # Compute srsf of warpings and their mean\n srsf = SRSF(output_points=eval_points, initial_value=0)\n psi = srsf.fit_transform(warping)\n\n # Find psi closest to the mean\n psi_centered = psi - srsf.fit_transform(warping.mean())\n psi_data = psi_centered.data_matrix[..., 0]\n np.square(psi_data, out=psi_data)\n d = psi_data.sum(axis=1).argmin()\n\n # Get raw values to calculate\n mu = psi[d].data_matrix[0, ..., 0]\n psi = psi.data_matrix[..., 0]\n vmean = np.empty((1, len(eval_points)))\n\n # Construction of shooting vectors\n for _ in range(max_iter):\n\n vmean[0] = 0.\n # Compute shooting vectors\n for i in range(len(warping)):\n psi_i = psi[i]\n\n inner = scipy.integrate.simps(mu * psi_i, x=eval_points)\n inner = max(min(inner, 1), -1)\n\n theta = np.arccos(inner)\n\n if theta > 1e-10:\n vmean += theta / np.sin(theta) * (psi_i - np.cos(theta) 
* mu)\n\n # Mean of shooting vectors\n vmean /= warping.n_samples\n v_norm = np.sqrt(scipy.integrate.simps(np.square(vmean)))\n\n # Convergence criterion\n if v_norm < tol:\n break\n\n # Calculate exponential map of mu\n a = np.cos(step_size * v_norm)\n b = np.sin(step_size * v_norm) / v_norm\n mu = a * mu + b * vmean\n\n # Recover mean in original gamma space\n warping_mean = scipy.integrate.cumtrapz(np.square(mu, out=mu)[0],\n x=eval_points, initial=0)\n\n # Affine traslation to original scale\n warping_mean = _normalize_scale(warping_mean,\n a=original_eval_points[0],\n b=original_eval_points[-1])\n\n monotone_interpolation = SplineInterpolation(interpolation_order=3,\n monotone=True)\n\n mean = FDataGrid([warping_mean], grid_points=original_eval_points,\n interpolation=monotone_interpolation)\n\n return mean\n\n\ndef elastic_mean(fdatagrid, *, penalty=0., center=True, max_iter=20, tol=1e-3,\n initial=None, grid_dim=7, **kwargs):\n r\"\"\"Compute the karcher mean under the elastic metric.\n\n Calculates the karcher mean of a set of functional samples in the amplitude\n space :math:`\\mathcal{A}=\\mathcal{F}/\\Gamma`.\n\n Let :math:`q_i` the corresponding SRSF of the observation :math:`f_i`.\n The space :math:`\\mathcal{A}` is defined using the equivalence classes\n :math:`[q_i]=\\{ q_i \\circ \\gamma \\| \\gamma \\in \\Gamma \\}`, where\n :math:`\\Gamma` denotes the space of warping functions. The karcher mean\n in this space is defined as\n\n .. math::\n [\\mu_q] = argmin_{[q] \\in \\mathcal{A}} \\sum_{i=1}^n\n d_{\\lambda}^2([q],[q_i])\n\n Once :math:`[\\mu_q]` is obtained it is selected the element of the\n equivalence class which makes the mean of the warpings employed be the\n identity.\n\n See [SK16-8-3-1]_ and [S11-3]_.\n\n Args:\n fdatagrid (:class:`~skfda.FDataGrid`): Set of functions to compute the\n mean.\n penalty (float): Penalisation term. 
Defaults to 0.\n center (boolean): If true it is computed the mean of the warpings and\n used to select a central mean. Defaults True.\n max_iter (int): Maximum number of iterations. Defaults to 20.\n tol (float): Convergence criterion, the algorithm will stop if\n :math:`|mu_{(\\nu)} - mu_{(\\nu - 1)}|_2 / | mu_{(\\nu-1)} |_2 < tol`.\n initial (float): Value of the mean at the starting point. By default\n takes the average of the initial points of the samples.\n grid_dim (int, optional): Dimension of the grid used in the alignment\n algorithm. Defaults 7.\n ** kwargs : Named options to be pased to :func:`warping_mean`.\n\n Return:\n :class:`~skfda.FDataGrid`: FDatagrid with the mean of the functions.\n\n Raises:\n ValueError: If the object is multidimensional or the shape of the srsf\n do not match with the fdatagrid.\n\n References:\n .. [SK16-8-3-1] Srivastava, Anuj & Klassen, Eric P. (2016). Functional\n and shape data analysis. In *Karcher Mean of Amplitudes*\n (pp. 273-274). Springer.\n\n .. [S11-3] Srivastava, Anuj et. al. Registration of Functional Data\n Using Fisher-Rao Metric (2011). In *Karcher Mean and Function\n Alignment* (pp. 7-10). 
arXiv:1103.3817v2.\n\n \"\"\"\n check_is_univariate(fdatagrid)\n\n srsf_transformer = SRSF(initial_value=0)\n fdatagrid_srsf = srsf_transformer.fit_transform(fdatagrid)\n eval_points = fdatagrid.grid_points[0]\n\n eval_points_normalized = _normalize_scale(eval_points)\n y_scale = eval_points[-1] - eval_points[0]\n\n interpolation = SplineInterpolation(interpolation_order=3, monotone=True)\n\n # Discretisation points\n fdatagrid_normalized = FDataGrid(fdatagrid(eval_points) / y_scale,\n grid_points=eval_points_normalized)\n\n srsf = fdatagrid_srsf(eval_points)[..., 0]\n\n # Initialize with function closest to the L2 mean with the L2 distance\n centered = (srsf.T - srsf.mean(axis=0, keepdims=True).T).T\n\n distances = scipy.integrate.simps(np.square(centered, out=centered),\n eval_points_normalized, axis=1)\n\n # Initialization of iteration\n mu = srsf[np.argmin(distances)]\n mu_aux = np.empty(mu.shape)\n mu_1 = np.empty(mu.shape)\n\n # Main iteration\n for _ in range(max_iter):\n\n gammas = _elastic_alignment_array(\n mu, srsf, eval_points_normalized, penalty, grid_dim)\n gammas = FDataGrid(gammas, grid_points=eval_points_normalized,\n interpolation=interpolation)\n\n fdatagrid_normalized = fdatagrid_normalized.compose(gammas)\n srsf = srsf_transformer.transform(\n fdatagrid_normalized).data_matrix[..., 0]\n\n # Next iteration\n mu_1 = srsf.mean(axis=0, out=mu_1)\n\n # Convergence criterion\n mu_norm = np.sqrt(scipy.integrate.simps(np.square(mu, out=mu_aux),\n eval_points_normalized))\n\n mu_diff = np.sqrt(scipy.integrate.simps(np.square(mu - mu_1,\n out=mu_aux),\n eval_points_normalized))\n\n if mu_diff / mu_norm < tol:\n break\n\n mu = mu_1\n\n if initial is None:\n initial = fdatagrid.data_matrix[:, 0].mean()\n\n srsf_transformer.set_params(initial_value=initial)\n\n # Karcher mean orbit in space L2/Gamma\n karcher_mean = srsf_transformer.inverse_transform(\n fdatagrid.copy(data_matrix=[mu], grid_points=eval_points,\n sample_names=(\"Karcher mean\",)))\n\n if 
center:\n # Gamma mean in Hilbert Sphere\n mean_normalized = warping_mean(gammas, **kwargs)\n\n gamma_mean = FDataGrid(_normalize_scale(\n mean_normalized.data_matrix[..., 0],\n a=eval_points[0],\n b=eval_points[-1]),\n grid_points=eval_points)\n\n gamma_inverse = invert_warping(gamma_mean)\n\n karcher_mean = karcher_mean.compose(gamma_inverse)\n\n # Return center of the orbit\n return karcher_mean\n", "import dcor\n\nimport scipy.signal\nimport sklearn.base\nimport sklearn.utils\n\nimport numpy as np\n\nfrom ....representation import FDataGrid\n\n\ndef _compute_dependence(X, y, *, dependence_measure):\n '''\n Computes the dependence of each point in each trajectory in X with the\n corresponding class label in Y.\n '''\n\n # Move n_samples to the end\n # The shape is now input_shape + n_samples + n_output\n X = np.moveaxis(X, 0, -2)\n\n input_shape = X.shape[:-2]\n\n # Join input in a list for rowwise\n X = X.reshape(-1, X.shape[-2], X.shape[-1])\n\n if y.ndim == 1:\n y = np.atleast_2d(y).T\n Y = np.array([y] * len(X))\n\n dependence_results = dcor.rowwise(dependence_measure, X, Y)\n\n return dependence_results.reshape(input_shape)\n\n\ndef select_local_maxima(X, *, order: int=1):\n r'''\n Compute local maxima of an array.\n\n Points near the boundary are considered maxima looking only at one side.\n\n For flat regions only the boundary points of the flat region could be\n considered maxima.\n\n Parameters:\n\n X (numpy array): Where to compute the local maxima.\n order (callable): How many points on each side to look, to check if\n a point is a maximum in that interval.\n\n Examples:\n\n >>> from skfda.preprocessing.dim_reduction.variable_selection.\\\n ... 
maxima_hunting import select_local_maxima\n >>> import numpy as np\n\n >>> x = np.array([2, 1, 1, 1, 2, 3, 3, 3, 2, 3, 4, 3, 2])\n >>> select_local_maxima(x).astype(np.int_)\n array([ 0, 5, 7, 10])\n\n The ``order`` parameter can be used to check a larger interval to see\n if a point is still a maxima, effectively eliminating small local\n maxima.\n\n >>> x = np.array([2, 1, 1, 1, 2, 3, 3, 3, 2, 3, 4, 3, 2])\n >>> select_local_maxima(x, order=3).astype(np.int_)\n array([ 0, 5, 10])\n\n '''\n indexes = scipy.signal.argrelextrema(\n X, comparator=np.greater_equal, order=order)[0]\n\n # Discard flat\n maxima = X[indexes]\n\n left_points = np.take(X, indexes - 1, mode='clip')\n right_points = np.take(X, indexes + 1, mode='clip')\n\n is_not_flat = (maxima > left_points) | (maxima > right_points)\n\n return indexes[is_not_flat]\n\n\nclass MaximaHunting(sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):\n r'''\n Maxima Hunting variable selection.\n\n This is a filter variable selection method for problems with a target\n variable. It evaluates a dependence measure between each point of the\n function and the target variable, and keeps those points in which this\n dependence is a local maximum.\n\n Selecting the local maxima serves two purposes. First, it ensures that\n the points that are relevant in isolation are selected, as they must\n maximice their dependence with the target variable. Second, the points\n that are relevant only because they are near a relevant point (and are\n thus highly correlated with it) are NOT selected, as only local maxima\n are selected, minimizing the redundancy of the selected variables.\n\n For a longer explanation about the method, and comparison with other\n functional variable selection methods, we refer the reader to the\n original article [1]_.\n\n Parameters:\n\n dependence_measure (callable): Dependence measure to use. 
By default,\n it uses the bias corrected squared distance correlation.\n local_maxima_selector (callable): Function to detect local maxima. The\n default is :func:`select_local_maxima` with ``order`` parameter\n equal to one. The original article used a similar function testing\n different values of ``order``.\n\n Examples:\n\n >>> from skfda.preprocessing.dim_reduction import variable_selection\n >>> from skfda.preprocessing.dim_reduction.variable_selection.\\\n ... maxima_hunting import select_local_maxima\n >>> from skfda.datasets import make_gaussian_process\n >>> from functools import partial\n >>> import skfda\n >>> import numpy as np\n\n We create trajectories from two classes, one with zero mean and the\n other with a peak-like mean. Both have Brownian covariance.\n\n >>> n_samples = 10000\n >>> n_features = 100\n >>>\n >>> def mean_1(t):\n ... return (np.abs(t - 0.25)\n ... - 2 * np.abs(t - 0.5)\n ... + np.abs(t - 0.75))\n >>>\n >>> X_0 = make_gaussian_process(n_samples=n_samples // 2,\n ... n_features=n_features,\n ... random_state=0)\n >>> X_1 = make_gaussian_process(n_samples=n_samples // 2,\n ... n_features=n_features,\n ... mean=mean_1,\n ... random_state=1)\n >>> X = skfda.concatenate((X_0, X_1))\n >>>\n >>> y = np.zeros(n_samples)\n >>> y [n_samples // 2:] = 1\n\n Select the relevant points to distinguish the two classes\n\n >>> local_maxima_selector = partial(select_local_maxima, order=10)\n >>> mh = variable_selection.MaximaHunting(\n ... local_maxima_selector=local_maxima_selector)\n >>> _ = mh.fit(X, y)\n >>> point_mask = mh.get_support()\n >>> points = X.grid_points[0][point_mask]\n >>> np.allclose(points, [0.5], rtol=0.1)\n True\n\n Apply the learned dimensionality reduction\n\n >>> X_dimred = mh.transform(X)\n >>> len(X.grid_points[0])\n 100\n >>> X_dimred.shape\n (10000, 1)\n\n References:\n\n .. [1] J. R. Berrendero, A. Cuevas, and J. L. 
Torrecilla, “Variable\n selection in functional data classification: a maxima-hunting\n proposal,” STAT SINICA, vol. 26, no. 2, pp. 619–638, 2016,\n doi: 10.5705/ss.202014.0014.\n\n '''\n\n def __init__(self,\n dependence_measure=dcor.u_distance_correlation_sqr,\n local_maxima_selector=select_local_maxima):\n self.dependence_measure = dependence_measure\n self.local_maxima_selector = local_maxima_selector\n\n def fit(self, X: FDataGrid, y):\n\n self.features_shape_ = X.data_matrix.shape[1:]\n self.dependence_ = _compute_dependence(\n X.data_matrix, y,\n dependence_measure=self.dependence_measure)\n\n self.indexes_ = self.local_maxima_selector(self.dependence_)\n\n sorting_indexes = np.argsort(self.dependence_[self.indexes_])[::-1]\n self.sorted_indexes_ = self.indexes_[sorting_indexes]\n\n return self\n\n def get_support(self, indices: bool=False):\n if indices:\n return self.indexes_\n else:\n mask = np.zeros(self.features_shape_[0:-1], dtype=bool)\n mask[self.indexes_] = True\n return mask\n\n def transform(self, X, y=None):\n\n sklearn.utils.validation.check_is_fitted(self)\n\n if X.data_matrix.shape[1:] != self.features_shape_:\n raise ValueError(\"The trajectories have a different number of \"\n \"points than the ones fitted\")\n\n return X.data_matrix[:, self.sorted_indexes_].reshape(X.n_samples, -1)\n", "\"\"\"Functional Data Boxplot Module.\n\nThis module contains the classes to construct the functional data boxplot and\nvisualize it.\n\n\"\"\"\nfrom abc import ABC, abstractmethod\nimport math\n\nimport matplotlib\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom ..depth import ModifiedBandDepth\nfrom ..outliers import _envelopes\nfrom ._utils import (_figure_to_svg, _get_figure_and_axes,\n _set_figure_layout_for_fdata, _set_labels)\n\n\n__author__ = \"Amanda Hernando Bernabé\"\n__email__ = \"[email protected]\"\n\n\nclass FDataBoxplot(ABC):\n \"\"\"Abstract class inherited by the Boxplot and SurfaceBoxplot classes.\n\n It the data of the 
functional boxplot or surface boxplot of a FDataGrid\n object, depending on the dimensions of the domain, 1 or 2 respectively.\n\n It forces to both classes, Boxplot and SurfaceBoxplot to conain at least\n the median, central and outlying envelopes and a colormap for their\n graphical representation, obtained calling the plot method.\n\n \"\"\"\n @abstractmethod\n def __init__(self, factor=1.5):\n if factor < 0:\n raise ValueError(\"The number used to calculate the \"\n \"outlying envelope must be positive.\")\n self._factor = factor\n\n @property\n def factor(self):\n return self._factor\n\n @property\n def fdatagrid(self):\n pass\n\n @property\n def median(self):\n pass\n\n @property\n def central_envelope(self):\n pass\n\n @property\n def non_outlying_envelope(self):\n pass\n\n @property\n def colormap(self):\n return self._colormap\n\n @colormap.setter\n def colormap(self, value):\n if not isinstance(value, matplotlib.colors.LinearSegmentedColormap):\n raise ValueError(\"colormap must be of type \"\n \"matplotlib.colors.LinearSegmentedColormap\")\n self._colormap = value\n\n @abstractmethod\n def plot(self, chart=None, *, fig=None, axes=None,\n n_rows=None, n_cols=None):\n pass\n\n def _repr_svg_(self):\n fig = self.plot()\n plt.close(fig)\n\n return _figure_to_svg(fig)\n\n\nclass Boxplot(FDataBoxplot):\n r\"\"\"Representation of the functional boxplot.\n\n Class implementing the functionl boxplot which is an informative\n exploratory tool for visualizing functional data, as well as its\n generalization, the enhanced functional boxplot. Only supports 1\n dimensional domain functional data.\n\n Based on the center outward ordering induced by a :ref:`depth measure\n <depth-measures>` for functional data, the descriptive statistics of a\n functional boxplot are: the envelope of the 50% central region, the median\n curve,and the maximum non-outlying envelope. 
In addition, outliers can be\n detected in a functional boxplot by the 1.5 times the 50% central region\n empirical rule, analogous to the rule for classical boxplots.\n\n Args:\n\n fdatagrid (FDataGrid): Object containing the data.\n depth_method (:ref:`depth measure <depth-measures>`, optional):\n Method used to order the data. Defaults to :func:`modified\n band depth\n <skfda.exploratory.depth.ModifiedBandDepth>`.\n prob (list of float, optional): List with float numbers (in the\n range from 1 to 0) that indicate which central regions to\n represent.\n Defaults to [0.5] which represents the 50% central region.\n factor (double): Number used to calculate the outlying envelope.\n\n Attributes:\n\n fdatagrid (FDataGrid): Object containing the data.\n median (array, (fdatagrid.dim_codomain, ngrid_points)): contains\n the median/s.\n central_envelope (array, (fdatagrid.dim_codomain, 2, ngrid_points)):\n contains the central envelope/s.\n non_outlying_envelope (array, (fdatagrid.dim_codomain, 2,\n ngrid_points)):\n contains the non-outlying envelope/s.\n colormap (matplotlib.colors.LinearSegmentedColormap): Colormap from\n which the colors to represent the central regions are selected.\n envelopes (array, (fdatagrid.dim_codomain * ncentral_regions, 2,\n ngrid_points)): contains the region envelopes.\n outliers (array, (fdatagrid.dim_codomain, fdatagrid.n_samples)):\n contains the outliers.\n barcol (string): Color of the envelopes and vertical lines.\n outliercol (string): Color of the ouliers.\n mediancol (string): Color of the median.\n show_full_outliers (boolean): If False (the default) then only the part\n outside the box is plotted. If True, complete outling curves are\n plotted.\n\n Representation in a Jupyter notebook:\n\n .. 
jupyter-execute::\n\n from skfda.datasets import make_gaussian_process\n from skfda.misc.covariances import Exponential\n from skfda.exploratory.visualization import Boxplot\n\n fd = make_gaussian_process(\n n_samples=20, cov=Exponential(), random_state=3)\n\n Boxplot(fd)\n\n\n Examples:\n\n Function :math:`f : \\mathbb{R}\\longmapsto\\mathbb{R}`.\n\n >>> from skfda import FDataGrid\n >>> from skfda.exploratory.visualization import Boxplot\n >>>\n >>> data_matrix = [[1, 1, 2, 3, 2.5, 2],\n ... [0.5, 0.5, 1, 2, 1.5, 1],\n ... [-1, -1, -0.5, 1, 1, 0.5],\n ... [-0.5, -0.5, -0.5, -1, -1, -1]]\n >>> grid_points = [0, 2, 4, 6, 8, 10]\n >>> fd = FDataGrid(data_matrix, grid_points, dataset_name=\"dataset\",\n ... argument_names=[\"x_label\"],\n ... coordinate_names=[\"y_label\"])\n >>> Boxplot(fd)\n Boxplot(\n FDataGrid=FDataGrid(\n array([[[ 1. ],\n [ 1. ],\n [ 2. ],\n [ 3. ],\n [ 2.5],\n [ 2. ]],\n [[ 0.5],\n [ 0.5],\n [ 1. ],\n [ 2. ],\n [ 1.5],\n [ 1. ]],\n [[-1. ],\n [-1. ],\n [-0.5],\n [ 1. ],\n [ 1. ],\n [ 0.5]],\n [[-0.5],\n [-0.5],\n [-0.5],\n [-1. ],\n [-1. ],\n [-1. ]]]),\n grid_points=(array([ 0., 2., 4., 6., 8., 10.]),),\n domain_range=((0.0, 10.0),),\n dataset_name='dataset',\n argument_names=('x_label',),\n coordinate_names=('y_label',),\n ...),\n median=array([[ 0.5],\n [ 0.5],\n [ 1. ],\n [ 2. ],\n [ 1.5],\n [ 1. ]]),\n central envelope=(array([[-1. ],\n [-1. ],\n [-0.5],\n [ 1. ],\n [ 1. ],\n [ 0.5]]), array([[ 0.5],\n [ 0.5],\n [ 1. ],\n [ 2. ],\n [ 1.5],\n [ 1. ]])),\n non-outlying envelope=(array([[-1. ],\n [-1. ],\n [-0.5],\n [ 1. ],\n [ 1. ],\n [ 0.5]]), array([[ 0.5],\n [ 0.5],\n [ 1. ],\n [ 2. ],\n [ 1.5],\n [ 1. ]])),\n envelopes=[(array([[-1. ],\n [-1. ],\n [-0.5],\n [ 1. ],\n [ 1. ],\n [ 0.5]]), array([[ 0.5],\n [ 0.5],\n [ 1. ],\n [ 2. ],\n [ 1.5],\n [ 1. ]]))],\n outliers=array([ True, False, False, True]))\n\n References:\n\n Sun, Y., & Genton, M. G. (2011). Functional Boxplots. 
Journal of\n Computational and Graphical Statistics, 20(2), 316-334.\n https://doi.org/10.1198/jcgs.2011.09224\n\n\n \"\"\"\n\n def __init__(self, fdatagrid, depth_method=ModifiedBandDepth(), prob=[0.5],\n factor=1.5):\n \"\"\"Initialization of the Boxplot class.\n\n Args:\n fdatagrid (FDataGrid): Object containing the data.\n depth_method (:ref:`depth measure <depth-measures>`, optional):\n Method used to order the data. Defaults to :func:`modified\n band depth\n <skfda.exploratory.depth.ModifiedBandDepth>`.\n prob (list of float, optional): List with float numbers (in the\n range from 1 to 0) that indicate which central regions to\n represent.\n Defaults to [0.5] which represents the 50% central region.\n factor (double): Number used to calculate the outlying envelope.\n\n \"\"\"\n FDataBoxplot.__init__(self, factor)\n\n if fdatagrid.dim_domain != 1:\n raise ValueError(\n \"Function only supports FDataGrid with domain dimension 1.\")\n\n if sorted(prob, reverse=True) != prob:\n raise ValueError(\n \"Probabilities required to be in descending order.\")\n\n if min(prob) < 0 or max(prob) > 1:\n raise ValueError(\"Probabilities must be between 0 and 1.\")\n\n self._envelopes = [None] * len(prob)\n\n depth = depth_method(fdatagrid)\n indices_descending_depth = (-depth).argsort(axis=0)\n\n # The median is the deepest curve\n self._median = fdatagrid[indices_descending_depth[0]\n ].data_matrix[0, ...]\n\n # Central region and envelope must be computed for outlier detection\n central_region = _envelopes._compute_region(\n fdatagrid, indices_descending_depth, 0.5)\n self._central_envelope = _envelopes._compute_envelope(central_region)\n\n # Non-outlying envelope\n non_outlying_threshold = _envelopes._non_outlying_threshold(\n self._central_envelope, factor)\n predicted_outliers = _envelopes._predict_outliers(\n fdatagrid, non_outlying_threshold)\n inliers = fdatagrid[predicted_outliers == 0]\n self._non_outlying_envelope = _envelopes._compute_envelope(inliers)\n\n # 
Outliers\n self._outliers = _envelopes._predict_outliers(\n fdatagrid, self._non_outlying_envelope)\n\n for i, p in enumerate(prob):\n region = _envelopes._compute_region(\n fdatagrid, indices_descending_depth, p)\n self._envelopes[i] = _envelopes._compute_envelope(region)\n\n self._fdatagrid = fdatagrid\n self._prob = prob\n self._colormap = plt.cm.get_cmap('RdPu')\n self.barcol = \"blue\"\n self.outliercol = \"red\"\n self.mediancol = \"black\"\n self._show_full_outliers = False\n\n @property\n def fdatagrid(self):\n return self._fdatagrid\n\n @property\n def median(self):\n return self._median\n\n @property\n def central_envelope(self):\n return self._central_envelope\n\n @property\n def non_outlying_envelope(self):\n return self._non_outlying_envelope\n\n @property\n def envelopes(self):\n return self._envelopes\n\n @property\n def outliers(self):\n return self._outliers\n\n @property\n def show_full_outliers(self):\n return self._show_full_outliers\n\n @show_full_outliers.setter\n def show_full_outliers(self, boolean):\n if not isinstance(boolean, bool):\n raise ValueError(\"show_full_outliers must be boolean type\")\n self._show_full_outliers = boolean\n\n def plot(self, chart=None, *, fig=None, axes=None,\n n_rows=None, n_cols=None):\n \"\"\"Visualization of the functional boxplot of the fdatagrid\n (dim_domain=1).\n\n Args:\n fig (figure object, optional): figure over with the graphs are\n plotted in case ax is not specified. If None and ax is also\n None, the figure is initialized.\n axes (list of axis objects, optional): axis over where the graphs\n are plotted. If None, see param fig.\n n_rows(int, optional): designates the number of rows of the figure\n to plot the different dimensions of the image. Only specified\n if fig and ax are None.\n n_cols(int, optional): designates the number of columns of the\n figure to plot the different dimensions of the image. 
Only\n specified if fig and ax are None.\n\n Returns:\n fig (figure): figure object in which the graphs are plotted.\n\n \"\"\"\n\n fig, axes = _get_figure_and_axes(chart, fig, axes)\n fig, axes = _set_figure_layout_for_fdata(\n self.fdatagrid, fig, axes, n_rows, n_cols)\n tones = np.linspace(0.1, 1.0, len(self._prob) + 1, endpoint=False)[1:]\n color = self.colormap(tones)\n\n if self.show_full_outliers:\n var_zorder = 1\n else:\n var_zorder = 4\n\n outliers = self.fdatagrid[self.outliers]\n\n for m in range(self.fdatagrid.dim_codomain):\n\n # Outliers\n for o in outliers:\n axes[m].plot(o.grid_points[0],\n o.data_matrix[0, :, m],\n color=self.outliercol,\n linestyle='--', zorder=1)\n\n for i in range(len(self._prob)):\n # central regions\n axes[m].fill_between(self.fdatagrid.grid_points[0],\n self.envelopes[i][0][..., m],\n self.envelopes[i][1][..., m],\n facecolor=color[i], zorder=var_zorder)\n\n # outlying envelope\n axes[m].plot(self.fdatagrid.grid_points[0],\n self.non_outlying_envelope[0][..., m],\n self.fdatagrid.grid_points[0],\n self.non_outlying_envelope[1][..., m],\n color=self.barcol, zorder=4)\n\n # central envelope\n axes[m].plot(self.fdatagrid.grid_points[0],\n self.central_envelope[0][..., m],\n self.fdatagrid.grid_points[0],\n self.central_envelope[1][..., m],\n color=self.barcol, zorder=4)\n\n # vertical lines\n index = math.ceil(self.fdatagrid.ncol / 2)\n x = self.fdatagrid.grid_points[0][index]\n axes[m].plot([x, x],\n [self.non_outlying_envelope[0][..., m][index],\n self.central_envelope[0][..., m][index]],\n color=self.barcol,\n zorder=4)\n axes[m].plot([x, x],\n [self.non_outlying_envelope[1][..., m][index],\n self.central_envelope[1][..., m][index]],\n color=self.barcol, zorder=4)\n\n # median sample\n axes[m].plot(self.fdatagrid.grid_points[0], self.median[..., m],\n color=self.mediancol, zorder=5)\n\n _set_labels(self.fdatagrid, fig, axes)\n\n return fig\n\n def __repr__(self):\n \"\"\"Return repr(self).\"\"\"\n return (f\"Boxplot(\"\n 
f\"\\nFDataGrid={repr(self.fdatagrid)},\"\n f\"\\nmedian={repr(self.median)},\"\n f\"\\ncentral envelope={repr(self.central_envelope)},\"\n f\"\\nnon-outlying envelope={repr(self.non_outlying_envelope)},\"\n f\"\\nenvelopes={repr(self.envelopes)},\"\n f\"\\noutliers={repr(self.outliers)})\").replace('\\n', '\\n ')\n\n\nclass SurfaceBoxplot(FDataBoxplot):\n r\"\"\"Representation of the surface boxplot.\n\n Class implementing the surface boxplot. Analogously to the functional\n boxplot, it is an informative exploratory tool for visualizing functional\n data with domain dimension 2. Nevertheless, it does not implement the\n enhanced surface boxplot.\n\n Based on the center outward ordering induced by a\n :ref:`depth measure <depth-measures>`\n for functional data, it represents the envelope of the\n 50% central region, the median curve, and the maximum non-outlying\n envelope.\n\n Args:\n\n fdatagrid (FDataGrid): Object containing the data.\n method (:ref:`depth measure <depth-measures>`, optional): Method\n used to order the data. 
Defaults to :class:`modified band depth\n <skfda.exploratory.depth.ModifiedBandDepth>`.\n prob (list of float, optional): List with float numbers (in the\n range from 1 to 0) that indicate which central regions to\n represent.\n Defaults to [0.5] which represents the 50% central region.\n factor (double): Number used to calculate the outlying envelope.\n\n Attributes:\n\n fdatagrid (FDataGrid): Object containing the data.\n median (array, (fdatagrid.dim_codomain, lx, ly)): contains\n the median/s.\n central_envelope (array, (fdatagrid.dim_codomain, 2, lx, ly)):\n contains the central envelope/s.\n non_outlying_envelope (array,(fdatagrid.dim_codomain, 2, lx, ly)):\n contains the non-outlying envelope/s.\n colormap (matplotlib.colors.LinearSegmentedColormap): Colormap from\n which the colors to represent the central regions are selected.\n boxcol (string): Color of the box, which includes median and central\n envelope.\n outcol (string): Color of the outlying envelope.\n\n Examples:\n\n Function :math:`f : \\mathbb{R^2}\\longmapsto\\mathbb{R}`.\n\n >>> from skfda import FDataGrid\n >>> data_matrix = [[[[1], [0.7], [1]],\n ... [[4], [0.4], [5]]],\n ... [[[2], [0.5], [2]],\n ... [[3], [0.6], [3]]]]\n >>> grid_points = [[2, 4], [3, 6, 8]]\n >>> fd = FDataGrid(data_matrix, grid_points, dataset_name=\"dataset\",\n ... argument_names=[\"x1_label\", \"x2_label\"],\n ... coordinate_names=[\"y_label\"])\n >>> SurfaceBoxplot(fd)\n SurfaceBoxplot(\n FDataGrid=FDataGrid(\n array([[[[ 1. ],\n [ 0.7],\n [ 1. ]],\n [[ 4. ],\n [ 0.4],\n [ 5. ]]],\n [[[ 2. ],\n [ 0.5],\n [ 2. ]],\n [[ 3. ],\n [ 0.6],\n [ 3. ]]]]),\n grid_points=(array([ 2., 4.]), array([ 3., 6., 8.])),\n domain_range=((2.0, 4.0), (3.0, 8.0)),\n dataset_name='dataset',\n argument_names=('x1_label', 'x2_label'),\n coordinate_names=('y_label',),\n extrapolation=None,\n ...),\n median=array([[[ 1. ],\n [ 0.7],\n [ 1. ]],\n [[ 4. ],\n [ 0.4],\n [ 5. ]]]),\n central envelope=(array([[[ 1. ],\n [ 0.7],\n [ 1. ]],\n [[ 4. 
],\n [ 0.4],\n [ 5. ]]]),\n array([[[ 1. ],\n [ 0.7],\n [ 1. ]],\n [[ 4. ],\n [ 0.4],\n [ 5. ]]])),\n outlying envelope=(array([[[ 1. ],\n [ 0.7],\n [ 1. ]],\n [[ 4. ],\n [ 0.4],\n [ 5. ]]]),\n array([[[ 1. ],\n [ 0.7],\n [ 1. ]],\n [[ 4. ],\n [ 0.4],\n [ 5. ]]])))\n\n References:\n\n Sun, Y., & Genton, M. G. (2011). Functional Boxplots. Journal of\n Computational and Graphical Statistics, 20(2), 316-334.\n https://doi.org/10.1198/jcgs.2011.09224\n\n \"\"\"\n\n def __init__(self, fdatagrid, method=ModifiedBandDepth(), factor=1.5):\n \"\"\"Initialization of the functional boxplot.\n\n Args:\n fdatagrid (FDataGrid): Object containing the data.\n method (:ref:`depth measure <depth-measures>`, optional): Method\n used to order the data. Defaults to :class:`modified band depth\n <skfda.exploratory.depth.ModifiedBandDepth>`.\n prob (list of float, optional): List with float numbers (in the\n range from 1 to 0) that indicate which central regions to\n represent.\n Defaults to [0.5] which represents the 50% central region.\n factor (double): Number used to calculate the outlying envelope.\n\n \"\"\"\n FDataBoxplot.__init__(self, factor)\n\n if fdatagrid.dim_domain != 2:\n raise ValueError(\n \"Class only supports FDataGrid with domain dimension 2.\")\n\n depth = method(fdatagrid)\n indices_descending_depth = (-depth).argsort(axis=0)\n\n # The mean is the deepest curve\n self._median = fdatagrid.data_matrix[indices_descending_depth[0]]\n\n # Central region and envelope must be computed for outlier detection\n central_region = _envelopes._compute_region(\n fdatagrid, indices_descending_depth, 0.5)\n self._central_envelope = _envelopes._compute_envelope(central_region)\n\n # Non-outlying envelope\n non_outlying_threshold = _envelopes._non_outlying_threshold(\n self._central_envelope, factor)\n predicted_outliers = _envelopes._predict_outliers(\n fdatagrid, non_outlying_threshold)\n inliers = fdatagrid[predicted_outliers == 0]\n self._non_outlying_envelope = 
_envelopes._compute_envelope(inliers)\n\n self._fdatagrid = fdatagrid\n self.colormap = plt.cm.get_cmap('Greys')\n self._boxcol = 1.0\n self._outcol = 0.7\n\n @property\n def fdatagrid(self):\n return self._fdatagrid\n\n @property\n def median(self):\n return self._median\n\n @property\n def central_envelope(self):\n return self._central_envelope\n\n @property\n def non_outlying_envelope(self):\n return self._non_outlying_envelope\n\n @property\n def boxcol(self):\n return self._boxcol\n\n @boxcol.setter\n def boxcol(self, value):\n if value < 0 or value > 1:\n raise ValueError(\n \"boxcol must be a number between 0 and 1.\")\n\n self._boxcol = value\n\n @property\n def outcol(self):\n return self._outcol\n\n @outcol.setter\n def outcol(self, value):\n if value < 0 or value > 1:\n raise ValueError(\n \"outcol must be a number between 0 and 1.\")\n self._outcol = value\n\n def plot(self, chart=None, *, fig=None, axes=None,\n n_rows=None, n_cols=None):\n \"\"\"Visualization of the surface boxplot of the fdatagrid (dim_domain=2).\n\n Args:\n fig (figure object, optional): figure over with the graphs are\n plotted in case ax is not specified. If None and ax is also\n None, the figure is initialized.\n axes (list of axis objects, optional): axis over where the graphs\n are plotted. If None, see param fig.\n n_rows(int, optional): designates the number of rows of the figure\n to plot the different dimensions of the image. Only specified\n if fig and ax are None.\n n_cols(int, optional): designates the number of columns of the\n figure to plot the different dimensions of the image. 
Only\n specified if fig and ax are None.\n\n Returns:\n fig (figure): figure object in which the graphs are plotted.\n\n \"\"\"\n fig, axes = _get_figure_and_axes(chart, fig, axes)\n fig, axes = _set_figure_layout_for_fdata(\n self.fdatagrid, fig, axes, n_rows, n_cols)\n\n x = self.fdatagrid.grid_points[0]\n lx = len(x)\n y = self.fdatagrid.grid_points[1]\n ly = len(y)\n X, Y = np.meshgrid(x, y)\n\n for m in range(self.fdatagrid.dim_codomain):\n\n # mean sample\n axes[m].plot_wireframe(X, Y, np.squeeze(self.median[..., m]).T,\n rstride=ly, cstride=lx,\n color=self.colormap(self.boxcol))\n axes[m].plot_surface(X, Y, np.squeeze(self.median[..., m]).T,\n color=self.colormap(self.boxcol), alpha=0.8)\n\n # central envelope\n axes[m].plot_surface(\n X, Y, np.squeeze(self.central_envelope[0][..., m]).T,\n color=self.colormap(self.boxcol), alpha=0.5)\n axes[m].plot_wireframe(\n X, Y, np.squeeze(self.central_envelope[0][..., m]).T,\n rstride=ly, cstride=lx,\n color=self.colormap(self.boxcol))\n axes[m].plot_surface(\n X, Y, np.squeeze(self.central_envelope[1][..., m]).T,\n color=self.colormap(self.boxcol), alpha=0.5)\n axes[m].plot_wireframe(\n X, Y, np.squeeze(self.central_envelope[1][..., m]).T,\n rstride=ly, cstride=lx,\n color=self.colormap(self.boxcol))\n\n # box vertical lines\n for indices in [(0, 0), (0, ly - 1), (lx - 1, 0),\n (lx - 1, ly - 1)]:\n x_corner = x[indices[0]]\n y_corner = y[indices[1]]\n axes[m].plot(\n [x_corner, x_corner], [y_corner, y_corner],\n [\n self.central_envelope[1][..., m][indices[0],\n indices[1]],\n self.central_envelope[0][..., m][indices[0],\n indices[1]]],\n color=self.colormap(self.boxcol))\n\n # outlying envelope\n axes[m].plot_surface(\n X, Y,\n np.squeeze(self.non_outlying_envelope[0][..., m]).T,\n color=self.colormap(self.outcol), alpha=0.3)\n axes[m].plot_wireframe(\n X, Y,\n np.squeeze(self.non_outlying_envelope[0][..., m]).T,\n rstride=ly, cstride=lx,\n color=self.colormap(self.outcol))\n axes[m].plot_surface(\n X, Y,\n 
np.squeeze(self.non_outlying_envelope[1][..., m]).T,\n color=self.colormap(self.outcol), alpha=0.3)\n axes[m].plot_wireframe(\n X, Y,\n np.squeeze(self.non_outlying_envelope[1][..., m]).T,\n rstride=ly, cstride=lx,\n color=self.colormap(self.outcol))\n\n # vertical lines from central to outlying envelope\n x_index = math.floor(lx / 2)\n x_central = x[x_index]\n y_index = math.floor(ly / 2)\n y_central = y[y_index]\n axes[m].plot(\n [x_central, x_central], [y_central, y_central],\n [self.non_outlying_envelope[1][..., m][x_index, y_index],\n self.central_envelope[1][..., m][x_index, y_index]],\n color=self.colormap(self.boxcol))\n axes[m].plot(\n [x_central, x_central], [y_central, y_central],\n [self.non_outlying_envelope[0][..., m][x_index, y_index],\n self.central_envelope[0][..., m][x_index, y_index]],\n color=self.colormap(self.boxcol))\n\n _set_labels(self.fdatagrid, fig, axes)\n\n return fig\n\n def __repr__(self):\n \"\"\"Return repr(self).\"\"\"\n return ((f\"SurfaceBoxplot(\"\n f\"\\nFDataGrid={repr(self.fdatagrid)},\"\n f\"\\nmedian={repr(self.median)},\"\n f\"\\ncentral envelope={repr(self.central_envelope)},\"\n f\"\\noutlying envelope={repr(self.non_outlying_envelope)})\")\n .replace('\\n', '\\n '))\n" ]
[ [ "numpy.atleast_2d", "numpy.array", "numpy.identity" ], [ "numpy.asarray", "numpy.full" ], [ "scipy.interpolate.UnivariateSpline", "scipy.interpolate.RectBivariateSpline", "scipy.interpolate.RegularGridInterpolator", "numpy.apply_along_axis", "scipy.interpolate.PchipInterpolator", "numpy.zeros_like", "numpy.isscalar", "numpy.empty" ], [ "numpy.arange", "matplotlib.pyplot.cm.get_cmap" ], [ "numpy.square", "sklearn.utils.validation.check_is_fitted", "numpy.sqrt", "numpy.abs", "numpy.ascontiguousarray", "numpy.cos", "numpy.arccos", "numpy.sin", "numpy.sign", "numpy.argmin", "numpy.empty" ], [ "numpy.take", "numpy.atleast_2d", "numpy.moveaxis", "numpy.argsort", "numpy.zeros" ], [ "numpy.squeeze", "numpy.meshgrid", "matplotlib.pyplot.cm.get_cmap", "matplotlib.pyplot.close" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.14", "1.6", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SeolhwaLee/DialoGPT
[ "45220d493e8d267d703a7abca0497753cc4cda6c" ]
[ "interact_dbdc.py" ]
[ "import json\nfrom os.path import abspath, dirname, exists, join\nimport argparse\nimport logging\nfrom tqdm import trange\nimport tqdm\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport socket\nimport os, sys\nimport re\nimport logging\nfrom functools import partial\nfrom demo_utils import download_model_folder\nimport argparse\nimport subprocess as sp\nimport time\n\nfrom pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config\nfrom gpt2_training.train_utils import get_eval_list_same_length, load_model, boolean_string, fix_state_dict_namespace\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nEOS_ID = 50256\n\n\ndef cut_seq_to_eos(sentence, remove_id=[-1]):\n sent = []\n for s in sentence:\n if s in remove_id:\n continue\n if s != EOS_ID:\n sent.append(s)\n else:\n break\n return sent\n\n\n### FROM HUGGING FACE REPO\ndef top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):\n \"\"\" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering\n Args:\n logits: logits distribution shape (vocabulary size)\n top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.\n top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset\n whose total probability mass is greater than or equal to the threshold top_p.\n In practice, we select the highest probability tokens whose cumulative probability mass exceeds\n the threshold top_p.\n threshold: a minimal threshold to keep logits\n \"\"\"\n assert logits.dim() == 1 # Only work for batch size 1 for now - could update but it would obfuscate a bit the code\n top_k = min(top_k, logits.size(-1))\n if top_k > 0:\n # Remove all tokens with a probability less than the last token in the top-k tokens\n indices_to_remove = logits < 
torch.topk(logits, top_k)[0][..., -1, None]\n logits[indices_to_remove] = filter_value\n\n if top_p > 0.0:\n # Compute cumulative probabilities of sorted tokens\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold\n sorted_indices_to_remove = cumulative_probabilities > top_p\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n\n # Back to unsorted indices and set them to -infinity\n indices_to_remove = sorted_indices[sorted_indices_to_remove]\n logits[indices_to_remove] = filter_value\n\n indices_to_remove = logits < threshold\n logits[indices_to_remove] = filter_value\n return logits\n\n\ndef generate_next_token(model, input_ids, position_ids=None, token_type_ids=None, prev=None, temperature=1, top_k=0,\n top_p=0, past=None):\n with torch.no_grad():\n if not past:\n hidden_states, past = model.transformer(prev, position_ids, token_type_ids, past=past)\n else:\n hidden_states, past = model.transformer(prev, past=past)\n logits = model.lm_head(hidden_states)\n logits = logits[0, -1, :] / temperature\n logits = top_filtering(logits, top_k=top_k, top_p=top_p)\n probs = F.softmax(logits.unsqueeze(0), dim=-1)\n prev = torch.multinomial(probs, num_samples=1)\n return prev, probs[0][prev], past\n\n\ndef generate_sequence(model, input_ids, position_ids=None, token_type_ids=None, temperature=1, top_k=0, top_p=0,\n length=20, past=None, device='cuda'):\n output = input_ids.new_zeros([input_ids.size(0), 0])\n prev = input_ids\n for i in range(length):\n prev, probs, past = generate_next_token(model, input_ids, position_ids, token_type_ids, prev, temperature,\n top_k, top_p, past)\n output = torch.cat((output, prev), dim=1)\n return output\n\n\ndef 
run_model():\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_name_or_path', type=str, default='',\n help='pretrained model name or path to local checkpoint')\n parser.add_argument(\"--seed\", type=int, default=42)\n parser.add_argument(\"--load_checkpoint\", '-c', type=str, default='')\n parser.add_argument(\"--fp16\", type=boolean_string, default=False)\n parser.add_argument(\"--max_seq_length\", type=int, default=128)\n\n parser.add_argument(\"--generation_length\", type=int, default=20)\n parser.add_argument(\"--max_history\", type=int, default=2)\n parser.add_argument(\"--chateval_multi\", type=boolean_string, default=False)\n\n parser.add_argument(\"--temperature\", type=float, default=1)\n parser.add_argument(\"--top_k\", type=int, default=0)\n parser.add_argument(\"--top_p\", type=float, default=0.9)\n\n parser.add_argument('--use_gpu', action='store_true')\n parser.add_argument(\"--gpu\", type=int, default=0)\n\n parser.add_argument(\"--conversation\", type=boolean_string, default=True,\n help='This is for the interactive conversation or save the history for the script')\n parser.add_argument(\"--eval_input\", type=str, default='', help='evaluation data input path')\n parser.add_argument(\"--eval_output\", type=str, default='', help='evaluation data output path')\n\n args = parser.parse_args()\n os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() and args.use_gpu else \"cpu\")\n n_gpu = torch.cuda.device_count()\n args.device, args.n_gpu = device, n_gpu\n\n np.random.seed(args.seed)\n torch.random.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n\n #### load the GPT-2 model\n config = GPT2Config.from_json_file(os.path.join(args.model_name_or_path, './config.json'))\n enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)\n model = load_model(GPT2LMHeadModel(config), args.load_checkpoint, args, verbose=True)\n model.to(device)\n model.eval()\n\n history = []\n 
'''This is for the interactive conversation'''\n if args.conversation:\n while True:\n raw_text = input(\"USR >>> \")\n while not raw_text:\n print('Prompt should not be empty!')\n raw_text = input(\"USR >>> \")\n history.append(raw_text)\n context_tokens = sum([enc.encode(h) + [EOS_ID] for h in history], []) # + [EOS_ID]\n context_tokens = torch.tensor(context_tokens, device=device, dtype=torch.long).unsqueeze(0)\n position_ids = torch.arange(0, context_tokens.size(-1), dtype=torch.long, device=context_tokens.device)\n\n out = generate_sequence(model, context_tokens, position_ids=position_ids,\n length=args.generation_length, temperature=args.temperature,\n top_k=args.top_k, top_p=args.top_p)\n\n out = out.tolist()\n text = enc.decode(cut_seq_to_eos(out[0])).encode('ascii', 'ignore').decode('ascii')\n print(\"SYS >>> \", text)\n history.append(text)\n history = history[-(2 * args.max_history + 1):]\n\n\n else:\n script_input_path = str(args.eval_input)\n script_file = open(script_input_path, 'r', encoding='utf-8')\n\n script_out_path = str(args.eval_output)\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\n file_name = str(args.eval_input).split('/')[-1].split('.')[0]\n script_response = open(script_out_path + '/' + file_name + '_dialoGPT_medium_' + timestr + '.txt', 'w')\n for raw_text in script_file:\n\n print(\"input:\", raw_text)\n if args.chateval_multi == False:\n history.append(raw_text.replace('\\n', ''))\n elif args.chateval_multi == True:\n # if args.max_history == 2:\n # turn1 = raw_text.split('</s>')[0].lstrip().replace('’', '')\n # turn2 = raw_text.split('</s>')[1].lstrip().replace('’', '')\n # history.append(turn1.replace('\\n', ''))\n # history.append(turn2.replace('\\n', ''))\n # elif args.max_history == 3:\n # turn1 = raw_text.split('</s>')[0].lstrip().replace('’', '')\n # turn2 = raw_text.split('</s>')[1].lstrip().replace('’', '')\n # turn3 = raw_text.split('</s>')[2].lstrip().replace('’', '')\n # history.append(turn1.replace('\\n', ''))\n # 
history.append(turn2.replace('\\n', ''))\n # history.append(turn3.replace('\\n', ''))\n if len(raw_text) > 1:\n # n_turn = raw_text.split()[0]\n utter = raw_text.split()[1:]\n input_utter = ' '.join(utter)\n print(\"utter\", input_utter)\n script_response.write(\"%s\" % (raw_text))\n history.append(input_utter)\n continue\n # elif len(raw_text) < 1:\n # n_turn = 0 # turn count reset\n # history = [] # history reset\n # continue\n\n\n\n context_tokens = sum([enc.encode(h) + [EOS_ID] for h in history], []) # + [EOS_ID]\n context_tokens = torch.tensor(context_tokens, device=device, dtype=torch.long).unsqueeze(0)\n position_ids = torch.arange(0, context_tokens.size(-1), dtype=torch.long, device=context_tokens.device)\n\n out = generate_sequence(model, context_tokens, position_ids=position_ids,\n length=args.generation_length, temperature=args.temperature,\n top_k=args.top_k, top_p=args.top_p)\n\n out = out.tolist()\n text = enc.decode(cut_seq_to_eos(out[0])).encode('ascii', 'ignore').decode('ascii')\n print(\"SYS >>> \", text)\n\n # history = history[-(2 * args.max_history + 1):]\n # print(history)\n print(\"history test:\", history)\n # script_response.write(\"%s\\n\" % (raw_text))\n script_response.write(\"[ground]\\t%s\\n\" % (text))\n script_response.write(\"\\n\")\n if args.chateval_multi == True:\n history = []\n else:\n history.append(text)\n history = history[-(2 * args.max_history + 1):]\n script_response.close()\n print(\"script response complete!\")\n\n\nif __name__ == '__main__':\n\n PYTHON_EXE = 'python'\n MODEL_FOLDER = './models'\n DATA_FOLDER = './data'\n\n logging.basicConfig(\n format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO\n )\n logger = logging.getLogger(__name__)\n\n if os.path.exists(MODEL_FOLDER):\n print('Found existing ./models folder, skip creating a new one!')\n os.makedirs(MODEL_FOLDER, exist_ok=True)\n else:\n os.makedirs(MODEL_FOLDER)\n\n 
#########################################################################\n # Download Model\n #########################################################################\n logger.info('Downloading models...')\n download_model = partial(download_model_folder, DATA_FOLDER=MODEL_FOLDER)\n\n # model size: could be one of 'small' (GPT2 with 117M), 'medium'(345M) or 'large' (1542M)\n # dataset: one of 'multiref' or 'dstc'\n # from_scratch: True : load model trained from scratch or False: load model trained from fine-tuning the GPT-2\n target_folder = download_model(model_size='medium', dataset='multiref', from_scratch=False)\n logger.info('Done!\\n')\n\n run_model()\n\n\n" ]
[ [ "torch.nn.functional.softmax", "torch.cuda.manual_seed", "numpy.random.seed", "torch.cat", "torch.random.manual_seed", "torch.multinomial", "torch.tensor", "torch.no_grad", "torch.sort", "torch.cuda.is_available", "torch.topk", "torch.cuda.device_count" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
uashogeschoolutrecht/bbmd
[ "40a5beb0554df00b512e672bf5be8297d0523b9b" ]
[ "bbmd/bmr/base.py" ]
[ "import re\n\nimport numpy as np\nimport scipy.special as special\nfrom scipy import stats\n\nfrom ..utils import get1Dkernel, get_summary_stats\n\n\nclass BMRBase(object):\n\n def clear_results(self):\n self.results = None\n self.model_average = None\n\n def _set_priors(self, priors):\n if priors is None:\n priors = np.repeat(1, len(self.session.models))\n else:\n if len(priors) != len(self.session.models):\n raise ValueError('Unknown number of priors')\n\n priors = np.array(priors, dtype=np.float64)\n priors = priors / priors.sum()\n self.priors = priors\n\n def validate_inputs(self):\n # check and set priors\n self._set_priors(self._priors)\n domains = self.session.get_bmr_adversity_value_domains()\n\n # check bmr\n if hasattr(self, 'bmr'):\n domain = domains['bmr_domain']\n if not domain[0] < self.bmr < domain[1]:\n raise ValueError(\n 'BMR not in allowable domain: {} ({} - {})'\n .format(self.bmr, domain[0], domain[1]))\n\n # check adversity value\n if hasattr(self, 'adversity_value'):\n domain = self.get_adversity_domain(domains)\n if not domain[0] < self.adversity_value < domain[1]:\n raise ValueError(\n 'Adversity value not in allowable domain: {} ({} - {})'\n .format(self.adversity_value, domain[0], domain[1]))\n\n def calculate(self, session):\n self.session = session\n self.validate_inputs()\n self.results = [\n self.calculate_for_model(model)\n for model in self.session.models\n ]\n self.model_average = self.calc_model_average()\n\n def get_adversity_domain(self, domains):\n raise NotImplementedError('Abstract method')\n\n def get_bmr_vector(self, model):\n raise NotImplementedError('Abstract method')\n\n NAME_REGEX = re.compile(r'([a-z])([A-Z])')\n\n def __str__(self):\n if hasattr(self, 'name'):\n return self.name\n params = []\n if hasattr(self, 'bmr'):\n params.append('bmr={}'.format(self.bmr))\n if hasattr(self, 'adversity_value'):\n params.append('{}={}'.format(\n self.ADVERSITY_VERBOSE_NAME, self.adversity_value))\n name = 
re.sub(self.NAME_REGEX, '\\g<1> \\g<2>', self.__class__.__name__)\n return '{} ({})'.format(name, ', '.join(params))\n\n def calculate_for_model(self, model):\n bmd_vector = self.get_bmr_vector(model) * model.get_max_dose()\n n_total = bmd_vector.size\n non_nans = bmd_vector[~np.isnan(bmd_vector)]\n n_non_nans = non_nans.size\n return {\n 'n_total': n_total,\n 'n_non_nans': n_non_nans,\n 'bmd': non_nans,\n 'kernel': get1Dkernel(non_nans, steps=100j),\n 'stats': get_summary_stats(bmd_vector),\n }\n\n def get_model_posterior_weights(self, models):\n # same method as session._calc_model_weights except we in include\n # a prior vector; the session assumes an equal prior for all models\n # but here we drop that assumption in a case a user wants to remove\n # a model.\n matrix = np.empty((\n len(models),\n models[0].model_weight_vector.size\n ), dtype=np.float64)\n priors = self.priors\n\n # build inputs\n for i, model in enumerate(models):\n matrix[i, :] = model.model_weights\n\n matrix = np.exp(matrix - matrix.min(axis=0))\n matrix = (matrix.T * priors).T\n weights = np.divide(matrix, matrix.sum(axis=0)).mean(axis=1)\n return weights\n\n def calc_model_average(self):\n model_posterior_weights = \\\n self.get_model_posterior_weights(self.session.models)\n\n vsize = min([d['n_non_nans'] for d in self.results])\n\n bmd = np.empty(shape=(model_posterior_weights.size, vsize))\n\n for i in xrange(bmd.shape[0]):\n bmd[i, :] = self.results[i]['bmd'][:vsize] * \\\n model_posterior_weights[i]\n\n bmd = bmd.sum(axis=0)\n\n return dict(\n model_posterior_weights=model_posterior_weights,\n n_total=self.results[0]['n_total'],\n n_non_nans=vsize,\n bmd=bmd,\n kernel=get1Dkernel(bmd, steps=100j),\n stats=get_summary_stats(bmd),\n )\n\n @classmethod\n def get_related_models(cls, bmrs):\n # group together extra/added bmr models if possible\n related = []\n skip_index = None\n for i in range(len(bmrs) - 1):\n\n if i == skip_index:\n continue\n\n a = bmrs[i]\n b = bmrs[i + 1]\n cnames = 
[\n getattr(a, 'DUAL_TYPE', None),\n getattr(b, 'DUAL_TYPE', None)\n ]\n if a.__class__.__base__ == b.__class__.__base__ and \\\n a.DUAL and b.DUAL and \\\n 'Extra' in cnames and 'Added' in cnames and \\\n a.bmr == b.bmr:\n related.append([a, b])\n skip_index = i + 1\n else:\n related.append([a])\n\n return related\n\n\nclass DichotomousBase(BMRBase):\n\n DUAL = True\n\n def __init__(self, bmr, priors=None, **kwargs):\n self.bmr = bmr\n self._priors = priors\n self.name = kwargs.get('name', str(self))\n\n\nclass CentralTendencyBase(BMRBase):\n\n DUAL = False\n\n def __init__(self, adversity_value, priors=None, **kwargs):\n self.adversity_value = adversity_value\n self._priors = priors\n self.name = kwargs.get('name', str(self))\n\n\nclass HybridBase(BMRBase):\n\n DUAL = True\n\n def __init__(self, adversity_value, bmr, priors=None, **kwargs):\n self.adversity_value = adversity_value\n self.bmr = bmr\n self._priors = priors\n self.name = kwargs.get('name', str(self))\n\n def calc_bmd_quantile_hybrid(self, model, isExtra=True):\n # Adversity defined based on a quantile of the control, such as 99th\n # percentile of control\n sigma = model.parameters['sigma']\n cutoff_log = stats.norm.ppf(\n self.adversity_value,\n np.log(model.get_control_vector()),\n sigma\n )\n fn = self.quantile_at_bmd_extra if (isExtra) else \\\n self.quantile_at_bmd_added\n quantile = fn(model, self.adversity_value)\n mean_log = self.cutoff_quantile_to_mean(cutoff_log, quantile, sigma)\n return model.calc_central_tendency(mean_log)\n\n def calc_bmd_cutoff_hybrid(self, model, isExtra=True):\n # Adversity defined based on an absolute cutoff value\n sigma = model.parameters['sigma']\n log_cutoff = np.log(self.adversity_value)\n quantal_cutoff = stats.norm.cdf(\n log_cutoff,\n np.log(model.get_control_vector()),\n sigma\n )\n fn = self.quantile_at_bmd_extra if (isExtra) else \\\n self.quantile_at_bmd_added\n quantile = fn(model, quantal_cutoff)\n mean_log = self.cutoff_quantile_to_mean(log_cutoff, 
quantile, sigma)\n return model.calc_central_tendency(mean_log)\n\n def quantile_at_bmd_added(self, model, quantile_at_control):\n return quantile_at_control - self.bmr \\\n if (model.response_direction == 1) \\\n else quantile_at_control + self.bmr\n\n def quantile_at_bmd_extra(self, model, quantile_at_control):\n return (1. - self.bmr) * quantile_at_control \\\n if (model.response_direction == 1) \\\n else self.bmr + (1. - self.bmr) * quantile_at_control\n\n def cutoff_quantile_to_mean(self, cutoff, quantile, sigma):\n # Calculate mean value (on the log-scale) using cutoff and quantile,\n # output is the median value (on regular scale) of DR model\n return np.exp(cutoff - sigma * np.sqrt(2.) *\n special.erfinv(2. * quantile - 1.))\n" ]
[ [ "numpy.log", "numpy.sqrt", "scipy.special.erfinv", "numpy.isnan", "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "1.4", "0.16", "0.19", "0.18", "1.2", "0.12", "1.0", "0.17", "1.3" ], "tensorflow": [] } ]
lamfeeling/Stein-Density-Ratio-Estimation
[ "f3b8a3975d99ace5875e603414e0e6d989ceb1d6" ]
[ "examples/nn_MNIST/demo_NN_KSD.py" ]
[ "from sdre.helper import *\r\nfrom scipy.optimize import minimize, Bounds, NonlinearConstraint\r\nfrom multiprocessing import Pool\r\nfrom socket import gethostname\r\nfrom time import time\r\nfrom sdre.trainnn import lastlayer\r\nfrom kgof.kernel import KGauss\r\nimport kgof.util as util\r\nfrom scipy import io as sio\r\n\r\noptparam = load('optpara.npy',allow_pickle=True)\r\nd = 784\r\nn = 100\r\ndimTheta = 20\r\n\r\nf = lambda X: lastlayer(optparam,X.T).T\r\n\r\ndef logpBar(x, theta):\r\n theta = vstack([theta])\r\n return dot(theta.T,f(x))\r\n\r\ngLogP = grad(logpBar)\r\n\r\ndef KSD(XData, k1, k2, k3, k4, theta):\r\n g = gLogP(XData, theta)\r\n\r\n t2 = zeros([n,n])\r\n for i in range(d):\r\n t2 = t2 + k2[i]*g[i,:]\r\n \r\n t3 = zeros([n,n])\r\n for i in range(d):\r\n t3 = t3 + (k3[i].T*g[i,:]).T\r\n\r\n return sum((dot(g.T,g)*k1 + t2 + t3 + k4).flatten())/n**2\r\n\r\ndef callbackF(Xi):\r\n global Nfeval\r\n print(\"iter {0:4d}, theta norm: {1:5.4f}\".format(Nfeval, linalg.norm(Xi)))\r\n Nfeval += 1\r\n\r\ndef infer(digit, XData):\r\n random.seed(1); print('digit:', digit)\r\n n0 = XData.shape[1]\r\n idx = random.permutation(n0)\r\n XData = XData[:,idx[:n]]\r\n\r\n sig2 = util.meddistance(XData.T, subsample=1000)**2\r\n\r\n kG = KGauss(sig2)\r\n k1 = kG.eval(XData.T,XData.T)\r\n k4 = kG.gradXY_sum(XData.T,XData.T)\r\n\r\n k2 = []\r\n for i in range(d):\r\n k2.append(kG.gradX_Y(XData.T, XData.T,i))\r\n \r\n k3 = []\r\n for i in range(d):\r\n k3.append(kG.gradY_X(XData.T, XData.T,i))\r\n\r\n obj = lambda theta:KSD(XData, k1, k2, k3, k4, array([theta]).T)\r\n grad_obj = grad(obj)\r\n hess_obj = jacobian(grad_obj)\r\n\r\n x0 = random.randn(dimTheta)\r\n t0 = time()\r\n res = minimize(obj, x0, jac=grad_obj, method='BFGS',callback=callbackF, \r\n options={'disp': True, 'maxiter': 10000})\r\n print('elapsed:', time() - t0)\r\n\r\n theta = res.x\r\n print('estimate', theta)\r\n print('\\noptimizaiton result', res.status)\r\n if res.status < 0:\r\n return -1\r\n\r\n 
sio.savemat('out/nn %s %d.mat' % (gethostname(), digit),\r\n {'theta': theta, 'status': res.status})\r\n return obj(res.x)\r\n\r\nif __name__ == '__main__':\r\n print(\"Loading training data...\")\r\n test_images, test_labels, train_images, train_labels = loadMNIST(False)\r\n test_imagesO, test_labelsO, train_imagesO, train_labelsO = loadMNIST(False)\r\n\r\n for digit in range(10):\r\n Nfeval = 1\r\n XData = []; \r\n for i in range(train_images.shape[0]):\r\n if train_labels[i,digit]==1:\r\n XData.append(train_images[i,:])\r\n XData = vstack(XData).T\r\n\r\n infer(digit,XData)\r\n\r\n theta = sio.loadmat('out/nn %s %d.mat' % (gethostname(), digit))['theta']\r\n XData = []\r\n for i in range(test_images.shape[0]):\r\n if test_labels[i,digit]==1:\r\n XData.append(test_images[i,:])\r\n XData = vstack(XData).T\r\n ll = logpBar(XData, theta.T)\r\n\r\n XData0 = []\r\n for i in range(test_imagesO.shape[0]):\r\n if test_labels[i,digit]==1:\r\n XData0.append(test_imagesO[i,:])\r\n XData0 = vstack(XData0).T\r\n sio.savemat('out/mnistD_KSD %d.mat' % digit,{'theta':theta,'ll':ll,'XData':XData,'XData0':XData0})\r\n" ]
[ [ "scipy.optimize.minimize", "scipy.io.savemat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
www2171668/alf
[ "6e3731fc559d3b4e6b5b9ed6251fff728a560d64", "6e3731fc559d3b4e6b5b9ed6251fff728a560d64", "6e3731fc559d3b4e6b5b9ed6251fff728a560d64", "6e3731fc559d3b4e6b5b9ed6251fff728a560d64", "6e3731fc559d3b4e6b5b9ed6251fff728a560d64", "6e3731fc559d3b4e6b5b9ed6251fff728a560d64" ]
[ "alf/algorithms/ppo_loss.py", "alf/algorithms/actor_critic_loss.py", "alf/experience_replayers/replay_buffer_test.py", "alf/optimizers/optimizers.py", "alf/examples/ppo_babyai.py", "alf/examples/tutorial/on_policy_interfaces_conf.py" ]
[ "# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Loss for PPO algorithm.\"\"\"\n\nimport torch\n\nimport alf\n\nfrom alf.algorithms.actor_critic_loss import ActorCriticLoss\nfrom alf.utils.losses import element_wise_squared_loss\nfrom alf.utils import value_ops\n\n\[email protected]\nclass PPOLoss(ActorCriticLoss):\n \"\"\"PPO loss.\"\"\"\n\n def __init__(self,\n gamma=0.99,\n td_error_loss_fn=element_wise_squared_loss,\n td_lambda=0.95,\n normalize_advantages=True,\n advantage_clip=None,\n entropy_regularization=None,\n td_loss_weight=1.0,\n importance_ratio_clipping=0.2,\n log_prob_clipping=0.0,\n check_numerics=False,\n debug_summaries=False,\n name='PPOLoss'):\n \"\"\"\n Implement the simplified surrogate loss in equation (9) of `Proximal\n Policy Optimization Algorithms <https://arxiv.org/abs/1707.06347>`_.\n\n The total loss equals to\n\n .. code-block:: python\n\n (policy_gradient_loss # (L^{CLIP} in equation (9))\n + td_loss_weight * td_loss # (L^{VF} in equation (9))\n - entropy_regularization * entropy)\n\n This loss works with ``PPOAlgorithm``. The advantages and returns are\n pre-computed by ``PPOAlgorithm.preprocess()``. One known difference with\n `baselines.ppo2` is that value estimation is not clipped here, while\n `baselines.ppo2` also clipped value if it deviates from returns too\n much.\n\n Args:\n gamma (float|list[float]): A discount factor for future rewards. 
For\n multi-dim reward, this can also be a list of discounts, each\n discount applies to a reward dim.\n td_errors_loss_fn (Callable): A function for computing the TD errors\n loss. This function takes as input the target and the estimated\n Q values and returns the loss for each element of the batch.\n td_lambda (float): Lambda parameter for TD-lambda computation.\n normalize_advantages (bool): If True, normalize advantage to zero\n mean and unit variance within batch for caculating policy\n gradient.\n advantage_clip (float): If set, clip advantages to :math:`[-x, x]`\n entropy_regularization (float): Coefficient for entropy\n regularization loss term.\n td_loss_weight (float): the weigt for the loss of td error.\n importance_ratio_clipping (float): Epsilon in clipped, surrogate\n PPO objective. See the cited paper for more detail.\n log_prob_clipping (float): If >0, clipping log probs to the range\n ``(-log_prob_clipping, log_prob_clipping)`` to prevent ``inf/NaN``\n values.\n check_numerics (bool): If true, checking for ``NaN/Inf`` values. 
For\n debugging only.\n name (str):\n \"\"\"\n\n super(PPOLoss, self).__init__(\n gamma=gamma,\n td_error_loss_fn=td_error_loss_fn,\n use_gae=True,\n td_lambda=td_lambda,\n use_td_lambda_return=True,\n normalize_advantages=normalize_advantages,\n advantage_clip=advantage_clip,\n entropy_regularization=entropy_regularization,\n td_loss_weight=td_loss_weight,\n debug_summaries=debug_summaries,\n name=name)\n\n self._importance_ratio_clipping = importance_ratio_clipping\n self._log_prob_clipping = log_prob_clipping\n self._check_numerics = check_numerics\n\n def _pg_loss(self, info, advantages):\n scope = alf.summary.scope(self._name)\n importance_ratio, importance_ratio_clipped = value_ops.action_importance_ratio(\n action_distribution=info.action_distribution,\n collect_action_distribution=info.rollout_action_distribution,\n action=info.action,\n clipping_mode='double_sided',\n scope=scope,\n importance_ratio_clipping=self._importance_ratio_clipping,\n log_prob_clipping=self._log_prob_clipping,\n check_numerics=self._check_numerics,\n debug_summaries=self._debug_summaries)\n # Pessimistically choose the maximum objective value for clipped and\n # unclipped importance ratios.\n pg_objective = -importance_ratio * advantages\n pg_objective_clipped = -importance_ratio_clipped * advantages\n policy_gradient_loss = torch.max(pg_objective, pg_objective_clipped)\n\n if self._debug_summaries and alf.summary.should_record_summaries():\n with scope:\n alf.summary.histogram('pg_objective', pg_objective)\n alf.summary.histogram('pg_objective_clipped',\n pg_objective_clipped)\n\n if self._check_numerics:\n assert torch.all(torch.isfinite(policy_gradient_loss))\n\n return policy_gradient_loss\n\n def _calc_returns_and_advantages(self, info, value):\n return info.returns, info.advantages\n", "# Copyright (c) 2019 Horizon Robotics. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import namedtuple\n\nimport torch\nimport numpy as np\n\nimport alf\nfrom alf.data_structures import LossInfo\nfrom alf.utils.losses import element_wise_squared_loss\nfrom alf.utils.summary_utils import safe_mean_hist_summary\nfrom alf.utils import tensor_utils, dist_utils, value_ops\nfrom .algorithm import Loss\n\nActorCriticLossInfo = namedtuple(\"ActorCriticLossInfo\",\n [\"pg_loss\", \"td_loss\", \"neg_entropy\"])\n\n\ndef _normalize_advantages(advantages, variance_epsilon=1e-8):\n # advantages is of shape [T, B] or [T, B, N], where N is reward dim\n # this function normalizes over all elements in the input advantages\n shape = advantages.shape\n # shape: [TB, 1] or [TB, N]\n advantages = advantages.reshape(np.prod(advantages.shape[:2]), -1)\n\n adv_mean = advantages.mean(0)\n adv_var = torch.var(advantages, dim=0, unbiased=False)\n\n normalized_advantages = (\n (advantages - adv_mean) / (torch.sqrt(adv_var) + variance_epsilon))\n return normalized_advantages.reshape(*shape)\n\n\[email protected]\nclass ActorCriticLoss(Loss):\n def __init__(self,\n gamma=0.99,\n td_error_loss_fn=element_wise_squared_loss,\n use_gae=False,\n td_lambda=0.95,\n use_td_lambda_return=True,\n normalize_advantages=False,\n advantage_clip=None,\n entropy_regularization=None,\n td_loss_weight=1.0,\n debug_summaries=False,\n name=\"ActorCriticLoss\"):\n \"\"\"An actor-critic loss equals to\n\n .. 
code-block:: python\n\n (policy_gradient_loss\n + td_loss_weight * td_loss\n - entropy_regularization * entropy)\n\n Args:\n gamma (float|list[float]): A discount factor for future rewards. For\n multi-dim reward, this can also be a list of discounts, each\n discount applies to a reward dim.\n td_errors_loss_fn (Callable): A function for computing the TD errors\n loss. This function takes as input the target and the estimated\n Q values and returns the loss for each element of the batch.\n use_gae (bool): If True, uses generalized advantage estimation for\n computing per-timestep advantage. Else, just subtracts value\n predictions from empirical return.\n use_td_lambda_return (bool): Only effective if use_gae is True.\n If True, uses ``td_lambda_return`` for training value function.\n ``(td_lambda_return = gae_advantage + value_predictions)``.\n td_lambda (float): Lambda parameter for TD-lambda computation.\n normalize_advantages (bool): If True, normalize advantage to zero\n mean and unit variance within batch for caculating policy\n gradient. 
This is commonly used for PPO.\n advantage_clip (float): If set, clip advantages to :math:`[-x, x]`\n entropy_regularization (float): Coefficient for entropy\n regularization loss term.\n td_loss_weight (float): the weigt for the loss of td error.\n \"\"\"\n super().__init__(name=name)\n\n self._td_loss_weight = td_loss_weight\n self._name = name\n self._gamma = torch.tensor(gamma)\n self._td_error_loss_fn = td_error_loss_fn\n self._use_gae = use_gae\n self._lambda = td_lambda\n self._use_td_lambda_return = use_td_lambda_return\n self._normalize_advantages = normalize_advantages\n assert advantage_clip is None or advantage_clip > 0, (\n \"Clipping value should be positive!\")\n self._advantage_clip = advantage_clip\n self._entropy_regularization = entropy_regularization\n self._debug_summaries = debug_summaries\n\n @property\n def gamma(self):\n return self._gamma.clone()\n\n def forward(self, info):\n \"\"\"Cacluate actor critic loss. The first dimension of all the tensors is\n time dimension and the second dimesion is the batch dimension.\n\n Args:\n info (namedtuple): information for calculating loss. All tensors are\n time-major. 
It should contain the following fields:\n - reward:\n - step_type:\n - discount:\n - action:\n - action_distribution:\n - value:\n Returns:\n LossInfo: with ``extra`` being ``ActorCriticLossInfo``.\n \"\"\"\n\n value = info.value\n returns, advantages = self._calc_returns_and_advantages(info, value)\n\n if self._debug_summaries and alf.summary.should_record_summaries():\n with alf.summary.scope(self._name):\n\n def _summarize(v, r, adv, suffix):\n alf.summary.scalar(\"values\" + suffix, v.mean())\n alf.summary.scalar(\"returns\" + suffix, r.mean())\n safe_mean_hist_summary('advantages' + suffix, adv)\n alf.summary.scalar(\n \"explained_variance_of_return_by_value\" + suffix,\n tensor_utils.explained_variance(v, r))\n\n if value.ndim == 2:\n _summarize(value, returns, advantages, '')\n else:\n for i in range(value.shape[2]):\n suffix = '/' + str(i)\n _summarize(value[..., i], returns[..., i],\n advantages[..., i], suffix)\n\n if self._normalize_advantages:\n advantages = _normalize_advantages(advantages)\n\n if self._advantage_clip:\n advantages = torch.clamp(advantages, -self._advantage_clip,\n self._advantage_clip)\n\n if info.reward_weights != ():\n advantages = (advantages * info.reward_weights).sum(-1)\n pg_loss = self._pg_loss(info, advantages.detach())\n\n td_loss = self._td_error_loss_fn(returns.detach(), value)\n\n if td_loss.ndim == 3:\n td_loss = td_loss.mean(dim=2)\n\n loss = pg_loss + self._td_loss_weight * td_loss\n\n entropy_loss = ()\n if self._entropy_regularization is not None:\n entropy, entropy_for_gradient = dist_utils.entropy_with_fallback(\n info.action_distribution, return_sum=False)\n entropy_loss = alf.nest.map_structure(lambda x: -x, entropy)\n loss -= self._entropy_regularization * sum(\n alf.nest.flatten(entropy_for_gradient))\n\n return LossInfo(\n loss=loss,\n extra=ActorCriticLossInfo(\n td_loss=td_loss, pg_loss=pg_loss, neg_entropy=entropy_loss))\n\n def _pg_loss(self, info, advantages):\n action_log_prob = 
dist_utils.compute_log_probability(\n info.action_distribution, info.action)\n return -advantages * action_log_prob\n\n def _calc_returns_and_advantages(self, info, value):\n\n if info.reward.ndim == 3:\n # [T, B, D] or [T, B, 1]\n discounts = info.discount.unsqueeze(-1) * self._gamma\n else:\n # [T, B]\n discounts = info.discount * self._gamma\n\n returns = value_ops.discounted_return(\n rewards=info.reward,\n values=value,\n step_types=info.step_type,\n discounts=discounts)\n returns = tensor_utils.tensor_extend(returns, value[-1])\n\n if not self._use_gae:\n advantages = returns - value\n else:\n advantages = value_ops.generalized_advantage_estimation(\n rewards=info.reward,\n values=value,\n step_types=info.step_type,\n discounts=discounts,\n td_lambda=self._lambda)\n advantages = tensor_utils.tensor_extend_zero(advantages)\n if self._use_td_lambda_return:\n returns = advantages + value\n\n return returns, advantages\n\n def calc_loss(self, info):\n return self(info)\n", "# Copyright (c) 2020 Horizon Robotics and ALF Contributors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nimport torch\n\nfrom absl.testing import parameterized\n\nimport alf\nfrom alf import data_structures as ds\nfrom alf.utils.data_buffer import RingBuffer\nfrom alf.utils.data_buffer_test import get_batch, DataItem, RingBufferTest\nfrom alf.experience_replayers.replay_buffer import ReplayBuffer\nfrom alf.algorithms.data_transformer import HindsightExperienceTransformer\n\n\nclass ReplayBufferTest(RingBufferTest):\n def tearDown(self):\n super().tearDown()\n\n def test_replay_with_hindsight_relabel(self):\n self.max_length = 8\n torch.manual_seed(0)\n\n replay_buffer = ReplayBuffer(\n data_spec=self.data_spec,\n num_environments=2,\n max_length=self.max_length,\n keep_episodic_info=True,\n step_type_field=\"t\",\n with_replacement=True)\n\n transform = HindsightExperienceTransformer(\n self.data_spec,\n her_proportion=0.8,\n achieved_goal_field=\"o.a\",\n desired_goal_field=\"o.g\")\n\n steps = [\n [\n ds.StepType.FIRST, # will be overwritten\n ds.StepType.MID, # idx == 1 in buffer\n ds.StepType.LAST,\n ds.StepType.FIRST,\n ds.StepType.MID,\n ds.StepType.MID,\n ds.StepType.LAST,\n ds.StepType.FIRST,\n ds.StepType.MID # idx == 0\n ],\n [\n ds.StepType.FIRST, # will be overwritten in RingBuffer\n ds.StepType.LAST, # idx == 1 in RingBuffer\n ds.StepType.FIRST,\n ds.StepType.MID,\n ds.StepType.MID,\n ds.StepType.LAST,\n ds.StepType.FIRST,\n ds.StepType.MID,\n ds.StepType.MID # idx 
== 0\n ]\n ]\n # insert data that will be overwritten later\n for b, t in list(itertools.product(range(2), range(8))):\n batch = get_batch([b], self.dim, t=steps[b][t], x=0.1 * t + b)\n replay_buffer.add_batch(batch, batch.env_id)\n # insert data\n for b, t in list(itertools.product(range(2), range(9))):\n batch = get_batch([b], self.dim, t=steps[b][t], x=0.1 * t + b)\n replay_buffer.add_batch(batch, batch.env_id)\n\n # Test padding\n idx = torch.tensor([[7, 0, 0, 6, 3, 3, 3, 0], [6, 0, 5, 2, 2, 2, 0,\n 6]])\n pos = replay_buffer._pad(idx, torch.tensor([[0] * 8, [1] * 8]))\n self.assertTrue(\n torch.equal(\n pos,\n torch.tensor([[15, 16, 16, 14, 11, 11, 11, 16],\n [14, 16, 13, 10, 10, 10, 16, 14]],\n dtype=torch.int64)))\n\n # Verify _index is built correctly.\n # Note, the _index_pos 8 represents headless timesteps, which are\n # outdated and not the same as the result of padding: 16.\n pos = torch.tensor([[15, 8, 8, 14, 11, 11, 11, 16],\n [14, 8, 13, 10, 10, 10, 16, 14]])\n\n self.assertTrue(torch.equal(replay_buffer._indexed_pos, pos))\n self.assertTrue(\n torch.equal(replay_buffer._headless_indexed_pos,\n torch.tensor([10, 9])))\n\n # Save original exp for later testing.\n g_orig = replay_buffer._buffer.o[\"g\"].clone()\n r_orig = replay_buffer._buffer.reward.clone()\n\n # HER selects indices [0, 2, 3, 4] to relabel, from all 5:\n # env_ids: [[0, 0], [1, 1], [0, 0], [1, 1], [0, 0]]\n # pos: [[6, 7], [1, 2], [1, 2], [3, 4], [5, 6]] + 8\n # selected: x x x x\n # future: [ 7 2 2 4 6 ] + 8\n # g [[.7,.7],[0, 0], [.2,.2],[1.4,1.4],[.6,.6]] # 0.1 * t + b with default 0\n # reward: [[-1,0], [-1,-1],[-1,0], [-1,0], [-1,0]] # recomputed with default -1\n env_ids = torch.tensor([0, 0, 1, 0])\n dist = replay_buffer.steps_to_episode_end(\n replay_buffer._pad(torch.tensor([7, 2, 4, 6]), env_ids), env_ids)\n self.assertEqual(list(dist), [1, 0, 1, 0])\n\n # Test HER relabeled experiences\n res, info = replay_buffer.get_batch(5, 2)\n res = res._replace(batch_info=info)\n res = 
transform.transform_experience(res)\n\n self.assertEqual(list(res.o[\"g\"].shape), [5, 2])\n\n # Test relabeling doesn't change original experience\n self.assertTrue(torch.allclose(r_orig, replay_buffer._buffer.reward))\n self.assertTrue(torch.allclose(g_orig, replay_buffer._buffer.o[\"g\"]))\n\n # test relabeled goals\n g = torch.tensor([0.7, 0., .2, 1.4, .6]).unsqueeze(1).expand(5, 2)\n self.assertTrue(torch.allclose(res.o[\"g\"], g))\n\n # test relabeled rewards\n r = torch.tensor([[-1., 0.], [-1., -1.], [-1., 0.], [-1., 0.],\n [-1., 0.]])\n self.assertTrue(torch.allclose(res.reward, r))\n\n # Gold standard functions to test HER.\n def episode_end_indices(self, b):\n \"\"\"Compute episode ending indices in RingBuffer b.\n\n Args:\n b (ReplayBuffer): HER ReplayBuffer object.\n Returns:\n epi_ends (tensor): shape ``(2, E)``, ``epi_ends[0]`` are the\n ``env_ids``, ``epi_ends[1]`` are the ending positions of the\n episode ending/LAST steps.\n We assume every possible ``env_id`` is present.\n \"\"\"\n step_types = alf.nest.get_field(b._buffer, b._step_type_field)\n epi_ends = torch.where(step_types == ds.StepType.LAST)\n epi_ends = alf.nest.map_structure(lambda d: d.type(torch.int64),\n epi_ends)\n # if an env has no LAST step, populate with pos - 1\n last_step_pos = b.circular(b._current_pos - 1)\n all_envs = torch.arange(b._num_envs)\n non_last_step_envs = torch.where(\n step_types[(all_envs, last_step_pos)] != ds.StepType.LAST)[0]\n epi_ends = (torch.cat([epi_ends[0], non_last_step_envs]),\n torch.cat([epi_ends[1],\n last_step_pos[non_last_step_envs]]))\n return epi_ends\n\n # Another gold standard function\n def steps_to_episode_end(self, b, env_ids, idx):\n \"\"\"Compute the distance to the closest episode end in future.\n\n Args:\n b (ReplayBuffer): HER ReplayBuffer object.\n env_ids (tensor): shape ``L``.\n idx (tensor): shape ``L``, indexes of the current timesteps in\n the replay buffer.\n Returns:\n tensor of shape ``L``.\n \"\"\"\n epi_ends = 
self.episode_end_indices(b)\n MAX_INT = 1000000000\n pos = b._pad(idx, env_ids)\n padded_ends = b._pad(epi_ends[1], epi_ends[0])\n min_dist = torch.ones_like(pos)\n # Using a loop over envs reduces memory by num_envs^3.\n # Due to the small memory footprint, speed is also much faster.\n for env_id in range(b._num_envs):\n (pos_env_index, ) = torch.where(env_ids == env_id)\n (end_env_index, ) = torch.where(epi_ends[0] == env_id)\n _pos = torch.gather(pos, dim=0, index=pos_env_index)\n _ends = torch.gather(padded_ends, dim=0, index=end_env_index)\n L = _pos.shape[0]\n E = _ends.shape[0]\n dist = _ends.unsqueeze(0).expand(L, E) - _pos.unsqueeze(1).expand(\n L, E)\n positive_dist = torch.where(\n dist < 0, torch.tensor(MAX_INT, dtype=torch.int64), dist)\n _min_dist, _ = torch.min(positive_dist, dim=1)\n min_dist.scatter_(dim=0, index=pos_env_index, src=_min_dist)\n return min_dist\n\n def generate_step_types(self, num_envs, max_steps, end_prob):\n steps = torch.tensor([ds.StepType.MID] * max_steps * num_envs)\n # start with FIRST\n env_firsts = torch.arange(num_envs)\n steps[env_firsts * max_steps] = torch.tensor([ds.StepType.FIRST])\n # randomly insert episode ends (no overlapping positions)\n segs = int(max_steps * num_envs * end_prob)\n ends = (torch.arange(segs) * (1. / end_prob)).type(torch.int64)\n ends += (torch.rand(segs) * (1. 
/ end_prob - 1) + 1).type(torch.int64)\n steps[ends] = torch.tensor([ds.StepType.LAST]).expand(segs)\n valid_starts, = torch.where(\n ends +\n 1 != torch.arange(max_steps, num_envs * max_steps, max_steps))\n steps[(ends + 1)[valid_starts]] = torch.tensor(\n [ds.StepType.FIRST]).expand(valid_starts.shape[0])\n return steps\n\n @parameterized.parameters([\n (False, False),\n (False, True),\n (True, False),\n ])\n def test_replay_buffer(self, allow_multiprocess, with_replacement):\n replay_buffer = ReplayBuffer(\n data_spec=self.data_spec,\n num_environments=self.num_envs,\n max_length=self.max_length,\n allow_multiprocess=allow_multiprocess)\n\n batch1 = get_batch([0, 4, 7], self.dim, t=0, x=0.1)\n replay_buffer.add_batch(batch1, batch1.env_id)\n self.assertEqual(replay_buffer._current_size,\n torch.tensor([1, 0, 0, 0, 1, 0, 0, 1]))\n self.assertEqual(replay_buffer._current_pos,\n torch.tensor([1, 0, 0, 0, 1, 0, 0, 1]))\n self.assertRaises(AssertionError, replay_buffer.get_batch, 8, 1)\n\n batch2 = get_batch([1, 2, 3, 5, 6], self.dim, t=0, x=0.2)\n replay_buffer.add_batch(batch2, batch2.env_id)\n self.assertEqual(replay_buffer._current_size,\n torch.tensor([1, 1, 1, 1, 1, 1, 1, 1]))\n self.assertEqual(replay_buffer._current_pos,\n torch.tensor([1, 1, 1, 1, 1, 1, 1, 1]))\n\n batch = replay_buffer.gather_all()\n self.assertEqual(list(batch.t.shape), [8, 1])\n # test that RingBuffer detaches gradients of inputs\n self.assertFalse(batch.x.requires_grad)\n\n self.assertRaises(AssertionError, replay_buffer.get_batch, 8, 2)\n replay_buffer.get_batch(13, 1)[0]\n\n batch = replay_buffer.get_batch(8, 1)[0]\n # squeeze the time dimension\n batch = alf.nest.map_structure(lambda bat: bat.squeeze(1), batch)\n bat1 = alf.nest.map_structure(lambda bat: bat[batch1.env_id], batch)\n bat2 = alf.nest.map_structure(lambda bat: bat[batch2.env_id], batch)\n self.assertEqual(bat1.env_id, batch1.env_id)\n self.assertEqual(bat1.x, batch1.x)\n self.assertEqual(bat1.t, batch1.t)\n 
self.assertEqual(bat2.env_id, batch2.env_id)\n self.assertEqual(bat2.x, batch2.x)\n self.assertEqual(bat2.t, batch2.t)\n\n for t in range(1, 10):\n batch3 = get_batch([0, 4, 7], self.dim, t=t, x=0.3)\n j = t + 1\n s = min(t + 1, self.max_length)\n replay_buffer.add_batch(batch3, batch3.env_id)\n self.assertEqual(replay_buffer._current_size,\n torch.tensor([s, 1, 1, 1, s, 1, 1, s]))\n self.assertEqual(replay_buffer._current_pos,\n torch.tensor([j, 1, 1, 1, j, 1, 1, j]))\n\n batch2 = get_batch([1, 2, 3, 5, 6], self.dim, t=1, x=0.2)\n replay_buffer.add_batch(batch2, batch2.env_id)\n batch = replay_buffer.get_batch(8, 1)[0]\n # squeeze the time dimension\n batch = alf.nest.map_structure(lambda bat: bat.squeeze(1), batch)\n bat3 = alf.nest.map_structure(lambda bat: bat[batch3.env_id], batch)\n bat2 = alf.nest.map_structure(lambda bat: bat[batch2.env_id], batch)\n self.assertEqual(bat3.env_id, batch3.env_id)\n self.assertEqual(bat3.x, batch3.x)\n self.assertEqual(bat2.env_id, batch2.env_id)\n self.assertEqual(bat2.x, batch2.x)\n\n batch = replay_buffer.get_batch(8, 2)[0]\n t2 = []\n t3 = []\n for t in range(2):\n batch_t = alf.nest.map_structure(lambda b: b[:, t], batch)\n bat3 = alf.nest.map_structure(lambda bat: bat[batch3.env_id],\n batch_t)\n bat2 = alf.nest.map_structure(lambda bat: bat[batch2.env_id],\n batch_t)\n t2.append(bat2.t)\n self.assertEqual(bat3.env_id, batch3.env_id)\n self.assertEqual(bat3.x, batch3.x)\n self.assertEqual(bat2.env_id, batch2.env_id)\n self.assertEqual(bat2.x, batch2.x)\n t3.append(bat3.t)\n\n # Test time consistency\n self.assertEqual(t2[0] + 1, t2[1])\n self.assertEqual(t3[0] + 1, t3[1])\n\n batch = replay_buffer.get_batch(128, 2)[0]\n self.assertEqual(batch.t[:, 0] + 1, batch.t[:, 1])\n self.assertEqual(list(batch.t.shape), [128, 2])\n\n batch = replay_buffer.get_batch(10, 2)[0]\n self.assertEqual(batch.t[:, 0] + 1, batch.t[:, 1])\n self.assertEqual(list(batch.t.shape), [10, 2])\n\n batch = replay_buffer.get_batch(4, 2)[0]\n 
self.assertEqual(batch.t[:, 0] + 1, batch.t[:, 1])\n self.assertEqual(list(batch.t.shape), [4, 2])\n\n # Test gather_all()\n # Exception because the size of all the environments are not same\n self.assertRaises(AssertionError, replay_buffer.gather_all)\n\n for t in range(2, 10):\n batch4 = get_batch([1, 2, 3, 5, 6], self.dim, t=t, x=0.4)\n replay_buffer.add_batch(batch4, batch4.env_id)\n batch = replay_buffer.gather_all()\n self.assertEqual(list(batch.t.shape), [8, 4])\n\n # Test clear()\n replay_buffer.clear()\n self.assertEqual(replay_buffer.total_size, 0)\n\n def test_recent_data_and_without_replacement(self):\n num_envs = 4\n max_length = 100\n replay_buffer = ReplayBuffer(\n data_spec=self.data_spec,\n num_environments=num_envs,\n max_length=max_length,\n with_replacement=False,\n recent_data_ratio=0.5,\n recent_data_steps=4)\n replay_buffer.add_batch(get_batch([0, 1, 2, 3], self.dim, t=0, x=0.))\n batch, info = replay_buffer.get_batch(4, 1)\n self.assertEqual(info.env_ids, torch.tensor([0, 1, 2, 3]))\n\n replay_buffer.add_batch(get_batch([0, 1, 2, 3], self.dim, t=1, x=1.0))\n batch, info = replay_buffer.get_batch(8, 1)\n self.assertEqual(info.env_ids, torch.tensor([0, 1, 2, 3] * 2))\n\n for t in range(2, 32):\n replay_buffer.add_batch(\n get_batch([0, 1, 2, 3], self.dim, t=t, x=t))\n batch, info = replay_buffer.get_batch(32, 1)\n self.assertEqual(info.env_ids[16:], torch.tensor([0, 1, 2, 3] * 4))\n # The first half is from recent data\n self.assertEqual(info.env_ids[:16], torch.tensor([0, 1, 2, 3] * 4))\n self.assertEqual(\n info.positions[:16],\n torch.tensor([28] * 4 + [29] * 4 + [30] * 4 + [31] * 4))\n\n def test_num_earliest_frames_ignored_uniform(self):\n num_envs = 4\n max_length = 100\n replay_buffer = ReplayBuffer(\n data_spec=self.data_spec,\n num_environments=num_envs,\n max_length=max_length,\n keep_episodic_info=False,\n num_earliest_frames_ignored=2)\n\n replay_buffer.add_batch(get_batch([0, 1, 2, 3], self.dim, t=0, x=0.))\n # not enough data\n 
self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 1)\n\n replay_buffer.add_batch(get_batch([0, 1, 2, 3], self.dim, t=1, x=0.))\n # not enough data\n self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 1)\n\n replay_buffer.add_batch(get_batch([0, 1, 2, 3], self.dim, t=2, x=0.))\n for _ in range(10):\n batch, batch_info = replay_buffer.get_batch(1, 1)\n self.assertEqual(batch.t, torch.tensor([[2]]))\n\n def test_num_earliest_frames_ignored_priortized(self):\n replay_buffer = ReplayBuffer(\n data_spec=self.data_spec,\n num_environments=self.num_envs,\n max_length=self.max_length,\n num_earliest_frames_ignored=2,\n keep_episodic_info=False,\n prioritized_sampling=True)\n\n batch1 = get_batch([1], self.dim, x=0.25, t=0)\n replay_buffer.add_batch(batch1, batch1.env_id)\n # not enough data\n self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 1)\n\n batch2 = get_batch([1], self.dim, x=0.25, t=1)\n replay_buffer.add_batch(batch2, batch1.env_id)\n # not enough data\n self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 1)\n\n batch3 = get_batch([1], self.dim, x=0.25, t=2)\n replay_buffer.add_batch(batch3, batch1.env_id)\n for _ in range(10):\n batch, batch_info = replay_buffer.get_batch(1, 1)\n self.assertEqual(batch_info.env_ids,\n torch.tensor([1], dtype=torch.int64))\n self.assertEqual(batch_info.importance_weights, 1.)\n self.assertEqual(batch_info.importance_weights, torch.tensor([1.]))\n self.assertEqual(batch.t, torch.tensor([[2]]))\n\n def test_prioritized_replay(self):\n replay_buffer = ReplayBuffer(\n data_spec=self.data_spec,\n num_environments=self.num_envs,\n max_length=self.max_length,\n prioritized_sampling=True)\n self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 1)\n\n batch1 = get_batch([1], self.dim, x=0.25, t=0)\n replay_buffer.add_batch(batch1, batch1.env_id)\n\n batch, batch_info = replay_buffer.get_batch(1, 1)\n self.assertEqual(batch_info.env_ids,\n torch.tensor([1], dtype=torch.int64))\n 
self.assertEqual(batch_info.importance_weights, 1.)\n self.assertEqual(batch_info.importance_weights, torch.tensor([1.]))\n self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 2)\n\n batch2 = get_batch([1], self.dim, x=0.5, t=1)\n replay_buffer.add_batch(batch1, batch1.env_id)\n\n batch, batch_info = replay_buffer.get_batch(4, 2)\n self.assertEqual(batch_info.env_ids,\n torch.tensor([1], dtype=torch.int64))\n self.assertEqual(batch_info.importance_weights, torch.tensor([1.]))\n self.assertEqual(batch_info.importance_weights, torch.tensor([1.] * 4))\n\n batch, batch_info = replay_buffer.get_batch(1000, 1)\n n0 = (replay_buffer.circular(batch_info.positions) == 0).sum()\n n1 = (replay_buffer.circular(batch_info.positions) == 1).sum()\n self.assertEqual(n0, 500)\n self.assertEqual(n1, 500)\n replay_buffer.update_priority(\n env_ids=torch.tensor([1, 1], dtype=torch.int64),\n positions=torch.tensor([0, 1], dtype=torch.int64),\n priorities=torch.tensor([0.5, 1.5]))\n batch, batch_info = replay_buffer.get_batch(1000, 1)\n n0 = (replay_buffer.circular(batch_info.positions) == 0).sum()\n n1 = (replay_buffer.circular(batch_info.positions) == 1).sum()\n self.assertEqual(n0, 250)\n self.assertEqual(n1, 750)\n\n batch2 = get_batch([0, 2], self.dim, x=0.5, t=1)\n replay_buffer.add_batch(batch2, batch2.env_id)\n batch, batch_info = replay_buffer.get_batch(1000, 1)\n\n def _get(env_id, pos):\n flag = ((batch_info.env_ids == env_id) *\n (batch_info.positions == replay_buffer._pad(pos, env_id)))\n w = batch_info.importance_weights[torch.nonzero(\n flag, as_tuple=True)[0]]\n return flag.sum(), w\n\n n0, w0 = _get(0, 0)\n n1, w1 = _get(1, 0)\n n2, w2 = _get(1, 1)\n n3, w3 = _get(2, 0)\n self.assertEqual(n0, 300)\n self.assertEqual(n1, 100)\n self.assertEqual(n2, 300)\n self.assertEqual(n3, 300)\n self.assertTrue(torch.all(w0 == 1.2))\n self.assertTrue(torch.all(w1 == 0.4))\n self.assertTrue(torch.all(w2 == 1.2))\n self.assertTrue(torch.all(w3 == 1.2))\n\n 
replay_buffer.update_priority(\n env_ids=torch.tensor([1, 2], dtype=torch.int64),\n positions=torch.tensor([1, 0], dtype=torch.int64),\n priorities=torch.tensor([1.0, 1.0]))\n batch, batch_info = replay_buffer.get_batch(1000, 1)\n\n n0, w0 = _get(0, 0)\n n1, w1 = _get(1, 0)\n n2, w2 = _get(1, 1)\n n3, w3 = _get(2, 0)\n self.assertEqual(n0, 375)\n self.assertEqual(n1, 125)\n self.assertEqual(n2, 250)\n self.assertEqual(n3, 250)\n self.assertTrue(torch.all(w0 == 1.5))\n self.assertTrue(torch.all(w1 == 0.5))\n self.assertTrue(torch.all(w2 == 1.0))\n self.assertTrue(torch.all(w3 == 1.0))\n\n\nif __name__ == '__main__':\n alf.test.main()\n", "# Copyright (c) 2020 Horizon Robotics and ALF Contributors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport numpy as np\nimport torch\nfrom typing import Callable\n\nimport alf\nfrom alf.utils import common\nfrom alf.utils import tensor_utils\nfrom . import adam_tf, adamw\n\n\ndef _rbf_func(x):\n r\"\"\"\n Compute the rbf kernel and its gradient w.r.t. 
first entry\n :math:`K(x, x), \\nabla_x K(x, x)`, for computing ``svgd``_grad.\n\n Args:\n x (Tensor): set of N particles, shape (N x D), where D is the\n dimenseion of each particle\n\n Returns:\n :math:`K(x, x)` (Tensor): the RBF kernel of shape (N x N)\n :math:`\\nabla_x K(x, x)` (Tensor): the derivative of RBF kernel of shape (N x N x D)\n\n \"\"\"\n N, D = x.shape\n diff = x.unsqueeze(1) - x.unsqueeze(0) # [N, N, D]\n dist_sq = torch.sum(diff**2, -1) # [N, N]\n h, _ = torch.median(dist_sq.view(-1), dim=0)\n if h == 0.:\n h = torch.ones_like(h)\n else:\n h = h / max(np.log(N), 1.)\n\n kappa = torch.exp(-dist_sq / h) # [N, N]\n kappa_grad = -2 * kappa.unsqueeze(-1) * diff / h # [N, N, D]\n return kappa, kappa_grad\n\n\ndef _score_func(x, alpha=1e-5):\n r\"\"\"\n Compute the stein estimator of the score function\n :math:`\\nabla\\log q = -(K + \\alpha I)^{-1}\\nabla K`,\n for computing ``gfsf``_grad.\n\n Args:\n x (Tensor): set of N particles, shape (N x D), where D is the\n dimenseion of each particle\n alpha (float): weight of regularization for inverse kernel\n this parameter turns out to be crucial for convergence.\n\n Returns:\n :math:`\\nabla\\log q` (Tensor): the score function of shape (N x D)\n\n \"\"\"\n N, D = x.shape\n diff = x.unsqueeze(1) - x.unsqueeze(0) # [N, N, D]\n dist_sq = torch.sum(diff**2, -1) # [N, N]\n h, _ = torch.median(dist_sq.view(-1), dim=0)\n if h == 0.:\n h = torch.ones_like(h)\n else:\n h = h / max(np.log(N), 1.)\n\n kappa = torch.exp(-dist_sq / h) # [N, N]\n kappa_inv = torch.inverse(kappa + alpha * torch.eye(N)) # [N, N]\n kappa_grad = -2 * kappa.unsqueeze(-1) * diff / h # [N, N, D]\n kappa_grad = kappa_grad.sum(0) # [N, D]\n\n return -kappa_inv @ kappa_grad\n\n\ndef wrap_optimizer(cls):\n \"\"\"A helper function to construct torch optimizers with\n params as [{'params': []}]. 
After construction, new parameter\n groups can be added by using the add_param_group() method.\n\n This wrapper also clips gradients first before calling ``step()``.\n \"\"\"\n NewClsName = cls.__name__ + \"_\"\n NewCls = type(NewClsName, (cls, ), {})\n NewCls.counter = 0\n\n @common.add_method(NewCls)\n def __init__(self,\n *,\n gradient_clipping=None,\n clip_by_global_norm=False,\n parvi=None,\n repulsive_weight=1.,\n name=None,\n **kwargs):\n \"\"\"\n Args:\n gradient_clipping (float): If not None, serve as a positive threshold\n clip_by_global_norm (bool): If True, use `tensor_utils.clip_by_global_norm`\n to clip gradient. If False, use `tensor_utils.clip_by_norms` for\n each grad.\n parvi (string): if not ``None``, paramters with attribute\n ``ensemble_group`` will be updated by particle-based vi algorithm\n specified by ``parvi``, options are [``svgd``, ``gfsf``],\n\n * Stein Variational Gradient Descent (SVGD)\n\n Liu, Qiang, and Dilin Wang. \"Stein Variational Gradient Descent:\n A General Purpose Bayesian Inference Algorithm.\" NIPS. 2016.\n\n * Wasserstein Gradient Flow with Smoothed Functions (GFSF)\n\n Liu, Chang, et al. \"Understanding and accelerating particle-based\n variational inference.\" ICML. 2019.\n\n To work with the ``parvi`` option, the parameters added to the\n optimizer (by ``add_param_group``) should have an (int) attribute\n ``ensemble_group``. See ``FCBatchEnsemble`` as an example.\n\n repulsive_weight (float): the weight of the repulsive gradient term\n for parameters with attribute ``ensemble_group``.\n name (str): the name displayed when summarizing the gradient norm. If\n None, then a global name in the format of \"class_name_i\" will be\n created, where \"i\" is the global optimizer id.\n kwargs: arguments passed to the constructor of the underline torch\n optimizer. 
If ``lr`` is given and it is a ``Callable``, it is\n treated as a learning rate scheduler and will be called everytime\n when ``step()`` is called to get the latest learning rate.\n Available schedulers are in ``alf.utils.schedulers``.\n \"\"\"\n self._lr_scheduler = None\n if \"lr\" in kwargs:\n lr = kwargs[\"lr\"]\n if isinstance(lr, Callable):\n self._lr_scheduler = lr\n kwargs[\"lr\"] = float(lr())\n\n super(NewCls, self).__init__([{'params': []}], **kwargs)\n self._gradient_clipping = gradient_clipping\n self._clip_by_global_norm = clip_by_global_norm\n self._parvi = parvi\n if parvi is not None:\n assert parvi in ['svgd', 'gfsf'\n ], (\"parvi method %s is not supported.\" % (parvi))\n self._repulsive_weight = repulsive_weight\n self.name = name\n if name is None:\n self.name = NewClsName + str(NewCls.counter)\n NewCls.counter += 1\n\n @common.add_method(NewCls)\n def step(self, closure=None):\n \"\"\"This function first clips the gradients if needed, then call the\n parent's ``step()`` function.\n \"\"\"\n if self._lr_scheduler is not None:\n lr = float(self._lr_scheduler())\n for param_group in self.param_groups:\n param_group['lr'] = lr\n if self._gradient_clipping is not None:\n params = []\n for param_group in self.param_groups:\n params.extend(param_group[\"params\"])\n grads = alf.nest.map_structure(lambda p: p.grad, params)\n if self._clip_by_global_norm:\n _, global_norm = tensor_utils.clip_by_global_norm(\n grads, self._gradient_clipping, in_place=True)\n if alf.summary.should_record_summaries():\n alf.summary.scalar(\"global_grad_norm/%s\" % self.name,\n global_norm)\n else:\n tensor_utils.clip_by_norms(\n grads, self._gradient_clipping, in_place=True)\n\n if self._parvi is not None:\n self._parvi_step()\n\n super(NewCls, self).step(closure=closure)\n\n @common.add_method(NewCls)\n def _parvi_step(self):\n for param_group in self.param_groups:\n if \"parvi_grad\" in param_group:\n params = param_group['params']\n batch_size = params[0].shape[0]\n 
params_tensor = torch.cat(\n [p.view(batch_size, -1) for p in params],\n dim=-1) # [N, D], D=dim(params)\n if self._parvi == 'svgd':\n # [N, N], [N, N, D]\n kappa, kappa_grad = _rbf_func(params_tensor)\n grads_tensor = torch.cat(\n [p.grad.view(batch_size, -1) for p in params],\n dim=-1).detach() # [N, D]\n kernel_logp = torch.matmul(kappa,\n grads_tensor) / batch_size\n svgd_grad = torch.split(\n kernel_logp -\n self._repulsive_weight * kappa_grad.mean(0),\n [p.nelement() // batch_size for p in params],\n dim=-1)\n for i in range(len(params)):\n grad = params[i].grad.view(batch_size, -1)\n grad.copy_(svgd_grad[i])\n else:\n logq_grad = _score_func(params_tensor) # [N, D]\n gfsf_grad = torch.split(\n logq_grad,\n [p.nelement() // batch_size for p in params],\n dim=-1)\n for i in range(len(params)):\n grad = params[i].grad.view(batch_size, -1)\n grad.add_(self._repulsive_weight * gfsf_grad[i])\n\n @common.add_method(NewCls)\n def add_param_group(self, param_group):\n \"\"\"This function first splits the input param_group into multiple\n param_groups according to their ``ensemble_group`` attributes, then\n calls the parent's ``add_param_group()`` function to add each of\n them to the optimizer.\n \"\"\"\n assert isinstance(param_group, dict), \"param_group must be a dict\"\n\n params = param_group[\"params\"]\n if isinstance(params, torch.Tensor):\n param_group['params'] = [params]\n elif isinstance(params, set):\n raise TypeError('Please use a list instead.')\n else:\n param_group['params'] = list(params)\n\n len_params = len(param_group['params'])\n std_param_group = []\n ensemble_param_groups = [[] for i in range(len_params)]\n group_batch_sizes = [0] * len_params\n for param in param_group['params']:\n if not isinstance(param, torch.Tensor):\n raise TypeError(\"optimizer can only optimize Tensors, \"\n \"but one of the params is \" +\n torch.typename(param))\n if hasattr(param, 'ensemble_group'):\n assert isinstance(\n param.ensemble_group,\n int), 
(\"ensemble_group attribute mis-specified.\")\n ensemble_group_id = param.ensemble_group\n if group_batch_sizes[ensemble_group_id] == 0:\n group_batch_sizes[ensemble_group_id] = param.shape[0]\n else:\n assert param.shape[0] == group_batch_sizes[\n ensemble_group_id], (\n \"batch_size of params does not match that of the \"\n \"ensemble param_group %d.\" % (ensemble_group_id))\n ensemble_param_groups[ensemble_group_id].append(param)\n else:\n std_param_group.append(param)\n\n if len(alf.nest.flatten(ensemble_param_groups)) > 0:\n if len(std_param_group) > 0:\n super(NewCls, self).add_param_group({\n 'params': std_param_group\n })\n for ensemble_param_group in ensemble_param_groups:\n if len(ensemble_param_group) > 0:\n super(NewCls, self).add_param_group({\n 'params': ensemble_param_group,\n 'parvi_grad': True\n })\n else:\n super(NewCls, self).add_param_group(param_group)\n\n return NewCls\n\n\nAdam = alf.configurable('Adam')(wrap_optimizer(torch.optim.Adam))\n\n# TODO: uncomment this after removing `adamw.py`\n#AdamW = alf.configurable('AdamW')(wrap_optimizer(torch.optim.AdamW))\nAdamW = alf.configurable('AdamW')(wrap_optimizer(adamw.AdamW))\n\nSGD = alf.configurable('SGD')(wrap_optimizer(torch.optim.SGD))\n\nAdamTF = alf.configurable('AdamTF')(wrap_optimizer(adam_tf.AdamTF))\n", "# Copyright (c) 2021 Horizon Robotics and ALF Contributors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import partial\nimport torch\n\nimport alf\nfrom alf.algorithms.agent import Agent\nfrom alf.algorithms.encoding_algorithm import EncodingAlgorithm\nfrom alf.algorithms.ppo_algorithm import PPOAlgorithm, PPOLoss\nfrom alf.algorithms.trac_algorithm import TracAlgorithm\nfrom alf.environments import suite_babyai, alf_wrappers\n\nalf.config(\n 'TrainerConfig',\n algorithm_ctor=Agent,\n unroll_length=64,\n mini_batch_size=32,\n num_iterations=50000,\n num_updates_per_train_iter=2,\n eval_interval=1000,\n debug_summaries=True,\n summarize_grads_and_vars=True,\n summary_interval=100,\n use_rollout_state=True,\n)\n\nalf.config(\n 'suite_babyai.load',\n mode='sent',\n alf_env_wrappers=[alf_wrappers.ActionObservationWrapper])\n\nalf.config(\n 'create_environment',\n env_load_fn=suite_babyai.load,\n env_name=[\n \"BabyAI-GoToObj-v0\", \"BabyAI-GoToRedBallGrey-v0\",\n \"BabyAI-GoToRedBall-v0\", \"BabyAI-GoToLocal-v0\", \"BabyAI-PickupLoc-v0\"\n ],\n num_parallel_environments=64,\n batched_wrappers=(alf_wrappers.CurriculumWrapper, ),\n)\n\nobservation_spec = alf.get_observation_spec()\naction_spec = alf.get_action_spec()\nmission_spec = observation_spec['observation']['mission']\nvocab_size = mission_spec.maximum + 1\n\nencoding_dim = 128\nactivation = torch.relu_\nfc_layers_params = (256, 256)\n\nobservation_preprocessors = {\n \"image\":\n torch.nn.Sequential(\n alf.layers.Permute(2, 0, 
1), alf.layers.Cast(),\n alf.layers.Conv2D(3, encoding_dim, kernel_size=3),\n alf.layers.Conv2D(encoding_dim, encoding_dim, kernel_size=3),\n alf.layers.Reshape((encoding_dim, -1)), alf.layers.Transpose()),\n \"direction\":\n torch.nn.Sequential(\n torch.nn.Embedding(4, encoding_dim),\n alf.layers.Reshape((1, encoding_dim))),\n \"mission\":\n torch.nn.Sequential(\n torch.nn.Embedding(vocab_size, encoding_dim),\n alf.layers.Reshape((-1, encoding_dim)))\n}\ninput_preprocessors = {\n 'observation':\n observation_preprocessors,\n 'prev_action':\n torch.nn.Sequential(\n torch.nn.Embedding(7, encoding_dim),\n alf.layers.Reshape((1, encoding_dim)))\n}\n\nencoder_cls = partial(\n alf.networks.TransformerNetwork,\n input_preprocessors=input_preprocessors,\n memory_size=8,\n core_size=1,\n num_prememory_layers=0,\n num_memory_layers=4,\n num_attention_heads=3,\n d_ff=encoding_dim,\n centralized_memory=True)\n\nrepr_learner_cls = partial(EncodingAlgorithm, encoder_cls=encoder_cls)\n\nactor_network_ctor = partial(\n alf.networks.ActorDistributionNetwork,\n fc_layer_params=fc_layers_params,\n activation=activation,\n discrete_projection_net_ctor=alf.networks.CategoricalProjectionNetwork)\n\nvalue_network_ctor = partial(\n alf.networks.ValueNetwork,\n fc_layer_params=fc_layers_params,\n activation=activation)\n\nalf.config(\n 'PPOLoss',\n entropy_regularization=0.0,\n gamma=0.99,\n normalize_advantages=True,\n td_lambda=0.95,\n td_error_loss_fn=alf.utils.losses.element_wise_squared_loss,\n check_numerics=True)\n\nalf.config(\n 'PPOAlgorithm',\n actor_network_ctor=actor_network_ctor,\n value_network_ctor=value_network_ctor,\n loss_class=PPOLoss)\n\nalf.config(\n 'TracAlgorithm',\n ac_algorithm_cls=PPOAlgorithm,\n action_dist_clip_per_dim=0.01)\n\nalf.config('EntropyTargetAlgorithm', initial_alpha=0.001)\n\nalf.config(\n 'Agent',\n representation_learner_cls=repr_learner_cls,\n optimizer=alf.optimizers.AdamTF(lr=1e-4),\n rl_algorithm_cls=TracAlgorithm,\n 
enforce_entropy_target=True)\n", "# Copyright (c) 2021 Horizon Robotics and ALF Contributors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import partial\n\nimport torch\n\nimport alf\nfrom alf.algorithms.actor_critic_algorithm import ActorCriticAlgorithm\nfrom alf.data_structures import namedtuple\n\nMyACInfo = namedtuple(\"MyACInfo\", [\"ac\", \"zeros\"])\n\n\nclass MyACAlgorithm(ActorCriticAlgorithm):\n def rollout_step(self, inputs, state):\n alg_step = super().rollout_step(inputs, state)\n action = alg_step.output\n zeros = torch.zeros_like(action)\n print(\"rollout_step: \", zeros.shape)\n alg_step = alg_step._replace(\n info=MyACInfo(ac=alg_step.info, zeros=zeros))\n return alg_step\n\n def calc_loss(self, info: MyACInfo):\n zeros = info.zeros\n print(\"calc_loss: \", zeros.shape)\n return super().calc_loss(info.ac)\n\n def after_update(self, root_inputs, info: MyACInfo):\n zeros = info.zeros\n print(\"after_update: \", zeros.shape)\n super().after_update(root_inputs, info.ac)\n\n def after_train_iter(self, root_inputs, rollout_info: MyACInfo):\n zeros = rollout_info.zeros\n print(\"after_train_iter: \", zeros.shape)\n super().after_train_iter(root_inputs, rollout_info.ac)\n\n\n# configure which RL algorithm to use\nalf.config(\n 'TrainerConfig',\n algorithm_ctor=partial(\n MyACAlgorithm, optimizer=alf.optimizers.Adam(lr=1e-3)),\n num_iterations=1)\n" ]
[ [ "torch.isfinite", "torch.max" ], [ "torch.sqrt", "torch.tensor", "numpy.prod", "torch.var", "torch.clamp" ], [ "torch.all", "torch.cat", "torch.manual_seed", "torch.min", "torch.equal", "torch.tensor", "torch.rand", "torch.where", "torch.arange", "torch.gather", "torch.allclose", "torch.nonzero", "torch.ones_like" ], [ "numpy.log", "torch.typename", "torch.sum", "torch.eye", "torch.exp", "torch.matmul", "torch.ones_like" ], [ "torch.nn.Embedding" ], [ "torch.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhaodongsun/pnp_dip
[ "f8f3802af8c607b3063fc7b92e20729f148d36c1" ]
[ "experiments/superresolution/batch_DIP_TV_subgrad.py" ]
[ "import os\n# os.environ['CUDA_VISIBLE_DEVICES'] = '1'\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\nimport numpy as np\nimport torch\nfrom skimage.metrics import peak_signal_noise_ratio\nfrom matplotlib.pyplot import imread, imsave\nfrom skimage.transform import resize\nimport time\nimport sys\nimport glob\n\nsys.path.append('../')\n\nfrom admm_utils import *\nfrom torch import optim\nfrom models import *\ntorch.backends.cudnn.enabled = True\ntorch.backends.cudnn.benchmark = True\n\ndef run(f_name, specific_result_dir, noise_sigma, num_iter, GD_lr):\n img = imread(f_name)[:,:,:3]\n if img.dtype == 'uint8':\n img = img.astype('float32') / 255 # scale to [0, 1]\n elif img.dtype == 'float32':\n img = img.astype('float32')\n else:\n raise TypeError()\n img = np.clip(resize(img, (128, 128)), 0, 1)\n imsave(specific_result_dir + 'true.png', img)\n if len(img.shape) == 2:\n img = img[:,:,np.newaxis]\n num_channels = 1\n else:\n num_channels = 3\n\n img = img.transpose((2, 0, 1))\n x_true = torch.from_numpy(img).unsqueeze(0).type(dtype)\n\n # A = torch.zeros(num_measurements, x_true.numel()).normal_().type(dtype) / math.sqrt(num_measurements)\n A, At, _, down_img= A_superresolution(2, x_true.shape)\n b = A(x_true.reshape(-1,))\n b = torch.clamp(b + noise_sigma * torch.randn(b.shape).type(dtype), 0, 1)\n imsave(specific_result_dir+'corrupted.png', down_img(x_true).cpu().numpy()[0].transpose((1,2,0)))\n\n def fn(x): return torch.norm(A(x.reshape(-1)) - b) ** 2 / 2\n\n # G = skip(3, 3,\n # num_channels_down = [16, 32, 64, 128, 128, 128],\n # num_channels_up = [16, 32, 64, 128, 128, 128],\n # num_channels_skip = [4, 4, 4, 4, 4, 4],\n # filter_size_up = [7, 7, 5, 5, 3, 3],filter_size_down = [7, 7, 5, 5, 3, 3], filter_skip_size=1,\n # upsample_mode='bilinear', # downsample_mode='avg',\n # need1x1_up=False,\n # need_sigmoid=True, need_bias=True, pad='reflection', act_fun='LeakyReLU').type(dtype)\n # G = skip(3, 3,\n # num_channels_down=[128, 128, 128, 128, 128],\n # 
num_channels_up=[128, 128, 128, 128, 128],#[16, 32, 64, 128, 128],\n # num_channels_skip=[4, 4, 4, 4, 4],\n # filter_size_up=3, filter_size_down=3, filter_skip_size=1,\n # upsample_mode='bilinear', # downsample_mode='avg',\n # need1x1_up=True,\n # need_sigmoid=True, need_bias=True, pad='reflection', act_fun='LeakyReLU').type(dtype)\n G = get_net(3, 'skip', 'reflection',\n skip_n33d=128, \n skip_n33u=128, \n skip_n11=4, \n num_scales=5,\n upsample_mode='bilinear').type(dtype)\n z = torch.zeros_like(x_true).type(dtype).normal_()\n\n z.requires_grad = False\n opt = optim.Adam(G.parameters(), lr=GD_lr)\n\n record = {\"psnr_gt\": [],\n \"mse_gt\": [],\n \"total_loss\": [],\n \"prior_loss\": [],\n \"fidelity_loss\": [],\n \"cpu_time\": [],\n }\n\n results = None\n for t in range(num_iter):\n x = G(z)\n fidelity_loss = fn(x)\n\n tv_loss = (torch.sum(torch.abs(x[:, :, :, :-1] - x[:, :, :, 1:])) + torch.sum(torch.abs(x[:, :, :-1, :] - x[:, :, 1:, :])))\n total_loss = fidelity_loss + 0.01 * tv_loss\n opt.zero_grad()\n total_loss.backward()\n opt.step()\n\n\n if results is None:\n results = x.detach().cpu().numpy()\n else:\n results = results * 0.99 + x.detach().cpu().numpy() * 0.01\n\n psnr_gt = peak_signal_noise_ratio(x_true.cpu().numpy(), results)\n mse_gt = np.mean((x_true.cpu().numpy() - results) ** 2)\n\n if (t + 1) % 250 == 0:\n if num_channels == 3:\n imsave(specific_result_dir + 'iter%d_PSNR_%.2f.png'%(t, psnr_gt), results[0].transpose((1,2,0)))\n else:\n imsave(specific_result_dir + 'iter%d_PSNR_%.2f.png'%(t, psnr_gt), results[0, 0], cmap='gray')\n\n\n record[\"psnr_gt\"].append(psnr_gt)\n record[\"mse_gt\"].append(mse_gt)\n record[\"fidelity_loss\"].append(fidelity_loss.item())\n record[\"cpu_time\"].append(time.time())\n if (t + 1) % 10 == 0:\n print('Img %d Iteration %5d PSRN_gt: %.2f MSE_gt: %e' % (f_num, t + 1, psnr_gt, mse_gt))\n np.savez(specific_result_dir+'record', **record)\n\n# torch.manual_seed(500)\nif torch.cuda.is_available():\n dtype = 
torch.cuda.FloatTensor\nelse:\n dtype = torch.FloatTensor\n\ndataset_dir = '../../data/'\nresults_dir = '../../data/results/DIP_tv_subgrad_sr/'\nos.makedirs(results_dir)\nf_name_list = glob.glob('../../data/*.jpg')\n\nfor f_num, f_name in enumerate(f_name_list):\n\n specific_result_dir = results_dir+str(f_num)+'/'\n os.makedirs(specific_result_dir)\n run(f_name = f_name,\n specific_result_dir = specific_result_dir,\n noise_sigma = 10 / 255,\n num_iter = 5000,\n GD_lr=0.01)\n" ]
[ [ "matplotlib.pyplot.imsave", "torch.abs", "numpy.savez", "matplotlib.pyplot.imread", "torch.randn", "torch.zeros_like", "torch.from_numpy", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aimuch/AIEnvConfig
[ "4ccd54e9c601e8c91efebcec1a50115d75d0cf96", "4ccd54e9c601e8c91efebcec1a50115d75d0cf96" ]
[ "src/tensorrt/tensorrt-4.0.1.6/examples-python/resnet_as_a_service/resnet_as_a_service.py", "src/tensorrt/tensorrt-6.0.1.5/samples/python/uff_ssd/utils/mAP.py" ]
[ "#\n# Copyright 1993-2018 NVIDIA Corporation. All rights reserved.\n#\n# NOTICE TO LICENSEE:\n#\n# This source code and/or documentation (\"Licensed Deliverables\") are\n# subject to NVIDIA intellectual property rights under U.S. and\n# international Copyright laws.\n#\n# These Licensed Deliverables contained herein is PROPRIETARY and\n# CONFIDENTIAL to NVIDIA and is being provided under the terms and\n# conditions of a form of NVIDIA software license agreement by and\n# between NVIDIA and Licensee (\"License Agreement\") or electronically\n# accepted by Licensee. Notwithstanding any terms or conditions to\n# the contrary in the License Agreement, reproduction or disclosure\n# of the Licensed Deliverables to any third party without the express\n# written consent of NVIDIA is prohibited.\n#\n# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE\n# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE\n# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS\n# PROVIDED \"AS IS\" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.\n# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED\n# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,\n# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.\n# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE\n# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY\n# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY\n# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,\n# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS\n# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE\n# OF THESE LICENSED DELIVERABLES.\n#\n# U.S. Government End Users. These Licensed Deliverables are a\n# \"commercial item\" as that term is defined at 48 C.F.R. 2.101 (OCT\n# 1995), consisting of \"commercial computer software\" and \"commercial\n# computer software documentation\" as such terms are used in 48\n# C.F.R. 
12.212 (SEPT 1995) and is provided to the U.S. Government\n# only as a commercial end item. Consistent with 48 C.F.R.12.212 and\n# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all\n# U.S. Government End Users acquire the Licensed Deliverables with\n# only those rights set forth herein.\n#\n# Any use of the Licensed Deliverables in individual and commercial\n# software must include, in the user documentation and internal\n# comments to the code, the above Disclaimer and U.S. Government End\n# Users Notice.\n#\n\nimport json\nfrom json import encoder\nimport numpy as np\nimport argparse\n\ntry:\n from flask import Flask, request, jsonify\nexcept ImportError as err:\n raise ImportError(\"\"\"ERROR: Failed to import module ({})\nPlease make sure you have Flask installed.\nFor installation instructions, see:\nhttp://flask.pocoo.org/\"\"\".format(err))\n\ntry:\n from PIL import Image\nexcept ImportError as err:\n raise ImportError(\"\"\"ERROR: Failed to import module ({})\nPlease make sure you have Pillow installed.\nFor installation instructions, see:\nhttp://pillow.readthedocs.io/en/stable/installation.html\"\"\".format(err))\n\nfrom tensorrt.lite import Engine\nfrom tensorrt.infer import LogSeverity\n\nPARSER = argparse.ArgumentParser(description=\"Example of how to create a Caffe based TensorRT Engine and run inference\")\nPARSER.add_argument('datadir', help='Path to Python TensorRT data directory (realpath)')\n\nARGS = PARSER.parse_args()\nDATA = ARGS.datadir\nLABELS = open(DATA + '/resnet50/class_labels.txt', 'r').read().split('\\n') #Get label information\n\nALLOWED_EXTENTIONS = set(['jpg', 'jpeg'])\ndef allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.',1)[1] in ALLOWED_EXTENTIONS\n\n#Covert image to CHW Numpy array (TensorRT expects CHW data)\ndef image_to_np_CHW(image): return np.asarray(image.resize((engine.input_dim[0].H(), engine.input_dim[0].W()),\n Image.ANTIALIAS)).transpose([2,0,1]).astype(engine.data_type.input_type())\n\n#Post Processing Callback, Should take a 5D Tensor, run post processing and return a single object\ndef analyze(output_data):\n #Results from the engine are returned as a list of 5D numpy arrays:\n # (Number of Batches x Batch Size x C x H x W)\n output = output_data.reshape(len(LABELS))\n\n # Get result\n top = np.argmax(output)\n top = LABELS[top]\n\n # Get top5\n top5 = np.argpartition(output, -5, axis=-1)[-5:]\n top5 = top5[np.argsort(output[top5])][::-1]\n top5_classes = []\n for i in top5:\n top5_classes.append((LABELS[i], output[i]))\n\n return [top, top5_classes]\n\n#Arguments to create lite engine\nnetwork = {\"framework\":\"tf\", #Source framework\n \"path\":DATA+\"/resnet50/resnet50-infer-5.pb\", #Path to frozen model\n \"input_nodes\":{\"input\":(3,224,224)}, #Dictionary of input nodes and their associated dimensions\n \"output_nodes\":[\"GPU_0/tower_0/Softmax\"], #List of output nodes\n \"logger_severity\":LogSeverity.INFO, #Debugging info\n \"postprocessors\":{\"GPU_0/tower_0/Softmax\":analyze}} #Postprocessor function table\n\nengine = Engine(**network)\n\n#Web service\napp = Flask(__name__)\[email protected](\"/classify\", methods=[\"POST\"])\ndef json_classify():\n if request.method == 'POST':\n img = Image.open(request.files['file'])\n #Format image to Numpy CHW and run inference, get the results of the single output node\n results = engine.infer(image_to_np_CHW(img))[0]\n #Retrive the results created by the post processor callback\n top_class_label, top5 = results[0], results[1]\n\n #Format data for JSON\n top5_str = []\n for t in top5:\n top5_str.append((t[0], str(t[1])))\n classification_data = {\"top_class\": top_class_label, 
\"top5\": top5_str}\n\n return jsonify (\n data = classification_data\n )\n\n else:\n return jsonify (\n error = \"Invalid Request Type\"\n )\n\[email protected](\"/\", methods=['GET', 'POST'])\ndef html_classify():\n if request.method == 'POST':\n file = request.files['file']\n if file and allowed_file(file.filename):\n img = Image.open(request.files['file'])\n #Format image to Numpy CHW and run inference, get the results of the single output node\n results = engine.infer(image_to_np_CHW(img))[0]\n #Retrive the results created by the post processor callback\n top_class_label, top5 = results[0], results[1]\n\n #Format data for JSON\n top5_str = \"\"\n for t in top5:\n top5_str += (\"<li>\" + t[0] + \": \" + str(t[1]) + \"</li>\")\n\n return (\"<!doctype html>\"\n \"<title> Resnet as a Service </title>\"\n \"<h1> Classifed </h1>\"\n \"<p> Looks like a \" + top_class_label + \"</p>\"\n \"<h2> Top 5 </h2>\"\n \"<ul>\"\n \"\" + top5_str + \"\"\n \"</ul>\")\n else:\n return '''Invalid Upload'''\n\n return '''\n <!doctype html>\n <title>Resnet as a Service</title>\n <h1>Upload new File</h1>\n <form method=post enctype=multipart/form-data>\n <p><input type=file name=file>\n <input type=submit value=Upload>\n </form>\n '''\n\nif __name__ == \"__main__\":\n app.run()\n", "#\n# Copyright 1993-2019 NVIDIA Corporation. All rights reserved.\n#\n# NOTICE TO LICENSEE:\n#\n# This source code and/or documentation (\"Licensed Deliverables\") are\n# subject to NVIDIA intellectual property rights under U.S. and\n# international Copyright laws.\n#\n# These Licensed Deliverables contained herein is PROPRIETARY and\n# CONFIDENTIAL to NVIDIA and is being provided under the terms and\n# conditions of a form of NVIDIA software license agreement by and\n# between NVIDIA and Licensee (\"License Agreement\") or electronically\n# accepted by Licensee. 
Notwithstanding any terms or conditions to\n# the contrary in the License Agreement, reproduction or disclosure\n# of the Licensed Deliverables to any third party without the express\n# written consent of NVIDIA is prohibited.\n#\n# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE\n# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE\n# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS\n# PROVIDED \"AS IS\" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.\n# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED\n# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,\n# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.\n# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE\n# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY\n# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY\n# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,\n# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS\n# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE\n# OF THESE LICENSED DELIVERABLES.\n#\n# U.S. Government End Users. These Licensed Deliverables are a\n# \"commercial item\" as that term is defined at 48 C.F.R. 2.101 (OCT\n# 1995), consisting of \"commercial computer software\" and \"commercial\n# computer software documentation\" as such terms are used in 48\n# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government\n# only as a commercial end item. Consistent with 48 C.F.R.12.212 and\n# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all\n# U.S. Government End Users acquire the Licensed Deliverables with\n# only those rights set forth herein.\n#\n# Any use of the Licensed Deliverables in individual and commercial\n# software must include, in the user documentation and internal\n# comments to the code, the above Disclaimer and U.S. 
Government End\n# Users Notice.\n#\n\n# VOC mAP computation, based on https://github.com/amdegroot/ssd.pytorch\nimport os\nimport sys\nimport pickle\nimport numpy as np\n\nif sys.version_info[0] == 2:\n import xml.etree.cElementTree as ET\nelse:\n import xml.etree.ElementTree as ET\n\nimport utils.voc as voc_utils\nfrom utils.paths import PATHS\n\n\ndef parse_voc_annotation_xml(voc_annotiotion_xml):\n \"\"\"Parse VOC annotation XML file.\n\n VOC image annotations are described in XML files\n shipped with VOC dataset, with one XML file per each image.\n This function reads relevant object detection data from given\n file and saves it to Python data structures.\n\n Args:\n voc_annotation_xml (str): VOC annotation XML file path\n\n Returns:\n Python list of object detections metadata.\n \"\"\"\n tree = ET.parse(voc_annotiotion_xml)\n size = tree.find('size')\n objects = []\n for obj in tree.findall('object'):\n obj_struct = {}\n obj_struct['image_width'] = size.find('width').text\n obj_struct['image_height'] = size.find('height').text\n obj_struct['name'] = obj.find('name').text\n obj_struct['pose'] = obj.find('pose').text\n obj_struct['truncated'] = int(obj.find('truncated').text)\n obj_struct['difficult'] = int(obj.find('difficult').text)\n bbox = obj.find('bndbox')\n # Coordinates in VOC XMLs are in [1, 256] format, but we use [0, 255]\n obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,\n int(bbox.find('ymin').text) - 1,\n int(bbox.find('xmax').text) - 1,\n int(bbox.find('ymax').text) - 1]\n objects.append(obj_struct)\n return objects\n\ndef get_voc_results_file_template(cls, results_dir):\n \"\"\"Fetches inference detection result file path for given class.\n\n During TensorRT/Tensorflow inference, we save class detections into\n separate files, for later mAP computation. 
This function fetches\n paths of these files.\n\n Args:\n cls (str): VOC class label\n results_dir (str): path of directory containing detection results\n\n Returns:\n str: Detection results path for given class.\n \"\"\"\n # VOCdevkit/VOC2007/results/det_test_aeroplane.txt\n filename = 'det_test_{}.txt'.format(cls)\n if not os.path.exists(results_dir):\n os.makedirs(results_dir)\n path = os.path.join(results_dir, filename)\n return path\n\ndef do_python_eval(results_dir):\n cachedir = PATHS.get_voc_annotation_cache_path()\n aps = []\n for i, cls in enumerate(voc_utils.VOC_CLASSES_LIST):\n filename = get_voc_results_file_template(cls, results_dir)\n rec, prec, ap = voc_eval(\n filename,\n PATHS.get_voc_image_set_path(),\n cls, cachedir,\n ovthresh=0.5)\n aps += [ap]\n print('AP for {} = {:.4f}'.format(cls, ap))\n print('Mean AP = {:.4f}'.format(np.mean(aps)))\n\ndef voc_ap(rec, prec):\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n return ap\n\ndef read_voc_annotations(annotations_dir, image_numbers):\n if not os.path.isdir(annotations_dir):\n os.makedirs(annotations_dir)\n annotations_file = os.path.join(annotations_dir, 'annots.pkl')\n if not os.path.isfile(annotations_file):\n # If annotations were not present, compute them\n detections = {}\n for i, image_num in enumerate(image_numbers):\n detections[image_num] = parse_voc_annotation_xml(\n PATHS.get_voc_annotation_path().format(image_num))\n if i % 100 == 0:\n print('Reading annotation for {:d}/{:d}'.format(\n i + 1, len(image_numbers)))\n # Save\n print('Saving cached annotations to {:s}'.format(annotations_file))\n with open(annotations_file, 'wb') as f:\n pickle.dump(detections, f)\n else:\n # If annotations were present, load them\n with open(annotations_file, 'rb') as f:\n detections = pickle.load(f)\n return detections\n\ndef extract_class_detetions(voc_detections, classname, image_numbers):\n class_detections = 
{}\n for image_num in image_numbers:\n R = [obj for obj in voc_detections[image_num] if obj['name'] == classname]\n image_bboxes = [x['bbox'] for x in R]\n\n # Transform VOC bboxes to make them describe pre-resized 300x300 images\n for idx, bbox in enumerate(image_bboxes):\n bbox = np.array(bbox).astype(np.float32)\n width = float(R[0]['image_width'])\n height = float(R[0]['image_height'])\n bbox[0] *= (300.0 / width)\n bbox[2] *= (300.0 / width)\n bbox[1] *= (300.0 / height)\n bbox[3] *= (300.0 / height)\n image_bboxes[idx] = bbox\n image_bboxes = np.array(image_bboxes)\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n class_detections[image_num] = {\n 'bbox': image_bboxes,\n 'difficult': difficult,\n 'det': det\n }\n\n return class_detections\n\ndef voc_eval(detpath,\n imagesetfile,\n classname,\n cachedir,\n ovthresh=0.5):\n with open(imagesetfile, 'r') as f:\n lines = f.readlines()\n image_numbers = [x.strip() for x in lines]\n\n voc_detections = read_voc_annotations(cachedir, image_numbers)\n class_detections = extract_class_detetions(voc_detections, classname,\n image_numbers)\n\n is_detection_difficult = np.concatenate(\n [class_detections[image_num]['difficult'] for image_num in image_numbers]\n )\n not_difficult_count = sum(~is_detection_difficult)\n\n # Read detections outputed by model\n detfile = detpath.format(classname)\n with open(detfile, 'r') as f:\n lines = f.readlines()\n\n if any(lines):\n splitlines = [x.strip().split(' ') for x in lines]\n image_ids = [x[0] for x in splitlines]\n confidence = np.array([float(x[1]) for x in splitlines])\n bboxes = np.array([[float(z) for z in x[2:]] for x in splitlines])\n\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n bboxes = bboxes[sorted_ind, :]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # Go down dets and mark TPs and FPs\n num_detections = len(image_ids)\n tp = np.zeros(num_detections)\n fp = 
np.zeros(num_detections)\n for detection in range(num_detections):\n R = class_detections[image_ids[detection]]\n bbox = bboxes[detection, :].astype(float)\n ovmax = -np.inf\n bbox_gt = R['bbox'].astype(float)\n if bbox_gt.size > 0:\n # compute overlaps\n # intersection\n ixmin = np.maximum(bbox_gt[:, 0], bbox[0])\n iymin = np.maximum(bbox_gt[:, 1], bbox[1])\n ixmax = np.minimum(bbox_gt[:, 2], bbox[2])\n iymax = np.minimum(bbox_gt[:, 3], bbox[3])\n iw = np.maximum(ixmax - ixmin, 0.)\n ih = np.maximum(iymax - iymin, 0.)\n inters = iw * ih\n uni = ((bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) +\n (bbox_gt[:, 2] - bbox_gt[:, 0]) *\n (bbox_gt[:, 3] - bbox_gt[:, 1]) - inters)\n overlaps = inters / uni\n ovmax = np.max(overlaps)\n jmax = np.argmax(overlaps)\n\n if ovmax > ovthresh:\n if not R['difficult'][jmax]:\n if not R['det'][jmax]:\n tp[detection] = 1.\n R['det'][jmax] = 1\n else:\n fp[detection] = 1.\n else:\n fp[detection] = 1.\n\n # Compute precision and recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(not_difficult_count)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec)\n else:\n rec = -1.\n prec = -1.\n ap = -1.\n\n return rec, prec, ap\n" ]
[ [ "numpy.argsort", "numpy.argmax", "numpy.argpartition" ], [ "numpy.maximum", "numpy.minimum", "numpy.arange", "numpy.cumsum", "numpy.sort", "numpy.finfo", "numpy.concatenate", "numpy.max", "numpy.argmax", "numpy.mean", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yecharlie/convnet3d
[ "0b2771eec149b196ef59b58d09eef71c9b201d40", "0b2771eec149b196ef59b58d09eef71c9b201d40" ]
[ "tests/utils/test_nms.py", "convnet3d/utils/transform.py" ]
[ "import numpy as np\nfrom numpy.testing import assert_array_equal\n\nfrom convnet3d.utils.nms import nmsOverlaps\nfrom convnet3d.utils.annotations import computeOverlaps\n\n\ndef test_nms_overlaps():\n boxes = np.array([\n [0, 2, 0, 2, 0, 2], # suppressed\n [0, 2, 0, 2, 0, 2],\n [2, 5, 2, 5, 4, 6],\n [1, 4, 1, 4, 4, 6]\n ])\n scores = np.array([0.5, 0.7, 0.9, 0.8])\n\n overlaps = computeOverlaps(boxes, boxes)\n actual = nmsOverlaps(overlaps, scores, threshold=0.5)\n expected = [2, 3, 1]\n assert_array_equal(actual, expected)\n", "import numpy as np\n\nDEFAULT_PRNG = np.random\n\n# def transformBbox(matrix, translate, center, box):\n#\n# '''\n# Note that in SimpleITK transform parameters are applied from output sapce to input space.\n# xi = A(Xo - C) + T + C\n# where A:linear transform matrix, C:center, T:translate, which implies:\n# xo = A^-1(xi - C - T) + C\n# '''\n# x1,x2,y1,y2,z1,z2 = box\n# points = np.array([\n# [x1, x2, x1, x2, x1, x2, x1, x2],\n# [y1, y1, y2, y2, y1, y1, y2, y2],\n# [z1, z1, z1, z1, z2, z2, z2, z2]\n# ])\n# matrix = np.array(matrix.copy())\n# translate = np.array(translate)\n# center = np.array(center)\n#\n# points -= np.expand_dims(translate + center, axis=-1)\n# inv_ma = np.linalg.inv(matrix)\n# points = inv_ma.dot(points)\n# points += np.expand_dims(center, axis=-1)\n#\n# min_corner = points.min(axis=1)\n# max_corner = points.max(axis=1)\n# print('min_corner shape',min_corner.shape)\n#\n# transformed = np.zeros(6)\n# transformed[::2] = min_corner[:]\n# transformed[1::2] = max_corner[:]\n# return transformed\n\n\ndef transformBbox(box, transform):\n '''\n Note that in SimpleITK transform parameters are applied from output sapce to input space.\n xi = A(Xo - C) + T + C\n where A:linear transform matrix, C:center, T:translate, which implies:\n xo = A^-1(xi - C - T) + C\n '''\n x1, x2, y1, y2, z1, z2 = box\n points = np.array([\n [x1, x2, x1, x2, x1, x2, x1, x2],\n [y1, y1, y2, y2, y1, y1, y2, y2],\n [z1, z1, z1, z1, z2, z2, z2, z2]\n ], 
dtype=np.float64) # double for TransformPoint\n inverse = transform.GetInverse()\n for i in range(points.shape[1]):\n points[:, i] = np.array(inverse.TransformPoint(points[:, i]))\n\n# matrix = np.array(matrix.copy())\n# translate = np.array(translate)\n# center = np.array(center)\n#\n# points -= np.expand_dims(translate + center, axis=-1)\n# inv_ma = np.linalg.inv(matrix)\n# points = inv_ma.dot(points)\n# points += np.expand_dims(center, axis=-1)\n\n min_corner = points.min(axis=1)\n max_corner = points.max(axis=1)\n# print('min_corner shape',min_corner.shape)\n\n transformed = np.zeros(6)\n transformed[::2] = min_corner[:]\n transformed[1::2] = max_corner[:]\n return transformed\n\n\ndef _randomVector(min, max, prng=DEFAULT_PRNG):\n min = np.array(min)\n max = np.array(max)\n assert min.shape == max.shape\n assert len(min.shape) == 1\n return prng.uniform(min, max)\n\n\ndef randomTranslation(min, max, prng=DEFAULT_PRNG):\n return _randomVector(min, max, prng)\n\n\ndef scaling(factor):\n return np.array([\n [factor[0], 0, 0],\n [0, factor[1], 0],\n [0, 0, factor[2]]\n ])\n\n\ndef randomScaling(min, max, prng=DEFAULT_PRNG):\n return scaling(_randomVector(min, max, prng))\n\n\ndef horizontalRotation(angle):\n return np.array([\n [np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]\n ])\n\n\ndef randomHorRotation(min, max, prng=DEFAULT_PRNG):\n return horizontalRotation(prng.uniform(min, max))\n\n\ndef randomFlip(flip_x_chance, flip_y_chance, prng=DEFAULT_PRNG):\n flip_x = prng.uniform(0, 1) < flip_x_chance\n flip_y = prng.uniform(0, 1) < flip_y_chance\n '''\n 1 - 2 * flip_x == -1 if flip_x else 1\n 1 - 2 * flip_y ...\n 1 - 2 * (flip_x ^ flip_y)) ...\n '''\n return scaling((1 - 2 * flip_x, 1 - 2 * flip_y, -2 * (flip_x ^ flip_y) + 1))\n\n\ndef randomTransform(\n min_scaling=(1, 1, 1),\n max_scaling=(1, 1, 1),\n min_horizontal_rotation=0,\n max_horizontal_rotation=0,\n flip_x_chance=0,\n flip_y_chance=0,\n min_translation=(0, 0, 0),\n 
max_translation=(0, 0, 0),\n prng=DEFAULT_PRNG\n):\n linear = np.linalg.multi_dot([\n randomScaling(min_scaling, max_scaling, prng),\n randomHorRotation(min_horizontal_rotation, max_horizontal_rotation, prng),\n randomFlip(flip_x_chance, flip_y_chance, prng)\n ])\n translation = randomTranslation(min_translation, max_translation)\n return linear, translation\n\n\ndef randomTransformGenerator(\n prng=None,\n min_scaling=(1, 1, 1),\n max_scaling=(1, 1, 1),\n min_horizontal_rotation=0,\n max_horizontal_rotation=0,\n flip_x_chance=0,\n flip_y_chance=0,\n min_translation=(0, 0, 0),\n max_translation=(0, 0, 0),\n):\n if prng is None:\n prng = np.random.RandomState()\n\n # adjust params for transform\n # Internally, parameters are set for mapping from output space to input sapce.\n min_scaling = np.array(min_scaling)\n max_scaling = np.array(max_scaling)\n min_scaling_inv = np.min([1 / min_scaling, 1 / max_scaling], axis=0)\n max_scaling_inv = np.max([1 / min_scaling, 1 / max_scaling], axis=0)\n\n min_rotation_inv = np.min([-min_horizontal_rotation, -max_horizontal_rotation], axis=0)\n max_rotation_inv = np.max([-min_horizontal_rotation, -max_horizontal_rotation], axis=0)\n\n # Note that the flip parameters are equivalent (A = ~A)\n\n min_translation = np.array(min_translation)\n max_translation = np.array(max_translation)\n min_translation_inv = np.min([-min_translation, -max_translation], axis=0)\n max_translation_inv = np.max([-min_translation, -max_translation], axis=0)\n\n while True:\n yield randomTransform(\n min_scaling = min_scaling_inv,\n max_scaling = max_scaling_inv,\n min_horizontal_rotation = min_rotation_inv,\n max_horizontal_rotation = max_rotation_inv,\n flip_x_chance = flip_x_chance,\n flip_y_chance = flip_y_chance,\n min_translation = min_translation_inv,\n max_translation = max_translation_inv,\n prng = prng\n )\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.array" ], [ "numpy.min", "numpy.cos", "numpy.sin", "numpy.max", "numpy.array", "numpy.zeros", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kcetskcaz/stylize-datasets
[ "715dc571fb01d9cec4e2a68b7f7f38f2b9a945a6" ]
[ "stylize.py" ]
[ "#!/usr/bin/env python\nimport argparse\nfrom function import adaptive_instance_normalization\nimport net\nfrom pathlib import Path\nfrom PIL import Image\nimport random\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms\nfrom torchvision.utils import save_image\nfrom tqdm import tqdm\nimport numpy as np\nfrom skimage.metrics import structural_similarity as ssim\nimport matplotlib.pyplot as plt\nimport os\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nparser = argparse.ArgumentParser(description='This script applies the AdaIN style transfer method to arbitrary datasets.')\nparser.add_argument('--content-dir', type=str,\n help='Directory path to a batch of content images')\nparser.add_argument('--style-dir', type=str,\n help='Directory path to a batch of style images')\nparser.add_argument('--output-dir', type=str, default='output',\n help='Directory to save the output images')\nparser.add_argument('--num-styles', type=int, default=1, help='Number of styles to create for each image (default: 1)')\nparser.add_argument('--alpha', type=float, default=1.0,\n help='The weight that controls the degree of stylization. 
Should be between 0 and 1')\nparser.add_argument('--extensions', nargs='+', type=str, default=['png', 'jpeg', 'jpg'], help='List of image extensions to scan style and content directory for (case sensitive), default: png, jpeg, jpg')\n\n# Advanced options\nparser.add_argument('--content-size', type=int, default=0,\n help='New (minimum) size for the content image, keeping the original size if set to 0')\nparser.add_argument('--style-size', type=int, default=512,\n help='New (minimum) size for the style image, keeping the original size if set to 0')\nparser.add_argument('--crop', type=int, default=0,\n help='If set to anything else than 0, center crop of this size will be applied to the content image after resizing in order to create a squared image (default: 0)')\nparser.add_argument('--ssim-threshold', type=float, default=0.4, help=\"SSIM threshold- images below this threshold are regenerated (Default: 0.4)\")\nparser.add_argument('--n_retries', type=int, default=20, help=\"Number of times to re-attempt stylization before taking the best image from the past N stylizations (Default: 10)\")\n# random.seed(131213)\n\ndef input_transform(size, crop):\n transform_list = []\n if size != 0:\n transform_list.append(torchvision.transforms.Resize(size))\n if crop != 0:\n transform_list.append(torchvision.transforms.CenterCrop(crop))\n transform_list.append(torchvision.transforms.ToTensor())\n transform = torchvision.transforms.Compose(transform_list)\n return transform\n\ndef style_transfer(vgg, decoder, content, style, alpha=1.0):\n assert (0.0 <= alpha <= 1.0)\n content_f = vgg(content)\n style_f = vgg(style)\n feat = adaptive_instance_normalization(content_f, style_f)\n feat = feat * alpha + content_f * (1 - alpha)\n return decoder(feat)\n\ndef main():\n args = parser.parse_args()\n\n print(f'=> Using SSIM Threshold {args.ssim_threshold}')\n # set content and style directories\n content_dir = Path(args.content_dir)\n style_dir = Path(args.style_dir)\n style_dir = 
style_dir.resolve()\n output_dir = Path(args.output_dir)\n output_dir = output_dir.resolve()\n assert style_dir.is_dir(), 'Style directory not found'\n\n # collect content files\n extensions = args.extensions\n assert len(extensions) > 0, 'No file extensions specified'\n content_dir = Path(content_dir)\n content_dir = content_dir.resolve()\n assert content_dir.is_dir(), 'Content directory not found'\n dataset = []\n for ext in extensions:\n dataset += list(content_dir.rglob('*.' + ext))\n\n assert len(dataset) > 0, 'No images with specified extensions found in content directory' + content_dir\n content_paths = sorted(dataset)\n print('Found %d content images in %s' % (len(content_paths), content_dir))\n\n # collect style files\n styles = []\n for ext in extensions:\n styles += list(style_dir.rglob('*.' + ext))\n\n assert len(styles) > 0, 'No images with specified extensions found in style directory' + style_dir\n styles = sorted(styles)\n print('Found %d style images in %s' % (len(styles), style_dir))\n\n decoder = net.decoder\n vgg = net.vgg\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n decoder.eval()\n vgg.eval()\n\n decoder.load_state_dict(torch.load('models/decoder.pth'))\n vgg.load_state_dict(torch.load('models/vgg_normalised.pth'))\n vgg = nn.Sequential(*list(vgg.children())[:31])\n\n vgg.to(device)\n decoder.to(device)\n\n content_tf = input_transform(args.content_size, args.crop)\n style_tf = input_transform(args.style_size, 0)\n\n\n # disable decompression bomb errors\n Image.MAX_IMAGE_PIXELS = None\n skipped_imgs = []\n num_written = 0\n # actual style transfer as in AdaIN\n with tqdm(total=len(content_paths)) as pbar:\n for content_path in content_paths:\n try:\n content_img = Image.open(content_path).convert('RGB')\n for style_path in random.sample(styles, args.num_styles):\n content = content_tf(content_img)\n\n style_img = Image.open(style_path).convert('RGB')\n style = style_tf(style_img)\n style = 
style.to(device).unsqueeze(0)\n content = content.to(device).unsqueeze(0)\n\n # Loop until stylized image is above ssim_thresh or N times, whichever comes first\n n_retries = 0\n curr_ssim = 0\n outputs = []\n output_ssims = []\n while curr_ssim < args.ssim_threshold and n_retries < args.n_retries:\n if n_retries > 0:\n style_path = random.sample(styles, 1)[0]\n style_img = Image.open(style_path).convert('RGB')\n style = style_tf(style_img)\n style = style.to(device).unsqueeze(0)\n\n with torch.no_grad():\n output = style_transfer(vgg, decoder, content, style,\n args.alpha)\n output = output.cpu()\n # Get the source image as numpy\n np_content = np.array(content_img)\n # Take the output image and resize so the dimensions match the content\n np_output = output[0, :]\n # Make sure the shape is (W, H, C)\n np_output = np_output.transpose(0, 2).transpose(0, 1).numpy()\n np_output = Image.fromarray(np.uint8(np_output * 255))\n np_output = np.array(np_output.resize(np_content.shape[:2][::-1]))\n\n # Compute the ssim between the content and the output\n curr_ssim = ssim(np_content, np_output, data_range=np_output.max() - np_output.min(),\n multichannel=True)\n # Store the output and the current ssim\n outputs.append(output)\n output_ssims.append(curr_ssim)\n n_retries += 1\n\n # if (len(output_ssims) < args.n_retries and len(output_ssims) > 5) and num_written < 5:\n # worst = np.array(output_ssims).argmin()\n # base_name = os.path.basename(str(content_path)).split('.')[0]\n # save_image(outputs[worst], f'/media/zsteck/storage/lowshot/experiments/synthetic/{base_name}_rejected.png',\n # padding=0)\n # save_image(output, f'/media/zsteck/storage/lowshot/experiments/synthetic/{base_name}_accepted.png', padding=0)\n # save_image(content.cpu(), f'/media/zsteck/storage/lowshot/experiments/synthetic/{base_name}.png',\n # padding=0)\n # num_written += 1\n\n # If the last ssim val is less than the threshold, select the output image (this needs to be assigned to the output 
variable\n if curr_ssim < args.ssim_threshold:\n output_ssims = np.array(output_ssims)\n best_idx = output_ssims.argmax()\n output = outputs[best_idx]\n print(\n f'=> No image passed threshold after {n_retries}. Taking best image with {output_ssims[best_idx]} SSIM value')\n\n rel_path = content_path.relative_to(content_dir)\n out_dir = output_dir.joinpath(rel_path.parent)\n\n # create directory structure if it does not exist\n if not out_dir.is_dir():\n out_dir.mkdir(parents=True)\n\n content_name = content_path.stem\n style_name = style_path.stem\n out_filename = content_name + '-stylized-' + style_name + content_path.suffix\n output_name = out_dir.joinpath(out_filename)\n\n save_image(output, output_name, padding=0) # default image padding is 2.\n style_img.close()\n content_img.close()\n except Exception as e:\n print(e)\n print('Skipping stylization of %s due to an error' %(content_path))\n skipped_imgs.append(content_path)\n continue\n finally:\n pbar.update(1)\n \n if(len(skipped_imgs) > 0):\n with open(output_dir.joinpath('skipped_imgs.txt'), 'w') as f:\n for item in skipped_imgs:\n f.write(\"%s\\n\" % item)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.load", "numpy.uint8", "torch.no_grad", "torch.cuda.is_available", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JinHai-CN/cudf
[ "fd17f2d4cabe86e11e7f172b5b5903bdd5604d81", "fd17f2d4cabe86e11e7f172b5b5903bdd5604d81" ]
[ "python/cudf/_gdf.py", "python/cudf/tests/test_replace.py" ]
[ "# Copyright (c) 2018, NVIDIA CORPORATION.\n\n\"\"\"\nThis file provide binding to the libgdf library.\n\"\"\"\nimport contextlib\nimport itertools\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\n\nfrom libgdf_cffi import ffi, libgdf\nfrom librmm_cffi import librmm as rmm\nimport nvcategory\n\nfrom cudf.utils import cudautils\nfrom cudf.utils.utils import calc_chunk_size, mask_dtype, mask_bitsize\n\n\ndef unwrap_devary(devary):\n ptrval = devary.device_ctypes_pointer.value\n ptrval = ptrval or ffi.NULL # replace None with NULL\n return ffi.cast('void*', ptrval)\n\n\ndef unwrap_mask(devary):\n ptrval = devary.device_ctypes_pointer.value\n ptrval = ptrval or ffi.NULL # replace None with NULL\n return ffi.cast('gdf_valid_type*', ptrval), ptrval\n\n\ndef columnview_from_devary(devary, dtype=None):\n return _columnview(size=devary.size, data=unwrap_devary(devary),\n mask=ffi.NULL, dtype=dtype or devary.dtype,\n null_count=0, nvcat=None)\n\n\ndef _columnview(size, data, mask, dtype, null_count, nvcat):\n colview = ffi.new('gdf_column*')\n extra_dtype_info = ffi.new('gdf_dtype_extra_info*')\n extra_dtype_info.time_unit = libgdf.TIME_UNIT_NONE\n if nvcat is not None:\n extra_dtype_info.category = ffi.cast('void*', nvcat.get_cpointer())\n else:\n extra_dtype_info.category = ffi.NULL\n\n if mask is None:\n null_count = 0\n mask = ffi.NULL\n\n libgdf.gdf_column_view_augmented(\n colview,\n data,\n mask,\n size,\n np_to_gdf_dtype(dtype),\n null_count,\n extra_dtype_info[0],\n )\n\n return colview\n\n\ndef columnview(size, data, mask=None, dtype=None, null_count=None,\n nvcat=None):\n \"\"\"\n Make a column view.\n\n Parameters\n ----------\n size : int\n Data count.\n data : Buffer\n The data buffer.\n mask : Buffer; optional\n The mask buffer.\n dtype : numpy.dtype; optional\n The dtype of the data. 
Defaults to *data.dtype*.\n \"\"\"\n def unwrap(buffer):\n if buffer is None:\n return ffi.NULL\n assert buffer.mem.is_c_contiguous(), \"libGDF expects contiguous memory\"\n devary = buffer.to_gpu_array()\n return unwrap_devary(devary)\n\n if mask is not None:\n assert null_count is not None\n\n dtype = dtype or data.dtype\n if pd.api.types.is_categorical_dtype(dtype):\n dtype = data.dtype\n\n return _columnview(size=size, data=unwrap(data), mask=unwrap(mask),\n dtype=dtype, null_count=null_count, nvcat=nvcat)\n\n\ndef apply_binaryop(binop, lhs, rhs, out):\n \"\"\"Apply binary operator *binop* to operands *lhs* and *rhs*.\n The result is stored to *out*.\n\n Returns the number of null values.\n \"\"\"\n args = (lhs.cffi_view, rhs.cffi_view, out.cffi_view)\n # apply binary operator\n binop(*args)\n # validity mask\n if out.has_null_mask:\n return apply_mask_and(lhs, rhs, out)\n else:\n return 0\n\n\ndef apply_unaryop(unaop, inp, out):\n \"\"\"Apply unary operator *unaop* to *inp* and store to *out*.\n\n \"\"\"\n args = (inp.cffi_view, out.cffi_view)\n # apply unary operator\n unaop(*args)\n\n\ndef apply_mask_and(col, mask, out):\n args = (col.cffi_view, mask.cffi_view, out.cffi_view)\n libgdf.gdf_validity_and(*args)\n nnz = count_nonzero_mask(out.mask.mem, size=len(out))\n return len(out) - nnz\n\n\nnp_gdf_dict = {\n np.float64: libgdf.GDF_FLOAT64,\n np.float32: libgdf.GDF_FLOAT32,\n np.int64: libgdf.GDF_INT64,\n np.int32: libgdf.GDF_INT32,\n np.int16: libgdf.GDF_INT16,\n np.int8: libgdf.GDF_INT8,\n np.bool_: libgdf.GDF_INT8,\n np.datetime64: libgdf.GDF_DATE64,\n np.object_: libgdf.GDF_STRING_CATEGORY,\n np.str_: libgdf.GDF_STRING_CATEGORY,\n }\n\n\ndef np_to_gdf_dtype(dtype):\n \"\"\"Util to convert numpy dtype to gdf dtype.\n \"\"\"\n return np_gdf_dict[np.dtype(dtype).type]\n\n\ndef gdf_to_np_dtype(dtype):\n \"\"\"Util to convert gdf dtype to numpy dtype.\n \"\"\"\n return np.dtype({\n libgdf.GDF_FLOAT64: np.float64,\n libgdf.GDF_FLOAT32: np.float32,\n 
libgdf.GDF_INT64: np.int64,\n libgdf.GDF_INT32: np.int32,\n libgdf.GDF_INT16: np.int16,\n libgdf.GDF_INT8: np.int8,\n libgdf.GDF_DATE64: np.datetime64,\n libgdf.N_GDF_TYPES: np.int32,\n libgdf.GDF_CATEGORY: np.int32,\n libgdf.GDF_STRING_CATEGORY: np.object_,\n }[dtype])\n\n\ndef np_to_pa_dtype(dtype):\n \"\"\"Util to convert numpy dtype to PyArrow dtype.\n \"\"\"\n return {\n np.float64: pa.float64(),\n np.float32: pa.float32(),\n np.int64: pa.int64(),\n np.int32: pa.int32(),\n np.int16: pa.int16(),\n np.int8: pa.int8(),\n np.bool_: pa.int8(),\n np.datetime64: pa.date64(),\n np.object_: pa.string(),\n np.str_: pa.string(),\n }[np.dtype(dtype).type]\n\n\ndef apply_reduce(fn, inp):\n # allocate output+temp array\n outsz = libgdf.gdf_reduction_get_intermediate_output_size()\n out = rmm.device_array(outsz, dtype=inp.dtype)\n # call reduction\n fn(inp.cffi_view, unwrap_devary(out), outsz)\n # return 1st element\n return out[0]\n\n\n_join_how_api = {\n 'inner': libgdf.gdf_inner_join,\n 'outer': libgdf.gdf_full_join,\n 'left': libgdf.gdf_left_join,\n}\n\n_join_method_api = {\n 'sort': libgdf.GDF_SORT,\n 'hash': libgdf.GDF_HASH\n}\n\n\ndef cffi_view_to_column_mem(cffi_view):\n gdf_dtype = cffi_view.dtype\n if gdf_dtype == libgdf.GDF_STRING_CATEGORY:\n data_ptr = int(ffi.cast(\"uintptr_t\", cffi_view.data))\n # We need to create this just to make sure the memory is properly freed\n data = rmm.device_array_from_ptr(\n data_ptr,\n nelem=cffi_view.size,\n dtype='int32',\n finalizer=rmm._make_finalizer(data_ptr, 0)\n )\n nvcat_ptr = int(ffi.cast(\"uintptr_t\", cffi_view.dtype_info.category))\n nvcat_obj = nvcategory.bind_cpointer(nvcat_ptr)\n nvstr_obj = nvcat_obj.to_strings()\n mask = None\n if cffi_view.valid:\n mask_ptr = int(ffi.cast(\"uintptr_t\", cffi_view.valid))\n mask = rmm.device_array_from_ptr(\n mask_ptr,\n nelem=calc_chunk_size(cffi_view.size, mask_bitsize),\n dtype=mask_dtype,\n finalizer=rmm._make_finalizer(mask_ptr, 0)\n )\n return nvstr_obj, mask\n else:\n 
intaddr = int(ffi.cast(\"uintptr_t\", cffi_view.data))\n data = rmm.device_array_from_ptr(\n intaddr,\n nelem=cffi_view.size,\n dtype=gdf_to_np_dtype(cffi_view.dtype),\n finalizer=rmm._make_finalizer(intaddr, 0)\n )\n mask = None\n if cffi_view.valid:\n intaddr = int(ffi.cast(\"uintptr_t\", cffi_view.valid))\n mask = rmm.device_array_from_ptr(\n intaddr,\n nelem=calc_chunk_size(cffi_view.size, mask_bitsize),\n dtype=mask_dtype,\n finalizer=rmm._make_finalizer(intaddr, 0)\n )\n\n return data, mask\n\n\[email protected]\ndef apply_join(col_lhs, col_rhs, how, method='hash'):\n \"\"\"Returns a tuple of the left and right joined indices as gpu arrays.\n \"\"\"\n if(len(col_lhs) != len(col_rhs)):\n msg = \"Unequal #columns in list 'col_lhs' and list 'col_rhs'\"\n raise ValueError(msg)\n\n joiner = _join_how_api[how]\n method_api = _join_method_api[method]\n gdf_context = ffi.new('gdf_context*')\n\n if method == 'hash':\n libgdf.gdf_context_view(gdf_context, 0, method_api, 0, 0, 0)\n elif method == 'sort':\n libgdf.gdf_context_view(gdf_context, 1, method_api, 0, 0, 0)\n else:\n msg = \"method not supported\"\n raise ValueError(msg)\n\n col_result_l = columnview(0, None, dtype=np.int32)\n col_result_r = columnview(0, None, dtype=np.int32)\n\n if(how in ['left', 'inner']):\n list_lhs = []\n list_rhs = []\n for i in range(len(col_lhs)):\n list_lhs.append(col_lhs[i].cffi_view)\n list_rhs.append(col_rhs[i].cffi_view)\n\n # Call libgdf\n\n joiner(len(col_lhs), list_lhs, list_rhs, col_result_l,\n col_result_r, gdf_context)\n else:\n joiner(col_lhs[0].cffi_view, col_rhs[0].cffi_view, col_result_l,\n col_result_r)\n\n # Extract result\n\n left = rmm.device_array_from_ptr(ptr=col_result_l.data,\n nelem=col_result_l.size,\n dtype=np.int32)\n\n right = rmm.device_array_from_ptr(ptr=col_result_r.data,\n nelem=col_result_r.size,\n dtype=np.int32)\n\n yield(left, right)\n\n libgdf.gdf_column_free(col_result_l)\n libgdf.gdf_column_free(col_result_r)\n\n\ndef apply_prefixsum(col_inp, 
col_out, inclusive):\n libgdf.gdf_prefixsum(col_inp, col_out, inclusive)\n\n\ndef apply_segsort(col_keys, col_vals, segments, descending=False,\n plan=None):\n \"\"\"Inplace segemented sort\n\n Parameters\n ----------\n col_keys : Column\n col_vals : Column\n segments : device array\n \"\"\"\n # prepare\n nelem = len(col_keys)\n if nelem == segments.size:\n # As many seguments as there are elements.\n # Nothing to do.\n return\n\n if plan is None:\n plan = SegmentedRadixortPlan(nelem, col_keys.dtype, col_vals.dtype,\n descending=descending)\n\n plan.sort(segments, col_keys, col_vals)\n return plan\n\n\nclass SegmentedRadixortPlan(object):\n def __init__(self, nelem, key_dtype, val_dtype, descending=False):\n begin_bit = 0\n self.sizeof_key = key_dtype.itemsize\n self.sizeof_val = val_dtype.itemsize\n end_bit = self.sizeof_key * 8\n plan = libgdf.gdf_segmented_radixsort_plan(nelem, descending,\n begin_bit, end_bit)\n self.plan = plan\n self.nelem = nelem\n self.is_closed = False\n self.setup()\n\n def __del__(self):\n if not self.is_closed:\n self.close()\n\n def close(self):\n libgdf.gdf_segmented_radixsort_plan_free(self.plan)\n self.is_closed = True\n self.plan = None\n\n def setup(self):\n libgdf.gdf_segmented_radixsort_plan_setup(self.plan, self.sizeof_key,\n self.sizeof_val)\n\n def sort(self, segments, col_keys, col_vals):\n seg_dtype = np.uint32\n segsize_limit = 2 ** 16 - 1\n\n d_fullsegs = rmm.device_array(segments.size + 1, dtype=seg_dtype)\n d_begins = d_fullsegs[:-1]\n d_ends = d_fullsegs[1:]\n\n # Note: .astype is required below because .copy_to_device\n # is just a plain memcpy\n d_begins.copy_to_device(cudautils.astype(segments, dtype=seg_dtype))\n d_ends[-1:].copy_to_device(np.require([self.nelem], dtype=seg_dtype))\n\n # The following is to handle the segument size limit due to\n # max CUDA grid size.\n range0 = range(0, segments.size, segsize_limit)\n range1 = itertools.chain(range0[1:], [segments.size])\n for s, e in zip(range0, range1):\n 
segsize = e - s\n libgdf.gdf_segmented_radixsort(self.plan,\n col_keys.cffi_view,\n col_vals.cffi_view,\n segsize,\n unwrap_devary(d_begins[s:]),\n unwrap_devary(d_ends[s:]))\n\n\ndef hash_columns(columns, result, initial_hash_values=None):\n \"\"\"Hash the *columns* and store in *result*.\n Returns *result*\n \"\"\"\n assert len(columns) > 0\n assert result.dtype == np.int32\n # No-op for 0-sized\n if len(result) == 0:\n return result\n col_input = [col.cffi_view for col in columns]\n col_out = result.cffi_view\n ncols = len(col_input)\n hashfn = libgdf.GDF_HASH_MURMUR3\n if initial_hash_values is None:\n initial_hash_values = ffi.NULL\n else:\n initial_hash_values = unwrap_devary(initial_hash_values)\n libgdf.gdf_hash(ncols, col_input, hashfn, initial_hash_values, col_out)\n return result\n\n\ndef hash_partition(input_columns, key_indices, nparts, output_columns):\n \"\"\"Partition the input_columns by the hash values on the keys.\n\n Parameters\n ----------\n input_columns : sequence of Column\n key_indices : sequence of int\n Indices into `input_columns` that indicates the key columns.\n nparts : int\n number of partitions\n\n Returns\n -------\n partition_offsets : list of int\n Each index indicates the start of a partition.\n \"\"\"\n assert len(input_columns) == len(output_columns)\n\n col_inputs = [col.cffi_view for col in input_columns]\n col_outputs = [col.cffi_view for col in output_columns]\n offsets = ffi.new('int[]', nparts)\n hashfn = libgdf.GDF_HASH_MURMUR3\n\n libgdf.gdf_hash_partition(\n len(col_inputs),\n col_inputs,\n key_indices,\n len(key_indices),\n nparts,\n col_outputs,\n offsets,\n hashfn\n )\n\n offsets = list(offsets)\n return offsets\n\n\ndef _column_concat(cols_to_concat, output_col):\n col_inputs = [col.cffi_view for col in cols_to_concat]\n libgdf.gdf_column_concat(output_col.cffi_view, col_inputs, len(col_inputs))\n return output_col\n\n\ndef count_nonzero_mask(mask, size):\n assert mask.size * mask_bitsize >= size\n nnz = 
ffi.new('int*')\n nnz[0] = 0\n mask_ptr, addr = unwrap_mask(mask)\n\n if addr != ffi.NULL:\n libgdf.gdf_count_nonzero_mask(mask_ptr, size, nnz)\n\n return nnz[0]\n\n\n_GDF_COLORS = {\n 'green': libgdf.GDF_GREEN,\n 'blue': libgdf.GDF_BLUE,\n 'yellow': libgdf.GDF_YELLOW,\n 'purple': libgdf.GDF_PURPLE,\n 'cyan': libgdf.GDF_CYAN,\n 'red': libgdf.GDF_RED,\n 'white': libgdf.GDF_WHITE,\n 'darkgreen': libgdf.GDF_DARK_GREEN,\n 'orange': libgdf.GDF_ORANGE,\n}\n\n\ndef str_to_gdf_color(s):\n \"\"\"Util to convert str to gdf_color type.\n \"\"\"\n return _GDF_COLORS[s.lower()]\n\n\ndef nvtx_range_push(name, color='green'):\n \"\"\"\n Demarcate the beginning of a user-defined NVTX range.\n\n Parameters\n ----------\n name : str\n The name of the NVTX range\n color : str\n The color to use for the range.\n Can be named color or hex RGB string.\n \"\"\"\n name_c = ffi.new(\"char[]\", name.encode('ascii'))\n\n try:\n color = int(color, 16) # only works if color is a hex string\n libgdf.gdf_nvtx_range_push_hex(name_c, ffi.cast('unsigned int', color))\n except ValueError:\n color = str_to_gdf_color(color)\n libgdf.gdf_nvtx_range_push(name_c, color)\n\n\ndef nvtx_range_pop():\n \"\"\" Demarcate the end of the inner-most range.\n \"\"\"\n libgdf.gdf_nvtx_range_pop()\n\n\ndef rmm_initialize():\n rmm.initialize()\n return True\n\n\ndef rmm_finalize():\n rmm.finalize()\n return True\n\n\n_GDF_QUANTILE_METHODS = {\n 'linear': libgdf.GDF_QUANT_LINEAR,\n 'lower': libgdf.GDF_QUANT_LOWER,\n 'higher': libgdf.GDF_QUANT_HIGHER,\n 'midpoint': libgdf.GDF_QUANT_MIDPOINT,\n 'nearest': libgdf.GDF_QUANT_NEAREST,\n}\n\n\ndef get_quantile_method(method):\n \"\"\"Util to convert method to gdf gdf_quantile_method.\n \"\"\"\n return _GDF_QUANTILE_METHODS[method]\n\n\ndef quantile(column, quant, method, exact):\n \"\"\" Calculate the `quant` quantile for the column\n Returns value with the quantile specified by quant\n \"\"\"\n gdf_context = ffi.new('gdf_context*')\n method_api = _join_method_api['sort']\n 
libgdf.gdf_context_view(gdf_context, 0, method_api, 0, 0, 0)\n # libgdf.gdf_context_view(gdf_context, 0, method_api, 0)\n # px = ffi.new(\"double *\")\n res = []\n for q in quant:\n px = ffi.new(\"double *\")\n if exact:\n libgdf.gdf_quantile_exact(column.cffi_view,\n get_quantile_method(method),\n q,\n ffi.cast('void *', px),\n gdf_context)\n else:\n libgdf.gdf_quantile_approx(column.cffi_view,\n q,\n ffi.cast('void *', px),\n gdf_context)\n res.append(px[0])\n return res\n", "import pytest\n\nimport numpy as np\nimport pandas as pd\n\nfrom cudf.dataframe import Series, DataFrame\nfrom cudf.tests.utils import assert_eq\n\n\ndef test_series_replace():\n a1 = np.array([0, 1, 2, 3, 4])\n\n # Numerical\n a2 = np.array([5, 1, 2, 3, 4])\n sr1 = Series(a1)\n sr2 = sr1.replace(0, 5)\n np.testing.assert_equal(sr2.to_array(), a2)\n\n # Categorical\n psr3 = pd.Series([\"one\", \"two\", \"three\"], dtype='category')\n psr4 = psr3.replace(\"one\", \"two\")\n sr3 = Series.from_pandas(psr3)\n sr4 = sr3.replace(\"one\", \"two\")\n pd.testing.assert_series_equal(sr4.to_pandas(), psr4)\n\n # List input\n a6 = np.array([5, 6, 2, 3, 4])\n sr6 = sr1.replace([0, 1], [5, 6])\n np.testing.assert_equal(sr6.to_array(), a6)\n\n a7 = np.array([5.5, 6.5, 2, 3, 4])\n sr7 = sr1.replace([0, 1], [5.5, 6.5])\n np.testing.assert_equal(sr7.to_array(), a7)\n\n # Series input\n a8 = np.array([5, 5, 5, 3, 4])\n sr8 = sr1.replace(sr1[:3], 5)\n np.testing.assert_equal(sr8.to_array(), a8)\n\n\ndef test_dataframe_replace():\n # numerical\n pdf1 = pd.DataFrame({'a': [0, 1, 2, 3], 'b': [0, 1, 2, 3]})\n gdf1 = DataFrame.from_pandas(pdf1)\n pdf2 = pdf1.replace(0, 4)\n gdf2 = gdf1.replace(0, 4)\n pd.testing.assert_frame_equal(gdf2.to_pandas(), pdf2)\n\n # categorical\n pdf4 = pd.DataFrame({'a': ['one', 'two', 'three'],\n 'b': ['one', 'two', 'three']}, dtype='category')\n gdf4 = DataFrame.from_pandas(pdf4)\n pdf5 = pdf4.replace('two', 'three')\n gdf5 = gdf4.replace('two', 'three')\n 
pd.testing.assert_frame_equal(gdf5.to_pandas(), pdf5)\n\n # list input\n pdf6 = pdf1.replace([0, 1], [4, 5])\n gdf6 = gdf1.replace([0, 1], [4, 5])\n pd.testing.assert_frame_equal(gdf6.to_pandas(), pdf6)\n\n pdf7 = pdf1.replace([0, 1], 4)\n gdf7 = gdf1.replace([0, 1], 4)\n pd.testing.assert_frame_equal(gdf7.to_pandas(), pdf7)\n\n # dict input:\n pdf8 = pdf1.replace({'a': 0, 'b': 0}, {'a': 4, 'b': 5})\n gdf8 = gdf1.replace({'a': 0, 'b': 0}, {'a': 4, 'b': 5})\n pd.testing.assert_frame_equal(gdf8.to_pandas(), pdf8)\n\n pdf9 = pdf1.replace({'a': 0}, {'a': 4})\n gdf9 = gdf1.replace({'a': 0}, {'a': 4})\n pd.testing.assert_frame_equal(gdf9.to_pandas(), pdf9)\n\n\[email protected]('data_dtype', ['int8', 'int16', 'int32', 'int64',\n 'float32', 'float64'])\[email protected]('fill_dtype', ['int8', 'int16', 'int32', 'int64',\n 'float32', 'float64'])\[email protected](\n 'fill_type',\n ['scalar', 'series'])\[email protected](\n 'null_value',\n [None, np.nan])\[email protected](\n 'inplace',\n [True, False])\ndef test_series_fillna_numerical(data_dtype, fill_dtype,\n fill_type, null_value, inplace):\n # TODO: These tests should use Pandas' nullable int type\n # when we support a recent enough version of Pandas\n # https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html\n\n if fill_type == 'scalar':\n fill_value = np.random.randint(0, 5)\n expect = np.array(\n [0, 1, fill_value, 2, fill_value],\n dtype=data_dtype)\n elif fill_type == 'series':\n data = np.random.randint(0, 5, (5,))\n fill_value = pd.Series(data, dtype=data_dtype)\n expect = np.array(\n [0, 1, fill_value[2], 2, fill_value[4]],\n dtype=data_dtype)\n\n sr = Series([0, 1, null_value, 2, null_value], dtype=data_dtype)\n result = sr.fillna(fill_value, inplace=inplace)\n\n if inplace:\n result = sr\n\n got = result.to_array()\n\n np.testing.assert_equal(expect, got)\n\n\[email protected](\n 'fill_type',\n ['scalar', 'series'])\[email protected](\n 'null_value',\n [None, np.nan])\[email protected](\n 
'inplace',\n [True, False])\ndef test_fillna_categorical(fill_type, null_value, inplace):\n data = pd.Series(['a', 'b', 'a', null_value, 'c', null_value],\n dtype='category')\n sr = Series.from_pandas(data)\n\n if fill_type == 'scalar':\n fill_value = 'c'\n expect = pd.Series(['a', 'b', 'a', 'c', 'c', 'c'],\n dtype='category')\n elif fill_type == 'series':\n fill_value = pd.Series(['c', 'c', 'c', 'c', 'c', 'a'],\n dtype='category')\n expect = pd.Series(['a', 'b', 'a', 'c', 'c', 'a'],\n dtype='category')\n\n got = sr.fillna(fill_value, inplace=inplace)\n\n if inplace:\n got = sr\n\n assert_eq(expect, got)\n\n\[email protected](\n 'fill_type',\n ['scalar', 'series'])\[email protected](\n 'inplace',\n [True, False])\ndef test_fillna_datetime(fill_type, inplace):\n psr = pd.Series(pd.date_range('2010-01-01', '2020-01-10', freq='1y'))\n\n if fill_type == 'scalar':\n fill_value = pd.Timestamp('2010-01-02')\n elif fill_type == 'series':\n fill_value = psr + pd.Timedelta('1d')\n\n psr[[5, 9]] = None\n sr = Series.from_pandas(psr)\n\n expect = psr.fillna(fill_value)\n got = sr.fillna(fill_value, inplace=inplace)\n\n if inplace:\n got = sr\n\n assert_eq(expect, got)\n\n\[email protected](\n 'fill_type',\n ['scalar', 'series', 'dict'])\[email protected](\n 'inplace',\n [True, False])\ndef test_fillna_dataframe(fill_type, inplace):\n pdf = pd.DataFrame({'a': [1, 2, None], 'b': [None, None, 5]})\n gdf = DataFrame.from_pandas(pdf)\n\n if fill_type == 'scalar':\n fill_value = 5\n elif fill_type == 'series':\n fill_value = Series([3, 4, 5])\n else:\n fill_value = {'a': 5, 'b': Series([3, 4, 5])}\n\n expect = pdf.fillna(fill_value)\n got = gdf.fillna(fill_value, inplace=inplace)\n\n if inplace:\n got = gdf\n\n assert_eq(expect, got)\n\n\[email protected](\n 'data_dtype',\n ['int8', 'int16', 'int32', 'int64'])\ndef test_series_fillna_invalid_dtype(data_dtype):\n gdf = Series([1, 2, None, 3], dtype=data_dtype)\n fill_value = 2.5\n with pytest.raises(TypeError) as raises:\n 
gdf.fillna(fill_value)\n raises.match(\"Cannot safely cast non-equivalent {} to {}\".format(\n np.dtype(type(fill_value)).type.__name__,\n gdf.dtype.type.__name__\n ))\n" ]
[ [ "pandas.api.types.is_categorical_dtype", "numpy.require", "numpy.dtype" ], [ "numpy.testing.assert_equal", "pandas.Series", "pandas.Timestamp", "pandas.DataFrame", "pandas.Timedelta", "pandas.date_range", "numpy.array", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
shub659/StressNet-Detecting-stress-from-thermal-videos
[ "89a06014ba2c456482d1d427cbac0171e477492a" ]
[ "isti_predictor/visualizer/visualizer.py" ]
[ "import numpy as np\nimport cv2\nimport torch\nimport os\nimport sys\nimport random\nimport torch.nn as nn\nimport torch.utils.data as tdata\nimport glob\nfrom matplotlib import pyplot as plt\n\nsys.path.append(\".\")\nfrom visualizer_dataloader import thermaldataset\n\n\ndef visualize(data_sample):\n\tdata = data_sample['data']\n\tlabel = data_sample['label']\n\tfeetin_frame = data_sample['feetin_frame']\n\tfeetin_ts = data_sample['feetin_ts']\n\tvid_fname = data_sample['filename']\n\tprint(vid_fname[0])\n\tfname = f'../vid_data/{vid_fname[0]}.avi'\n\t_, d, h, w = data.shape\n\tout = cv2.VideoWriter(fname, cv2.VideoWriter_fourcc(*'DIVX'), 15, (w,h), isColor=True)\n\tprint(data.numpy().shape, label.shape, feetin_frame, feetin_ts)\n\tf_count = 0\n\tfor i in range(d):\n\t\tvid_i = data[0][i].numpy()\n\t\tecg_i = label[0][i].numpy().flatten()\n\t\tfig, ax = plt.subplots(figsize=(2.4, 1.6))\n\t\tax.plot(ecg_i)\n\t\tfig.canvas.draw()\n\t\tnp_plot = np.array(fig.canvas.renderer.buffer_rgba())\n\t\tvid_i = cv2.cvtColor(vid_i, cv2.COLOR_GRAY2BGR)\n\t\t#np_plot = cv2.cvtColor(np_plot, cv2.CV_BGRA2HSV)\n\t\t#print(\"shape of plot and img\", np_plot.shape, vid_i.shape)\n\t\tvid_i[0:160,:,:] = np_plot[:,:,0:3]\n\t\tif(i == feetin_frame-4): f_count = 15\n\t\tif(f_count>0):\n\t\t\tcv2.putText(vid_i, 'FeetIn Water', (160,120), cv2.FONT_HERSHEY_SIMPLEX, 4, (0, 0, 255) ,\\\n\t\t\t\t\t\t2, cv2.LINE_AA)\n\t\t\tf_count = f_count-1\n\t\tplt.close()\n\t\tout.write(vid_i)\n\tout.release()\n\treturn\n\n\t\n#file usage : python visualizer.py ../data/test_label ../data/mat_files ../data/sync_data \n\nif __name__=='__main__':\n\tlabel_name = sys.argv[1]\n\tir_vid_name = sys.argv[2]\n\tsync_sig_name = sys.argv[3]\n\t\n\tprint(label_name, ir_vid_name)\n\tlabel = \"{}/\".format(label_name)\n\tir_video = \"{}/\".format(ir_vid_name)\n\tprint(label, ir_video)\n\t\n\t\n\tvisualize_dataset = thermaldataset(\n label = \"{}/\".format(label_name), \n ir_video = \"{}/\".format(ir_vid_name), \n 
sync_sig = \"{}/\".format(sync_sig_name), \n phase='train'\n )\n\n\ttrainloader = torch.utils.data.DataLoader(visualize_dataset,batch_size=1,shuffle=True,num_workers=1)\n\tfor data_sample in trainloader:\n\t\ttry:\n\t\t\tif(data_sample == -1):\n\t\t\t\tprint(\"Value -1 returned\")\n\t\t\t\tcontinue\n\t\texcept:\n\t\t\tpass\n\t\tvisualize(data_sample)\n" ]
[ [ "torch.utils.data.DataLoader", "matplotlib.pyplot.subplots", "matplotlib.pyplot.close" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
knaidoo29/magpie
[ "efab3c2666aab2c928ca12a631758bc1b43c149c", "efab3c2666aab2c928ca12a631758bc1b43c149c" ]
[ "magpie/randoms/polar.py", "magpie/randoms/usphere.py" ]
[ "import numpy as np\n\n\ndef randoms_polar(size, r_min=0., r_max=1., phi_min=0., phi_max=2.*np.pi):\n \"\"\"Generates randoms for polar coordinates. Default will produce randoms within\n a unit circle. This can be specified to a ring segment, i.e. with inner radius\n r_min and outer radius r_max and specifying the angle of the ring segment.\n\n Parameters\n ----------\n size : int\n Number of randoms.\n r_min : float\n Minimum r.\n r_max : float\n Maximum r.\n phi_min : float\n Minimum phi.\n phi_max : float\n Maximum phi.\n\n Returns\n -------\n r : array\n Random radial coordinates.\n phi : array\n Random phi coordinates.\n \"\"\"\n # uniform randoms\n u_r = np.random.random_sample(size)\n u_phi = np.random.random_sample(size)\n # random r and phi within a given range\n r = np.sqrt((r_max**2.-r_min**2.)*u_r + r_min**2.)\n phi = (phi_max-phi_min)*u_phi + phi_min\n return r, phi\n", "import numpy as np\nimport healpy as hp\n\nfrom .. import coords\nfrom .. import pixel\n\n\ndef randoms_usphere(size, phi_min=0., phi_max=2*np.pi, theta_min=0., theta_max=np.pi):\n \"\"\"\n Random points on the unit sphere or more generally across the surface of a sphere. 
The\n default will give randoms on the full unit sphere.\n\n Note\n ----\n Coordinate convention:\n - phi lies in the range [0, 2pi]\n - theta lies in the rang [0, pi].\n\n Parameters\n ----------\n size : int\n Number of randoms to generate.\n phi_min : float\n Minimum longitude in radians.\n phi_max : float\n Maximum longitude in radians.\n theta_min : float\n Minimum latitude in radians.\n theta_max : float\n Maximum longitude in radians.\n\n Returns\n -------\n phi : array\n Random phi.\n theta : array\n Random theta.\n \"\"\"\n # uniform randoms\n u_phi = np.random.random_sample(size)\n u_theta = np.random.random_sample(size)\n # random phi and theta within a given range\n phi = phi_min + (phi_max - phi_min)*u_phi\n theta = np.arccos(np.cos(theta_min) - (np.cos(theta_min) - np.cos(theta_max))*u_theta)\n return phi, theta\n\n# Old way to generate randoms in a healpix pixel, keep for benchmarking.\n#\n# def randoms_healpix_pixel(size, pix, nside):\n# \"\"\"Returns roughly `size` number of randoms inside a HEALPix pixel.\n#\n# Parameters\n# ----------\n# size : int\n# Average number of randoms per pixel.\n# pix : int\n# Pixel identifier for healpix map.\n# nside : int\n# Nside of the healpix map.\n#\n# Returns\n# -------\n# phi : array\n# Random phi (latitude angle) in radians.\n# theta : array\n# Random theta (longitude angle) in radians.\n# \"\"\"\n# # Find healpix pixel boundaries.\n# pix_bound = hp.boundaries(nside, pix, step=1, nest=False)\n# theta_bound, phi_bound = hp.vec2ang(pix_bound.T)\n# # Check if pixel crosses over the phi 2pi to 0 divide, if it does we shift boundaries below pi by 2pi.\n# # shuffle is used to ensure that we know this procedure has been done, and can undo this shift\n# # in the generated randoms.\n# if phi_bound.max() - phi_bound.min() > np.pi:\n# condition = np.where(phi_bound < np.pi)[0]\n# phi_bound[condition] += 2.*np.pi\n# shuffle = True\n# else:\n# shuffle = False\n# # Calculate the area of the unit sphere segment for which we 
are actually getting randoms, and compare to the pixel area.\n# usphere_area = coords.usphere_area(phi_bound.min(), phi_bound.max(), theta_bound.min(), theta_bound.max())\n# # Adjust the size of randoms to account for the size difference of the pixel vs the unit sphere segment.\n# adjusted_size = int(1.05 * size * (usphere_area/hp.nside2pixarea(nside)))\n# phi_len = 0\n# while phi_len < size:\n# # Get randoms in the unit sphere segment\n# phi_rand, theta_rand = randoms_usphere(adjusted_size, phi_min=phi_bound.min(), phi_max=phi_bound.max(),\n# theta_min=theta_bound.min(), theta_max=theta_bound.max())\n# # If shuffle is True we need to shift randoms with phi above 2pi by -2pi.\n# if shuffle == True:\n# condition = np.where(phi_rand > 2.*np.pi)[0]\n# phi_rand[condition] -= 2.*np.pi\n# # Cut randoms to only randoms within the desired pixel\n# pix_rand = hp.ang2pix(nside, theta_rand, phi_rand)\n# condition = np.where(pix_rand == pix)[0]\n# _phi, _theta = phi_rand[condition], theta_rand[condition]\n# # Concatenate to previous randoms\n# if phi_len == 0:\n# phi, theta = _phi, _theta\n# else:\n# phi = np.concatenate([phi, _phi])\n# theta = np.concatenate([theta, _theta])\n# # check size\n# if len(phi) > size:\n# phi, theta = phi[:size], theta[:size]\n# phi_len = len(phi)\n# else:\n# phi_len = len(phi)\n# return phi, theta\n\n\ndef _randoms_healpix_xy(size, p, nside):\n \"\"\"Generates randoms points in a healpix pixel in healpix x and y coordinates.\n\n Parameters\n ----------\n size : int\n Average number of randoms per pixel.\n p : int\n Healpix pixel index.\n nside : int\n Healpix Nside.\n\n Returns\n -------\n xrand : array\n Random x in the healpix pixel.\n yrand : array\n Random y in the healpix pixel.\n \"\"\"\n delta = pixel._healpix_get_delta(nside)\n xp, yp = pixel.healpix_pix2xy(p, nside)\n ux = np.random.random_sample(size)\n uy = np.random.random_sample(size)\n xrand = np.zeros(size)\n yrand = np.zeros(size)\n cond = np.where(ux <= 0.5)[0]\n xrand[cond] = xp 
+ delta*(np.sqrt(2*ux[cond]) - 1)\n yrand[cond] = 2*(xrand[cond] - xp + delta)*uy[cond] + (-xrand[cond] + xp - delta + yp)\n cond = np.where(ux > 0.5)[0]\n xrand[cond] = xp + delta*(1 - np.sqrt(2*(1-ux[cond])))\n yrand[cond] = 2*(-xrand[cond] + xp + delta)*uy[cond] + (xrand[cond] - xp - delta + yp)\n xrand = xrand % (2*np.pi)\n return xrand, yrand\n\n\ndef randoms_healpix_pixel(size, p, nside):\n \"\"\"Returns roughly `size` number of randoms inside a HEALPix pixel.\n\n Parameters\n ----------\n size : int\n Average number of randoms per pixel.\n p : int\n Pixel identifier for healpix map.\n nside : int\n Nside of the healpix map.\n\n Returns\n -------\n phi : array\n Random phi (latitude angle) in radians.\n theta : array\n Random theta (longitude angle) in radians.\n \"\"\"\n xrand, yrand = _randoms_healpix_xy(size, p, nside)\n phi, theta = coords.healpix_xy2ang(xrand, yrand)\n return phi, theta\n" ]
[ [ "numpy.sqrt", "numpy.random.random_sample" ], [ "numpy.sqrt", "numpy.cos", "numpy.random.random_sample", "numpy.where", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pranaymethuku/models
[ "7bb793554ef39ab06513138cc9e6be5eb3144bc6" ]
[ "research/object_detection/pre_training_scripts/xml_to_csv.py" ]
[ "\"\"\"\nCreated on Sun Apr 5 2020\n@author: Ruksana Kabealo, Pranay Methuku, Abirami Senthilvelan, Malay Shah\n\nClass: CSE 5915 - Information Systems\nSection: 6pm TR, Spring 2020\nProf: Prof. Jayanti\n\nA Python 3 script to perform the following tasks in order:\n 1) look at source directory, \n 2) extract xml annotations\n 3) save its corresponding compilation into a csv file\n\nAssumptions:\n Annotation files all correspond to .jpg images\n\nUsage:\n python3 xml_to_csv.py --source=path/to/source --csv-file=path/to/csv/file\n\nExamples:\n python3 auto_label.py -s=./tier1/test -c=../tier1/test_labels.csv\n\"\"\"\n\nimport os\nimport glob\nimport pandas as pd\nimport xml.etree.ElementTree as ET\nimport argparse\n\ndef retrieve_df(directory_path):\n \"\"\"\n helper function to take in a directory\n and compile a DataFrame using them\n \"\"\"\n xml_list = []\n # iterate through all the xml files in directory\n for xml_file in glob.glob(directory_path + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n column_names = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']\n # get xml tags corresponding to column_names from file and create a row\n for member in root.findall('object'):\n value = (root.find('filename').text, # filename\n int(root.find('size')[0].text), # width\n int(root.find('size')[1].text), # height\n member[0].text, # class\n int(member[4][0].text), # xmin\n int(member[4][1].text), # ymin\n int(member[4][2].text), # xmax\n int(member[4][3].text) # ymax\n )\n xml_list.append(value)\n return pd.DataFrame(xml_list, columns=column_names)\n\nif __name__ == \"__main__\":\n\n # set up command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--source\", type=str, default=\"train\",\n help=\"Path to the source folder to look from, train folder by default\")\n parser.add_argument(\"-c\", \"--csv-file\", type=str, default=\"train_labels.csv\",\n help=\"Path to a CSV file to output the annotations 
into\")\n args = parser.parse_args()\n\n xml_df = retrieve_df(os.path.join(os.getcwd(), args.source))\n xml_df.to_csv(args.csv_file, index=False)\n print('Successfully converted the annotations in {} to a file {}.'.format(args.source, args.csv_file))\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Thehunk1206/Classical-ML-Algorithms
[ "93704dc4e5e6afdbec2ae0032a86cc6eaef05432" ]
[ "Kmeans/kmeans.py" ]
[ "from typing import List\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nfrom sklearn.datasets import make_blobs, make_classification, make_swiss_roll, make_moons \n\nimport sys\n\n# Create a class for k-means clustering algorithm\nclass KMeansClustering(object):\n def __init__(self, K:int, max_iter:int = 200) -> None:\n super().__init__()\n self.K = K\n self.max_iter = max_iter\n self.num_datapoints, self.num_feat = X.shape\n self.fitted_centroids = None\n self.inertia = 0\n\n def init_centroids(self, X:np.ndarray) -> np.ndarray:\n # centroids = np.zeros(shape=(self.K, self.num_feat))\n # for k in range(self.K):\n # centroid = X[np.random.randint(1,len(X))]\n # centroids[k] = centroid\n # return centroids\n\n centroids = []\n centroids.append(X[np.random.randint(1,len(X))])\n for _ in range(self.K-1):\n distances = []\n for x in X:\n d = sys.maxsize\n for i in range(len(centroids)):\n temp_distance = np.sqrt(np.sum((x - centroids[i])**2))\n if temp_distance < d:\n d = temp_distance\n distances.append(d)\n distances = np.array(distances)\n max_idx = np.argmax(distances)\n centroids.append(X[max_idx])\n distances = []\n return np.array(centroids)\n \n def create_clusters(self, X:np.ndarray, centroids:np.ndarray) -> List[list]:\n clusters = [[] for _ in range(self.K)] # Create K empty clusters\n for p_idx, p in enumerate(X):\n closest_centroid = np.argmin(np.sqrt(np.sum((p - centroids)**2, axis=1))) # Find closest centroid for each point using Euclidian distance\n clusters[closest_centroid].append(p_idx) # assign each data point_idx to the cluster(Centroid)\n return clusters\n \n def update_centroid(self, X:np.ndarray, clusters:List[list])-> np.ndarray:\n centroids = np.zeros(shape=(self.K, self.num_feat))\n for idx, cluster in enumerate(clusters):\n new_centroid = np.mean(X[cluster], axis=0)\n centroids[idx] = new_centroid\n return centroids\n\n def plot_cluster(self, centroids, x, y):\n plt.scatter(x[:,0], x[:,1], c=y, 
s=50, cmap='viridis')\n plt.scatter(centroids[:,0], centroids[:,1], c='black', s=100, alpha=0.7, marker='x')\n plt.show()\n \n def plot_3d_cluster(self, centroids, x, y):\n ax = plt.axes(projection='3d')\n ax.scatter3D(x[:,0], x[:,1], x[:,2], c=y, s=20, alpha =0.3,cmap='viridis')\n ax.scatter3D(centroids[:,0], centroids[:,1], centroids[:,2], c='black', s=100, alpha=1.0, marker='o')\n plt.show()\n\n def get_y_label(self, clusters:List[list], X:np.ndarray):\n y_label = np.zeros(shape=(self.num_datapoints))\n for idx, cluster in enumerate(clusters):\n for point_idx in cluster:\n y_label[point_idx] = idx\n return y_label\n\n def predict(self, X:np.ndarray):\n pass\n\n def fit(self, X:np.ndarray):\n centroids = self.init_centroids(X)\n for i in range(self.max_iter):\n clusters = self.create_clusters(X, centroids)\n prev_centroids = centroids\n centroids = self.update_centroid(X, clusters)\n print(f'Centroids at iter {i+1}: {centroids[0]}')\n\n diff = prev_centroids - centroids\n if diff.any() < 0.0001:\n break\n\n self.fitted_centroids_ = centroids\n\n y_label = self.get_y_label(clusters, X)\n\n if self.num_feat == 2:\n self.plot_cluster(centroids,X, y_label)\n elif self.num_feat == 3:\n self.plot_3d_cluster(centroids, X, y_label)\n \n return y_label\n \n\nif __name__ == \"__main__\":\n np.random.seed(45)\n K = 3\n num_of_features = 3\n num_of_samples = 1000\n X, _ = make_blobs(n_samples=num_of_samples, centers=K, n_features=num_of_features, cluster_std=2.0, random_state=1)\n # X, _ = make_classification(n_samples=num_of_samples, n_features=num_of_features, n_redundant=0, n_informative=2, n_classes=K, n_clusters_per_class=1)\n # X, _ = make_moons(n_samples=num_of_samples, noise=0.1)\n\n kmeans = KMeansClustering(K, max_iter=30)\n y_label = kmeans.fit(X)\n \n " ]
[ [ "matplotlib.pyplot.scatter", "numpy.random.seed", "matplotlib.pyplot.show", "matplotlib.pyplot.axes", "numpy.argmax", "numpy.mean", "numpy.array", "numpy.zeros", "numpy.sum", "sklearn.datasets.make_blobs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rustatian/ml_samples
[ "688e8b73db62105e62bc8c690f02ae03b4a3abfa" ]
[ "Tools/freeze_session_converter.py" ]
[ "import tensorflow as tf\nfrom keras import backend as K\nfrom keras.models import load_model\n\n\ndef freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):\n \"\"\"\n Freezes the state of a session into a prunned computation graph.\n\n Creates a new computation graph where variable nodes are replaced by\n constants taking their current value in the session. The new graph will be\n prunned so subgraphs that are not neccesary to compute the requested\n outputs are removed.\n @param session The TensorFlow session to be frozen.\n @param keep_var_names A list of variable names that should not be frozen,\n or None to freeze all the variables in the graph.\n @param output_names Names of the relevant graph outputs.\n @param clear_devices Remove the device directives from the graph for better portability.\n @return The frozen graph definition.\n \"\"\"\n from tensorflow.python.framework.graph_util import convert_variables_to_constants\n graph = session.graph\n with graph.as_default():\n freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))\n output_names = output_names or []\n output_names += [v.op.name for v in tf.global_variables()]\n input_graph_def = graph.as_graph_def()\n if clear_devices:\n for node in input_graph_def.node:\n node.device = \"\"\n frozen_graph = convert_variables_to_constants(session, input_graph_def,\n output_names, freeze_var_names)\n return frozen_graph\n\n\nmodel = load_model('model.h5')\nfrozen_graph = freeze_session(K.get_session(), output_names=[model.output.op.name])\ntf.train.write_graph(frozen_graph, \"/\", \"my_model.pb\", as_text=False)" ]
[ [ "tensorflow.python.framework.graph_util.convert_variables_to_constants", "tensorflow.global_variables", "tensorflow.train.write_graph" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
uct-cbio/cbio_proteomics
[ "a62ff04bd052dfd0dd312153ccf05c545b7dfcdb" ]
[ "bin/python/mq_blast_orfs2refproteome.py" ]
[ "#!/usr/bin/env python3\n\nimport pandas as pd\nimport numpy as np\nimport pandas as pd\nimport sys\nimport importlib.machinery\nimport Bio; from Bio import SeqIO\nimport os\nimport shutil\nfrom collections import defaultdict\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.Seq import Seq\nfrom Bio.Seq import translate\nimport json\nimport pickle\nimport yaml\nimport subprocess\n\nconfig = yaml.load(open(sys.argv[1]), Loader=yaml.Loader)\noutput = os.path.abspath( sys.argv[2])\n\nref = output + '/uniprot/{}/{}_{}.fasta'.format(config['reference_proteome_id'], config['reference_proteome_id'], config['reference_taxid'])\nnew=output+'/blast/orfs2proteins/{}/{}_{}.fasta'.format(config['reference_proteome_id'], config['reference_proteome_id'],config['reference_taxid'])\n\nnewfolder=output+'/blast/orfs2proteins/{}/'.format(config['reference_proteome_id'])\n\nref_id = config['reference_proteome_id']\n\nos.mkdir(output +'/blast/orfs2proteins/{}'.format(config['reference_proteome_id']))\n\ncmd=\"cp {} {} && cd {} && makeblastdb -in {} -dbtype 'prot' -out {}\".format(ref, newfolder, newfolder, new, ref_id )\n\nprocess = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\nprocess.wait()\nassert process.returncode == 0\n \nquery=output + '/fasta/nr.fasta'\noutfmt=5\n\nout=output + '/blast/orfs2proteins/{}.xml'.format(config['reference_proteome_id'])\n\ndb = newfolder + '/{}'.format(config['reference_proteome_id'])\n\nnum_threads=5\n\ncmd=\"blastp -query {} -outfmt {} -out {} -db {} -max_target_seqs 500 -max_hsps 1 -num_threads {} -evalue 0.0001\".format(query, outfmt, out, db, num_threads)\n\nprocess = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n\nprocess.wait()\n\nassert process.returncode == 0\n\ncmd=\"blast_XML_to_csv.py {} {} {} {}\".format(out, query, out +'.csv', 500)\n\nprocess = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n\nprocess.wait()\n\nassert process.returncode == 0\n\ndata = pd.read_csv(out+'.csv')\n\ndata = 
data[(data['_alignment_rank']==1) & (data['_hsp_rank']==1)]\n\nmp = defaultdict(list)\n\ndef get_mapping(df):\n global mp\n ids = df['blast_record.query'].split()[1].split(';')\n evalue= df['hsp.expect']\n for i in ids:\n i = i.split('|')[1]\n if evalue < 0.0001:\n mp[i].append(df['_alignment.entry'])\n\ndata.apply(get_mapping, axis=1)\n\nmp = json.dumps(mp)\n\nw= open(output +'/blast/orfs2proteins/{}_mapping.json'.format(config['reference_proteome_id']),'w')\nw.write(mp)\nw.close()\n\n\n\n\n\n\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
dasUtsav/face-detect-disrupt
[ "f4b9734e755642dfcfdc20046b770925990b0314" ]
[ "example.py" ]
[ "import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nfrom models.vgg import VGG\nfrom models.lenet import LeNet\nimport models.resnet as resnet\nimport models.densenet as densenet\nimport models.alexnet as alexnet\nimport models.googlenet as googlenet\nimport attacks\nimport numpy as np\nimport pdb\nimport pandas as pd\nimport os\nimport data_loader\nimport utils\n\nuse_cuda = torch.cuda.is_available()\ni = 0 # Epsilon counter for logging\n\n\ndef load_cifar():\n \"\"\"\n Load and normalize the training and test data for CIFAR10\n \"\"\"\n print('==> Preparing data..')\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n\n trainset = torchvision.datasets.CIFAR10(\n root='./data', train=True, download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=1024, shuffle=True, num_workers=8)\n\n testset = torchvision.datasets.CIFAR10(\n root='./data', train=False, download=True, transform=transform_test)\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=128, shuffle=False, num_workers=8)\n return trainloader, testloader\n\n\ndef load_lfw():\n\n file_ext = 'jpg' # observe, no '.' 
before jpg\n\n dataset_path = './data/lfw'\n\n pairs_path = './data/pairs.txt'\n\n pairs = utils.read_pairs(pairs_path)\n path_list, issame_list = utils.get_paths(\n args.dataset_path, pairs, file_ext)\n\n print('==> Preparing data..')\n # Define data transforms\n RGB_MEAN = [0.485, 0.456, 0.406]\n RGB_STD = [0.229, 0.224, 0.225]\n test_transform = transforms.Compose([\n transforms.Scale((250, 250)), # make 250x250\n transforms.CenterCrop(150), # then take 150x150 center crop\n # resized to the network's required input size\n transforms.Scale((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=RGB_MEAN,\n std=RGB_STD),\n ])\n\n # Create data loader\n test_loader = torch.utils.data.DataLoader(\n data_loader.LFWDataset(\n path_list, issame_list, test_transform),\n batch_size=args.batch_size, shuffle=False)\n\n return test_loader\n\n\ndef train(model, optimizer, criterion, trainloader, architecture, attacker=None, num_epochs=25, freq=10, early_stopping=True):\n \"\"\"\n Train the model with the optimizer and criterion for num_epochs epochs on data trainloader.\n attacker is an object that produces adversial inputs given regular inputs.\n Return the accuracy on the normal inputs and on the perturbed inputs.\n\n To save time, only perturb inputs on the last epoch, at the frequency freq.\n \"\"\"\n for epoch in range(num_epochs):\n running_loss = 0.0\n total, correct, correct_adv, total_adv = 0.0, 0.0, 0.0, 1.0\n early_stop_param = 0.01\n for i, data in enumerate(trainloader):\n inputs, labels = data\n inputs = Variable(\n (inputs.cuda() if use_cuda else inputs), requires_grad=True)\n labels = Variable(\n (labels.cuda() if use_cuda else labels), requires_grad=False)\n\n y_hat = model(inputs)\n loss = criterion(y_hat, labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n _, predicted = torch.max(y_hat.data, 1)\n total += labels.size(0)\n correct += predicted.eq(labels.data).sum()\n\n # print statistics\n running_loss = loss.data[0]\n\n 
if attacker:\n # only perturb inputs on the last epoch, to save time\n # if (i+1) % freq == 0: # and (epoch == num_epochs - 1):\n adv_inputs, adv_labels, num_unperturbed = attacker.attack(\n inputs, labels, model, optimizer)\n correct_adv += num_unperturbed\n total_adv += labels.size(0)\n\n if (i+1) % freq == 0:\n print('[%s: %d, %5d] loss: %.4f' % (architecture, epoch + 1, i + 1, running_loss / 2),\n correct/total, correct_adv/total_adv)\n if early_stopping:\n if running_loss < early_stop_param:\n print(\"Early Stopping !!!!!!!!!!\")\n break\n running_loss = 0.0\n\n return correct/total, correct_adv/total_adv\n\n\ndef test(model, criterion, testloader, attacker, model_name, att_name):\n \"\"\"\n Test the model with the data from testloader.\n attacker is an object that produces adversial inputs given regular inputs.\n Return the accuracy on the normal inputs and the unperturbed inputs.\n \"\"\"\n epsilons = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]\n resultsDF = pd.DataFrame(\n columns=('Model', 'Attacker', 'Epsilon', 'Test_acc', 'Test_att_acc'))\n global i\n for epsilon in epsilons:\n correct, correct_adv, total = 0.0, 0.0, 0.0\n for data in testloader:\n inputs, labels = data\n inputs = Variable(\n (inputs.cuda() if use_cuda else inputs), requires_grad=True)\n labels = Variable(\n (labels.cuda() if use_cuda else labels), requires_grad=False)\n y_hat = model(inputs)\n loss = criterion(y_hat, labels)\n loss.backward()\n\n predicted = torch.max(y_hat.data, 1)[1]\n correct += predicted.eq(labels.data).sum()\n\n _, adv_labels, num_unperturbed = attacker.attack(\n inputs, labels, model, epsilon)\n adv_inputs = attacker.perturb(inputs, epsilon=epsilon)\n correct_adv += num_unperturbed\n\n total += labels.size(0)\n\n fake = adv_inputs\n samples_name = 'images/'+name+str(epsilon) + '_samples.png'\n vutils.save_image(fake.data, samples_name)\n print(('Test Acc Acc: %.4f | Test Attacked Acc; %.4f'\n % (100.*correct/total, 100.*correct_adv/total)))\n resultsDF.loc[i] = [model_name, 
att_name,\n epsilon, correct/total, correct_adv/total]\n i = i + 1\n resultsDF.to_csv('DCGAN_attack_results.csv', mode='a',\n header=(not os.path.exists('DCGAN_attack_results.csv')))\n pdb.set_trace()\n return correct/total, correct_adv/total\n\n\ndef prep(model):\n if model and use_cuda:\n model.cuda()\n model = torch.nn.DataParallel(\n model, device_ids=list(range(torch.cuda.device_count())))\n cudnn.benchmark = True\n return model\n\n\nif __name__ == \"__main__\":\n trainloader, testloader = load_cifar()\n criterion = nn.CrossEntropyLoss()\n do_train = True\n architectures = [\n (VGG, 'VGG16', 50),\n (resnet.ResNet18, 'res18', 500),\n (densenet.densenet_cifar, 'dense121', 500),\n (alexnet.AlexNet, 'alex', 500),\n (googlenet.GoogLeNet, 'googlenet', 500),\n (LeNet, 'lenet', 250)\n ]\n\n for init_func, name, epochs in architectures:\n for tr_adv in [False, True]:\n print(name, tr_adv)\n model = prep(init_func())\n attacker = attacks.DCGAN(train_adv=tr_adv)\n\n optimizer = optim.Adam(model.parameters(), lr=1e-4)\n if do_train:\n train_acc, train_adv_acc = train(model, optimizer,\n criterion, trainloader, name, attacker, num_epochs=epochs)\n suffix = '_AT' if tr_adv else ''\n attacker.save(\n 'saved/{0}{1}_nodrop_joey_attacker_0.0010.pth'.format(name, suffix))\n torch.save(model.state_dict(),\n 'saved/{0}{1}_no_drop_joey.pth'.format(name, suffix))\n else:\n attacker.load('saved/res18_nodrop_joey_attacker_0.0010.pth')\n model.load_state_dict(torch.load('saved/dense121_joey.pth'))\n tr_adv = False\n suffix = '_AT' if tr_adv else ''\n attacker_name = 'res18_no_drop' + suffix\n name = name + suffix\n test_acc, test_adv_acc = test(model, criterion, testloader,\n attacker, name, attacker_name)\n\n pdb.set_trace()\n suffix = '_AT' if tr_adv else ''\n attacker.save(\n 'saved/{0}{1}_attacker_0.01.pth'.format(name, suffix))\n torch.save(model.state_dict(),\n 'saved/{0}{1}.pth'.format(name, suffix))\n\n \"\"\"\n\tmodel = prep(VGG('VGG16'))\n\tmodel2 = prep(VGG('VGG16'))\n\n\t# 
use default hyperparams for best results!\n\t# attacker = attacks.FGSM()\n\t# attacker = attacks.CarliniWagner(verbose=True)\n\tattacker = attacks.DCGAN(train_adv=False)\n\n\tcriterion = nn.CrossEntropyLoss()\n\n\t# train first model adversarially\n\toptimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-4)\n\ttrain_acc, train_adv_acc = train(model, optimizer, criterion, trainloader, attacker, num_epochs=50)\n\ttest_acc, test_adv_acc = test(model, criterion, testloader, attacker)\n\tattacker.save('VGG_attack_0.005.pth')\n\ttorch.save(model.state_dict(), 'VGG_50.pth')\n\t\"\"\"\n\n \"\"\"\n\t# train second model normally\n\t# attacker.load('VGG_attack_0.005.pth')\n\tmodel2.load_state_dict(torch.load('VGG_50.pth'))\n\n\toptimizer2 = optim.SGD(model2.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-4)\n\ttrain_acc, train_adv_acc = train(model2, optimizer2, criterion, trainloader, num_epochs=50)\n\ttorch.save(model2.state_dict(), 'resnet_50.pth')\n\n\ttest_acc, test_adv_acc = test(model2, criterion, testloader, attacker)\n\n\t# print 'Train accuracy of the network on the 10000 test images:', train_acc, train_adv_acc\n print 'Test accuracy of the network on the 10000 test images:', test_acc, test_adv_acc\n\t\"\"\"\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.max", "torch.load", "torch.utils.data.DataLoader", "pandas.DataFrame", "torch.cuda.is_available", "torch.cuda.device_count" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
yunjung-lee/class_python_data
[ "67ceab73e67ec63d408894a6ab016a8d25a4e30b" ]
[ "day05_tmp.py" ]
[ "#이미지 영역 지정\n\nimport scipy as sp\nimport numpy as np\nimport scipy.ndimage\nimport matplotlib.pyplot as plt\n\ndef flood_fill(test_array,h_max=255):\n input_array = np.copy(test_array)\n el = sp.ndimage.generate_binary_structure(2,2).astype(np.int)\n inside_mask = sp.ndimage.binary_erosion(~np.isnan(input_array), structure=el)\n output_array = np.copy(input_array)\n output_array[inside_mask]=h_max\n output_old_array = np.copy(input_array)\n output_old_array.fill(0)\n el = sp.ndimage.generate_binary_structure(2,1).astype(np.int)\n while not np.array_equal(output_old_array, output_array):\n output_old_array = np.copy(output_array)\n output_array = np.maximum(input_array,sp.ndimage.grey_erosion(output_array, size=(3,3), footprint=el))\n return output_array\n\nx = plt.imread(\"test.jpg\")\n# \"convert\" to grayscale and invert\nbinary = 255-x[:,:,0]\n\nfilled = flood_fill(binary)\n\nplt.imshow(filled)\n\n# in tkinter\n\n\n\n#\n# The PIL library itself provides no GUI code --what you are asking for is an application with a GUI. I'd suggest using Tkinter + PIL, but there is no way it is trivial - you will have to handle the mouse clicks, create a rectangle object tracking it, have a way to \"reset\" the rectangle, and so on.\n#\n# Unfortunatelly the Canvas Tkinter widget which is used to draw things on is poorly documented - you will have to read trough it here: http://www.pythonware.com/library/tkinter/introduction/canvas.htm\n#\n# Bellow there is an example code that reads an image file from the disk and draws it on a tkinter window. As you can see, here is some object juggling to get it right.\nimport Tkinter\nimport Image, ImageTk, ImageDraw\n\nimage_file = \"svg.png\"\n\nw = Tkinter.Tk()\n\nimg = Image.open(image_file)\nwidth, height = img.size\nca = Tkinter.Canvas(w, width=width, height=height)\nca.pack()\nphotoimg = ImageTk.PhotoImage(\"RGB\", img.size)\nphotoimg.paste(img)\nca.create_image(width//2,height//2, image=photoimg)\nTkinter.mainloop()\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.array_equal", "numpy.isnan", "matplotlib.pyplot.imread", "scipy.ndimage.generate_binary_structure", "numpy.copy", "scipy.ndimage.grey_erosion" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
Anirudh-Swaminathan/3d_euclidean_planning
[ "95e8cb52233ce87d36553942f66f5acc32d4c605" ]
[ "src/main_ompl.py" ]
[ "#!/usr/bin/python\n\n# Created by anicodebreaker on May 14, 2020\nimport OMPLPlanner\nfrom pathlib import Path\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\n\nplt.ion()\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom pyrr import aabb\n\n\ndef tic():\n return time.time()\n\n\ndef toc(tstart, nm=\"\"):\n taken = time.time() - tstart\n print('%s took: %s sec.\\n' % (nm, taken))\n return taken\n\n\ndef load_map(fname):\n \"\"\"\n Loads the bounady and blocks from map file fname.\n\n boundary = [['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax','r','g','b']]\n\n blocks = [['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax','r','g','b'],\n ...,\n ['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax','r','g','b']]\n \"\"\"\n mapdata = np.loadtxt(fname, dtype={'names': ('type', 'xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax', 'r', 'g', 'b'),\n 'formats': ('S8', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f', 'f')})\n blockIdx = mapdata['type'] == b'block'\n boundary = mapdata[~blockIdx][['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax', 'r', 'g', 'b']].view('<f4').reshape(\n -1, 11)[:, 2:]\n blocks = mapdata[blockIdx][['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax', 'r', 'g', 'b']].view('<f4').reshape(-1,\n 11)[\n :, 2:]\n return boundary, blocks\n\n\ndef draw_map(boundary, blocks, start, goal):\n \"\"\"\n Visualization of a planning problem with environment boundary, obstacle blocks, and start and goal points\n \"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n hb = draw_block_list(ax, blocks)\n hs = ax.plot(start[0:1], start[1:2], start[2:], 'ro', markersize=7, markeredgecolor='k')\n hg = ax.plot(goal[0:1], goal[1:2], goal[2:], 'go', markersize=7, markeredgecolor='k')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n ax.set_xlim(boundary[0, 0], boundary[0, 3])\n ax.set_ylim(boundary[0, 1], boundary[0, 4])\n ax.set_zlim(boundary[0, 2], boundary[0, 5])\n return fig, ax, hb, hs, 
hg\n\n\ndef draw_block_list(ax, blocks):\n '''\n Subroutine used by draw_map() to display the environment blocks\n '''\n v = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1], [1, 1, 1], [0, 1, 1]],\n dtype='float')\n f = np.array([[0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7], [0, 1, 2, 3], [4, 5, 6, 7]])\n clr = blocks[:, 6:] / 255\n n = blocks.shape[0]\n d = blocks[:, 3:6] - blocks[:, :3]\n vl = np.zeros((8 * n, 3))\n fl = np.zeros((6 * n, 4), dtype='int64')\n fcl = np.zeros((6 * n, 3))\n for k in range(n):\n vl[k * 8:(k + 1) * 8, :] = v * d[k] + blocks[k, :3]\n fl[k * 6:(k + 1) * 6, :] = f + k * 8\n fcl[k * 6:(k + 1) * 6, :] = clr[k, :]\n\n if type(ax) is Poly3DCollection:\n ax.set_verts(vl[fl])\n else:\n pc = Poly3DCollection(vl[fl], alpha=0.25, linewidths=1, edgecolors='k')\n pc.set_facecolor(fcl)\n h = ax.add_collection3d(pc)\n return h\n\n\ndef intersect(start, end, box):\n fst = 0\n fet = 1\n # print(box.shape)\n bomin = aabb.minimum(box)\n bomax = aabb.maximum(box)\n # print(bomin.shape, bomax.shape)\n for i in range(3):\n bmin = bomin[i]\n bmax = bomax[i]\n si = start[i]\n ei = end[i]\n if si < ei:\n if si > bmax or ei < bmin:\n return False\n di = ei - si\n st = (bmin - si) / di if si < bmin else 0\n et = (bmax - si) / di if ei > bmax else 1\n else:\n if ei > bmax or si < bmin:\n return False\n di = ei - si\n st = (bmax - si) / di if si > bmax else 0\n et = (bmin - si) / di if ei < bmin else 1\n if st > fst:\n fst = st\n if et < fet:\n fet = et\n if fet < fst:\n return False\n return True\n\n\ndef check_collision(bo, bl, pth):\n \"\"\"\n A method to check for any collisions with objects in the path\n :param bo: boundary\n :param bl: blocks\n :param pth: path to check collisions for\n :return: collided boolean -> True if collision occured. 
False if not\n \"\"\"\n node = pth[0]\n # create a list of AABBs for pyrr\n blk_list = list()\n for k in range(bl.shape[0]):\n mi = bl[k, :3].reshape(1, 3)\n ma = bl[k, 3:6].reshape(1, 3)\n ab = np.vstack((mi, ma))\n abblk = aabb.create_from_points(ab)\n # print(mi.shape, ma.shape, ab.shape, abblk.shape)\n blk_list.append(abblk)\n\n for i in range(1, len(pth)):\n next = pth[i]\n # Check if this direction is valid\n # Checking if the considered node is outside the bounds\n if (next[0] < bo[0, 0] or next[0] > bo[0, 3] or\n next[1] < bo[0, 1] or next[1] > bo[0, 4] or\n next[2] < bo[0, 2] or next[2] > bo[0, 5]):\n print(\"Collision occurred at index: {}\\n Path went out of bounds!\".format(i))\n return True\n\n # loop through all the blocks in the environment\n for k in range(bl.shape[0]):\n # check if next node is inside some block\n if (bl[k, 0] < next[0] < bl[k, 3] and\n bl[k, 1] < next[1] < bl[k, 4] and\n bl[k, 2] < next[2] < bl[k, 5]):\n print(\"Collision occurred at index {}, for block index {}.\\nNew node is inside the block!\".format(i, k))\n return True\n # check if ray from node to next intersects AABB\n rfi = intersect(node, next, blk_list[k])\n rbi = intersect(next, node, blk_list[k])\n if rfi and rbi:\n # it means the collision occurred, and it occurred in-between the points\n print(\"Collision occurred at index {}, for block index {}.\\nCollision occurred between these 2 points!\"\n .format(i, k))\n print(\"The start point is {} and the end point is {}.\\nThe bbox is parameterized as {}\"\n .format(node, next, blk_list[k]))\n return True\n node = next\n return False\n\n\ndef runtest(mapfile, start, goal, bpi, bpp, a, p, mpn, verbose=True):\n \"\"\"\n This function:\n * load the provided mapfile\n * creates a motion planner\n * plans a path from start to goal\n * checks whether the path is collision free and reaches the goal\n * computes the path length as a sum of the Euclidean norm of the path segments\n \"\"\"\n # Load a map and instantiate a motion 
planner\n boundary, blocks = load_map(mapfile)\n pth_im = \"{}{}/bias_{}/{}/\".format(bpi, a, p, mpn)\n pth_pr = \"{}{}/bias_{}/{}/\".format(bpp, a, p, mpn)\n\n # TODONE: replace this with your own planner implementation\n MP = OMPLPlanner.RRTStarPlanner(boundary, blocks, p)\n\n # Display the environment\n if verbose:\n fig, ax, hb, hs, hg = draw_map(boundary, blocks, start, goal)\n\n print(\"This map is bound from {} to {}\".format(boundary[0, :3], boundary[0, 3:6]))\n print(\"The start node is: {}\".format(start))\n print(\"The goal node is: {}\".format(goal))\n print(\"The number of obstacles are: {}\".format(blocks.shape[0]))\n\n # Call the motion planner\n t0 = tic()\n pathObj, pathlength = MP.plan(start, goal)\n print(\"pathObj is: {}; type is: {}\".format(pathObj, type(pathObj)))\n dur = toc(t0, \"{} algorithm applied on {} map\".format(a, mpn))\n\n pthStr = pathObj.printAsMatrix()\n print(\"Path computed is: {}\".format(pthStr), type(pthStr))\n # Plot the path\n if verbose:\n Path(pth_pr).mkdir(parents=True, exist_ok=True)\n with open(pth_pr + \"path.txt\", \"w\") as f:\n f.write(pthStr)\n path = np.loadtxt(pth_pr + \"path.txt\")\n print(path.shape)\n ax.plot(path[:, 0], path[:, 1], path[:, 2], 'r-')\n # save the plot to file\n try:\n Path(pth_im).mkdir(parents=True, exist_ok=True)\n plt.savefig(pth_im + \"path.png\", bbox_inches='tight')\n plt.show(block=True)\n except Exception as e:\n print(\"Error! Could not save image! 
Message is {}.\".format(e))\n\n # TODONE: You should verify whether the path actually intersects any of the obstacles in continuous space\n # TODONE: You can implement your own algorithm or use an existing library for segment and\n # axis-aligned bounding box (AABB) intersection\n collision = check_collision(boundary, blocks, path)\n if collision:\n print(\"Collision Occurred!\")\n else:\n print(\"Path has no collisions!\")\n goal_reached = sum((path[-1] - goal) ** 2) <= 0.1\n success = (not collision) and goal_reached\n # pathlength = np.sum(np.sqrt(np.sum(np.diff(path, axis=0) ** 2, axis=1)))\n # numPthNodes = len(path)\n numPthNodes = path.shape[0]\n if verbose:\n Path(pth_pr).mkdir(parents=True, exist_ok=True)\n with open(pth_pr + \"props.txt\", \"w\") as f:\n f.write(\n \"{} algorithm applied on {} map took {} seconds to plan.\\n\".format(a, mpn, dur))\n f.write(\"Successful Path?: {}\\n\".format(success))\n f.write(\"Final path has collisions?: {}\\n\".format(collision))\n f.write(\"Reached Goal?: {}\\n\".format(goal_reached))\n f.write(\"Path Length: {}\\n\".format(pathlength))\n f.write(\"Number of nodes in path: {}\\n\".format(numPthNodes))\n return success, pathlength\n\n\ndef test_single_cube(bpi, bpa, a, p, verbose=False):\n print('Running single cube test...\\n')\n start = np.array([2.3, 2.3, 1.3])\n goal = np.array([7.0, 7.0, 5.5])\n map = \"single_cube\"\n success, pathlength = runtest('./maps/single_cube.txt', start, goal, bpi, bpa, a, p, map, verbose)\n print('Success: %r' % success)\n print('Path length: %d' % pathlength)\n print('\\n')\n\n\ndef test_maze(bpi, bpp, a, p, verbose=False):\n print('Running maze test...\\n')\n start = np.array([0.0, 0.0, 1.0])\n goal = np.array([12.0, 12.0, 5.0])\n map = \"maze\"\n success, pathlength = runtest('./maps/maze.txt', start, goal, bpi, bpp, a, p, map, verbose)\n print('Success: %r' % success)\n print('Path length: %d' % pathlength)\n print('\\n')\n\n\ndef test_window(bpi, bpp, a, p, verbose=False):\n 
print('Running window test...\\n')\n start = np.array([0.2, -4.9, 0.2])\n goal = np.array([6.0, 18.0, 3.0])\n map = \"window\"\n success, pathlength = runtest('./maps/window.txt', start, goal, bpi, bpp, a, p, map, verbose)\n print('Success: %r' % success)\n print('Path length: %d' % pathlength)\n print('\\n')\n\n\ndef test_tower(bpi, bpp, a, p, verbose=False):\n print('Running tower test...\\n')\n start = np.array([2.5, 4.0, 0.5])\n goal = np.array([4.0, 2.5, 19.5])\n map = \"tower\"\n success, pathlength = runtest('./maps/tower.txt', start, goal, bpi, bpp, a, p, map, verbose)\n print('Success: %r' % success)\n print('Path length: %d' % pathlength)\n print('\\n')\n\n\ndef test_flappy_bird(bpi, bpp, a, p, verbose=False):\n print('Running flappy bird test...\\n')\n start = np.array([0.5, 2.5, 5.5])\n goal = np.array([19.0, 2.5, 5.5])\n map = \"flappy_bird\"\n success, pathlength = runtest('./maps/flappy_bird.txt', start, goal, bpi, bpp, a, p, map, verbose)\n print('Success: %r' % success)\n print('Path length: %d' % pathlength)\n print('\\n')\n\n\ndef test_room(bpi, bpp, a, p, verbose=False):\n print('Running room test...\\n')\n start = np.array([1.0, 5.0, 1.5])\n goal = np.array([9.0, 7.0, 1.5])\n map = \"room\"\n success, pathlength = runtest('./maps/room.txt', start, goal, bpi, bpp, a, p, map, verbose)\n print('Success: %r' % success)\n print('Path length: %d' % pathlength)\n print('\\n')\n\n\ndef test_monza(bpi, bpp, a, p, verbose=False):\n print('Running monza test...\\n')\n start = np.array([0.5, 1.0, 4.9])\n goal = np.array([3.8, 1.0, 0.1])\n map = \"monza\"\n success, pathlength = runtest('./maps/monza.txt', start, goal, bpi, bpp, a, p, map, verbose)\n print('Success: %r' % success)\n print('Path length: %d' % pathlength)\n print('\\n')\n\n\nif __name__ == \"__main__\":\n base_pth_img = \"./path_images/\"\n base_pth_prop = \"./path_properties/\"\n\n # resolution of discretization of planner\n algo = \"rrt_star\"\n # property -> resolution for A*; sampling 
weight to goal for RRT*\n prop = 0.05\n # test_single_cube(base_pth_img, base_pth_prop, algo, prop, True)\n # test_maze(base_pth_img, base_pth_prop, algo, prop, True)\n # test_flappy_bird(base_pth_img, base_pth_prop, algo, prop, True)\n # test_monza(base_pth_img, base_pth_prop, algo, prop, True)\n # test_window(base_pth_img, base_pth_prop, algo, prop, True)\n # test_tower(base_pth_img, base_pth_prop, algo, prop, True)\n test_room(base_pth_img, base_pth_prop, algo, prop, True)\n" ]
[ [ "numpy.vstack", "matplotlib.pyplot.show", "matplotlib.pyplot.savefig", "numpy.loadtxt", "numpy.array", "numpy.zeros", "matplotlib.pyplot.ion", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PipGrylls/r-test
[ "5a307f0e9018bcdaf1a745b346cc0dc23a528410" ]
[ "bin/get_schedules.py" ]
[ "\"\"\"Create schedule for the workshop.\n\nDetermines which lesson schedules are required by reading _config.yml. The\nschedule for each lesson is modified by a delta time to account for different\nstart times to what is in the schedule. The schedules are written to HTML in an\n(n x 2) array, with the first column being filled first (in date order) just\nlike in an academic journal. This script updates _includes/rsg/schedule.html,\nand creates a detailed 00-schedule.md file for each lesson.\n\"\"\"\n\nimport datetime\nimport yaml\nimport math\nimport pandas\nimport glob\nimport textwrap\nfrom bs4 import BeautifulSoup as bs\nfrom pathlib import Path\nimport string\nfrom enum import Enum\nimport dateutil\n\n\nclass LessonType(Enum):\n \"\"\"Enum for the different types of lessons.\n \"\"\"\n markdown = \"episode\"\n r_markdown = \"episode_r\"\n\n\ndef get_yaml_config():\n \"\"\"Open the YAML config file for the website.\n\n Returns\n -------\n config: dict\n The configuration for the website.\n \"\"\"\n with open(\"_config.yml\", \"r\") as fp:\n config = yaml.load(fp, yaml.Loader)\n\n return config\n\n\ndef get_date_object(datestr):\n \"\"\"Convert a date string into a datetime object.\n\n On failure to convert a string, None is returned.\n\n Parameters\n ----------\n datestr: str\n The string of the date, in format YYYY-MM-DD. But any which dateutil\n accepts is also acceptable.\n\n Returns\n -------\n date: datetime.date\n The date object. 
If unable to parse, then None is returned instead.\n \"\"\"\n if datestr is None:\n return None\n\n if isinstance(datestr, datetime.date):\n return datestr\n elif not isinstance(datestr, str):\n raise ValueError(f\"datestr is not a string but {type(datestr)}\")\n\n try:\n date = dateutil.parser.parse(datestr).date()\n except dateutil.parser.ParserError:\n date = None\n\n return date\n\n\ndef get_time_object(time_string):\n \"\"\"Convert a string into a datetime object.\n\n If unable to parse the string, a ValueError exception is raised as we\n cannot continue when there are missing time objects.\n\n Parameters\n ----------\n time_string: str\n The time string to convert.\n\n Returns\n -------\n time_object: datetime.datetime\n The converted string as a datetime object.\n \"\"\"\n if type(time_string) is str:\n try:\n time = datetime.datetime.strptime(time_string, \"%I:%M %p\") # start-time: 9:30 am\n except ValueError:\n time = datetime.datetime.strptime(time_string, \"%H:%M\") # start-time: \"9:30\"\n elif type(time_string) is int:\n hours, minutes = divmod(time_string, 60)\n time = datetime.datetime.strptime(f\"{hours}:{minutes}\", \"%H:%M\") # start-time: 9:30\n else:\n raise ValueError(f\"start-time {time_string} is an invalid format: accept 24 hr (15:00) or 12 hr with am/pm (3:00 pm)\")\n\n return time\n\ndef create_detailed_lesson_schedules(lesson_name, lesson_type, start_time):\n \"\"\"Create a detailed lesson schedule landing page for each lesson.\n\n The schedule is based on a modifed version of syllabus.html to work better\n with the workshop format. 
This function also renames the ordering of\n lessons, so the schedule will always be lesson 00.\n\n Parameters\n ----------\n lesson_name: str\n The name of the lesson.\n lesson_type: LessonType\n The type of lesson.\n start_time: str\n The start time of the lesson.\n \"\"\"\n if lesson_type == LessonType.markdown:\n file_ext = \"md\"\n containing_directory = f\"collections/_episodes/{lesson_name}-lesson\"\n else:\n containing_directory = f\"collections/_episodes_rmd/{lesson_name}-lesson\"\n file_ext = \"Rmd\"\n\n for i, file in enumerate(sorted(glob.glob(f\"{containing_directory}/[0-9]*.{file_ext}\"))):\n filepath = Path(file)\n new_file_name = f\"{i + 1:02d}{filepath.stem.lstrip(string.digits)}.{file_ext}\"\n filepath.rename(f\"{containing_directory}/{new_file_name}\")\n\n schedule_markdown = textwrap.dedent(f\"\"\"---\n title: Lesson Schedule\n slug: {lesson_name}-schedule\n layout: schedule\n ---\n {{% include syllabus.html name=\"{lesson_name}\" start_time={start_time} %}}\n \"\"\")\n\n with open(f\"{containing_directory}/00-schedule.md\", \"w\") as fp:\n fp.write(\"\\n\".join([line.lstrip() for line in schedule_markdown.splitlines()]))\n\n\ndef create_index_schedules(schedules):\n \"\"\"Write the new schedule to _includes/rsg/schedule.html.\n\n The schedules which are passed are ordered by the date of the lessons, and\n are displayed in a two column format. The first column is filled up first,\n followed by the second column.\n\n Parameters\n ----------\n schedules: list[dict]\n The list of schedules to write to the file. 
Each schedule is a dict\n with keys \"date\" which is the date for the lesson and \"schedule\" which\n is the html table for the schedule.\n \"\"\"\n html = \"<div class=\\\"row\\\">\"\n n_lessons = len(schedules)\n n_rows = math.ceil(n_lessons / 2)\n ordered_schedules = sorted(schedules, key=lambda x: x[\"date\"])\n\n for i in range(n_rows):\n left_idx = i\n html += ordered_schedules[left_idx][\"schedule\"]\n right_idx = i + n_rows\n if right_idx > n_lessons - 1:\n continue\n html += ordered_schedules[right_idx][\"schedule\"]\n\n html += \"</div>\"\n\n with open(\"_includes/rsg/schedule.html\", \"w\") as fp:\n fp.write(bs(html, \"html.parser\").prettify())\n\n\ndef main():\n \"\"\"Main function of the script.\n\n Handles all of the top level logic, for iterating through lessons to create\n the schedule HTML. Each lesson (and day) schedule is placed into an list,\n which is put into date order and written to HTML. Additionally, this script\n also creates a 00-schedule.md file for each lesson, which is used to create\n a detailed syllabus.\n \"\"\"\n website_config = get_yaml_config()\n\n # Try to parse the start and end date for the workshop, to check that lessons\n # are in the correct time frame. If the date is not a valid date, i.e. 
if it\n # still says FIXME, then we do not check the start and end date.\n\n workshop_start_date = get_date_object(website_config.get(\"startdate\", None))\n workshop_end_date = get_date_object(website_config.get(\"enddate\", None))\n\n # Iterate over each lesson, to add their schedule to the html_schedules string\n\n lessons = website_config.get(\"lessons\", None)\n if not lessons:\n raise ValueError(\"No lessons found in the workshop configuration file (_config.yml)\")\n lesson_schedules = []\n\n for lesson in lessons:\n lesson_type = LessonType(lesson.get(\"type\", None)) # have to differentiate between markdown and r-markdown lessons\n lesson_title = lesson.get(\"title\", None)\n lesson_name = lesson.get(\"gh-name\", None)\n lesson_dates = lesson.get(\"date\", None) # can be a list\n lesson_starts = lesson.get(\"start-time\", None) # can be a list\n\n if [thing for thing in (lesson_name, lesson_dates, lesson_title, lesson_starts) if thing is None]:\n raise ValueError(f\"gh-name, date, title, and start-time are required for each lesson\")\n\n # Since we allow multiple dates and start times per lesson, we need to be\n # able to iterate over even single values so turn into list. 
When done,\n # convert the dates from str to datetime.date objects.\n\n if type(lesson_dates) is not list:\n lesson_dates = [lesson_dates]\n if type(lesson_starts) is not list:\n lesson_starts = [lesson_starts]\n\n lesson_dates = [get_date_object(date) for date in lesson_dates]\n\n # Get the schedule(s) for the lesson into a dataframe and also the html\n # so we can search for the permalinks\n\n with open(f\"_includes/rsg/{lesson_name}-lesson/schedule.html\", \"r\") as fp:\n schedule_html = fp.read()\n\n soup = bs(schedule_html, \"html.parser\")\n all_schedules = pandas.read_html(schedule_html, flavor=\"lxml\")\n\n if len(all_schedules) != len(lesson_dates):\n raise ValueError(f\"There are not the same number of lesson dates for the number of schedules for\"\n \" {lesson_name} lesson\")\n if len(all_schedules) != len(lesson_starts):\n raise ValueError(f\"There are not the same number of lesson start times for the number of schedules for\"\n \" {lesson_name} lesson\")\n\n # Loop over each schedule table, if the lesson has multiple schedules\n\n for i, schedule in enumerate(all_schedules):\n\n schedule.columns = [\"time\", \"session\"]\n permalink = soup.find_all(\"a\", href=True)[i][\"href\"] # assume each table has a permalink to a lesson\n start_time = get_time_object(lesson_starts[i])\n original_start = get_time_object(schedule[\"time\"][0])\n datestr = lesson_dates[i].strftime(\"%d %B %Y\")\n\n if workshop_start_date and lesson_dates[i] < workshop_start_date:\n raise ValueError(f\"The date for {lesson_name} day {i + 1} is before the workshop start date\")\n if workshop_end_date and lesson_dates[i] > workshop_end_date:\n raise ValueError(f\"The date for {lesson_name} day {i + 1} is after the workshop end date\")\n\n # Calculate the time difference between the start time and the start\n # time in the original schedule. 
This delta time (in minutes) is added\n # to each time in the original schedule\n\n delta_minutes = divmod((start_time - original_start).total_seconds(), 60)[0]\n\n # Construct the schedule table for this lesson, adding delta_minutes to\n # each original entry, and add the schedule table to the html template\n\n if len(all_schedules) > 1:\n title = f\"Day {i + 1}: {lesson_title}\"\n else:\n title = lesson_title\n\n table = f\"\"\"\n <div class=\"col-md-6\">\n <a href=\"{lesson_name}-schedule\"><h3>{title}</h3></a>\n <h4>{datestr}</h4>\n <table class=\"table table-striped\">\n \"\"\"\n\n for time, session in zip(schedule[\"time\"], schedule[\"session\"]):\n actual_time = datetime.datetime.strptime(time, \"%H:%M\") + datetime.timedelta(minutes=delta_minutes)\n table += f\"<tr> <td> {actual_time.hour:02d}:{actual_time.minute:02d} </td> <td> {session} </td> </tr>\\n\"\n\n table += \"\"\"\n </table>\n </div>\n \"\"\"\n\n lesson_schedules.append({\"date\": lesson_dates[i], \"schedule\": table})\n\n start_time = get_time_object(lesson_starts[0])\n start_time_minutes = start_time.hour * 60 + start_time.minute\n create_detailed_lesson_schedules(lesson_name, lesson_type, start_time_minutes)\n\n create_index_schedules(lesson_schedules)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.read_html" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
stancld/metrics
[ "d35c3b5cff21e68e6620ebfc9a84e60dc4559e92", "d35c3b5cff21e68e6620ebfc9a84e60dc4559e92" ]
[ "tests/text/test_rouge.py", "tests/retrieval/test_ndcg.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import partial\nfrom typing import Sequence\n\nimport pytest\nimport torch\n\nfrom tests.text.helpers import TextTester\nfrom tests.text.inputs import _inputs_multiple_references, _inputs_single_sentence_single_reference\nfrom torchmetrics.functional.text.rouge import rouge_score\nfrom torchmetrics.text.rouge import ROUGEScore\nfrom torchmetrics.utilities.imports import _NLTK_AVAILABLE, _ROUGE_SCORE_AVAILABLE\n\nif _ROUGE_SCORE_AVAILABLE:\n from rouge_score.rouge_scorer import RougeScorer\n from rouge_score.scoring import BootstrapAggregator\nelse:\n RougeScorer, BootstrapAggregator = object, object\n\nROUGE_KEYS = (\"rouge1\", \"rouge2\", \"rougeL\", \"rougeLsum\")\n\n\ndef _compute_rouge_score(\n preds: Sequence[str],\n targets: Sequence[Sequence[str]],\n use_stemmer: bool,\n rouge_level: str,\n metric: str,\n accumulate: str,\n):\n \"\"\"Evaluates rouge scores from rouge-score package for baseline evaluation.\"\"\"\n if isinstance(targets, list) and all(isinstance(target, str) for target in targets):\n targets = [targets] if isinstance(preds, str) else [[target] for target in targets]\n\n if isinstance(preds, str):\n preds = [preds]\n\n if isinstance(targets, str):\n targets = [[targets]]\n\n scorer = RougeScorer(ROUGE_KEYS, use_stemmer=use_stemmer)\n aggregator = BootstrapAggregator()\n\n for target_raw, pred_raw in zip(targets, preds):\n list_results = 
[scorer.score(target, pred_raw) for target in target_raw]\n aggregator_avg = BootstrapAggregator()\n\n if accumulate == \"best\":\n key_curr = list(list_results[0].keys())[0]\n all_fmeasure = torch.tensor([v[key_curr].fmeasure for v in list_results])\n highest_idx = torch.argmax(all_fmeasure).item()\n aggregator.add_scores(list_results[highest_idx])\n elif accumulate == \"avg\":\n for _score in list_results:\n aggregator_avg.add_scores(_score)\n _score = {rouge_key: scores.mid for rouge_key, scores in aggregator_avg.aggregate().items()}\n aggregator.add_scores(_score)\n else:\n raise ValueError(f\"Got unknown accumulate value {accumulate}. Expected to be one of ['best', 'avg']\")\n\n rs_scores = aggregator.aggregate()\n rs_result = getattr(rs_scores[rouge_level].mid, metric)\n return rs_result\n\n\[email protected](not _NLTK_AVAILABLE, reason=\"test requires nltk\")\[email protected](\n [\"pl_rouge_metric_key\", \"use_stemmer\"],\n [\n (\"rouge1_precision\", True),\n (\"rouge1_recall\", True),\n (\"rouge1_fmeasure\", False),\n (\"rouge2_precision\", False),\n (\"rouge2_recall\", True),\n (\"rouge2_fmeasure\", True),\n (\"rougeL_precision\", False),\n (\"rougeL_recall\", False),\n (\"rougeL_fmeasure\", True),\n (\"rougeLsum_precision\", True),\n (\"rougeLsum_recall\", False),\n (\"rougeLsum_fmeasure\", False),\n ],\n)\[email protected](\n [\"preds\", \"targets\"],\n [\n (_inputs_multiple_references.preds, _inputs_multiple_references.targets),\n ],\n)\[email protected](\"accumulate\", [\"avg\", \"best\"])\nclass TestROUGEScore(TextTester):\n @pytest.mark.parametrize(\"ddp\", [False, True])\n @pytest.mark.parametrize(\"dist_sync_on_step\", [False, True])\n def test_rouge_score_class(\n self, ddp, dist_sync_on_step, preds, targets, pl_rouge_metric_key, use_stemmer, accumulate\n ):\n metric_args = {\"use_stemmer\": use_stemmer, \"accumulate\": accumulate}\n rouge_level, metric = pl_rouge_metric_key.split(\"_\")\n rouge_metric = partial(\n _compute_rouge_score, 
use_stemmer=use_stemmer, rouge_level=rouge_level, metric=metric, accumulate=accumulate\n )\n self.run_class_metric_test(\n ddp=ddp,\n preds=preds,\n targets=targets,\n metric_class=ROUGEScore,\n sk_metric=rouge_metric,\n dist_sync_on_step=dist_sync_on_step,\n metric_args=metric_args,\n key=pl_rouge_metric_key,\n )\n\n def test_rouge_score_functional(self, preds, targets, pl_rouge_metric_key, use_stemmer, accumulate):\n metric_args = {\"use_stemmer\": use_stemmer, \"accumulate\": accumulate}\n\n rouge_level, metric = pl_rouge_metric_key.split(\"_\")\n rouge_metric = partial(\n _compute_rouge_score, use_stemmer=use_stemmer, rouge_level=rouge_level, metric=metric, accumulate=accumulate\n )\n self.run_functional_metric_test(\n preds,\n targets,\n metric_functional=rouge_score,\n sk_metric=rouge_metric,\n metric_args=metric_args,\n key=pl_rouge_metric_key,\n )\n\n\ndef test_rouge_metric_raises_errors_and_warnings():\n \"\"\"Test that expected warnings and errors are raised.\"\"\"\n if not _NLTK_AVAILABLE:\n with pytest.raises(\n ValueError,\n match=\"ROUGE metric requires that nltk is installed.\"\n \"Either as `pip install torchmetrics[text]` or `pip install nltk`\",\n ):\n ROUGEScore()\n\n\ndef test_rouge_metric_wrong_key_value_error():\n key = (\"rouge1\", \"rouge\")\n\n with pytest.raises(ValueError):\n ROUGEScore(rouge_keys=key)\n\n with pytest.raises(ValueError):\n rouge_score(\n _inputs_single_sentence_single_reference.preds,\n _inputs_single_sentence_single_reference.targets,\n rouge_keys=key,\n accumulate=\"best\",\n )\n", "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport numpy as np\nimport pytest\nfrom sklearn.metrics import ndcg_score\nfrom torch import Tensor\n\nfrom tests.helpers import seed_all\nfrom tests.retrieval.helpers import (\n RetrievalMetricTester,\n _concat_tests,\n _default_metric_class_input_arguments_ignore_index,\n _default_metric_class_input_arguments_with_non_binary_target,\n _default_metric_functional_input_arguments_with_non_binary_target,\n _errors_test_class_metric_parameters_k,\n _errors_test_class_metric_parameters_with_nonbinary,\n _errors_test_functional_metric_parameters_k,\n _errors_test_functional_metric_parameters_with_nonbinary,\n)\nfrom torchmetrics.functional.retrieval.ndcg import retrieval_normalized_dcg\nfrom torchmetrics.retrieval.retrieval_ndcg import RetrievalNormalizedDCG\n\nseed_all(42)\n\n\ndef _ndcg_at_k(target: np.ndarray, preds: np.ndarray, k: int = None):\n \"\"\"Adapting `from sklearn.metrics.ndcg_score`.\"\"\"\n assert target.shape == preds.shape\n assert len(target.shape) == 1 # works only with single dimension inputs\n\n if target.shape[0] < 2: # ranking is equal to ideal ranking with a single document\n return np.array(1.0)\n\n preds = np.expand_dims(preds, axis=0)\n target = np.expand_dims(target, axis=0)\n\n return ndcg_score(target, preds, k=k)\n\n\nclass TestNDCG(RetrievalMetricTester):\n @pytest.mark.parametrize(\"ddp\", [True, False])\n @pytest.mark.parametrize(\"dist_sync_on_step\", [True, False])\n @pytest.mark.parametrize(\"empty_target_action\", [\"skip\", \"neg\", \"pos\"])\n @pytest.mark.parametrize(\"ignore_index\", [None, 3]) # avoid setting 0, otherwise test with all 0 targets will fail\n @pytest.mark.parametrize(\"k\", [None, 1, 4, 10])\n @pytest.mark.parametrize(**_default_metric_class_input_arguments_with_non_binary_target)\n def test_class_metric(\n self,\n ddp: bool,\n indexes: Tensor,\n preds: 
Tensor,\n target: Tensor,\n dist_sync_on_step: bool,\n empty_target_action: str,\n ignore_index: int,\n k: int,\n ):\n metric_args = dict(empty_target_action=empty_target_action, k=k, ignore_index=ignore_index)\n\n self.run_class_metric_test(\n ddp=ddp,\n indexes=indexes,\n preds=preds,\n target=target,\n metric_class=RetrievalNormalizedDCG,\n sk_metric=_ndcg_at_k,\n dist_sync_on_step=dist_sync_on_step,\n metric_args=metric_args,\n )\n\n @pytest.mark.parametrize(\"ddp\", [True, False])\n @pytest.mark.parametrize(\"dist_sync_on_step\", [True, False])\n @pytest.mark.parametrize(\"empty_target_action\", [\"skip\", \"neg\", \"pos\"])\n @pytest.mark.parametrize(\"k\", [None, 1, 4, 10])\n @pytest.mark.parametrize(**_default_metric_class_input_arguments_ignore_index)\n def test_class_metric_ignore_index(\n self,\n ddp: bool,\n indexes: Tensor,\n preds: Tensor,\n target: Tensor,\n dist_sync_on_step: bool,\n empty_target_action: str,\n k: int,\n ):\n metric_args = dict(empty_target_action=empty_target_action, k=k, ignore_index=-100)\n\n self.run_class_metric_test(\n ddp=ddp,\n indexes=indexes,\n preds=preds,\n target=target,\n metric_class=RetrievalNormalizedDCG,\n sk_metric=_ndcg_at_k,\n dist_sync_on_step=dist_sync_on_step,\n metric_args=metric_args,\n )\n\n @pytest.mark.parametrize(**_default_metric_functional_input_arguments_with_non_binary_target)\n @pytest.mark.parametrize(\"k\", [None, 1, 4, 10])\n def test_functional_metric(self, preds: Tensor, target: Tensor, k: int):\n self.run_functional_metric_test(\n preds=preds,\n target=target,\n metric_functional=retrieval_normalized_dcg,\n sk_metric=_ndcg_at_k,\n metric_args={},\n k=k,\n )\n\n @pytest.mark.parametrize(**_default_metric_class_input_arguments_with_non_binary_target)\n def test_precision_cpu(self, indexes: Tensor, preds: Tensor, target: Tensor):\n self.run_precision_test_cpu(\n indexes=indexes,\n preds=preds,\n target=target,\n metric_module=RetrievalNormalizedDCG,\n 
metric_functional=retrieval_normalized_dcg,\n )\n\n @pytest.mark.parametrize(**_default_metric_class_input_arguments_with_non_binary_target)\n def test_precision_gpu(self, indexes: Tensor, preds: Tensor, target: Tensor):\n self.run_precision_test_gpu(\n indexes=indexes,\n preds=preds,\n target=target,\n metric_module=RetrievalNormalizedDCG,\n metric_functional=retrieval_normalized_dcg,\n )\n\n @pytest.mark.parametrize(\n **_concat_tests(\n _errors_test_class_metric_parameters_with_nonbinary,\n _errors_test_class_metric_parameters_k,\n )\n )\n def test_arguments_class_metric(\n self, indexes: Tensor, preds: Tensor, target: Tensor, message: str, metric_args: dict\n ):\n if target.is_floating_point():\n pytest.skip(\"NDCG metric works with float target input\")\n\n self.run_metric_class_arguments_test(\n indexes=indexes,\n preds=preds,\n target=target,\n metric_class=RetrievalNormalizedDCG,\n message=message,\n metric_args=metric_args,\n exception_type=ValueError,\n kwargs_update={},\n )\n\n @pytest.mark.parametrize(\n **_concat_tests(\n _errors_test_functional_metric_parameters_with_nonbinary,\n _errors_test_functional_metric_parameters_k,\n )\n )\n def test_arguments_functional_metric(self, preds: Tensor, target: Tensor, message: str, metric_args: dict):\n if target.is_floating_point():\n pytest.skip(\"NDCG metric works with float target input\")\n\n self.run_functional_metric_arguments_test(\n preds=preds,\n target=target,\n metric_functional=retrieval_normalized_dcg,\n message=message,\n exception_type=ValueError,\n kwargs_update=metric_args,\n )\n" ]
[ [ "torch.argmax", "torch.tensor" ], [ "numpy.array", "sklearn.metrics.ndcg_score", "numpy.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
desbarmitar/monsterforge
[ "75a2d2f305bff329d3c640f18cefa8a1381df8d2" ]
[ "paperminis/generate_minis.py" ]
[ "import io\nimport logging\nimport re\nfrom collections import Counter\nfrom zipfile import ZIP_DEFLATED, ZipFile\n\nimport cv2 as cv\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\nfrom greedypacker import BinManager\n\nfrom paperminis.models import Creature, CreatureQuantity\nfrom paperminis.utils import download_image\nfrom .items import Item\n\nlogger = logging.getLogger(\"django\")\n\n\nclass MiniBuilder:\n\n def __init__(self, user):\n\n # user\n self.user = user\n self.sanitize = re.compile('[^a-zA-Z0-9\\(\\)\\_@]', re.UNICODE) # sanitize user input\n self.clean_email = self.sanitize.sub('', self.user.email)\n\n # TODO Clear this var\n self.file_name_body = self.clean_email\n self.creatures = []\n self.creature_counter = None\n self.minis = []\n self.sheets = None\n self.zip_container = None\n\n # Settings Containers\n self.print_margin = None\n self.dpmm = None # not fully supported setting yet, leave at 10\n self.grid_size = None\n self.enumerate = False\n self.force_name = None\n self.base_shape = None\n self.fixed_height = False\n self.darken = None\n self.font = cv.FONT_HERSHEY_SIMPLEX\n self.paper_format = None\n self.canvas = None\n\n # clear download cache for each run\n download_image.cache_clear()\n\n def add_bestiary(self, pk):\n creature_quantities = CreatureQuantity.objects.filter(owner=self.user, bestiary=pk)\n\n bestiary_name = self.sanitize.sub('', creature_quantities.first().bestiary.name)\n if self.file_name_body == self.clean_email:\n self.file_name_body = bestiary_name\n else:\n self.file_name_body += '_' + bestiary_name\n if creature_quantities:\n creatures = []\n for cq in creature_quantities:\n creatures.extend([cq.creature] * cq.quantity)\n self.add_creatures(creatures)\n else:\n return False\n\n def add_creatures(self, creatures):\n if isinstance(creatures, Creature):\n # add single creature\n self.creatures.append(creatures)\n elif all(isinstance(c, Creature) for c in creatures):\n # add list of creatures\n 
self.creatures.extend(creatures)\n else:\n return False\n\n def load_settings(self,\n paper_format='a4',\n print_margin=np.array([3.5, 4]),\n grid_size=24,\n base_shape='square',\n enumerate=False,\n force_name='no_force',\n fixed_height=False,\n darken=0):\n\n self.print_margin = print_margin\n self.dpmm = 10 # not fully supported setting yet, leave at 10\n self.grid_size = grid_size\n self.enumerate = enumerate\n self.force_name = force_name\n self.base_shape = base_shape\n self.fixed_height = fixed_height\n self.darken = darken\n self.paper_format = paper_format\n paper = {'a3': np.array([297, 420]),\n 'a4': np.array([210, 297]),\n 'letter': np.array([216, 279]),\n 'legal': np.array([216, 356]),\n 'tabloid': np.array([279, 432])}\n self.canvas = (paper[paper_format] - 2 * self.print_margin) * self.dpmm\n\n def build_all_and_zip(self):\n if self.enumerate:\n # if enumerate is true, settings are always loaded\n self.creature_counter = Counter([c.name for c in self.creatures])\n self.creature_counter = {key: val for key, val in self.creature_counter.items() if val > 1}\n\n self.minis = []\n for creature in self.creatures:\n mini = self.build_mini(creature)\n if not isinstance(mini, str):\n self.minis.append(mini)\n else:\n print('{} skipped with error: {}'.format(creature.name, mini))\n\n self.sheets = self.build_sheets(self.minis)\n self.zip_container = self.save_and_zip(self.sheets)\n logger.info(download_image.cache_info())\n return self.zip_container\n\n def build_mini(self, creature):\n if not hasattr(self, 'grid_size'):\n # check if settings loaded manually, otherwise load default settings\n self.load_settings()\n\n if not isinstance(creature, Creature):\n return 'Object is not a Creature.'\n\n if creature.img_url == '':\n return 'No image url found.'\n\n # Size-based settings in mm\n # after the change to how the font is handled, some settings here are obsolete\n # I will keep them in for now\n min_height_mm = 40\n if creature.size in ['S', 'T']:\n m_width = 
int(self.grid_size / 2)\n max_height_mm = 30\n n_height = 6\n font_size = 1.15 # opencv \"height\"\n font_height = 40 # PIL drawing max height for n_height = 8\n font_width = 1\n enum_size = 1.2\n enum_width = 3\n elif creature.size == 'M':\n m_width = self.grid_size\n max_height_mm = 40\n n_height = 8\n font_size = 1.15 # opencv \"height\"\n font_height = 50 # PIL drawing max height for n_height = 8\n font_width = 1\n enum_size = 2.2\n enum_width = 3\n elif creature.size == 'L':\n m_width = self.grid_size * 2\n max_height_mm = 50\n n_height = 10\n font_size = 2\n font_height = 70\n font_width = 2\n enum_size = 5 * self.grid_size / 24\n enum_width = 8 * self.grid_size / 24\n elif creature.size == 'H':\n m_width = self.grid_size * 3\n max_height_mm = 60 if not self.paper_format == 'letter' else 51\n n_height = 12\n font_size = 2.5\n font_height = 80\n font_width = 2\n enum_size = 8\n enum_width = 16\n elif creature.size == 'G':\n m_width = self.grid_size * 4\n max_height_mm = 80 if not self.paper_format == 'letter' else 73\n n_height = 14\n font_size = 3\n font_height = 100\n font_width = 3\n enum_size = 14\n enum_width = 32\n else:\n return 'Invalid creature size.'\n ## end of settings\n\n # mm to px\n width = m_width * self.dpmm\n name_height = n_height * self.dpmm\n base_height = m_width * self.dpmm\n max_height = max_height_mm * self.dpmm\n if self.fixed_height:\n min_height = max_height\n else:\n min_height = min_height_mm * self.dpmm\n\n text = creature.name\n\n # scale for grid size\n enum_size = int(np.ceil(enum_size * self.grid_size / 24))\n enum_width = int(np.ceil(enum_size * self.grid_size / 24))\n min_height = int(np.ceil(min_height * self.grid_size / 24))\n\n ## OPENCV versions (with an attempt to use utf-8 but I couldn't get it to work) of the nameplate.\n # It is now done with PIL to have UTF-8 support.\n # name plate\n # if creature.show_name:\n # n_img = np.zeros((name_height, width, 3), np.uint8) + 255\n # x_margin = 0\n # y_margin = 0\n # # find 
optimal font size\n # while x_margin < 2 or y_margin < 10:\n # font_size = round(font_size - 0.05, 2)\n # textsize = cv.getTextSize(text, self.font, font_size, font_width)[0]\n # x_margin = n_img.shape[1] - textsize[0]\n # y_margin = n_img.shape[0] - textsize[1]\n # # print(font_size, x_margin, y_margin)\n # # write text\n # textX = np.floor_divide(x_margin, 2)\n # textY = np.floor_divide(n_img.shape[0] + textsize[1], 2)\n #\n # cv.putText(n_img, text, (textX, textY), self.font, font_size, (0, 0, 0), font_width, cv.LINE_AA)\n # cv.rectangle(n_img, (0, 0), (n_img.shape[1] - 1, n_img.shape[0] - 1), (0, 0, 0), thickness=1)\n # # img = cv.circle(img, (100, 400), 20, (255,0,0), 3)\n # if creature.show_name:\n # n_img = np.zeros((name_height, width, 3), np.uint8) + 255\n # ft = cv.freetype.createFreeType2()\n # ft.loadFontData(fontFileName='DejaVuSans.ttf', id=0)\n # x_margin = 0\n # y_margin = 0\n # # find optimal font size\n # while x_margin < 2 or y_margin < 10:\n # font_size = round(font_size - 0.05, 2)\n # textsize = ft.getTextSize(text, font_size, font_width)[0]\n # x_margin = n_img.shape[1] - textsize[0]\n # y_margin = n_img.shape[0] - textsize[1]\n # # print(font_size, x_margin, y_margin)\n # # write text\n # textX = np.floor_divide(x_margin, 2)\n # textY = np.floor_divide(n_img.shape[0] + textsize[1], 2)\n #\n # ft.putText(n_img, text, (textX, textY), font_size, (0, 0, 0), font_width, cv.LINE_AA)\n # cv.rectangle(n_img, (0, 0), (n_img.shape[1] - 1, n_img.shape[0] - 1), (0, 0, 0), thickness=1)\n # # img = cv.circle(img, (100, 400), 20, (255,0,0), 3)\n\n ## nameplate\n show_name = \"\"\n\n if self.force_name == \"force_name\":\n show_name = True\n elif self.force_name == \"force_blank\":\n show_name = False\n else:\n show_name = creature.show_name\n\n if show_name:\n # PIL fix for utf-8 characters\n n_img_pil = Image.new(\"RGB\", (width, name_height), (255, 255, 255))\n x_margin = 0\n y_margin = 0\n # find optimal font size\n while x_margin < 2 or y_margin < 10:\n 
# print(font_height)\n unicode_font = ImageFont.truetype(\"paperminis/DejaVuSans.ttf\", font_height)\n font_height = round(font_height - 2, 2)\n textsize = unicode_font.getsize(text)\n im_w, im_h = n_img_pil.size\n x_margin = im_w - textsize[0]\n y_margin = im_h - textsize[1]\n # write text\n textX = x_margin // 2\n textY = y_margin // 2\n draw = ImageDraw.Draw(n_img_pil)\n draw.text((textX, textY), text, font=unicode_font, fill=(0, 0, 0))\n n_img = np.array(n_img_pil)\n cv.rectangle(n_img, (0, 0), (n_img.shape[1] - 1, n_img.shape[0] - 1), (0, 0, 0), thickness=1)\n else:\n n_img = np.zeros((1, width, 3), np.uint8)\n\n ## mimiature image\n m_img = download_image(creature.img_url)\n\n # fix grayscale images\n\n try:\n if len(m_img.shape) == 2:\n m_img = cv.cvtColor(m_img, cv.COLOR_GRAY2RGB)\n except:\n return 'Image could not be found or loaded.'\n\n # replace alpha channel with white for pngs (with fix for grayscale images)\n if m_img.shape[2] == 4:\n alpha_channel = m_img[:, :, 3]\n mask = (alpha_channel == 0)\n mask = np.dstack((mask, mask, mask))\n color = m_img[:, :, :3]\n color[mask] = 255\n m_img = color\n\n # find optimal size of image\n # leave 1 pixel on each side for black border\n if m_img.shape[1] > width - 2:\n f = (width - 2) / m_img.shape[1]\n m_img = cv.resize(m_img, (0, 0), fx=f, fy=f)\n white_vert = np.zeros((m_img.shape[0], 1, 3), np.uint8) + 255\n m_img = np.concatenate((white_vert, m_img, white_vert), axis=1)\n\n if m_img.shape[0] > max_height - 2:\n f = (max_height - 2) / m_img.shape[0]\n m_img = cv.resize(m_img, (0, 0), fx=f, fy=f)\n white_horiz = np.zeros((1, m_img.shape[1], 3), np.uint8) + 255\n m_img = np.concatenate((white_horiz, m_img, white_horiz), axis=0)\n\n if m_img.shape[1] < width:\n diff = width - m_img.shape[1]\n left = np.floor_divide(diff, 2)\n right = left\n if diff % 2 == 1: right += 1\n m_img = np.concatenate((np.zeros((m_img.shape[0], left, 3), np.uint8) + 255, m_img,\n np.zeros((m_img.shape[0], right, 3), np.uint8) + 255), 
axis=1)\n\n if m_img.shape[0] < min_height:\n diff = min_height - m_img.shape[0]\n top = np.floor_divide(diff, 2)\n bottom = top\n if diff % 2 == 1: bottom += 1\n if creature.position == Creature.WALKING:\n m_img = np.concatenate((np.zeros((diff, m_img.shape[1], 3), np.uint8) + 255, m_img), axis=0)\n elif creature.position == Creature.HOVERING:\n m_img = np.concatenate((np.zeros((top, m_img.shape[1], 3), np.uint8) + 255, m_img,\n np.zeros((bottom, m_img.shape[1], 3), np.uint8) + 255), axis=0)\n elif creature.position == Creature.FLYING:\n m_img = np.concatenate((m_img, np.zeros((diff, m_img.shape[1], 3), np.uint8) + 255), axis=0)\n else:\n return 'Position setting is invalid. Chose Walking, Hovering or Flying.'\n\n # draw border\n cv.rectangle(m_img, (0, 0), (m_img.shape[1] - 1, m_img.shape[0] - 1), (0, 0, 0), thickness=1)\n\n ## flipped miniature image\n m_img_flipped = np.flip(m_img, 0)\n if self.darken:\n # change Intensity (V-Value) in HSV color space\n hsv = cv.cvtColor(m_img_flipped, cv.COLOR_BGR2HSV)\n h, s, v = cv.split(hsv)\n # darkening factor between 0 and 1\n factor = max(min((1 - self.darken / 100), 1), 0)\n v[v < 255] = v[v < 255] * (factor)\n final_hsv = cv.merge((h, s, v))\n m_img_flipped = cv.cvtColor(final_hsv, cv.COLOR_HSV2BGR)\n\n ## base\n bgr_color = tuple(int(creature.color[i:i + 2], 16) for i in (4, 2, 0))\n demi_base = base_height // 2\n if creature.size == 'G':\n feet_mod = 1\n else:\n feet_mod = 2\n base_height = int(np.floor(demi_base * feet_mod))\n b_img = np.zeros((base_height, width, 3), np.uint8) + 255\n # fill base\n if self.base_shape == 'square':\n cv.rectangle(b_img, (0, 0), (b_img.shape[1] - 1, demi_base - 1), bgr_color, thickness=-1)\n cv.rectangle(b_img, (0, 0), (b_img.shape[1] - 1, b_img.shape[0] - 1), (0, 0, 0), thickness=1)\n elif self.base_shape == 'circle':\n cv.rectangle(b_img, (0, 0), (b_img.shape[1] - 1, demi_base - 1), bgr_color, thickness=-1)\n cv.rectangle(b_img, (0, 0), (b_img.shape[1] - 1, b_img.shape[0] - 1), (0, 
0, 0), thickness=1)\n cv.ellipse(b_img, (width // 2, 0), (width // 2, width // 2), 0, 0, 180, bgr_color, -1)\n cv.ellipse(b_img, (width // 2, 0), (width // 2, width // 2), 0, 0, 180, (0, 0, 0), 2)\n if feet_mod >= 2:\n cv.ellipse(b_img, (width // 2, base_height), (width // 2, width // 2), 0, 180, 360, (0, 0, 0), 2)\n cv.line(b_img, (0, base_height), (width, base_height), (0, 0, 0), 3)\n elif self.base_shape == 'hexagon':\n half = width // 2\n hexagon_bottom = np.array([(0, 0), (width // 4, half), (width // 4 * 3, half), (width, 0)], np.int32)\n hexagon_top = np.array([(0, width), (width // 4, half), (width // 4 * 3, half), (width, width)], np.int32)\n cv.fillConvexPoly(b_img, hexagon_bottom, bgr_color, 1)\n if feet_mod >= 2:\n cv.polylines(b_img, [hexagon_top], True, (0, 0, 0), 2)\n else:\n return 'Invalid base shape. Choose square, hexagon or circle.'\n\n # enumerate\n if self.enumerate and creature.name in self.creature_counter:\n # print(creature.name, self.creature_counter[creature.name])\n text = str(self.creature_counter[creature.name])\n textsize = cv.getTextSize(text, self.font, enum_size, enum_width)[0]\n x_margin = b_img.shape[1] - textsize[0]\n y_margin = b_img.shape[0] - textsize[1]\n\n # Number color\n if creature.color == 'ffffff':\n enum_color = (0, 0, 0)\n else:\n enum_color = (255, 255, 255)\n\n textX = np.floor_divide(x_margin, 2)\n textY = np.floor_divide(demi_base + textsize[1], 2)\n cv.putText(b_img, text, (textX, textY), self.font, enum_size, enum_color, enum_width, cv.LINE_AA)\n\n self.creature_counter[creature.name] -= 1\n\n ## construct full miniature\n img = np.concatenate((m_img, n_img, b_img), axis=0)\n # m_img_flipped = np.flip(m_img, 0)\n\n nb_flipped = np.rot90(np.concatenate((n_img, b_img), axis=0), 2)\n img = np.concatenate((nb_flipped, m_img_flipped, img), axis=0)\n\n ## Save image (not needed; only for debug/dev)\n # RGB_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n # im_pil = Image.fromarray(RGB_img)\n # im_pil.save(self.save_dir + 
creature.name + \".png\", dpi=(25.4 * self.dpmm, 25.4 * self.dpmm))\n\n return img\n\n def build_sheets(self, minis):\n M = BinManager(self.canvas[0], self.canvas[1], pack_algo='guillotine', heuristic='best_shortside',\n wastemap=True, rotation=True)\n its = {}\n item_id = 0\n for m in minis:\n its[item_id] = m\n item = Item(m.shape[1], m.shape[0], item_id)\n M.add_items(item)\n item_id += 1\n\n M.execute()\n\n result = M.bins\n\n sheets = []\n for r in result:\n img = np.zeros((int(self.canvas[1]), int(self.canvas[0]), 3), np.uint8) + 255\n for it in r.items:\n # print(it)\n x = int(it.x)\n y = int(it.y)\n w = int(it.width)\n h = int(it.height)\n it_id = int(it.item_id)\n m_img = its[it_id]\n test = m_img\n if w > h: # rotated\n m_img = np.rot90(m_img, axes=(1, 0))\n shape = m_img.shape\n # print('x',x,'y',y,'shape',m_img.shape)\n img[y:y + shape[0], x:x + shape[1], :] = m_img\n sheets.append(img)\n\n return sheets\n\n def show_sheets(self, sheets):\n sheet_nr = 1\n for sheet in sheets:\n RGB_img = cv.cvtColor(sheet, cv.COLOR_BGR2RGB)\n img_small = cv.resize(sheet, (0, 0), fx=.4, fy=.4)\n cv.imshow('Img', img_small)\n cv.waitKey(0)\n\n def save_and_zip(self, sheets):\n sheet_nr = 1\n zip_memory = io.BytesIO()\n zipfile = ZipFile(zip_memory, mode='a', compression=ZIP_DEFLATED)\n\n for sheet in sheets:\n img_buffer = io.BytesIO()\n rgb_img = cv.cvtColor(sheet, cv.COLOR_BGR2RGB)\n im_pil = Image.fromarray(rgb_img)\n im_pil.save(img_buffer, dpi=(25.4 * self.dpmm, 25.4 * self.dpmm), format='PNG')\n img_buffer.seek(0)\n zipfile.writestr('sheet_' + str(sheet_nr) + '.png', img_buffer.getbuffer())\n sheet_nr += 1\n\n zipfile.close()\n return zip_memory\n" ]
[ [ "numpy.rot90", "numpy.floor_divide", "numpy.dstack", "numpy.concatenate", "numpy.ceil", "numpy.floor", "numpy.array", "numpy.flip", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tardis-sn/tardisanalysis
[ "c326d0d9559e77366e5d833aef1667020a529b65" ]
[ "tardis_kromer_plot.py" ]
[ "\"\"\"A simple plotting tool to create spectral diagnostics plots similar to those\noriginally proposed by M. Kromer (see, for example, Kromer et al. 2013, figure\n4).\n\"\"\"\nimport logging\nimport numpy as np\nimport astropy.units as units\nimport astropy.constants as csts\nimport pandas as pd\n\ntry:\n import astropy.modeling.blackbody as abb\nexcept ImportError: # for astropy version < 2.0\n import astropy.analytic_functions as abb\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport matplotlib.lines as lines\nimport matplotlib.cm as cm\nfrom tardis_minimal_model import minimal_model\nfrom tardis.util.base import (\n species_string_to_tuple,\n species_tuple_to_string,\n roman_to_int,\n int_to_roman,\n)\n\nplt.rcdefaults()\n\nlogger = logging.getLogger(__name__)\n\nelements = pd.read_csv(\"elements.csv\", names=[\"chem_symbol\", \"atomic_no\"])\ninv_elements = pd.Series(\n elements[\"chem_symbol\"], index=elements[\"atomic_no\"]\n ).to_dict()\n\nclass tardis_kromer_plotter(object):\n \"\"\"A plotter, generating spectral diagnostics plots as proposed by M.\n Kromer.\n With this tool, a specific visualisation of Tardis spectra may be produced.\n It illustrates which elements predominantly contribute to the emission and\n absorption part of the emergent (virtual) packet spectrum.\n Once a model is defined, a series of queries is performed on the packet\n property arrays. The results are cached and the \"Kromer\" plot is produced\n with the main method of this class, namely with ~generate_plot.\n Parameters\n ----------\n mdl : minimal_model\n a minimal_model object containing the Tardis run\n mode : str, optional\n 'real' (default) or 'virtual'; determines which packet population is\n used to generate the Kromer plot.\n Notes\n -----\n For this to work, the model must be generated by a Tardis calculation using\n the virtual packet logging capability. 
This requires a compilation with the\n --with-vpacket-logging flag.\n This way of illustrating the spectral synthesis process was introduced by\n M. Kromer (see e.g. [1]_).\n References\n ----------\n .. [1] Kromer et al. \"SN 2010lp - Type Ia Supernova from a Violent Merger\n of Two Carbon-Oxygen White Dwarfs\" ApjL, 2013, 778, L18\n \"\"\"\n\n def __init__(self, mdl, mode=\"real\"):\n\n self._mode = None\n self.mode = mode\n\n self._mdl = None\n self._zmax = 100\n self._cmap = cm.jet\n self._xlim = None\n self._ylim = None\n self._twinx = False\n\n self._bins = None\n self._ax = None\n self._pax = None\n\n self._noint_mask = None\n self._escat_mask = None\n self._escatonly_mask = None\n self._line_mask = None\n self._lam_escat = None\n self._lam_noint = None\n self._weights_escat = None\n self._weights_noint = None\n self._line_in_infos = None\n self._line_in_nu = None\n self._line_in_L = None\n self._line_out_infos = None\n self._line_out_nu = None\n self._line_out_L = None\n\n self.mdl = mdl\n\n @property\n def mode(self):\n \"\"\"packet mode - use real or virtual packets for plotting\"\"\"\n return self._mode\n\n @mode.setter\n def mode(self, val):\n known_modes = [\"real\", \"virtual\"]\n try:\n assert val in known_modes\n except AssertionError:\n raise ValueError(\"unknown mode\")\n self._mode = val\n\n @property\n def mdl(self):\n \"\"\"Tardis model object\"\"\"\n return self._mdl\n\n @mdl.setter\n def mdl(self, val):\n try:\n assert type(val) == minimal_model\n except AssertionError:\n raise ValueError(\"'mdl' must be either a minimal_model\")\n\n if val.mode != self.mode:\n raise ValueError(\n \"packet mode of minimal_model doesn't\" \" match requested mode\"\n )\n if not val.readin:\n raise ValueError(\"passing empty minimal_model; read in data first\")\n\n self._reset_cache()\n self._mdl = val\n\n @property\n def zmax(self):\n \"\"\"Maximum atomic number\"\"\"\n return self._zmax\n\n @property\n def cmap(self):\n \"\"\"Colour map, used to highlight the 
different atoms\"\"\"\n return self._cmap\n\n @property\n def ax(self):\n \"\"\"Main axes, containing the emission part of the Kromer plot\"\"\"\n return self._ax\n\n @property\n def pax(self):\n \"\"\"Secondary axes, containing the absorption part of the Kromer plot\"\"\"\n return self._pax\n\n @property\n def bins(self):\n \"\"\"frequency binning for the spectral visualisation\"\"\"\n return self._bins\n\n @property\n def xlim(self):\n \"\"\"wavelength limits\"\"\"\n return self._xlim\n\n @property\n def ylim(self):\n \"\"\"Flux limits\"\"\"\n return self._ylim\n\n @property\n def twinx(self):\n \"\"\"switch to decide where to place the absorption part of the Kromer\n plot\"\"\"\n return self._twinx\n\n @property\n def noint_mask(self):\n \"\"\"Masking array, highlighting the packets that never interacted\"\"\"\n if self._noint_mask is None:\n self._noint_mask = self.mdl.last_interaction_type == -1\n return self._noint_mask\n\n @property\n def escat_mask(self):\n \"\"\"Masking array, highlighting the packets that performed Thomson\n scatterings\"\"\"\n if self._escat_mask is None:\n self._escat_mask = self.mdl.last_interaction_type == 1\n return self._escat_mask\n\n @property\n def escatonly_mask(self):\n \"\"\"Masking array, highlighting the packets that only performed Thomson\n scatterings\"\"\"\n if self._escatonly_mask is None:\n tmp = (\n (self.mdl.last_line_interaction_in_id == -1) * (self.escat_mask)\n ).astype(np.bool)\n self._escatonly_mask = tmp\n return self._escatonly_mask\n\n @property\n def line_mask(self):\n \"\"\"Mask array, highlighting packets whose last interaction was with a\n line\"\"\"\n if self._line_mask is None:\n self._line_mask = (self.mdl.last_interaction_type > -1) * (\n self.mdl.last_line_interaction_in_id > -1\n )\n return self._line_mask\n\n @property\n def lam_noint(self):\n \"\"\"Wavelength of the non-interacting packets\"\"\"\n if self._lam_noint is None:\n self._lam_noint = (csts.c.cgs / 
(self.mdl.packet_nus[self.noint_mask])).to(\n units.AA\n )\n return self._lam_noint\n\n @property\n def lam_escat(self):\n \"\"\"Wavelength of the purely electron scattering packets\"\"\"\n if self._lam_escat is None:\n self._lam_escat = (\n csts.c.cgs / (self.mdl.packet_nus[self.escatonly_mask])\n ).to(units.AA)\n return self._lam_escat\n\n @property\n def weights_escat(self):\n \"\"\"luminosity of the only electron scattering packets\"\"\"\n if self._weights_escat is None:\n self._weights_escat = (\n self.mdl.packet_energies[self.escatonly_mask]\n / self.mdl.time_of_simulation\n )\n return self._weights_escat\n\n @property\n def weights_noint(self):\n \"\"\"luminosity of the non-interacting packets\"\"\"\n if self._weights_noint is None:\n self._weights_noint = (\n self.mdl.packet_energies[self.noint_mask] / self.mdl.time_of_simulation\n )\n return self._weights_noint\n\n @property\n def line_out_infos(self):\n \"\"\"Line ids of the transitions packets were emitted last\"\"\"\n if self._line_out_infos is None:\n tmp = self.mdl.last_line_interaction_out_id\n ids = tmp[self.line_mask]\n self._line_out_infos = self.mdl.lines.iloc[ids]\n return self._line_out_infos\n\n @property\n def line_out_nu(self):\n \"\"\"frequency of the transitions packets were emitted last\"\"\"\n if self._line_out_nu is None:\n self._line_out_nu = self.mdl.packet_nus[self.line_mask]\n return self._line_out_nu\n\n @property\n def line_out_L(self):\n \"\"\"luminosity of the line interaction packets\"\"\"\n if self._line_out_L is None:\n tmp = self.mdl.packet_energies\n self._line_out_L = tmp[self.line_mask]\n return self._line_out_L\n\n @property\n def line_in_infos(self):\n \"\"\"Line ids of the transitions packets were last absorbed\"\"\"\n if self._line_in_infos is None:\n tmp = self.mdl.last_line_interaction_in_id\n ids = tmp[self.line_mask]\n self._line_in_infos = self.mdl.lines.iloc[ids]\n return self._line_in_infos\n\n @property\n def line_in_nu(self):\n \"\"\"frequencies of the 
transitions packets were last absorbed\"\"\"\n if self._line_in_nu is None:\n nus = self.mdl.last_interaction_in_nu\n self._line_in_nu = nus[self.line_mask]\n return self._line_in_nu\n\n @property\n def line_in_L(self):\n \"\"\"luminosity of the line interaction packets\"\"\"\n if self._line_in_L is None:\n tmp = self.mdl.packet_energies\n self._line_in_L = tmp[self.line_mask]\n return self._line_in_L\n\n @property\n def line_info(self):\n \"\"\"produces list of elements to be included in the kromer plot\"\"\"\n # gets list of elements and number of emitted packets\n self.last_line_interaction_out_id = self.line_out_infos\n self.last_line_interaction_out_angstrom = self.line_out_nu.to(\n units.Angstrom, equivalencies=units.spectral()\n )\n\n self.last_line_interaction_out_id[\n \"emitted_wavelength\"\n ] = self.last_line_interaction_out_angstrom\n\n self.line_out_infos_within_xlims = self.last_line_interaction_out_id.loc[\n (\n self.last_line_interaction_out_id.emitted_wavelength\n >= self._xlim[0]\n )\n & (\n self.last_line_interaction_out_id.emitted_wavelength\n <= self._xlim[1]\n )\n ]\n\n # gets list of elements and number of absorbed packets\n self.last_line_interaction_in_id = self.line_in_infos\n self.last_line_interaction_in_angstrom = self.line_in_nu.to(\n units.Angstrom, equivalencies=units.spectral()\n )\n\n self.last_line_interaction_in_id[\n \"emitted_wavelength\"\n ] = self.last_line_interaction_in_angstrom\n\n self.line_in_infos_within_xlims = self.last_line_interaction_in_id.loc[\n (\n self.last_line_interaction_in_id.emitted_wavelength\n >= self._xlim[0]\n )\n & (\n self.last_line_interaction_in_id.emitted_wavelength\n <= self._xlim[1]\n )\n ]\n\n self.line_in_and_out_infos_within_xlims = pd.concat([self.line_in_infos_within_xlims, self.line_out_infos_within_xlims])\n\n # this generates the 4-digit ID for all transitions in the model\n # (e.g. 
Fe III line --> 2602)\n self.line_in_and_out_infos_within_xlims[\"ion_id\"] = (\n self.line_in_and_out_infos_within_xlims[\"atomic_number\"] * 100\n + self.line_in_and_out_infos_within_xlims[\"ion_number\"]\n )\n\n # this is a list that will hold which elements should all be in the\n # same colour. This is used if the user requests a mix of ions and\n # elements.\n self.keep_colour = []\n # this reads in the species specified by user and generates the 4-digit\n # ID keys for them\n if self._species_list is not None:\n # create a list of the ions ids requested by species_list\n requested_species_ids = []\n # check if there are any digits in the species list. If there are\n # then exit\n # species_list should only contain species in the Roman numeral\n # format, e.g. Si II, and each ion must contain a space\n if any(char.isdigit() for char in \" \".join(self._species_list)) == True:\n raise ValueError(\n \"All species must be in Roman numeral form, e.g. Si II\"\n )\n else:\n # go through each of the request species. Check whether it is\n # an element or ion (ions have spaces). If it is an element,\n # add all possible ions to the ions list. Otherwise just add\n # the requested ion\n for species in self._species_list:\n if \" \" in species:\n requested_species_ids.append(\n [\n species_string_to_tuple(species)[0] * 100\n + species_string_to_tuple(species)[1]\n ]\n )\n else:\n atomic_number = elements.loc[elements['chem_symbol'] == species.lower(), 'atomic_no'].values[0]\n requested_species_ids.append(\n [atomic_number * 100 + i for i in np.arange(atomic_number)]\n )\n self.keep_colour.append(atomic_number)\n self.requested_species_ids = [\n species_id for list in requested_species_ids for species_id in list\n ]\n\n # now we are getting the list of unique values for 'ion_id' if we would\n # like to use species. 
Otherwise we get unique atomic numbers\n if self._species_list is not None:\n self._elements_in_kromer_plot = np.c_[\n np.unique(\n self.line_in_and_out_infos_within_xlims.ion_id.values,\n return_counts=True,\n )\n ]\n else:\n self._elements_in_kromer_plot = np.c_[\n np.unique(\n self.line_in_and_out_infos_within_xlims.atomic_number.values,\n return_counts=True,\n )\n ]\n\n return self._elements_in_kromer_plot\n\n def _reset_cache(self):\n \"\"\"Reset cached variables - only needed in case the model is changed\n after initialisation\"\"\"\n\n self._noint_mask = None\n self._escat_mask = None\n self._escatonly_mask = None\n self._line_mask = None\n self._lam_escat = None\n self._lam_noint = None\n self._weights_escat = None\n self._weights_noint = None\n self._line_in_infos = None\n self._line_in_nu = None\n self._line_in_L = None\n self._line_out_infos = None\n self._line_out_nu = None\n self._line_out_L = None\n\n def generate_plot(\n self,\n ax=None,\n cmap=cm.jet,\n bins=None,\n xlim=None,\n ylim=None,\n nelements=None,\n twinx=False,\n species_list=None,\n ):\n \"\"\"Generate the actual \"Kromer\" plot\n Parameters\n ----------\n ax : matplotlib.axes or None\n axes object into which the emission part of the Kromer plot should\n be plotted; if None, a new one is generated (default None)\n cmap : matplotlib.cm.ListedColormap or None\n color map object used for the illustration of the different atomic\n contributions (default matplotlib.cm.jet)\n bins : np.ndarray or None\n array of the wavelength bins used for the illustration of the\n atomic contributions; if None, the same binning as for the stored\n virtual spectrum is used (default None)\n xlim : tuple or array-like or None\n wavelength limits for the display; if None, the x-axis is\n automatically scaled (default None)\n ylim : tuple or array-like or None\n flux limits for the display; if None, the y-axis is automatically\n scaled (default None)\n nelements: int or None\n number of elements that should be 
included in the Kromer plots.\n The top nelements are determined based on those with the most packet\n interactions\n twinx : boolean\n determines where the absorption part of the Kromer plot is placed,\n if True, the absorption part is attached at the top of the main\n axes box, otherwise it is placed below the emission part (default\n False)\n species_list: list of strings or None\n list of strings containing the names of species that should be included in the Kromer plots,\n e.g. ['Si II', 'Ca II']\n Returns\n -------\n fig : matplotlib.figure\n figure instance containing the plot\n \"\"\"\n self._ax = None\n self._pax = None\n\n self._cmap = cmap\n self._ax = ax\n self._ylim = ylim\n self._twinx = twinx\n\n # the species list can contain either a specific element, a specific\n # ion, a range of ions, or any combination of these if the list\n # contains a range of ions, separate each one into a new entry in the\n # species list\n full_species_list = []\n if species_list is not None:\n for species in species_list:\n # check if a hyphen is present. If it is, then it indicates a\n # range of ions. 
Add each ion in that range to the list\n if \"-\" in species:\n element = species.split(\" \")[0]\n first_ion_numeral = roman_to_int(\n species.split(\" \")[-1].split(\"-\")[0]\n )\n second_ion_numeral = roman_to_int(\n species.split(\" \")[-1].split(\"-\")[-1]\n )\n for i in np.arange(first_ion_numeral, second_ion_numeral + 1):\n full_species_list.append(element + \" \" + int_to_roman(i))\n else:\n full_species_list.append(species)\n self._species_list = full_species_list\n else:\n self._species_list = None\n\n \n if xlim is None:\n self._xlim = [\n np.min(self.mdl.spectrum_wave).value,\n np.max(self.mdl.spectrum_wave).value,\n ]\n else:\n self._xlim = xlim\n\n if bins is None:\n self._bins = self.mdl.spectrum_wave[::-1]\n else:\n self._bins = bins\n\n \n \n # get the elements/species to be included in the plot\n self._elements_in_kromer_plot = self.line_info\n\n\n # if no nelements and no species list is specified, then the number of\n # elements to be included in the colourbar is determined from the list\n # of unique elements that appear in the model\n if nelements is None and species_list is None:\n self._nelements = len(np.unique(self.line_in_and_out_infos_within_xlims.atomic_number.values))\n elif nelements is None and species_list is not None:\n # if species_list has been specified, then the number of elements\n # to be included is set to the length of that list\n self._nelements = len(self._species_list)\n else:\n # if nelements has been specified, then the number of elements to\n # be included is set to the length of that list\n self._nelements = nelements\n\n\n # if the length of self._elements_in_kromer_plot exceeds the requested\n # number of elements to be included in the colourbar, then this if\n # statement applies\n if self._species_list is not None:\n # if we have specified a species list then only take those species\n # that are requested\n mask = np.in1d(\n self._elements_in_kromer_plot[:, 0], self.requested_species_ids\n )\n 
self._elements_in_kromer_plot = self._elements_in_kromer_plot[mask]\n elif len(self._elements_in_kromer_plot) > self._nelements:\n # if nelements is specified, then sort to find the top contributing\n # elements, pick the top nelements, and sort back by atomic number\n self._elements_in_kromer_plot = self._elements_in_kromer_plot[\n np.argsort(self._elements_in_kromer_plot[:, 1])[::-1]\n ]\n self._elements_in_kromer_plot = self._elements_in_kromer_plot[\n : self._nelements\n ]\n self._elements_in_kromer_plot = self._elements_in_kromer_plot[\n np.argsort(self._elements_in_kromer_plot[:, 0])\n ]\n else:\n # if the length of self._elements_in_kromer_plot is less than the\n # requested number of elements in the model, then this requested\n # length is updated to be the length of length of\n # self._elements_in_kromer_plot\n self._nelements = len(self._elements_in_kromer_plot)\n\n\n\n # this will reset nelements if species_list is turned on\n # it's possible to request a species that doesn't appear in the plot\n # this will ensure that species isn't counted when determining labels\n # and colours\n if self._species_list is not None:\n labels = []\n for species in self._species_list:\n if \" \" in species:\n atomic_number = species_string_to_tuple(species)[0]\n ion_number = species_string_to_tuple(species)[1]\n\n species_id = atomic_number * 100 + ion_number\n if species_id in self._elements_in_kromer_plot:\n labels.append(species)\n else:\n labels.append(species)\n self._nelements = len(labels)\n\n \n \n \n self._axes_handling_preparation()\n self._generate_emission_part()\n self._generate_photosphere_part()\n self._generate_and_add_colormap()\n self._generate_and_add_legend()\n self._paxes_handling_preparation()\n self._generate_absorption_part()\n self._axis_handling_label_rescale()\n\n return plt.gcf()\n\n def _axes_handling_preparation(self):\n \"\"\"prepare the main axes; create a new axes if none exists\"\"\"\n\n if self._ax is None:\n self._ax = 
plt.figure().add_subplot(111)\n\n def _paxes_handling_preparation(self):\n \"\"\"prepare the axes for the absorption part of the Kromer plot\n according to the twinx value\"\"\"\n\n if self.twinx:\n self._pax = self._ax.twinx()\n else:\n self._pax = self._ax\n\n def _generate_emission_part(self):\n \"\"\"generate the emission part of the Kromer plot\"\"\"\n\n lams = [self.lam_noint, self.lam_escat]\n weights = [self.weights_noint, self.weights_escat]\n colors = [\"black\", \"grey\"]\n\n # if species_list is entered, the ion_id will be used to determine the\n # colours, etc\n if self._species_list is not None:\n values_to_compare = np.unique(\n self.line_in_and_out_infos_within_xlims.ion_id.values,\n return_counts=False,\n )\n else:\n # otherwise, if there is no species_list, then the atomic_number i\n #s used for colours, etc.\n values_to_compare = np.unique(\n self.line_in_and_out_infos_within_xlims.atomic_number.values,\n return_counts=False,\n )\n\n # this first for loop is to go through all elements and colour all\n # elements as 'Other' if they weren't requested or among the top\n # nelements. The reason to do it twice is to ensure that the colours\n # are stacked appropriately, e.g. 
all 'other' are together\n other_species_lams = []\n other_species_weights = []\n for zi in values_to_compare:\n # zi is the unique 4-digit code for the species in the model\n # determining the atomic and ion numbers for all ions in our model\n if self._species_list is not None:\n ion_number = zi % 100\n atomic_number = (zi - ion_number) / 100\n else:\n atomic_number = zi\n\n # if the ion is not included in our list for the colourbar, then\n # its contribution is added here to the miscellaneous grey shaded\n # region of the plot\n if zi not in self._elements_in_kromer_plot[:, 0]:\n # if species_list is given then use the atomic number and\n # ion_number to peforming masking\n if self._species_list is not None:\n mask = (\n self.line_out_infos.atomic_number.values == atomic_number\n ) & (self.line_out_infos.ion_number.values == ion_number)\n else:\n # otherwise only elements are plotted, so only use the\n # atomic number\n mask = self.line_out_infos.atomic_number.values == atomic_number\n\n other_species_lams += (csts.c.cgs / (self.line_out_nu[mask])).to(units.AA).value.tolist()\n other_species_weights += (self.line_out_L[mask] / self.mdl.time_of_simulation).value.tolist()\n\n other_species_lams = other_species_lams * units.AA\n other_species_weights = other_species_weights * units.erg / units.s\n\n lams.append(other_species_lams)\n weights.append(other_species_weights)\n colors.append(\"silver\")\n\n ii = 0\n # this is a variable that will allow for situations where elements and\n # ions are requested in the same list this will ensure that any ions\n # for a requested element will all be coloured the same\n previous_atomic_number = 0\n for zi in values_to_compare:\n # zi is the unique 4-digit code for the species in the model\n # determining the atomic and ion numbers for all ions in our model\n if self._species_list is not None:\n ion_number = zi % 100\n atomic_number = (zi - ion_number) / 100\n else:\n atomic_number = zi\n\n # if the ion is included in our list 
for the colourbar, then its\n # contribution is added here as a colour to the plot\n if zi in self._elements_in_kromer_plot[:, 0]:\n # if this is the first ion, don't update the colour\n if (previous_atomic_number == 0):\n ii = ii\n previous_atomic_number = atomic_number\n elif atomic_number in self.keep_colour:\n # if this ion is grouped into an element, check whether\n # this is the first ion of that element to occur if it is,\n # then update the colour. If it isn't then don't update the\n # colour\n if previous_atomic_number == atomic_number:\n ii = ii\n previous_atomic_number = atomic_number\n else:\n ii = ii +1\n previous_atomic_number = atomic_number\n else:\n ii = ii + 1\n previous_atomic_number = atomic_number\n if self._species_list is not None:\n mask = (\n self.line_out_infos.atomic_number.values == atomic_number\n ) & (self.line_out_infos.ion_number.values == ion_number)\n else:\n mask = self.line_out_infos.atomic_number.values == atomic_number\n\n lams.append((csts.c.cgs / (self.line_out_nu[mask])).to(units.AA))\n weights.append(self.line_out_L[mask] / self.mdl.time_of_simulation)\n colors.append(self.cmap(float(ii) / float(self._nelements)))\n\n Lnorm = 0\n for w, lam in zip(weights, lams):\n Lnorm += np.sum(w[(lam >= self.bins[0]) * (lam <= self.bins[-1])])\n\n lams = [tmp_lam.value for tmp_lam in lams]\n weights = [tmp_wt.value for tmp_wt in weights]\n ret = self.ax.hist(\n lams,\n bins=self.bins.value,\n stacked=True,\n histtype=\"stepfilled\",\n density=True,\n weights=weights,\n )\n\n for i, col in enumerate(ret[-1]):\n for reti in col:\n reti.set_facecolor(colors[i])\n reti.set_edgecolor(colors[i])\n reti.set_linewidth(0)\n reti.xy[:, 1] *= Lnorm.to(\"erg / s\").value\n\n self.ax.plot(\n self.mdl.spectrum_wave,\n self.mdl.spectrum_luminosity,\n color=\"blue\",\n drawstyle=\"steps-post\",\n lw=0.5,\n )\n\n def _generate_photosphere_part(self):\n \"\"\"generate the photospheric input spectrum part of the Kromer plot\"\"\"\n\n Lph = (\n 
abb.blackbody_lambda(self.mdl.spectrum_wave, self.mdl.t_inner)\n * 4\n * np.pi ** 2\n * self.mdl.R_phot ** 2\n * units.sr\n ).to(\"erg / (AA s)\")\n\n self.ax.plot(self.mdl.spectrum_wave, Lph, color=\"red\", ls=\"dashed\")\n\n def _generate_absorption_part(self):\n \"\"\"generate the absorption part of the Kromer plot\"\"\"\n\n lams = []\n weights = []\n colors = []\n\n if self._species_list is not None:\n values_to_compare = np.unique(\n self.line_in_and_out_infos_within_xlims.ion_id.values,\n return_counts=False,\n )\n else:\n values_to_compare = np.unique(\n self.line_in_and_out_infos_within_xlims.atomic_number.values,\n return_counts=False,\n )\n\n other_species_lams = []\n other_species_weights = []\n for zi in values_to_compare:\n # zi is the unique 4-digit code for the species in the model\n # determining the atomic and ion numbers for all ions in our model\n if self._species_list is not None:\n ion_number = zi % 100\n atomic_number = (zi - ion_number) / 100\n else:\n atomic_number = zi\n\n # if the ion is not included in our list for the colourbar, then\n # its contribution is added here to the miscellaneous grey shaded\n # region of the plot\n if zi not in self._elements_in_kromer_plot[:, 0]:\n\n if self._species_list is not None:\n mask = (\n self.line_out_infos.atomic_number.values == atomic_number\n ) & (self.line_out_infos.ion_number.values == ion_number)\n else:\n mask = self.line_out_infos.atomic_number.values == atomic_number\n\n other_species_lams += (csts.c.cgs / (self.line_in_nu[mask])).to(units.AA).value.tolist()\n other_species_weights += (self.line_in_L[mask] / self.mdl.time_of_simulation).value.tolist()\n\n other_species_lams = other_species_lams * units.AA\n other_species_weights = other_species_weights * units.erg / units.s\n\n lams.append(other_species_lams)\n weights.append(other_species_weights)\n colors.append(\"silver\")\n\n\n ii = 0\n previous_atomic_number = 0\n for zi in values_to_compare:\n # zi is the unique 4-digit code for the 
species in the model\n # determining the atomic and ion numbers for all ions in our model\n if self._species_list is not None:\n ion_number = zi % 100\n atomic_number = (zi - ion_number) / 100\n else:\n atomic_number = zi\n\n # if the ion is included in our list for the colourbar, then its\n # contribution is added here as a unique colour to the plot\n if zi in self._elements_in_kromer_plot[:, 0]:\n # if this is the first ion, don't update the colour\n if (previous_atomic_number == 0):\n ii = ii\n previous_atomic_number = atomic_number\n elif atomic_number in self.keep_colour:\n # if this ion is grouped into an element, check whether\n # this is the first ion of that element to occur if it is,\n # then update the colour. If it isn't then don't update the\n # colour\n if previous_atomic_number == atomic_number:\n ii = ii\n previous_atomic_number = atomic_number\n else:\n ii = ii +1\n previous_atomic_number = atomic_number\n else:\n ii = ii + 1\n previous_atomic_number = atomic_number\n if self._species_list is not None:\n mask = (\n self.line_out_infos.atomic_number.values == atomic_number\n ) & (self.line_out_infos.ion_number.values == ion_number)\n else:\n mask = self.line_out_infos.atomic_number.values == atomic_number\n\n lams.append((csts.c.cgs / (self.line_in_nu[mask])).to(units.AA))\n weights.append(self.line_in_L[mask] / self.mdl.time_of_simulation)\n colors.append(self.cmap(float(ii) / float(self._nelements)))\n\n Lnorm = 0\n for w, lam in zip(weights, lams):\n Lnorm -= np.sum(w[(lam >= self.bins[0]) * (lam <= self.bins[-1])])\n\n lams = [tmp_l.value for tmp_l in lams]\n weights = [tmp_wt.value for tmp_wt in weights]\n ret = self.pax.hist(\n lams,\n bins=self.bins.value,\n stacked=True,\n histtype=\"stepfilled\",\n density=True,\n weights=weights,\n )\n\n for i, col in enumerate(ret[-1]):\n for reti in col:\n reti.set_facecolor(colors[i])\n reti.set_edgecolor(colors[i])\n reti.set_linewidth(0)\n reti.xy[:, 1] *= Lnorm.to(\"erg / s\").value\n\n def 
_generate_and_add_colormap(self):\n \"\"\"generate the custom color map, linking colours with atomic\n numbers\"\"\"\n\n values = [\n self.cmap(float(i) / float(self._nelements)) for i in range(self._nelements)\n ]\n\n custcmap = matplotlib.colors.ListedColormap(values)\n bounds = np.arange(self._nelements) + 0.5\n norm = matplotlib.colors.Normalize(vmin=0, vmax=self._nelements)\n mappable = cm.ScalarMappable(norm=norm, cmap=custcmap)\n mappable.set_array(np.linspace(1, self.zmax + 1, 256))\n\n # if a species_list has been specified...\n if self._species_list is not None:\n labels = []\n for zi in self._elements_in_kromer_plot:\n\n ion_number = zi[0] % 100\n atomic_number = (zi[0] - ion_number) / 100\n\n ion_numeral = int_to_roman(ion_number + 1)\n # using elements dictionary to get atomic symbol for the\n # species\n atomic_symbol = inv_elements[atomic_number].capitalize()\n\n # if the element was requested, and not a specific ion, then\n # add the element symbol to the label list\n if (atomic_number in self.keep_colour) & (atomic_symbol not in labels):\n # compiling the label, and adding it to the list\n label = f\"{atomic_symbol}\"\n labels.append(label)\n elif atomic_number not in self.keep_colour:\n # otherwise add the ion to the label list\n label = f\"{atomic_symbol}$\\,${ion_numeral}\"\n labels.append(label)\n\n else:\n # if no species_list specified, generate the labels this way\n labels = [\n inv_elements[zi].capitalize()\n for zi in self._elements_in_kromer_plot[:, 0]\n ]\n\n mainax = self.ax\n cbar = plt.colorbar(mappable, ax=mainax)\n cbar.set_ticks(bounds)\n cbar.set_ticklabels(labels)\n\n def _generate_and_add_legend(self):\n \"\"\"add legend\"\"\"\n\n spatch = patches.Patch(color=\"silver\", label=\"Other species\")\n gpatch = patches.Patch(color=\"grey\", label=\"e-scattering\")\n bpatch = patches.Patch(color=\"black\", label=\"Photosphere\")\n\n bline = lines.Line2D([], [], color=\"blue\", label=\"Virtual spectrum\")\n phline = lines.Line2D(\n [], 
[], color=\"red\", ls=\"dashed\", label=\"L at photosphere\"\n )\n\n self.ax.legend(handles=[phline, bline, spatch, gpatch, bpatch])\n\n def _axis_handling_label_rescale(self):\n \"\"\"add axis labels and perform axis scaling\"\"\"\n\n if self.ylim is None:\n self.ax.autoscale(axis=\"y\")\n else:\n self.ax.set_ylim(self.ylim)\n\n self._ylim = self.ax.get_ylim()\n\n if self.xlim is None:\n self.ax.autoscale(axis=\"x\")\n else:\n self.ax.set_xlim(self.xlim)\n\n self._xlim = self.ax.get_xlim()\n\n if self.twinx:\n self.pax.set_ylim([-self.ylim[-1], -self.ylim[0]])\n self.pax.set_yticklabels([])\n else:\n self.pax.set_ylim([-self.ylim[-1], self.ylim[-1]])\n self.pax.set_xlim(self.xlim)\n\n self.ax.set_xlabel(r\"$\\lambda$ [$\\mathrm{\\AA}$]\")\n ylabel = r\"$L_{\\mathrm{\\lambda}}$ [$\\mathrm{erg\\,s^{-1}\\,\\AA^{-1}}$]\"\n self.ax.set_ylabel(ylabel)\n" ]
[ [ "pandas.Series", "numpy.linspace", "numpy.in1d", "numpy.max", "matplotlib.patches.Patch", "pandas.read_csv", "numpy.unique", "numpy.arange", "matplotlib.pyplot.gcf", "matplotlib.cm.ScalarMappable", "matplotlib.pyplot.rcdefaults", "matplotlib.pyplot.figure", "pandas.concat", "numpy.min", "matplotlib.colors.ListedColormap", "numpy.argsort", "numpy.sum", "matplotlib.lines.Line2D", "matplotlib.colors.Normalize", "matplotlib.pyplot.colorbar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
valdersoul/NeuralDialog-LAED
[ "a0a6ccdc54cf17c1815ed956f5454d7102fb18d2" ]
[ "laed/main.py" ]
[ "# -*- coding: utf-8 -*-\n# author: Tiancheng Zhao\nfrom __future__ import print_function\nimport numpy as np\nfrom laed.models.model_bases import summary\nimport torch\nfrom laed.dataset.corpora import PAD, EOS, EOT\nfrom laed.enc2dec.decoders import TEACH_FORCE, GEN, DecoderRNN\nfrom laed.utils import get_dekenize\nimport os\nfrom collections import defaultdict\nimport logging\n\nlogger = logging.getLogger()\n\n\nclass LossManager(object):\n def __init__(self):\n self.losses = defaultdict(list)\n self.backward_losses = []\n\n def add_loss(self, loss, out=None):\n for key, val in loss.items():\n if key == 'kl_w':\n self.losses[key].append(val)\n continue\n if key != out and val is not None and type(val) is not bool:\n self.losses[key].append(val.item())\n\n def add_backward_loss(self, loss):\n self.backward_losses.append(loss.item())\n\n def clear(self):\n self.losses = defaultdict(list)\n self.backward_losses = []\n\n def pprint(self, name, window=None, prefix=None):\n str_losses = []\n for key, loss in self.losses.items():\n if loss is None:\n continue\n avg_loss = np.average(loss) if window is None else np.average(loss[-window:])\n str_losses.append(\"{} {:.3f}\".format(key, avg_loss))\n if 'nll' in key:\n str_losses.append(\"PPL {:.3f}\".format(np.exp(avg_loss)))\n if prefix:\n return \"{}: {} {}\".format(prefix, name, \" \".join(str_losses))\n else:\n return \"{} {}\".format(name, \" \".join(str_losses))\n\n def avg_loss(self):\n return np.mean(self.backward_losses)\n\n\ndef get_sent(model, de_tknize, data, b_id, attn=None, attn_ctx=None, stop_eos=True, stop_pad=True):\n ws = []\n attn_ws = []\n has_attn = attn is not None and attn_ctx is not None\n for t_id in range(data.shape[1]):\n w = model.vocab[data[b_id, t_id]]\n if has_attn:\n a_val = np.max(attn[b_id, t_id])\n if a_val > 0.1:\n a = np.argmax(attn[b_id, t_id])\n attn_w = model.vocab[attn_ctx[b_id, a]]\n attn_ws.append(\"{}({})\".format(attn_w, a_val))\n if (stop_eos and w in [EOS, EOT]) or (stop_pad 
and w == PAD):\n if w == EOT:\n ws.append(w)\n break\n if w != PAD:\n ws.append(w)\n\n att_ws = \"Attention: {}\".format(\" \".join(attn_ws)) if attn_ws else \"\"\n if has_attn:\n return de_tknize(ws), att_ws\n else:\n try:\n return de_tknize(ws), \"\"\n except:\n return \" \".join(ws), \"\"\n\n\ndef train(model, train_feed, valid_feed, test_feed, config, evaluator, gen=None):\n if gen is None:\n gen = generate\n\n patience = 10 # wait for at least 10 epoch before stop\n valid_loss_threshold = np.inf\n best_valid_loss = np.inf\n batch_cnt = 0\n optimizer = model.get_optimizer(config)\n done_epoch = 0\n train_loss = LossManager()\n model.train()\n logger.info(summary(model, show_weights=False))\n logger.info(\"**** Training Begins ****\")\n logger.info(\"**** Epoch 0/{} ****\".format(config.max_epoch))\n while True:\n train_feed.epoch_init(config, verbose=done_epoch==0, shuffle=True)\n while True:\n batch = train_feed.next_batch()\n if batch is None:\n break\n\n optimizer.zero_grad()\n loss = model(batch, mode=TEACH_FORCE, global_t=batch_cnt+1)\n if model.flush_valid:\n logger.info(\"Flush previous valid loss\")\n best_valid_loss = np.inf\n model.flush_valid = False\n optimizer = model.get_optimizer(config)\n\n model.backward(batch_cnt, loss)\n optimizer.step()\n batch_cnt += 1\n train_loss.add_loss(loss)\n\n if batch_cnt % config.print_step == 0:\n logger.info(train_loss.pprint(\"Train\", window=config.print_step,\n prefix=\"{}/{}-({:.3f})\".format(batch_cnt % config.ckpt_step,\n config.ckpt_step,\n model.kl_w)))\n\n if batch_cnt % config.ckpt_step == 0:\n logger.info(\"\\n=== Evaluating Model ===\")\n logger.info(train_loss.pprint(\"Train\"))\n done_epoch += 1\n\n # validation\n valid_loss = validate(model,valid_feed, config, batch_cnt)\n\n # generating\n gen(model, test_feed, config, evaluator, num_batch=config.preview_batch_num)\n\n # update early stopping stats\n if valid_loss < best_valid_loss:\n if valid_loss <= valid_loss_threshold * 
config.improve_threshold:\n patience = max(patience,\n done_epoch * config.patient_increase)\n valid_loss_threshold = valid_loss\n logger.info(\"Update patience to {}\".format(patience))\n\n if config.save_model:\n logger.info(\"Model Saved.\")\n torch.save(model.state_dict(),\n os.path.join(config.session_dir, \"model\"))\n\n best_valid_loss = valid_loss\n\n if done_epoch >= config.max_epoch \\\n or config.early_stop and patience <= done_epoch:\n if done_epoch < config.max_epoch:\n logger.info(\"!!Early stop due to run out of patience!!\")\n\n logger.info(\"Best validation loss %f\" % best_valid_loss)\n\n return\n\n # exit eval model\n model.train()\n train_loss.clear()\n logger.info(\"\\n**** Epcoch {}/{} ****\".format(done_epoch,\n config.max_epoch))\n\n\ndef validate(model, valid_feed, config, batch_cnt=None):\n model.eval()\n valid_feed.epoch_init(config, shuffle=False, verbose=True)\n losses = LossManager()\n while True:\n batch = valid_feed.next_batch()\n if batch is None:\n break\n loss = model(batch, mode=TEACH_FORCE)\n losses.add_loss(loss, 'bow')\n losses.add_backward_loss(model.model_sel_loss(loss, batch_cnt))\n\n valid_loss = losses.avg_loss()\n logger.info(losses.pprint(valid_feed.name))\n logger.info(\"Total valid loss {}\".format(valid_loss))\n\n # print topic-words\n #model.print_top_words()\n\n return valid_loss\n\n\ndef generate(model, data_feed, config, evaluator, num_batch=1, dest_f=None):\n model.eval()\n de_tknize = get_dekenize()\n\n def write(msg):\n if msg is None or msg == '':\n return\n if dest_f is None:\n logger.info(msg)\n else:\n dest_f.write(msg + '\\n')\n\n data_feed.epoch_init(config, shuffle=num_batch is not None, verbose=False)\n evaluator.initialize()\n logger.info(\"Generation: {} batches\".format(data_feed.num_batch\n if num_batch is None\n else num_batch))\n while True:\n batch = data_feed.next_batch()\n if batch is None or (num_batch is not None\n and data_feed.ptr > num_batch):\n break\n outputs, labels = model(batch, 
mode=GEN, gen_type=config.gen_type)\n\n # move from GPU to CPU\n labels = labels.cpu()\n pred_labels = [t.cpu().data.numpy() for t in\n outputs[DecoderRNN.KEY_SEQUENCE]]\n pred_labels = np.array(pred_labels, dtype=int).squeeze(-1).swapaxes(0,1)\n true_labels = labels.data.numpy()\n # get attention if possible\n if config.use_attn or config.use_ptr:\n pred_attns = [t.cpu().data.numpy() for t in outputs[DecoderRNN.KEY_ATTN_SCORE]]\n pred_attns = np.array(pred_attns, dtype=float).squeeze(2).swapaxes(0,1)\n else:\n pred_attns = None\n\n # get last 1 context\n ctx = batch.get('contexts')\n ctx_len = batch.get('context_lens')\n domains = batch.domains\n\n # logger.info the batch in String.\n for b_id in range(pred_labels.shape[0]):\n pred_str, attn = get_sent(model, de_tknize, pred_labels, b_id, attn=pred_attns)\n true_str, _ = get_sent(model, de_tknize, true_labels, b_id)\n prev_ctx = \"\"\n if ctx is not None:\n ctx_str, _ = get_sent(model, de_tknize, ctx[:, ctx_len[b_id]-1, :], b_id)\n prev_ctx = \"Source: {}\".format(ctx_str)\n\n domain = domains[b_id]\n evaluator.add_example(true_str, pred_str, domain)\n if num_batch is None or num_batch <= 2:\n write(prev_ctx)\n write(\"{}:: True: {} ||| Pred: {}\".format(domain, true_str, pred_str))\n if attn:\n write(\"[[{}]]\".format(attn))\n\n write(evaluator.get_report(include_error=dest_f is not None))\n logger.info(\"Generation Done\")\n\n\n" ]
[ [ "numpy.max", "numpy.argmax", "numpy.mean", "numpy.average", "numpy.array", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dipesh1432/nbsite
[ "866c6d747879b9a4b88e0a30a43e35b9802645bd", "866c6d747879b9a4b88e0a30a43e35b9802645bd" ]
[ "examples/sites/holoviews/holoviews/plotting/plotly/chart3d.py", "examples/sites/holoviews/holoviews/plotting/bokeh/path.py" ]
[ "import numpy as np\nimport plotly.graph_objs as go\nfrom matplotlib.cm import get_cmap\nfrom plotly import colors\nfrom plotly.tools import FigureFactory as FF\nfrom plotly.graph_objs import Scene, XAxis, YAxis, ZAxis\n\ntry:\n from plotly.figure_factory._trisurf import trisurf as trisurface\nexcept ImportError:\n pass\n\nimport param\n\nfrom ...core.options import SkipRendering\nfrom .element import ElementPlot, ColorbarPlot\nfrom .chart import ScatterPlot\n\nclass Chart3DPlot(ElementPlot):\n\n aspect = param.Parameter(default='cube')\n\n camera_angle = param.NumericTuple(default=(0.2, 0.5, 0.1, 0.2))\n\n camera_position = param.NumericTuple(default=(0.1, 0, -0.1))\n\n camera_zoom = param.Integer(default=3)\n\n projection = param.String(default='3d')\n\n def init_layout(self, key, element, ranges):\n l, b, zmin, r, t, zmax = self.get_extents(element, ranges)\n\n xd, yd, zd = (element.get_dimension(i) for i in range(3))\n xaxis = dict(range=[l, r], title=xd.pprint_label)\n if self.logx:\n xaxis['type'] = 'log'\n\n yaxis = dict(range=[b, t], title=yd.pprint_label)\n if self.logy:\n yaxis['type'] = 'log'\n\n zaxis = dict(range=[zmin, zmax], title=zd.pprint_label)\n if self.logz:\n zaxis['type'] = 'log'\n\n opts = {}\n if self.aspect == 'cube':\n opts['aspectmode'] = 'cube'\n else:\n opts['aspectmode'] = 'manual'\n opts['aspectratio'] = self.aspect\n scene = Scene(xaxis=XAxis(xaxis), yaxis=YAxis(yaxis),\n zaxis=ZAxis(zaxis), **opts)\n\n return dict(width=self.width, height=self.height,\n title=self._format_title(key, separator=' '),\n plot_bgcolor=self.bgcolor, scene=scene)\n\n\nclass SurfacePlot(ColorbarPlot, Chart3DPlot):\n\n graph_obj = go.Surface\n\n style_opts = ['opacity', 'lighting', 'lightposition', 'cmap']\n\n def graph_options(self, element, ranges):\n opts = super(SurfacePlot, self).graph_options(element, ranges)\n style = self.style[self.cyclic_index]\n copts = self.get_color_opts(element.vdims[0], element, ranges, style)\n return dict(opts, 
**copts)\n\n\n def get_data(self, element, ranges):\n return (), dict(x=element.dimension_values(0, False),\n y=element.dimension_values(1, False),\n z=element.dimension_values(2, flat=False))\n\n\nclass Scatter3dPlot(ScatterPlot, Chart3DPlot):\n\n graph_obj = go.Scatter3d\n\n def get_data(self, element, ranges):\n return (), dict(x=element.dimension_values(0),\n y=element.dimension_values(1),\n z=element.dimension_values(2))\n\n\nclass TrisurfacePlot(ColorbarPlot, Chart3DPlot):\n\n style_opts = ['cmap']\n\n def get_data(self, element, ranges):\n try:\n from scipy.spatial import Delaunay\n except:\n SkipRendering(\"SciPy not available, cannot plot Trisurface\")\n x, y, z = (element.dimension_values(i) for i in range(3))\n points2D = np.vstack([x, y]).T\n tri = Delaunay(points2D)\n simplices = tri.simplices\n return (x, y, z, simplices, self.colorbar, 'black', None), {}\n\n def graph_options(self, element, ranges):\n opts = self.style[self.cyclic_index]\n if 'cmap' in opts:\n cmap = opts.pop('cmap')\n if cmap in colors.PLOTLY_SCALES:\n opts['colormap'] = colors.PLOTLY_SCALES[cmap]\n else:\n cmap = get_cmap(cmap)\n opts['colormap'] = [cmap(i) for i in np.linspace(0, 1)]\n return opts\n\n def init_graph(self, plot_args, plot_kwargs):\n if hasattr(FF, '_trisurf'):\n trisurf = FF._trisurf(*plot_args[:-1], **plot_kwargs)\n else:\n trisurf = trisurface(*plot_args, **plot_kwargs)\n return trisurf[0]\n", "from collections import defaultdict\n\nimport param\nimport numpy as np\n\nfrom bokeh.models import HoverTool, FactorRange\n\nfrom ...core import util\nfrom .element import ColorbarPlot, LegendPlot, line_properties, fill_properties\nfrom .util import expand_batched_style\n\n\nclass PathPlot(ColorbarPlot):\n\n color_index = param.ClassSelector(default=None, class_=(util.basestring, int),\n allow_None=True, doc=\"\"\"\n Index of the dimension from which the color will the drawn\"\"\")\n\n show_legend = param.Boolean(default=False, doc=\"\"\"\n Whether to show legend for the 
plot.\"\"\")\n\n style_opts = line_properties + ['cmap']\n _plot_methods = dict(single='multi_line', batched='multi_line')\n _mapping = dict(xs='xs', ys='ys')\n _batched_style_opts = line_properties\n\n def _hover_opts(self, element):\n cdim = element.get_dimension(self.color_index)\n if self.batched:\n dims = list(self.hmap.last.kdims)+self.hmap.last.last.vdims\n else:\n dims = list(self.overlay_dims.keys())+self.hmap.last.vdims\n if cdim not in dims and cdim is not None:\n dims.append(cdim)\n return dims, {}\n\n\n def _get_hover_data(self, data, element):\n \"\"\"\n Initializes hover data based on Element dimension values.\n \"\"\"\n if not any(isinstance(t, HoverTool) for t in self.state.tools) or self.static_source:\n return\n\n for k, v in self.overlay_dims.items():\n dim = util.dimension_sanitizer(k.name)\n if dim not in data:\n data[dim] = [v for _ in range(len(list(data.values())[0]))]\n\n\n def get_data(self, element, ranges, style):\n cdim = element.get_dimension(self.color_index)\n if cdim: cidx = element.get_dimension_index(cdim)\n inds = (1, 0) if self.invert_axes else (0, 1)\n mapping = dict(self._mapping)\n if not cdim:\n if self.static_source:\n data = {}\n else:\n paths = element.split(datatype='array', dimensions=element.kdims)\n xs, ys = ([path[:, idx] for path in paths] for idx in inds)\n data = dict(xs=xs, ys=ys)\n return data, mapping, style\n\n dim_name = util.dimension_sanitizer(cdim.name)\n if not self.static_source:\n paths, cvals = [], []\n for path in element.split(datatype='array'):\n splits = [0]+list(np.where(np.diff(path[:, cidx])!=0)[0]+1)\n for (s1, s2) in zip(splits[:-1], splits[1:]):\n cvals.append(path[s1, cidx])\n paths.append(path[s1:s2+1, :2]) \n xs, ys = ([path[:, idx] for path in paths] for idx in inds)\n data = dict(xs=xs, ys=ys, **{dim_name: np.array(cvals)})\n cmapper = self._get_colormapper(cdim, element, ranges, style)\n mapping['line_color'] = {'field': dim_name, 'transform': cmapper}\n self._get_hover_data(data, 
element)\n return data, mapping, style\n\n\n def get_batched_data(self, element, ranges=None):\n data = defaultdict(list)\n\n zorders = self._updated_zorders(element)\n for (key, el), zorder in zip(element.data.items(), zorders):\n self.set_param(**self.lookup_options(el, 'plot').options)\n style = self.lookup_options(el, 'style')\n style = style.max_cycles(len(self.ordering))[zorder]\n self.overlay_dims = dict(zip(element.kdims, key))\n eldata, elmapping, style = self.get_data(el, ranges, style)\n for k, eld in eldata.items():\n data[k].extend(eld)\n\n # Skip if data is empty\n if not eldata:\n continue\n\n # Apply static styles\n nvals = len(list(eldata.values())[0])\n sdata, smapping = expand_batched_style(style, self._batched_style_opts,\n elmapping, nvals)\n elmapping.update({k: v for k, v in smapping.items() if k not in elmapping})\n for k, v in sdata.items():\n data[k].extend(list(v))\n\n return data, elmapping, style\n\n \nclass ContourPlot(LegendPlot, PathPlot):\n\n color_index = param.ClassSelector(default=0, class_=(util.basestring, int),\n allow_None=True, doc=\"\"\"\n Index of the dimension from which the color will the drawn\"\"\")\n\n show_legend = param.Boolean(default=False, doc=\"\"\"\n Whether to show legend for the plot.\"\"\")\n\n _color_style = 'line_color'\n\n def _hover_opts(self, element):\n if self.batched:\n dims = list(self.hmap.last.kdims)+self.hmap.last.last.vdims\n else:\n dims = list(self.overlay_dims.keys())+self.hmap.last.vdims\n return dims, {}\n \n def _get_hover_data(self, data, element):\n \"\"\"\n Initializes hover data based on Element dimension values.\n If empty initializes with no data.\n \"\"\"\n if not any(isinstance(t, HoverTool) for t in self.state.tools) or self.static_source:\n return\n\n for d in element.vdims:\n dim = util.dimension_sanitizer(d.name)\n if dim not in data:\n data[dim] = element.dimension_values(d, expanded=False)\n elif isinstance(data[dim], np.ndarray) and data[dim].dtype.kind == 'M':\n 
data[dim+'_dt_strings'] = [d.pprint_value(v) for v in data[dim]]\n\n for k, v in self.overlay_dims.items():\n dim = util.dimension_sanitizer(k.name)\n if dim not in data:\n data[dim] = [v for _ in range(len(list(data.values())[0]))]\n\n def get_data(self, element, ranges, style):\n paths = element.split(datatype='array', dimensions=element.kdims)\n if self.static_source:\n data = dict()\n else:\n inds = (1, 0) if self.invert_axes else (0, 1)\n xs, ys = ([path[:, idx] for path in paths] for idx in inds)\n data = dict(xs=xs, ys=ys)\n mapping = dict(self._mapping)\n\n if None not in [element.level, self.color_index] and element.vdims:\n cdim = element.vdims[0]\n else:\n cidx = self.color_index+2 if isinstance(self.color_index, int) else self.color_index\n cdim = element.get_dimension(cidx)\n if cdim is None:\n return data, mapping, style\n\n ncontours = len(paths)\n dim_name = util.dimension_sanitizer(cdim.name)\n if element.level is not None:\n values = np.full(ncontours, float(element.level))\n else:\n values = element.dimension_values(cdim, expanded=False)\n data[dim_name] = values\n factors = list(np.unique(values)) if values.dtype.kind in 'SUO' else None\n cmapper = self._get_colormapper(cdim, element, ranges, style, factors)\n mapping[self._color_style] = {'field': dim_name, 'transform': cmapper}\n self._get_hover_data(data, element)\n if self.show_legend:\n mapping['legend'] = dim_name\n return data, mapping, style\n\n\nclass PolygonPlot(ContourPlot):\n\n style_opts = ['cmap'] + line_properties + fill_properties\n _plot_methods = dict(single='patches', batched='patches')\n _batched_style_opts = line_properties + fill_properties\n _color_style = 'fill_color'\n" ]
[ [ "numpy.linspace", "scipy.spatial.Delaunay", "numpy.vstack", "matplotlib.cm.get_cmap" ], [ "numpy.array", "numpy.diff", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pyxidr/data-science-consulting-part1
[ "b8a34f55d9ab7580d85e27da66abc8d25b7bd3e4" ]
[ "src/python/modules/client_data.py" ]
[ "\"\"\"\nCopyright (c) 2019, Pyxidr and/or its affiliates. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n - Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n - Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n - Neither the name of Pyxidr or the names of its\n contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\nIS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\nEXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\nPROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n===============================================================================\n\n-- client_data.py --\n\nProvides subroutines for populating SQL database with data\nassociated with power generating assets.\n\nTo do:\n - None at the moment\n\"\"\"\n\nimport os\nimport datetime\nimport math\nimport logging\nimport pandas as pd\n\nfrom modules.utils import Months\nfrom modules.utils import convert_to_numeric\n\nlogger = logging.getLogger('make_dataset')\n\n\nclass ClientDataError(Exception):\n '''\n Class used for throwing errors.\n '''\n\n\ndef populate_client_data(conn, parameters):\n '''\n Populates the SQL database with client's data.\n '''\n logger.info('. Populating data associated with generating assets')\n cursor = conn.cursor()\n _populate_hourly_prices(conn, cursor, parameters['Prices']['Hourly'])\n _populate_daily_prices(conn, cursor, parameters['Prices']['Daily'])\n _populate_generation(conn, cursor, parameters['Generation'])\n\n\ndef _populate_hourly_prices(conn, cursor, parameters):\n '''\n Populates hourly prices.\n '''\n f_in_name = parameters[0] # There is only one file\n\n # -- Power prices --\n\n logger.info('.. 
Reading \\'{}\\''.format(os.path.basename(f_in_name)))\n\n # Put all the data into a dataframe\n df = pd.read_excel(f_in_name)\n\n # Get product ID's\n cursor.execute(\n '''\n select id\n from tbl_ref_price_products\n where product = \\'DAH\\';\n ''')\n product_id = cursor.fetchone()[0]\n\n # Delete any previous records\n cursor.execute('delete from tbl_hist_intradayprices ' +\n 'where product_id = {};'.format(product_id))\n conn.commit()\n\n logger.info('.. Processing power spot prices')\n\n list_of_records = list()\n cur_row = 0\n while cur_row < len(df.index):\n if isinstance(df.iat[cur_row, 4], datetime.date):\n # For each hour\n for h in range(0, 24):\n if not math.isnan(df.iat[cur_row, h + 6]):\n record = dict({\n \"datehour\": datetime.datetime(\n df.iat[cur_row, 4].year,\n df.iat[cur_row, 4].month,\n df.iat[cur_row, 4].day, h, 0),\n \"product_id\": product_id,\n \"price\": convert_to_numeric(df.iat[cur_row, h + 6])\n })\n list_of_records.append(record)\n cur_row += 1\n df2 = pd.DataFrame(list_of_records)\n df2.to_sql('tbl_hist_intradayprices', conn,\n if_exists='append', index=False)\n conn.commit()\n\n\ndef _populate_daily_prices(conn, cursor, parameters):\n '''\n Populates daily prices.\n '''\n f_in_name = parameters[0] # There is only one file\n\n # -- Gas prices --\n\n logger.info('.. Reading gas prices from \\'{}\\''\n .format(os.path.basename(f_in_name)))\n\n # Put all the data into a dataframe\n df = pd.read_excel(f_in_name, sheet_name='Gas')\n\n # Get product ID's\n cursor.execute(\n '''\n select id\n from tbl_ref_price_products\n where product = \\'Z1\\';\n ''')\n product_id = cursor.fetchone()[0]\n\n # Delete any previous records\n cursor.execute('delete from tbl_hist_dailyprices ' +\n 'where product_id = {};'.format(product_id))\n conn.commit()\n\n logger.info('.. 
Processing gas cash prices')\n\n list_of_records = list()\n cur_row = 3\n while cur_row < len(df.index):\n if isinstance(df.iat[cur_row, 3], datetime.date):\n try:\n record = dict({\n \"date\": df.iat[cur_row, 3],\n \"product_id\": product_id,\n \"bid\": convert_to_numeric(df.iat[cur_row, 4]),\n \"ask\": convert_to_numeric(df.iat[cur_row, 4]),\n \"bid_size\": 0,\n \"ask_size\": 0\n })\n list_of_records.append(record)\n except KeyError:\n logger.warning('** Warning: Does not recognize price type ' +\n '\\'{}\\'.'.format(df.iat[cur_row, 0]))\n cur_row += 1\n df2 = pd.DataFrame(list_of_records)\n df2.to_sql('tbl_hist_dailyprices', conn, if_exists='append', index=False)\n conn.commit()\n\n # -- Carbon prices --\n\n logger.info('.. Reading carbon prices from \\'{}\\''\n .format(os.path.basename(f_in_name)))\n\n # Put all the data into a dataframe\n df = pd.read_excel(f_in_name, sheet_name='Carbon')\n\n # Get the product ID\n cursor.execute(\n '''\n select id\n from tbl_ref_price_products\n where product = \\'Carbon\\';\n ''')\n product_id = cursor.fetchone()[0]\n\n # Delete any previous records\n cursor.execute('delete from tbl_hist_dailyprices ' +\n 'where product_id = {};'.format(product_id))\n conn.commit()\n\n logger.info('.. Processing carbon prices')\n\n list_of_records = list()\n cur_row = 4\n while cur_row < len(df.index):\n if isinstance(df.iat[cur_row, 0], datetime.date):\n record = dict({\n \"date\": df.iat[cur_row, 0],\n \"product_id\": product_id,\n \"bid\": convert_to_numeric(df.iat[cur_row, 1]),\n \"ask\": convert_to_numeric(df.iat[cur_row, 1]),\n \"bid_size\": 0,\n \"ask_size\": 0\n })\n list_of_records.append(record)\n cur_row += 1\n df2 = pd.DataFrame(list_of_records)\n df2.to_sql('tbl_hist_dailyprices', conn, if_exists='append', index=False)\n conn.commit()\n\n\ndef _populate_generation(conn, cursor, parameters):\n '''\n Populates historical generation.\n '''\n f_in_name = parameters[0] # There is only one file\n\n logger.info('.. 
Reading generation from \\'{}\\''\n .format(os.path.basename(f_in_name)))\n\n # Put all the data into a dataframe\n df = pd.read_excel(f_in_name)\n\n # Get generating plant ID\n cursor.execute(\n '''\n select id\n from tbl_ref_power_plants\n where name = \\'PP\\';\n ''')\n plant_id = cursor.fetchone()[0]\n\n # Delete any previous records\n cursor.execute('delete from tbl_hist_generation ' +\n 'where plant_id = {};'.format(plant_id))\n conn.commit()\n\n logger.info('.. Processing generation for PP')\n\n list_of_records = list()\n cur_row = 3\n while cur_row < len(df.index):\n if isinstance(df.iat[cur_row, 1], datetime.datetime):\n record = dict({\n # Make sure we get a \"clean\" hour\n \"datehour\": datetime.datetime(\n df.iat[cur_row, 1].year,\n df.iat[cur_row, 1].month,\n df.iat[cur_row, 1].day,\n df.iat[cur_row, 1].hour),\n \"plant_id\": plant_id,\n \"generation\": convert_to_numeric(df.iat[cur_row, 2])\n })\n list_of_records.append(record)\n cur_row += 1\n df2 = pd.DataFrame(list_of_records)\n df2.to_sql('tbl_hist_generation', conn, if_exists='append', index=False)\n conn.commit()\n" ]
[ [ "pandas.read_excel", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
PasaLab/YAO
[ "2e70203197cd79f9522d65731ee5dc0eb236b005", "2e70203197cd79f9522d65731ee5dc0eb236b005", "2e70203197cd79f9522d65731ee5dc0eb236b005" ]
[ "Liquid-job-benchmarks/scripts/tf_cnn_benchmarks/models/experimental/deepspeech.py", "Liquid-optimizer/main.py", "Liquid-job-benchmarks/scripts/tf_cnn_benchmarks/models/tf1_only/mobilenet_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"DeepSpeech2 model configuration.\r\n\r\nReferences:\r\n https://arxiv.org/abs/1512.02595\r\n Deep Speech 2: End-to-End Speech Recognition in English and Mandarin\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport itertools\r\n\r\nimport numpy as np\r\nfrom six.moves import xrange # pylint: disable=redefined-builtin\r\nimport tensorflow.compat.v1 as tf\r\nimport constants\r\nfrom cnn_util import log_fn\r\nfrom models import model as model_lib\r\nfrom tensorflow.python.ops import variables # pylint: disable=g-direct-tensorflow-import\r\n\r\n\r\nclass DeepSpeechDecoder(object):\r\n \"\"\"Greedy decoder implementation for Deep Speech model.\"\"\"\r\n\r\n def __init__(self, labels, blank_index=28):\r\n \"\"\"Decoder initialization.\r\n\r\n Arguments:\r\n labels: a string specifying the speech labels for the decoder to use.\r\n blank_index: an integer specifying index for the blank character. 
Defaults\r\n to 28.\r\n \"\"\"\r\n self.labels = labels\r\n self.blank_index = blank_index\r\n self.int_to_char = dict([(i, c) for (i, c) in enumerate(labels)])\r\n\r\n def convert_to_string(self, sequence):\r\n \"\"\"Convert a sequence of indexes into corresponding string.\"\"\"\r\n return ''.join([self.int_to_char[i] for i in sequence])\r\n\r\n def wer(self, decode, target):\r\n \"\"\"Computes the Word Error Rate (WER).\r\n\r\n WER is defined as the edit distance between the two provided sentences after\r\n tokenizing to words.\r\n\r\n Args:\r\n decode: string of the decoded output.\r\n target: a string for the ground truth label.\r\n\r\n Returns:\r\n A float number for the WER of the current decode-target pair.\r\n \"\"\"\r\n try:\r\n from nltk.metrics import distance # pylint: disable=g-import-not-at-top\r\n except ImportError as e:\r\n if 'nltk.metrics' not in e.message:\r\n raise\r\n raise ImportError('To use the experimental deepspeech model, you must '\r\n 'pip install -U nltk')\r\n\r\n # Map each word to a new char.\r\n words = set(decode.split() + target.split())\r\n word2char = dict(zip(words, range(len(words))))\r\n\r\n new_decode = [chr(word2char[w]) for w in decode.split()]\r\n new_target = [chr(word2char[w]) for w in target.split()]\r\n\r\n return distance.edit_distance(''.join(new_decode), ''.join(new_target))\r\n\r\n def cer(self, decode, target):\r\n \"\"\"Computes the Character Error Rate (CER).\r\n\r\n CER is defined as the edit distance between the two given strings.\r\n\r\n Args:\r\n decode: a string of the decoded output.\r\n target: a string for the ground truth label.\r\n\r\n Returns:\r\n A float number denoting the CER for the current sentence pair.\r\n \"\"\"\r\n try:\r\n from nltk.metrics import distance # pylint: disable=g-import-not-at-top\r\n except ImportError as e:\r\n if 'nltk.metrics' not in e.message:\r\n raise\r\n raise ImportError('To use the experimental deepspeech model, you must '\r\n 'pip install -U nltk')\r\n return 
distance.edit_distance(decode, target)\r\n\r\n def decode(self, char_indexes):\r\n \"\"\"Decode the best guess from logits using greedy algorithm.\"\"\"\r\n # Merge repeated chars.\r\n merge = [k for k, _ in itertools.groupby(char_indexes)]\r\n # Remove the blank index in the decoded sequence.\r\n merge_remove_blank = []\r\n for k in merge:\r\n if k != self.blank_index:\r\n merge_remove_blank.append(k)\r\n\r\n return self.convert_to_string(merge_remove_blank)\r\n\r\n def decode_logits(self, logits):\r\n \"\"\"Decode the best guess from logits using greedy algorithm.\"\"\"\r\n # Choose the class with maximimum probability.\r\n best = list(np.argmax(logits, axis=1))\r\n return self.decode(best)\r\n\r\n\r\nclass DeepSpeech2Model(model_lib.Model):\r\n \"\"\"Define DeepSpeech2 model.\"\"\"\r\n\r\n # Supported rnn cells.\r\n SUPPORTED_RNNS = {\r\n 'lstm': tf.nn.rnn_cell.BasicLSTMCell,\r\n 'rnn': tf.nn.rnn_cell.RNNCell,\r\n 'gru': tf.nn.rnn_cell.GRUCell,\r\n }\r\n\r\n # Parameters for batch normalization.\r\n BATCH_NORM_EPSILON = 1e-5\r\n BATCH_NORM_DECAY = 0.997\r\n\r\n # Filters of convolution layer\r\n CONV_FILTERS = 32\r\n\r\n def __init__(self,\r\n num_rnn_layers=5,\r\n rnn_type='lstm',\r\n is_bidirectional=True,\r\n rnn_hidden_size=800,\r\n use_bias=True,\r\n params=None):\r\n \"\"\"Initialize DeepSpeech2 model.\r\n\r\n Args:\r\n num_rnn_layers: an integer, the number of rnn layers (default: 5).\r\n rnn_type: a string, one of the supported rnn cells: gru, rnn or lstm.\r\n is_bidirectional: a boolean to indicate if the rnn layer is bidirectional.\r\n rnn_hidden_size: an integer for the number of hidden units in the RNN\r\n cell.\r\n use_bias: a boolean specifying whether to use a bias in the last fc layer.\r\n params: the params from BenchmarkCNN.\r\n \"\"\"\r\n super(DeepSpeech2Model, self).__init__(\r\n 'deepspeech2',\r\n batch_size=128,\r\n learning_rate=0.0005,\r\n fp16_loss_scale=128,\r\n params=params)\r\n self.num_rnn_layers = num_rnn_layers\r\n self.rnn_type 
= rnn_type\r\n self.is_bidirectional = is_bidirectional\r\n self.rnn_hidden_size = rnn_hidden_size\r\n self.use_bias = use_bias\r\n self.num_feature_bins = 161\r\n self.max_time_steps = 3494\r\n self.max_label_length = 576\r\n\r\n def _batch_norm(self, inputs, training):\r\n \"\"\"Batch normalization layer.\r\n\r\n Note that the momentum to use will affect validation accuracy over time.\r\n Batch norm has different behaviors during training/evaluation. With a large\r\n momentum, the model takes longer to get a near-accurate estimation of the\r\n moving mean/variance over the entire training dataset, which means we need\r\n more iterations to see good evaluation results. If the training data is\r\n evenly distributed over the feature space, we can also try setting a smaller\r\n momentum (such as 0.1) to get good evaluation result sooner.\r\n\r\n Args:\r\n inputs: input data for batch norm layer.\r\n training: a boolean to indicate if it is in training stage.\r\n\r\n Returns:\r\n tensor output from batch norm layer.\r\n \"\"\"\r\n return tf.layers.batch_normalization(\r\n inputs=inputs,\r\n momentum=DeepSpeech2Model.BATCH_NORM_DECAY,\r\n epsilon=DeepSpeech2Model.BATCH_NORM_EPSILON,\r\n fused=True,\r\n training=training)\r\n\r\n def _conv_bn_layer(self, inputs, padding, filters, kernel_size, strides,\r\n layer_id, training):\r\n \"\"\"Defines 2D convolutional + batch normalization layer.\r\n\r\n Args:\r\n inputs: input data for convolution layer.\r\n padding: padding to be applied before convolution layer.\r\n filters: an integer, number of output filters in the convolution.\r\n kernel_size: a tuple specifying the height and width of the 2D convolution\r\n window.\r\n strides: a tuple specifying the stride length of the convolution.\r\n layer_id: an integer specifying the layer index.\r\n training: a boolean to indicate which stage we are in (training/eval).\r\n\r\n Returns:\r\n tensor output from the current layer.\r\n \"\"\"\r\n # Perform symmetric padding on the 
feature dimension of time_step\r\n # This step is required to avoid issues when RNN output sequence is shorter\r\n # than the label length.\r\n inputs = tf.pad(\r\n inputs,\r\n [[0, 0], [padding[0], padding[0]], [padding[1], padding[1]], [0, 0]])\r\n inputs = tf.layers.conv2d(\r\n inputs=inputs,\r\n filters=filters,\r\n kernel_size=kernel_size,\r\n strides=strides,\r\n padding='valid',\r\n use_bias=False,\r\n activation=tf.nn.relu6,\r\n name='cnn_{}'.format(layer_id))\r\n return self._batch_norm(inputs, training)\r\n\r\n def _rnn_layer(self, inputs, rnn_cell, rnn_hidden_size, layer_id,\r\n use_batch_norm, is_bidirectional, training):\r\n \"\"\"Defines a batch normalization + rnn layer.\r\n\r\n Args:\r\n inputs: input tensors for the current layer.\r\n rnn_cell: RNN cell instance to use.\r\n rnn_hidden_size: an integer for the dimensionality of the rnn output\r\n space.\r\n layer_id: an integer for the index of current layer.\r\n use_batch_norm: a boolean specifying whether to perform batch\r\n normalization on input states.\r\n is_bidirectional: a boolean specifying whether the rnn layer is\r\n bi-directional.\r\n training: a boolean to indicate which stage we are in (training/eval).\r\n\r\n Returns:\r\n tensor output for the current layer.\r\n \"\"\"\r\n if use_batch_norm:\r\n inputs = self._batch_norm(inputs, training)\r\n\r\n # Construct forward/backward RNN cells.\r\n fw_cell = rnn_cell(\r\n num_units=rnn_hidden_size, name='rnn_fw_{}'.format(layer_id))\r\n\r\n if is_bidirectional:\r\n bw_cell = rnn_cell(\r\n num_units=rnn_hidden_size, name='rnn_bw_{}'.format(layer_id))\r\n outputs, _ = tf.nn.bidirectional_dynamic_rnn(\r\n cell_fw=fw_cell,\r\n cell_bw=bw_cell,\r\n inputs=inputs,\r\n dtype=tf.float32,\r\n swap_memory=True)\r\n rnn_outputs = tf.concat(outputs, -1)\r\n else:\r\n rnn_outputs = tf.nn.dynamic_rnn(\r\n fw_cell, inputs, dtype=tf.float32, swap_memory=True)\r\n\r\n return rnn_outputs\r\n\r\n def get_input_data_types(self, subset):\r\n \"\"\"Returns the 
list of data types of the inputs.\"\"\"\r\n del subset # Same data types for both train and validation subsets.\r\n return [self.data_type, tf.int32, tf.int32, tf.int32]\r\n\r\n def get_input_shapes(self, subset):\r\n \"\"\"Returns the list of shapes of the padded inputs.\"\"\"\r\n del subset # Same shapes for both train and validation subsets\r\n return [\r\n [self.batch_size, self.max_time_steps, self.num_feature_bins, 1],\r\n [self.batch_size, self.max_label_length],\r\n [self.batch_size, 1],\r\n [self.batch_size, 1],\r\n ]\r\n\r\n def get_synthetic_inputs(self, input_name, nclass):\r\n inputs = tf.random_uniform(self.get_input_shapes('train')[0],\r\n dtype=self.get_input_data_types('train')[0])\r\n inputs = variables.VariableV1(inputs, trainable=False,\r\n collections=[tf.GraphKeys.LOCAL_VARIABLES],\r\n name=input_name)\r\n labels = tf.convert_to_tensor(\r\n np.random.randint(28, size=[self.batch_size, self.max_label_length]))\r\n input_lengths = tf.convert_to_tensor(\r\n [self.max_time_steps] * self.batch_size)\r\n label_lengths = tf.convert_to_tensor(\r\n [self.max_label_length] * self.batch_size)\r\n return [inputs, labels, input_lengths, label_lengths]\r\n\r\n # TODO(laigd): support fp16.\r\n # TODO(laigd): support multiple gpus.\r\n def build_network(self, inputs, phase_train=True, nclass=29):\r\n \"\"\"Builds the forward pass of the deepspeech2 model.\r\n\r\n Args:\r\n inputs: The input list of the model.\r\n phase_train: True during training. 
False during evaluation.\r\n nclass: Number of classes that the input spectrogram can belong to.\r\n\r\n Returns:\r\n A BuildNetworkResult which contains the logits and model-specific extra\r\n information.\r\n \"\"\"\r\n inputs = inputs[0] # Get the spectrogram feature.\r\n\r\n # Two cnn layers.\r\n inputs = self._conv_bn_layer(\r\n inputs,\r\n padding=(20, 5),\r\n filters=DeepSpeech2Model.CONV_FILTERS,\r\n kernel_size=(41, 11),\r\n strides=(2, 2),\r\n layer_id=1,\r\n training=phase_train)\r\n\r\n inputs = self._conv_bn_layer(\r\n inputs,\r\n padding=(10, 5),\r\n filters=DeepSpeech2Model.CONV_FILTERS,\r\n kernel_size=(21, 11),\r\n strides=(2, 1),\r\n layer_id=2,\r\n training=phase_train)\r\n\r\n # output of conv_layer2 with the shape of\r\n # [batch_size (N), times (T), features (F), channels (C)].\r\n # Convert the conv output to rnn input.\r\n\r\n # batch_size = tf.shape(inputs)[0]\r\n feat_size = inputs.get_shape().as_list()[2]\r\n inputs = tf.reshape(\r\n inputs,\r\n [self.batch_size, -1, feat_size * DeepSpeech2Model.CONV_FILTERS])\r\n\r\n # RNN layers.\r\n rnn_cell = DeepSpeech2Model.SUPPORTED_RNNS[self.rnn_type]\r\n for layer_counter in xrange(self.num_rnn_layers):\r\n # No batch normalization on the first layer.\r\n use_batch_norm = (layer_counter != 0)\r\n inputs = self._rnn_layer(inputs, rnn_cell, self.rnn_hidden_size,\r\n layer_counter + 1, use_batch_norm,\r\n self.is_bidirectional, phase_train)\r\n\r\n # FC layer with batch norm.\r\n inputs = self._batch_norm(inputs, phase_train)\r\n logits = tf.layers.dense(inputs, nclass, use_bias=self.use_bias)\r\n\r\n return model_lib.BuildNetworkResult(logits=logits, extra_info=None)\r\n\r\n def loss_function(self, inputs, build_network_result):\r\n \"\"\"Computes the ctc loss for the current batch of predictions.\r\n\r\n Args:\r\n inputs: the input list of the model.\r\n build_network_result: a BuildNetworkResult returned by build_network().\r\n\r\n Returns:\r\n The loss tensor of the model.\r\n \"\"\"\r\n logits 
= build_network_result.logits\r\n actual_time_steps = inputs[2]\r\n probs = tf.nn.softmax(logits)\r\n ctc_time_steps = tf.shape(probs)[1]\r\n ctc_input_length = tf.to_float(\r\n tf.multiply(actual_time_steps, ctc_time_steps))\r\n ctc_input_length = tf.to_int32(\r\n tf.floordiv(ctc_input_length, tf.to_float(self.max_time_steps)))\r\n\r\n label_length = inputs[3]\r\n label_length = tf.to_int32(tf.squeeze(label_length))\r\n ctc_input_length = tf.to_int32(tf.squeeze(ctc_input_length))\r\n\r\n labels = inputs[1]\r\n sparse_labels = tf.to_int32(\r\n tf.keras.backend.ctc_label_dense_to_sparse(labels, label_length))\r\n y_pred = tf.log(\r\n tf.transpose(probs, perm=[1, 0, 2]) + tf.keras.backend.epsilon())\r\n\r\n losses = tf.expand_dims(\r\n tf.nn.ctc_loss(\r\n labels=sparse_labels,\r\n inputs=y_pred,\r\n sequence_length=ctc_input_length,\r\n ignore_longer_outputs_than_inputs=True),\r\n axis=1)\r\n loss = tf.reduce_mean(losses)\r\n return loss\r\n\r\n PROBABILITY_TENSOR = 'deepspeech2_prob'\r\n LABEL_TENSOR = 'deepspeech2_label'\r\n\r\n def accuracy_function(self, inputs, logits):\r\n \"\"\"Returns the ops to evaluate the model performance.\"\"\"\r\n # Get probabilities of each predicted class\r\n probs = tf.nn.softmax(logits)\r\n assert probs.shape.as_list()[0] == self.batch_size\r\n return {\r\n (constants.UNREDUCED_ACCURACY_OP_PREFIX + self.PROBABILITY_TENSOR):\r\n probs,\r\n (constants.UNREDUCED_ACCURACY_OP_PREFIX + self.LABEL_TENSOR):\r\n inputs[1],\r\n }\r\n\r\n def postprocess(self, results):\r\n \"\"\"Postprocess results returned from model in Python.\"\"\"\r\n probs = results[self.PROBABILITY_TENSOR]\r\n\r\n total_wer, total_cer = 0, 0\r\n speech_labels = \" abcdefghijklmnopqrstuvwxyz'-\"\r\n greedy_decoder = DeepSpeechDecoder(speech_labels)\r\n\r\n # Evaluate the performance using WER (Word Error Rate) and CER (Character\r\n # Error Rate) as metrics.\r\n targets = results[self.LABEL_TENSOR] # The ground truth transcript\r\n for i in range(self.batch_size):\r\n # 
Decode string.\r\n predicted_str = greedy_decoder.decode_logits(probs[i])\r\n expected_str = greedy_decoder.decode(targets[i])\r\n # Compute CER.\r\n total_cer += (greedy_decoder.cer(predicted_str, expected_str) /\r\n len(expected_str))\r\n # Compute WER.\r\n total_wer += (greedy_decoder.wer(predicted_str, expected_str) /\r\n len(expected_str.split()))\r\n\r\n # Get mean value\r\n total_cer /= self.batch_size\r\n total_wer /= self.batch_size\r\n\r\n log_fn('total CER: {:f}; total WER: {:f}; total example: {:d}.'.format(\r\n total_cer, total_wer, self.batch_size))\r\n # TODO(laigd): get rid of top_N_accuracy bindings in benchmark_cnn.py\r\n return {'top_1_accuracy': 0., 'top_5_accuracy': 0.}\r\n", "import pandas as pd\r\nimport numpy as np\r\nimport os\r\nfrom sklearn.model_selection import train_test_split\r\nfrom model_tensorflow import train, predict\r\n\r\n\r\nframe = \"tensorflow\"\r\n\r\n\r\nclass Config:\r\n\t# feature_columns = list(range(0,8))\r\n\t# label_columns = [5,6,7]\r\n\tfeature_columns = list([2,5])#comment yqy\r\n\t# feature_columns = list([2]) #add yqy\r\n\tlabel_columns = [5]\r\n\tfeature_and_label_columns = feature_columns + label_columns\r\n\tlabel_in_feature_columns = (lambda x, y: [x.index(i) for i in y])(feature_columns, label_columns)\r\n\r\n\tpredict_day = 1\r\n\r\n\t# input_size = len(feature_columns)#comment yqy\r\n\tinput_size = len( list([2]))#add yqy\r\n\toutput_size = len(label_columns)\r\n\r\n\thidden_size = 128\r\n\tlstm_layers = 2\r\n\tdropout_rate = 0.2\r\n\ttime_step = 5\r\n\r\n\t# do_train = True\r\n\tdo_train = True\r\n\tdo_predict = True\r\n\tadd_train = False\r\n\tshuffle_train_data = True\r\n\r\n\t# train_data_rate = 0.95 #comment yqy\r\n\ttrain_data_rate = 1 #add yqy\r\n\tvalid_data_rate = 0.15\r\n\r\n\tbatch_size = 64\r\n\tlearning_rate = 0.001\r\n\tepoch = 20\r\n\tpatience = 5\r\n\trandom_seed = 42\r\n\r\n\tdo_continue_train = False\r\n\tcontinue_flag = \"\"\r\n\tif do_continue_train:\r\n\t\tshuffle_train_data = 
False\r\n\t\tbatch_size = 1\r\n\t\tcontinue_flag = \"continue_\"\r\n\r\n\t#comment yqy\r\n\ttrain_data_path = \"./data/stock_data.csv\"\r\n\tmodel_save_path = \"./checkpoint/\"\r\n\tfigure_save_path = \"./figure/\"\r\n\t#comment end\r\n\t# add yqy\r\n\t# train_data_path = \"./data/stock_data_30.csv\"\r\n\t# model_save_path = \"./checkpoint/30/\"\r\n\t# figure_save_path = \"./figure/30/\"\r\n\t# add end\r\n\tdo_figure_save = False\r\n\tif not os.path.exists(model_save_path):\r\n\t\tos.mkdir(model_save_path)\r\n\tif not os.path.exists(figure_save_path):\r\n\t\tos.mkdir(figure_save_path)\r\n\r\n\tused_frame = frame\r\n\tmodel_postfix = {\"pytorch\": \".pth\", \"keras\": \".h5\", \"tensorflow\": \".ckpt\"}\r\n\tmodel_name = \"model_\" + continue_flag + used_frame + model_postfix[used_frame]\r\n\r\n\r\nclass Data:\r\n\tdef __init__(self, config):\r\n\t\tself.config = config\r\n\t\tself.data, self.data_column_name = self.read_data()\r\n\r\n\t\tself.data_num = self.data.shape[0]\r\n\t\tself.train_num = int(self.data_num * self.config.train_data_rate)\r\n\r\n\t\tself.mean = np.mean(self.data, axis=0)\r\n\t\tself.std = np.std(self.data, axis=0)\r\n\t\tself.norm_data = (self.data - self.mean) / self.std\r\n\r\n\t\tself.start_num_in_test = 0\r\n\r\n\tdef read_data(self):\r\n\t\tinit_data = pd.read_csv(self.config.train_data_path,\r\n\t\t usecols=self.config.feature_and_label_columns)\r\n\t\treturn init_data.values, init_data.columns.tolist()\r\n\r\n\tdef get_train_and_valid_data(self):\r\n\t\t# feature_data = self.norm_data[:self.train_num] # comment yqy\r\n\t\tfeature_data = self.norm_data[:self.train_num][:,1][:,np.newaxis] # add yqy\r\n\t\tlabel_data = self.norm_data[self.config.predict_day: self.config.predict_day + self.train_num,\r\n\t\t self.config.label_in_feature_columns]\r\n\t\tif not self.config.do_continue_train:\r\n\t\t\ttrain_x = [feature_data[i:i + self.config.time_step] for i in range(self.train_num - self.config.time_step)]\r\n\t\t\ttrain_y = [label_data[i:i 
+ self.config.time_step] for i in range(self.train_num - self.config.time_step)]\r\n\t\telse:\r\n\t\t\ttrain_x = [\r\n\t\t\t\tfeature_data[start_index + i * self.config.time_step: start_index + (i + 1) * self.config.time_step]\r\n\t\t\t\tfor start_index in range(self.config.time_step)\r\n\t\t\t\tfor i in range((self.train_num - start_index) // self.config.time_step)]\r\n\t\t\ttrain_y = [\r\n\t\t\t\tlabel_data[start_index + i * self.config.time_step: start_index + (i + 1) * self.config.time_step]\r\n\t\t\t\tfor start_index in range(self.config.time_step)\r\n\t\t\t\tfor i in range((self.train_num - start_index) // self.config.time_step)]\r\n\r\n\t\ttrain_x, train_y = np.array(train_x), np.array(train_y)\r\n\r\n\t\ttrain_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y, test_size=self.config.valid_data_rate,\r\n\t\t random_state=self.config.random_seed,\r\n\t\t shuffle=self.config.shuffle_train_data)\r\n\t\treturn train_x, valid_x, train_y, valid_y\r\n\r\n\tdef get_test_data(self, return_label_data=False):\r\n\t\tfeature_data = self.norm_data[self.train_num:]\r\n\t\tself.start_num_in_test = feature_data.shape[0] % self.config.time_step\r\n\t\ttime_step_size = feature_data.shape[0] // self.config.time_step\r\n\r\n\t\ttest_x = [feature_data[self.start_num_in_test + i * self.config.time_step: self.start_num_in_test + (\r\n\t\t\t\ti + 1) * self.config.time_step]\r\n\t\t for i in range(time_step_size)]\r\n\t\tif return_label_data:\r\n\t\t\tlabel_data = self.norm_data[self.train_num + self.start_num_in_test:, self.config.label_in_feature_columns]\r\n\t\t\treturn np.array(test_x), label_data\r\n\t\treturn np.array(test_x)\r\n\r\n\t# add yqy\r\n\tdef get_test_data_yqy(self, test_data_yqy=None):\r\n\t\tif test_data_yqy is None:\r\n\t\t\ttest_data_yqy = []\r\n\t\t# test_data_yqy=test_data_yqy[1:21]\r\n\t\tfeature_data=(test_data_yqy - self.mean) / self.std\r\n\t\ttest_x=[feature_data]\r\n\t\treturn np.array(test_x)\r\n\t# add end\r\n\r\n\r\ndef draw(config, 
origin_data, predict_norm_data):\r\n\tlabel_norm_data = origin_data.norm_data[origin_data.train_num + origin_data.start_num_in_test:,\r\n\t config.label_in_feature_columns]\r\n\tassert label_norm_data.shape[0] == predict_norm_data.shape[\r\n\t\t0], \"The element number in origin and predicted data is different\"\r\n\r\n\tlabel_name = [origin_data.data_column_name[i] for i in config.label_in_feature_columns]\r\n\tlabel_column_num = len(config.label_columns)\r\n\r\n\tloss = np.mean((label_norm_data[config.predict_day:] - predict_norm_data[:-config.predict_day]) ** 2, axis=0)\r\n\tprint(\"The mean squared error of stock {} is \".format(label_name), loss)\r\n\r\n\tlabel_X = range(origin_data.data_num - origin_data.train_num - origin_data.start_num_in_test)\r\n\tpredict_X = [x + config.predict_day for x in label_X]\r\n\r\n\tlabel_data = label_norm_data * origin_data.std[config.label_in_feature_columns] + \\\r\n\t origin_data.mean[config.label_in_feature_columns]\r\n\r\n\tpredict_data = predict_norm_data * origin_data.std[config.label_in_feature_columns] + \\\r\n\t origin_data.mean[config.label_in_feature_columns]\r\n\r\n\tprint(label_data)\r\n\tprint(\"____________________________________________\")\r\n\tprint(predict_data)\r\n\r\n\r\ndef draw_yqy(config, origin_data, predict_norm_data,mean_yqy,std_yqy):# 这里origin_data等同于test_data_values_yqy\r\n\tlabel_norm_data = (origin_data - mean_yqy) / std_yqy\r\n\tassert label_norm_data.shape[0] == predict_norm_data.shape[0], \"The element number in origin and predicted data is different\"\r\n\r\n\t#label_norm_data=label_norm_data[:,1]\r\n\tlabel_name = 'high'\r\n\tlabel_column_num = 1\r\n\r\n\tloss = np.mean((label_norm_data[config.predict_day:][:,1][:,np.newaxis] - predict_norm_data[:-config.predict_day][0:]) ** 2, axis=0)\r\n\r\n\t# loss = np.mean((label_norm_data[config.predict_day:][:,5][:,np.newaxis] - predict_norm_data[:-config.predict_day][0:]) ** 2, axis=0)\r\n\t# loss2 = 
np.mean((label_norm_data[config.predict_day:][:,6][:,np.newaxis] - predict_norm_data[:-config.predict_day][0:]) ** 2, axis=0)\r\n\t# loss3 = np.mean((label_norm_data[config.predict_day:][:,7][:,np.newaxis] - predict_norm_data[:-config.predict_day][0:]) ** 2, axis=0)\r\n\r\n\r\n\r\n\tprint(\"The mean squared error of stock {} is \".format(label_name), loss)\r\n\r\n\t# label_X = range(origin_data.data_num - origin_data.train_num - origin_data.start_num_in_test)\r\n\t# predict_X = [x + config.predict_day for x in label_X]\r\n\r\n\tlabel_data = label_norm_data[:,1] * std_yqy[1]+ mean_yqy[1]\r\n\r\n\tpredict_data = predict_norm_data * std_yqy[1]+ mean_yqy[1]\r\n\r\n\tprint(label_data)\r\n\tprint(predict_data)\r\n\t# print(label_data[-1])\r\n\t# print(predict_data[-1][0])\r\n\r\ndef main(config):\r\n\tnp.random.seed(config.random_seed)\r\n\tdata_gainer = Data(config)\r\n\r\n\t# add yqy\r\n\tmean_yqy=Data(config).mean\r\n\tstd_yqy=Data(config).std\r\n\t#add end\r\n\r\n\r\n\tif config.do_train:\r\n\t\ttrain_X, valid_X, train_Y, valid_Y = data_gainer.get_train_and_valid_data()\r\n\t\ttrain(config, train_X, train_Y, valid_X, valid_Y)\r\n\r\n\tif config.do_predict:\r\n\t\t# add yqy\r\n\t\ttest_data_yqy = pd.read_csv(\"./data/test_data.csv\",usecols=list([2,5]))\r\n\t\ttest_data_values_yqy=test_data_yqy.values\r\n\t\t# test_data_yqy=[104.3,104.39]\r\n\t\ttest_X =data_gainer.get_test_data_yqy(test_data_values_yqy)\r\n\t\t# add end\r\n\t\t# test_X, test_Y = data_gainer.get_test_data(return_label_data=True)# comment yqy\r\n\t\t# pred_result = predict(config, test_X)\r\n\t\tpred_result = predict(config,test_X[:,:,0][:,:,np.newaxis])\r\n\t\t# draw(config, data_gainer, pred_result)# comment yqy\r\n\t\tdraw_yqy(config, test_data_values_yqy, pred_result,mean_yqy,std_yqy)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\timport argparse\r\n\r\n\tparser = argparse.ArgumentParser()\r\n\t# parser.add_argument(\"-t\", \"--do_train\", default=False, type=bool, help=\"whether to train\")\r\n\t# 
parser.add_argument(\"-p\", \"--do_predict\", default=True, type=bool, help=\"whether to train\")\r\n\t# parser.add_argument(\"-b\", \"--batch_size\", default=64, type=int, help=\"batch size\")\r\n\t# parser.add_argument(\"-e\", \"--epoch\", default=20, type=int, help=\"epochs num\")\r\n\targs = parser.parse_args()\r\n\r\n\tcon = Config()\r\n\tfor key in dir(args):\r\n\t\tif not key.startswith(\"_\"):\r\n\t\t\tsetattr(con, key, getattr(args, key))\r\n\r\n\tmain(con)\r\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Tests for mobilenet_v2, branched from slim for fp16 performance study.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport copy\r\n\r\nimport tensorflow.compat.v1 as tf\r\n\r\nfrom models.tf1_only import mobilenet\r\nfrom models.tf1_only import mobilenet_conv_blocks as ops\r\nfrom models.tf1_only import mobilenet_v2\r\nfrom tensorflow.contrib import slim\r\n\r\n\r\ndef find_ops(optype):\r\n \"\"\"Find ops of a given type in graphdef or a graph.\r\n\r\n Args:\r\n optype: operation type (e.g. 
Conv2D)\r\n Returns:\r\n List of operations.\r\n \"\"\"\r\n gd = tf.get_default_graph()\r\n return [var for var in gd.get_operations() if var.type == optype]\r\n\r\n\r\nclass MobilenetV2Test(tf.test.TestCase):\r\n\r\n def setUp(self): # pylint: disable=g-missing-super-call\r\n tf.reset_default_graph()\r\n\r\n def testCreation(self):\r\n spec = dict(mobilenet_v2.V2_DEF)\r\n _, ep = mobilenet.mobilenet(\r\n tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)\r\n num_convs = len(find_ops('Conv2D'))\r\n\r\n # This is mostly a sanity test. No deep reason for these particular\r\n # constants.\r\n #\r\n # All but first 2 and last one have two convolutions, and there is one\r\n # extra conv that is not in the spec. (logits)\r\n self.assertEqual(num_convs, len(spec['spec']) * 2 - 2)\r\n # Check that depthwise are exposed.\r\n for i in range(2, 17):\r\n self.assertIn('layer_%d/depthwise_output' % i, ep)\r\n\r\n def testCreationNoClasses(self):\r\n spec = copy.deepcopy(mobilenet_v2.V2_DEF)\r\n net, ep = mobilenet.mobilenet(\r\n tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec,\r\n num_classes=None)\r\n self.assertIs(net, ep['global_pool'])\r\n\r\n def testImageSizes(self):\r\n for input_size, output_size in [(224, 7), (192, 6), (160, 5),\r\n (128, 4), (96, 3)]:\r\n tf.reset_default_graph()\r\n _, ep = mobilenet_v2.mobilenet(\r\n tf.placeholder(tf.float32, (10, input_size, input_size, 3)))\r\n\r\n self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3],\r\n [output_size] * 2)\r\n\r\n def testWithSplits(self):\r\n spec = copy.deepcopy(mobilenet_v2.V2_DEF)\r\n spec['overrides'] = {\r\n (ops.expanded_conv,): dict(split_expansion=2),\r\n }\r\n _, _ = mobilenet.mobilenet(\r\n tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)\r\n num_convs = len(find_ops('Conv2D'))\r\n # All but 3 op has 3 conv operatore, the remainign 3 have one\r\n # and there is one unaccounted.\r\n self.assertEqual(num_convs, len(spec['spec']) * 3 - 5)\r\n\r\n 
def testWithOutputStride8(self):\r\n out, _ = mobilenet.mobilenet_base(\r\n tf.placeholder(tf.float32, (10, 224, 224, 16)),\r\n conv_defs=mobilenet_v2.V2_DEF,\r\n output_stride=8,\r\n scope='MobilenetV2')\r\n self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])\r\n\r\n def testDivisibleBy(self):\r\n tf.reset_default_graph()\r\n mobilenet_v2.mobilenet(\r\n tf.placeholder(tf.float32, (10, 224, 224, 16)),\r\n conv_defs=mobilenet_v2.V2_DEF,\r\n divisible_by=16,\r\n min_depth=32)\r\n s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]\r\n s = set(s)\r\n self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576, 960, 1280,\r\n 1001], s)\r\n\r\n def testDivisibleByWithArgScope(self):\r\n tf.reset_default_graph()\r\n # Verifies that depth_multiplier arg scope actually works\r\n # if no default min_depth is provided.\r\n with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):\r\n mobilenet_v2.mobilenet(\r\n tf.placeholder(tf.float32, (10, 224, 224, 2)),\r\n conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)\r\n s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]\r\n s = set(s)\r\n self.assertSameElements(s, [32, 192, 128, 1001])\r\n\r\n def testFineGrained(self):\r\n tf.reset_default_graph()\r\n # Verifies that depth_multiplier arg scope actually works\r\n # if no default min_depth is provided.\r\n\r\n mobilenet_v2.mobilenet(\r\n tf.placeholder(tf.float32, (10, 224, 224, 2)),\r\n conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01,\r\n finegrain_classification_mode=True)\r\n s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]\r\n s = set(s)\r\n # All convolutions will be 8->48, except for the last one.\r\n self.assertSameElements(s, [8, 48, 1001, 1280])\r\n\r\n def testMobilenetBase(self):\r\n tf.reset_default_graph()\r\n # Verifies that mobilenet_base returns pre-pooling layer.\r\n with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):\r\n net, _ = 
mobilenet_v2.mobilenet_base(\r\n tf.placeholder(tf.float32, (10, 224, 224, 16)),\r\n conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)\r\n self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128])\r\n\r\n def testWithOutputStride16(self):\r\n tf.reset_default_graph()\r\n out, _ = mobilenet.mobilenet_base(\r\n tf.placeholder(tf.float32, (10, 224, 224, 16)),\r\n conv_defs=mobilenet_v2.V2_DEF,\r\n output_stride=16)\r\n self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])\r\n\r\n def testWithOutputStride8AndExplicitPadding(self):\r\n tf.reset_default_graph()\r\n out, _ = mobilenet.mobilenet_base(\r\n tf.placeholder(tf.float32, (10, 224, 224, 16)),\r\n conv_defs=mobilenet_v2.V2_DEF,\r\n output_stride=8,\r\n use_explicit_padding=True,\r\n scope='MobilenetV2')\r\n self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])\r\n\r\n def testWithOutputStride16AndExplicitPadding(self):\r\n tf.reset_default_graph()\r\n out, _ = mobilenet.mobilenet_base(\r\n tf.placeholder(tf.float32, (10, 224, 224, 16)),\r\n conv_defs=mobilenet_v2.V2_DEF,\r\n output_stride=16,\r\n use_explicit_padding=True)\r\n self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])\r\n\r\n def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):\r\n sc = mobilenet.training_scope(is_training=None)\r\n self.assertNotIn('is_training', sc[slim.arg_scope_func_key(\r\n slim.batch_norm)])\r\n\r\n def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):\r\n sc = mobilenet.training_scope(is_training=False)\r\n self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])\r\n sc = mobilenet.training_scope(is_training=True)\r\n self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])\r\n sc = mobilenet.training_scope()\r\n self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.disable_v2_behavior()\r\n tf.test.main()\r\n" ]
[ [ "tensorflow.compat.v1.concat", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.nn.bidirectional_dynamic_rnn", "numpy.random.randint", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.multiply", "numpy.argmax", "tensorflow.compat.v1.nn.ctc_loss", "tensorflow.compat.v1.layers.batch_normalization", "tensorflow.compat.v1.transpose", "tensorflow.compat.v1.nn.dynamic_rnn", "tensorflow.compat.v1.nn.softmax", "tensorflow.compat.v1.reduce_mean", "tensorflow.python.ops.variables.VariableV1", "tensorflow.compat.v1.keras.backend.ctc_label_dense_to_sparse", "tensorflow.compat.v1.layers.dense", "tensorflow.compat.v1.keras.backend.epsilon", "tensorflow.compat.v1.convert_to_tensor", "tensorflow.compat.v1.to_float", "tensorflow.compat.v1.squeeze", "tensorflow.compat.v1.pad" ], [ "pandas.read_csv", "numpy.random.seed", "sklearn.model_selection.train_test_split", "numpy.std", "numpy.mean", "numpy.array" ], [ "tensorflow.compat.v1.get_default_graph", "tensorflow.contrib.slim.arg_scope", "tensorflow.compat.v1.test.main", "tensorflow.compat.v1.disable_v2_behavior", "tensorflow.compat.v1.placeholder", "tensorflow.contrib.slim.arg_scope_func_key", "tensorflow.compat.v1.reset_default_graph" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kevinwss/Deep-SAD-Baseline
[ "b704725cc44ab5e7aa9bb09503a4c5f244fa907b" ]
[ "src/datasets/mnist.py" ]
[ "from torch.utils.data import Subset\nfrom PIL import Image\nfrom torchvision.datasets import MNIST\nfrom base.torchvision_dataset import TorchvisionDataset\nfrom .preprocessing import create_semisupervised_setting\n\nimport torch\nimport torchvision.transforms as transforms\nimport random\n\nimport numpy as np\n\nclass MNIST_Dataset(TorchvisionDataset):\n\n def __init__(self, root: str, normal_class: int = 0, known_outlier_class: int = 1, n_known_outlier_classes: int = 0,\n ratio_known_normal: float = 0.0, ratio_known_outlier: float = 0.0, ratio_pollution: float = 0.0):\n super().__init__(root)\n\n # Define normal and outlier classes\n self.n_classes = 2 # 0: normal, 1: outlier\n '''\n self.normal_classes = tuple([normal_class])\n self.outlier_classes = list(range(0, 10))\n self.outlier_classes.remove(normal_class)\n self.outlier_classes = tuple(self.outlier_classes)\n '''\n self.normal_classes = tuple([0,1,2,3,4,5,6,7,8,9])\n self.outlier_classes = []\n\n\n if n_known_outlier_classes == 0:\n self.known_outlier_classes = ()\n elif n_known_outlier_classes == 1:\n self.known_outlier_classes = tuple([known_outlier_class])\n else:\n self.known_outlier_classes = tuple(random.sample(self.outlier_classes, n_known_outlier_classes))\n\n # MNIST preprocessing: feature scaling to [0, 1]\n transform = transforms.ToTensor()\n #target_transform = transforms.Lambda(lambda x: int(x in self.outlier_classes))\n target_transform = None\n\n \n # Get train set\n train_set = MyMNIST(root=self.root, train=True, transform=transform, target_transform=target_transform,\n download=True)\n\n # Create semi-supervised setting\n idx, _, semi_targets = create_semisupervised_setting(train_set.targets.cpu().data.numpy(), self.normal_classes,\n self.outlier_classes, self.known_outlier_classes,\n ratio_known_normal, ratio_known_outlier, ratio_pollution)\n print(\"semi_targets\",len(semi_targets))\n print(\"idx\",len(idx))\n #train_set.semi_targets[idx] = torch.tensor(semi_targets) # set respective 
semi-supervised labels\n\n # Subset train_set to semi-supervised setup\n self.train_set = Subset(train_set, idx)\n\n # Get test set\n self.test_set = MyMNIST(root=self.root, train=False, transform=transform, target_transform=target_transform,\n download=True)\n\n\nclass MyMNIST(MNIST):\n \"\"\"\n Torchvision MNIST class with additional targets for the semi-supervised setting and patch of __getitem__ method\n to also return the semi-supervised target as well as the index of a data sample.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(MyMNIST, self).__init__(*args, **kwargs)\n\n self.semi_targets = torch.zeros_like(self.targets)\n self.anomaly_rate = 0.05\n self.semi_label_rate = 0.01\n\n def get_anomaly(anomaly_data):\n n_anomaly = len(anomaly_data)\n dim = 28\n #print(\"anomaly_data\",anomaly_data.shape)\n a1,a2 = anomaly_data[:n_anomaly//2,:dim//2,:],anomaly_data[:n_anomaly//2,dim//2:,:]\n b1,b2 = anomaly_data[n_anomaly//2:,:dim//2,:],anomaly_data[n_anomaly//2:,dim//2:,:]\n\n #print(\"a1\",a1.shape)\n #print(\"b2\",b2.shape)\n anomaly_data1 = np.concatenate((a1,b2),axis = 1)\n anomaly_data2 = np.concatenate((b1,a2),axis = 1)\n anomaly_data = np.concatenate((anomaly_data1,anomaly_data2),axis = 0)\n return anomaly_data\n\n if not self.train:\n #pass\n test_data_normal = self.test_data[:9000,:,:]\n test_data_anomaly = get_anomaly(self.test_data[9000:,:,:])\n\n data = np.concatenate((test_data_normal,test_data_anomaly),axis = 0)\n targets = np.array([0]*(len(test_data_normal)) + [1]*len(test_data_anomaly))\n #np.random.seed(1)\n #np.random.shuffle(data)\n #np.random.seed(1)\n #np.random.shuffle(targets)\n\n self.data = torch.from_numpy(data)\n self.targets = torch.from_numpy(targets)\n\n else:\n n_train = len(self.train_data)\n n_normal = n_train - int(self.anomaly_rate*n_train)\n n_anomaly = int(self.anomaly_rate*n_train)\n normal_train = self.train_data[:n_normal,:,:]\n tobe_anomaly_train = self.train_data[n_normal:,:,:]\n 
print(\"normal_train\",len(normal_train))\n print(\"tobe_anomaly_train\",len(tobe_anomaly_train))\n\n anomaly_train = get_anomaly(tobe_anomaly_train)\n print(\"anomaly_train\",len(anomaly_train))\n\n data = np.concatenate((normal_train,anomaly_train),axis = 0)\n\n semi_target = np.array([0 for _ in range(n_normal-50)] + [1 for _ in range(50)] + [-1 for _ in range(50)] + [0 for _ in range(n_anomaly)])\n self.semi_target = torch.from_numpy(semi_target)\n #np.random.seed(1)\n #np.random.shuffle(data)\n #print(\"data\",len(data))\n #print(\"self.data\",len(self.data))\n self.data = torch.from_numpy(data)\n\n def __getitem__(self, index):\n \"\"\"Override the original method of the MNIST class.\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target, semi_target, index)\n \"\"\"\n img, target, semi_target = self.data[index], int(self.targets[index]), int(self.semi_targets[index])\n #img, target, semi_target = self.data[index], int(self.targets[index]), int(self.targets[index])\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img.numpy(), mode='L')\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target, semi_target, index\n" ]
[ [ "torch.utils.data.Subset", "torch.from_numpy", "torch.zeros_like", "numpy.concatenate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
palatinuse/feast
[ "7d8126b5d934683469c16fd715f5dc11a6307e6b" ]
[ "sdk/python/feast/job.py" ]
[ "from typing import List\nfrom urllib.parse import urlparse\n\nimport fastavro\nimport grpc\nimport pandas as pd\nfrom google.protobuf.json_format import MessageToJson\n\nfrom feast.constants import CONFIG_TIMEOUT_KEY\nfrom feast.constants import FEAST_DEFAULT_OPTIONS as defaults\nfrom feast.core.CoreService_pb2 import ListIngestionJobsRequest\nfrom feast.core.CoreService_pb2_grpc import CoreServiceStub\nfrom feast.core.IngestionJob_pb2 import IngestionJob as IngestJobProto\nfrom feast.core.IngestionJob_pb2 import IngestionJobStatus\nfrom feast.core.Store_pb2 import Store\nfrom feast.feature_set import FeatureSetRef\nfrom feast.serving.ServingService_pb2 import (\n DATA_FORMAT_AVRO,\n JOB_STATUS_DONE,\n GetJobRequest,\n)\nfrom feast.serving.ServingService_pb2 import Job as JobProto\nfrom feast.serving.ServingService_pb2_grpc import ServingServiceStub\nfrom feast.source import Source\nfrom feast.staging.storage_client import get_staging_client\nfrom feast.wait import wait_retry_backoff\nfrom tensorflow_metadata.proto.v0 import statistics_pb2\n\n# Maximum no of seconds to wait until the retrieval jobs status is DONE in Feast\n# Currently set to the maximum query execution time limit in BigQuery\nDEFAULT_TIMEOUT_SEC: int = 21600\n\n# Maximum no of seconds to wait before reloading the job status in Feast\nMAX_WAIT_INTERVAL_SEC: int = 60\n\n\nclass RetrievalJob:\n \"\"\"\n A class representing a job for feature retrieval in Feast.\n \"\"\"\n\n def __init__(\n self,\n job_proto: JobProto,\n serving_stub: ServingServiceStub,\n auth_metadata_plugin: grpc.AuthMetadataPlugin = None,\n ):\n \"\"\"\n Args:\n job_proto: Job proto object (wrapped by this job object)\n serving_stub: Stub for Feast serving service\n auth_metadata_plugin: plugin to fetch auth metadata\n \"\"\"\n self.job_proto = job_proto\n self.serving_stub = serving_stub\n self.auth_metadata = auth_metadata_plugin\n\n @property\n def id(self):\n \"\"\"\n Getter for the Job Id\n \"\"\"\n return 
self.job_proto.id\n\n @property\n def status(self):\n \"\"\"\n Getter for the Job status from Feast Core\n \"\"\"\n return self.job_proto.status\n\n def reload(self):\n \"\"\"\n Reload the latest job status\n Returns: None\n \"\"\"\n self.job_proto = self.serving_stub.GetJob(\n GetJobRequest(job=self.job_proto),\n metadata=self.auth_metadata.get_signed_meta() if self.auth_metadata else (),\n ).job\n\n def get_avro_files(self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])):\n \"\"\"\n Wait until job is done to get the file uri to Avro result files on\n Google Cloud Storage.\n\n Args:\n timeout_sec (int):\n Max no of seconds to wait until job is done. If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n str: Google Cloud Storage file uris of the returned Avro files.\n \"\"\"\n\n def try_retrieve():\n self.reload()\n return None, self.status == JOB_STATUS_DONE\n\n wait_retry_backoff(\n retry_fn=try_retrieve,\n timeout_secs=timeout_sec,\n timeout_msg=\"Timeout exceeded while waiting for result. Please retry \"\n \"this method or use a longer timeout value.\",\n )\n\n if self.job_proto.error:\n raise Exception(self.job_proto.error)\n\n if self.job_proto.data_format != DATA_FORMAT_AVRO:\n raise Exception(\n \"Feast only supports Avro data format for now. Please check \"\n \"your Feast Serving deployment.\"\n )\n\n return [urlparse(uri) for uri in self.job_proto.file_uris]\n\n def result(self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])):\n \"\"\"\n Wait until job is done to get an iterable rows of result. The row can\n only represent an Avro row in Feast 0.3.\n\n Args:\n timeout_sec (int):\n Max no of seconds to wait until job is done. 
If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n Iterable of Avro rows.\n \"\"\"\n uris = self.get_avro_files(timeout_sec)\n for file_uri in uris:\n file_obj = get_staging_client(file_uri.scheme).download_file(file_uri)\n file_obj.seek(0)\n avro_reader = fastavro.reader(file_obj)\n\n for record in avro_reader:\n yield record\n\n def to_dataframe(\n self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])\n ) -> pd.DataFrame:\n \"\"\"\n Wait until a job is done to get an iterable rows of result. This method\n will split the response into chunked DataFrame of a specified size to\n to be yielded to the instance calling it.\n\n Args:\n max_chunk_size (int):\n Maximum number of rows that the DataFrame should contain.\n\n timeout_sec (int):\n Max no of seconds to wait until job is done. If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n pd.DataFrame:\n Pandas DataFrame of the feature values.\n \"\"\"\n records = [r for r in self.result(timeout_sec=timeout_sec)]\n return pd.DataFrame.from_records(records)\n\n def to_chunked_dataframe(\n self,\n max_chunk_size: int = -1,\n timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY]),\n ) -> pd.DataFrame:\n \"\"\"\n Wait until a job is done to get an iterable rows of result. This method\n will split the response into chunked DataFrame of a specified size to\n to be yielded to the instance calling it.\n\n Args:\n max_chunk_size (int):\n Maximum number of rows that the DataFrame should contain.\n\n timeout_sec (int):\n Max no of seconds to wait until job is done. 
If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n pd.DataFrame:\n Pandas DataFrame of the feature values.\n \"\"\"\n\n # Object is Avro row type object, refer to self.result function for this type\n records: List[dict] = []\n\n # Max chunk size defined by user\n for result in self.result(timeout_sec=timeout_sec):\n result.append(records)\n if len(records) == max_chunk_size:\n df = pd.DataFrame.from_records(records)\n records.clear() # Empty records array\n yield df\n\n # Handle for last chunk that is < max_chunk_size\n if not records:\n yield pd.DataFrame.from_records(records)\n\n def __iter__(self):\n return iter(self.result())\n\n def statistics(\n self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])\n ) -> statistics_pb2.DatasetFeatureStatisticsList:\n \"\"\"\n Get statistics computed over the retrieved data set. Statistics will only be computed for\n columns that are part of Feast, and not the columns that were provided.\n\n Args:\n timeout_sec (int):\n Max no of seconds to wait until job is done. 
If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n DatasetFeatureStatisticsList containing statistics of Feast features over the retrieved dataset.\n \"\"\"\n self.get_avro_files(timeout_sec) # wait for job completion\n if self.job_proto.error:\n raise Exception(self.job_proto.error)\n return self.job_proto.dataset_feature_statistics_list\n\n\nclass IngestJob:\n \"\"\"\n Defines a job for feature ingestion in feast.\n \"\"\"\n\n def __init__(\n self,\n job_proto: IngestJobProto,\n core_stub: CoreServiceStub,\n auth_metadata_plugin: grpc.AuthMetadataPlugin = None,\n ):\n \"\"\"\n Construct a native ingest job from its protobuf version.\n\n Args:\n job_proto: Job proto object to construct from.\n core_stub: stub for Feast CoreService\n auth_metadata_plugin: plugin to fetch auth metadata\n \"\"\"\n self.proto = job_proto\n self.core_svc = core_stub\n self.auth_metadata = auth_metadata_plugin\n\n def reload(self):\n \"\"\"\n Update this IngestJob with the latest info from Feast\n \"\"\"\n # pull latest proto from feast core\n response = self.core_svc.ListIngestionJobs(\n ListIngestionJobsRequest(\n filter=ListIngestionJobsRequest.Filter(id=self.id)\n ),\n metadata=self.auth_metadata.get_signed_meta() if self.auth_metadata else (),\n )\n self.proto = response.jobs[0]\n\n @property\n def id(self) -> str:\n \"\"\"\n Getter for IngestJob's job id.\n \"\"\"\n return self.proto.id\n\n @property\n def external_id(self) -> str:\n \"\"\"\n Getter for IngestJob's external job id.\n \"\"\"\n self.reload()\n return self.proto.external_id\n\n @property\n def status(self) -> IngestionJobStatus: # type: ignore\n \"\"\"\n Getter for IngestJob's status\n \"\"\"\n self.reload()\n return self.proto.status\n\n @property\n def feature_sets(self) -> List[FeatureSetRef]:\n \"\"\"\n Getter for the IngestJob's feature sets\n \"\"\"\n # convert featureset protos to native objects\n return [\n FeatureSetRef.from_proto(fs) for fs in self.proto.feature_set_references\n 
]\n\n @property\n def source(self) -> Source:\n \"\"\"\n Getter for the IngestJob's data source.\n \"\"\"\n return Source.from_proto(self.proto.source)\n\n @property\n def stores(self) -> List[Store]:\n \"\"\"\n Getter for the IngestJob's target feast store.\n \"\"\"\n return list(self.proto.stores)\n\n def wait(self, status: IngestionJobStatus, timeout_secs: int = 300): # type: ignore\n \"\"\"\n Wait for this IngestJob to transtion to the given status.\n Raises TimeoutError if the wait operation times out.\n\n Args:\n status: The IngestionJobStatus to wait for.\n timeout_secs: Maximum seconds to wait before timing out.\n \"\"\"\n # poll & wait for job status to transition\n wait_retry_backoff(\n retry_fn=(lambda: (None, self.status == status)), # type: ignore\n timeout_secs=timeout_secs,\n timeout_msg=\"Wait for IngestJob's status to transition timed out\",\n )\n\n def __str__(self):\n # render the contents of ingest job as human readable string\n self.reload()\n return str(MessageToJson(self.proto))\n\n def __repr__(self):\n # render the ingest job as human readable string\n return f\"IngestJob<{self.id}>\"\n" ]
[ [ "pandas.DataFrame.from_records" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]